metadata (dict) | text (string, lengths 60 – 3.49M chars) |
---|---|
{
"source": "JohnAD/flask-track-usage-mongoengine-demo",
"score": 2
} |
#### File: JohnAD/flask-track-usage-mongoengine-demo/app.py
```python
from flask import (
    Flask,
    render_template,
    g
)
import mongoengine as me
from flask_track_usage import TrackUsage
from flask_track_usage.storage.mongo import MongoEngineStorage
from flask_track_usage.summarization import sumUrl, sumUserAgent
#########################
#
# SETUP FLASK and JINJA2
#
#########################
app = Flask(__name__)
def datetimeformat(value, format='%Y-%m-%d %H:%M'):
    return value.strftime(format)
app.jinja_env.filters['datetime'] = datetimeformat
#########################
#
# SETUP MONGOENGINE
#
#########################
me.connect("example_website")
#########################
#
# SETUP FLASK_TRACK_USAGE
#
#########################
app.config['TRACK_USAGE_USE_FREEGEOIP'] = False
app.config['TRACK_USAGE_INCLUDE_OR_EXCLUDE_VIEWS'] = 'exclude'
traffic_storage = MongoEngineStorage(hooks=[sumUrl, sumUserAgent])
t = TrackUsage(app, [traffic_storage])
#########################
#
# PUBLIC ROUTES
#
#########################
@app.route('/')
def index():
    g.track_var["something"] = 99
    return render_template('index.html')
@app.route('/page1')
def page_one():
    g.track_var["something"] = 34
    return render_template('other_page.html', page_number=1)
@app.route('/page2')
def page_two():
    return render_template('other_page.html', page_number=2)
##########################
#
# ADMIN ROUTES
#
##########################
@t.exclude
@app.route('/admin/last20.html')
def last_twenty():
    visits = traffic_storage.get_usage(limit=20)
    return render_template('last20.html', visits=visits)
@t.exclude
@app.route('/admin/last_url.html')
def last_url():
    stats = traffic_storage.get_sum(sumUrl, limit=30, target="http://127.0.0.1:5000/page1")
    return render_template('last_url.html', stats=stats)
@t.exclude
@app.route('/admin/last_useragent.html')
def last_useragent():
    stats = traffic_storage.get_sum(sumUserAgent, limit=40)
    return render_template('last_useragent.html', stats=stats)
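# Minimal sketch for running the demo locally (debug mode and the default
# host/port here are assumptions for local testing):
if __name__ == '__main__':
    app.run(debug=True)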
``` |
{
"source": "JohnAD/NimLime",
"score": 2
} |
#### File: core/commands/hotkeys.py
```python
import sublime
from sublime_plugin import EventListener
from NimLime.core import settings
class HotkeySyncer(EventListener):
    """Synchronizes certain settings keys with view settings."""
    setting_entries = (
        ("doccontinue.enabled", True),
        ("doccontinue.autostop", True)
    )
    def sync_on_change(self):
        view = sublime.active_window().active_view()
        self.sync(view)
    def sync(self, view):
        view_settings = view.settings()
        for key, default in self.setting_entries:
            value = settings.get(key, default)
            view_settings.set(key, value)
    def on_new(self, view):
        self.sync(view)
    def on_activated(self, view):
        self.sync(view)
    def on_clone(self, view):
        self.sync(view)
    def on_load(self, view):
        self.sync(view)
```
#### File: core/commands/nimcheck.py
```python
import os.path
import re
import subprocess
from collections import namedtuple
import sublime
from sublime_plugin import ApplicationCommand, EventListener
from NimLime.core import configuration
from NimLime.core.utils.error_handler import catch_errors
from NimLime.core.utils.misc import (
busy_frames, display_process_error, get_next_method, loop_status_msg,
run_process, send_self, trim_region, view_has_nim_syntax
)
from NimLime.core.utils.mixins import NimLimeMixin, NimLimeOutputMixin
# Constants
ERROR_REGION_TAG = 'NimCheckError'
WARN_REGION_TAG = 'NimCheckWarn'
ERROR_REGION_MARK = 'dot'
ERROR_REGION_STYLE = sublime.DRAW_OUTLINED
ERROR_MSG_FORMAT = '({0},{1}): {2}: {3}'.format
MAX_CONTEXT_LINES = 3
MESSAGE_REGEX = re.compile(
(r"""
^
# File Name
(?P<file_name> {ANYTHING}+)
# Line and Column Number
{SPACE}*
\(
(?P<line_number> {INTEGER}+)
{SPACE}* {COMMA} {SPACE}+
(?P<column_number> {INTEGER}+)
\)
# Message Type and Content
{SPACE}*
(?P<message_type> {LETTER}+)
{SPACE}* {COLON} {SPACE}*
(?P<message> {ANYTHING}+)
# Optional Context
(?P<context> (\n {SPACE}+ {ANYTHING}+)*)
""".format(
SPACE = r'\s',
INTEGER = r'\d',
ANYTHING = r'.',
COMMA = r',',
LETTER = r'\w',
COLON = r':',
)),
flags=re.MULTILINE | re.IGNORECASE | re.VERBOSE
)
# ## Functions ## #
NimCheckEntry = namedtuple(
'NimCheckEntry',
[
'file_name',
'line_number',
'column_number',
'message_type',
'message',
'entire',
]
)
# Functions to run "nim check"
def parse_nimcheck(output):
entry_list = []
for match in MESSAGE_REGEX.finditer(output):
entry = NimCheckEntry(
file_name = match.group('file_name'),
line_number = int(match.group('line_number')) - 1,
column_number = int(match.group('column_number')) - 1,
message_type = match.group('message_type'),
message = match.group('message'),
entire = match.group(0),
)
entry_list.append(entry)
return entry_list
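# Illustrative input for this regex (the file name and message are made-up
# examples of the "nim check" output format it targets):
#   lib.nim(12, 5) Error: undeclared identifier: 'foo'
# would yield a NimCheckEntry with file_name='lib.nim', line_number=11,
# column_number=4 (both converted to zero-based), message_type='Error'.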
@send_self
@catch_errors
def run_nimcheck(file_path, callback, verbosity, disabled_hints, extra_args):
this = yield
verbosity_opt = '--verbosity:{}'.format(verbosity)
hint_opts = (
'--hint[{}]:off'.format(x) for x in disabled_hints
)
command = [configuration.nim_exe, 'check', verbosity_opt]
command.extend(hint_opts)
command.extend(extra_args)
command.append(file_path)
process, stdout, stderr, error = yield run_process(
command, this.send,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=os.path.dirname(file_path)
)
if error:
display_process_error(error, 'Nim Check Failed', 'Nim')
yield callback(None)
entries = parse_nimcheck(stdout)
sublime.status_message('run_nimcheck:Nim Check Finished.')
yield callback(entries)
# Functions to store "nim check" results
class NimCheckViewEntries(EventListener):
"""Contains and cleans up regions created by the 'nim check' commands."""
entries = {}
def on_close(self, view):
self.entries.pop(view.id(), None)
def store_nimcheck_entries(view, errors, warnings):
view_id = view.id()
entry_store = {}
if errors is not None:
for entry in errors:
entry_store[entry.line_number] = entry
if warnings is not None:
for entry in warnings:
entry_store[entry.line_number] = entry
NimCheckViewEntries.entries[view_id] = entry_store
def retrieve_nimcheck_entries(view):
return NimCheckViewEntries.entries.get(view.id())
def remove_nimcheck_entries(view):
view.erase_regions(ERROR_REGION_TAG)
view.erase_regions(WARN_REGION_TAG)
NimCheckViewEntries.entries.pop(view.id(), None)
# Commands
class NimClearErrors(NimLimeMixin, ApplicationCommand):
"""Clears error and warning marks generated by the Nim check commands."""
settings_selector = 'check.clear_errors'
requires_nim_syntax = False
def __init__(self, *args, **kwargs):
ApplicationCommand.__init__(self, *args, **kwargs)
NimLimeMixin.__init__(self, *args, **kwargs)
@catch_errors
def run(self, *args, **varargs):
view = sublime.active_window().active_view()
remove_nimcheck_entries(view)
sublime.status_message('Cleared Nim Check Errors & Hints')
class NimDisplayErrorInStatus(EventListener):
"""Displays errors/warnings in the status bar, when a region is clicked."""
def get_current_entry(self, view):
selections = view.sel()
if len(selections) > 1:
return None
entries = retrieve_nimcheck_entries(view)
if entries is None:
return None
selected_point = selections[0].end()
line_number = view.rowcol(selected_point)[0]
return entries.get(line_number)
def on_selection_modified(self, view):
view = sublime.active_window().active_view()
entry = self.get_current_entry(view)
if entry is not None:
message = "NimCheck: " + entry.message
sublime.status_message(message)
class NimCheckCurrentView(NimLimeOutputMixin, ApplicationCommand):
"""Checks the current Nim file for errors."""
requires_nim_syntax = True
settings_selector = 'check.current_file'
setting_entries = (
NimLimeOutputMixin.setting_entries,
('verbosity', '{0}.verbosity', 2),
('disabled_hints', '{0}.disabled_hints', []),
('extra_args', '{0}.extra_args', []),
('highlight_errors', '{0}.highlight_errors', True),
('highlight_warnings', '{0}.highlight_warnings', True),
('include_context', '{0}.list.include_context', True),
('list_errors', '{0}.list.show_errors', True),
('list_warnings', '{0}.list.show_warnings', True),
('move_cursor', '{0}.list.move_cursor', True)
)
@send_self
@catch_errors
def run(self, *args, **varargs):
this = yield
window = sublime.active_window()
view = window.active_view()
view_name = os.path.split(view.file_name() or view.name())[1]
remove_nimcheck_entries(view)
frames = ['Running Nim Check' + f for f in busy_frames]
stop_status_loop = loop_status_msg(frames, 0.15)
# Save view text
if view.is_dirty():
view.run_command('save')
# Run 'nim check' on the current view and retrieve the output.
# project_file = get_nim_project(window, view) or view.file_name()
entries = yield run_nimcheck(
file_path = view.file_name(),
callback = this.send,
verbosity = self.verbosity,
disabled_hints = self.disabled_hints,
extra_args = self.extra_args
)
yield stop_status_loop(get_next_method(this))
if entries is None:
sublime.status_message('Nim Check Failed.')
yield
sublime.status_message('run:Nim Check Finished.')
self.highlight_and_list_entries(entries, window, view)
if self.send_output:
gen = (m[5] for m in entries if view_name == m[0])
content = '\n'.join(gen)
self.write_to_output(content, view)
yield
def display_entries(self, view,
quick_message_list, point_list,
list_entries, highlight_entries):
entries = []
region_list = []
while True:
entry = yield
if entry is None:
yield entries, region_list
entry_point = view.text_point(
entry.line_number,
entry.column_number
)
if list_entries:
quick_message = entry.entire.split('\n')
if self.include_context:
line_count = len(quick_message)
del quick_message[MAX_CONTEXT_LINES:line_count]
for i in range(line_count, MAX_CONTEXT_LINES):
quick_message.append('')
else:
quick_message = quick_message[0]
entries.append(entry)
point_list.append(entry_point)
quick_message_list.append(quick_message)
# For highlighting
if highlight_entries:
message_region = trim_region(view, view.line(entry_point))
region_list.append(message_region)
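# display_entries() is used as a coroutine: the caller primes it with
# send(None), feeds it one entry at a time with send(entry), and finally
# sends None again to receive the accumulated (entries, region_list) pair
# (see highlight_and_list_entries below).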
def highlight_and_list_entries(self, entries, window, view):
"""Highlight and list entries gathered from `nim check` output."""
view_name = os.path.split(view.file_name() or view.name())[1]
# Instantiate entry list containers
if self.list_errors or self.list_warnings:
quick_message_list = []
point_list = []
error_entries = self.display_entries(
view=view,
quick_message_list=quick_message_list,
point_list=point_list,
list_entries=self.list_errors,
highlight_entries=self.highlight_errors
)
warn_entries = self.display_entries(
view=view,
quick_message_list=quick_message_list,
point_list=point_list,
list_entries=self.list_warnings,
highlight_entries=self.highlight_warnings
)
# Instantiate entry highlighting errors
error_entries.send(None)
warn_entries.send(None)
for entry in entries:
# makes it work when `--listFullPaths` is in user config
# TODO: Make this more robust if/when multiple names are allowed,
# PENDING https://github.com/nim-lang/Nim/pull/8614
file_name = entry.file_name
file_name = os.path.basename(file_name)
if file_name.lower() != view_name.lower():
continue
# Determine whether the entry should be highlighted/listed
if entry.message_type == 'Error':
error_entries.send(entry)
else:
warn_entries.send(entry)
error_entries, error_region_list = error_entries.send(None)
warn_entries, warn_region_list = warn_entries.send(None)
store_nimcheck_entries(view, warn_entries, error_entries)
if error_region_list:
view.add_regions(
ERROR_REGION_TAG,
error_region_list,
'invalid.illegal',
ERROR_REGION_MARK,
ERROR_REGION_STYLE
)
if warn_region_list:
view.add_regions(
WARN_REGION_TAG,
warn_region_list,
'invalid.deprecated',
ERROR_REGION_MARK,
ERROR_REGION_STYLE
)
if self.list_errors or self.list_warnings:
def _goto_error(choice):
if choice != -1:
chosen_point = point_list[choice]
view.show(chosen_point)
if self.move_cursor:
view.sel().clear()
view.sel().add(sublime.Region(chosen_point))
flag = 0
if self.include_context:
flag = sublime.MONOSPACE_FONT
window.show_quick_panel(quick_message_list, _goto_error, flag)
class NimCheckOnSaveListener(NimCheckCurrentView, EventListener):
"""Runs the Nim Check command when the current file is saved."""
settings_selector = 'check.on_save'
def on_post_save(self, view):
view = sublime.active_window().active_view()
if self.enabled and view_has_nim_syntax(view):
self.run()
```
#### File: core/commands/project.py
```python
import os
from sublime_plugin import WindowCommand
from NimLime.core.utils.error_handler import catch_errors
from NimLime.core.utils.mixins import NimLimeMixin
from NimLime.core.utils.project import _get_project_file, set_nim_project
class SetProjectCommand(NimLimeMixin, WindowCommand):
    """Sets the main Nim file for the current Sublime Text project."""
    enabled = True
    settings_selector = 'project'
    def __init__(self, *args, **kwargs):
        NimLimeMixin.__init__(self)
        WindowCommand.__init__(self, *args, **kwargs)
    @catch_errors
    def run(self):
        # Retrieve path of project
        st_project = _get_project_file(self.window.id())
        if st_project is not None:
            active_view = self.window.active_view()
            filename = active_view.file_name()
            directory = os.path.dirname(st_project)
            relative_path = os.path.relpath(filename, directory)
            # Set input file
            name, extension = os.path.splitext(relative_path)
            if extension.lower() == '.nim':
                set_nim_project(st_project, relative_path)
```
#### File: core/utils/project.py
```python
import json
import os
import platform
import re
import sublime
# Based off of code from https://github.com/facelessuser/FavoriteFiles/
def _get_project_file(win_id):
session_data = None
# Construct the base settings paths
auto_save_session_path = os.path.join(
sublime.packages_path(),
'..',
'Settings',
'Auto Save Session.sublime_session'
)
regular_session_path = os.path.join(
sublime.packages_path(),
'..',
'Settings',
'Session.sublime_session'
)
# Try loading the session data from one of the files
for session_path in (auto_save_session_path, regular_session_path):
try:
with open(session_path) as session_file:
session_data = json.load(session_file, strict=False)
break
except (IOError, ValueError):
continue
if session_data is None:
return None
# Find the window data corresponding with the given ID
project = _find_project_in_data(session_data, win_id) or ''
# Throw out empty project names
if re.match('.*\\.sublime-project', project) or os.path.exists(project):
return project
return None
def _find_project_in_data(session_data, win_id):
# Iterates through the given session data, searching for the window
# with the given ID, and returning the project path associated with the
# window.
for window in session_data.get('windows', ()):
if window.get('window_id') == win_id and 'workspace_name' in window:
project = window['workspace_name']
if platform.system() == 'Windows':
project = os.path.normpath(
project.lstrip('/').replace('/', ':/', 1)
)
return project
return None
def set_nim_project(st_project, nim_path):
"""
Associate a nim project file with the current sublime project.
:type st_project: str
:type nim_path: str
"""
if st_project is not None:
with open(st_project, 'r+') as project_file:
data = json.loads(project_file.read())
data['settings']['nim-project'] = nim_path.replace('\\', '/')
project_file.seek(0)
project_file.truncate()
project_file.write(
json.dumps(data, indent=4)
)
def get_nim_project(window, view):
"""
Given a window and view, return the Nim project associated with it.
:type window: sublime.Window
:type view: sublime.View
:rtype: str
"""
st_project = _get_project_file(window.id())
result = view.file_name()
if st_project is not None:
with open(st_project) as project_file:
data = json.loads(project_file.read())
try:
path = data['settings']['nim-project']
# Get full path
directory = os.path.dirname(st_project)
path = path.replace('/', os.sep)
result = os.path.join(directory, path)
except (IOError, KeyError):
pass
return result
``` |
{
"source": "JohnAdriaan/uPHue",
"score": 3
} |
#### File: JohnAdriaan/uPHue/bridge.py
```python
import os
import socket
import json
import http.client as httplib
from uPHue import *
class Bridge(object):
""" Interface to the Hue ZigBee bridge
"""
def __init__(self, ip=None, username=None, config_file_path=None):
""" Initialization function.
Parameters:
------------
ip : string
IP address as dotted quad
username : string, optional
"""
if config_file_path is not None:
self.config_file_path = config_file_path
else:
self.config_file_path = os.path.join(os.getcwd(), '.python_hue')
self.ip = ip
self.username = username
if username is not None:
self.api = '/api/' + username
self._name = None
# self.minutes = 600 # these do not seem to be used anywhere?
# self.seconds = 10
self.connect()
@property
def name(self):
'''Get or set the name of the bridge [string]'''
self._name = self.get('/config')['name']
return self._name
@name.setter
def name(self, value):
self._name = value
data = {'name': self._name}
self.put('/config', data)
def get(self, req):
return self.request('GET', self.api + req)
def put(self, req, data):
return self.request('PUT', self.api + req, data)
def post(self, req, data):
return self.request('POST', self.api + req, data)
def delete(self, req):
return self.request('DELETE', self.api + req)
def request(self, mode='GET', address=None, data=None):
""" Utility function for HTTP GET/PUT requests for the API"""
connection = httplib.HTTPConnection(self.ip, timeout=10)
try:
if mode == 'GET' or mode == 'DELETE':
connection.request(mode, address)
if mode == 'PUT' or mode == 'POST':
connection.request(mode, address, json.dumps(data))
logger.debug("{0} {1} {2}".format(mode, address, str(data)))
except socket.timeout:
error = "{} Request to {}{} timed out.".format(mode, self.ip, address)
logger.exception(error)
raise PhueRequestTimeout(None, error)
result = connection.getresponse()
response = result.read()
connection.close()
response = response.decode('utf-8')
logger.debug(response)
return json.loads(response)
def get_ip_address(self, set_result=False):
""" Get the bridge ip address from the meethue.com nupnp api """
connection = httplib.HTTPSConnection('www.meethue.com')
connection.request('GET', '/api/nupnp')
logger.info('Connecting to meethue.com/api/nupnp')
result = connection.getresponse()
data = json.loads(str(result.read(), encoding='utf-8'))
""" close connection after read() is done, to prevent issues with read() """
connection.close()
ip = str(data[0]['internalipaddress'])
if ip != '':
if set_result:
self.ip = ip
return ip
else:
return False
def register_app(self):
""" Register this computer with the Hue bridge hardware and save the resulting access token """
registration_request = {"devicetype": "python_hue"}
response = self.request('POST', '/api', registration_request)
for line in response:
for key in line:
if 'success' in key:
with open(self.config_file_path, 'w') as f:
logger.info(
'Writing configuration file to ' + self.config_file_path)
f.write(json.dumps({self.ip: line['success']}))
logger.info('Reconnecting to the bridge')
self.connect()
if 'error' in key:
error_type = line['error']['type']
if error_type == 101:
raise PhueRegistrationException(error_type,
'The link button has not been pressed in the last 30 seconds.')
if error_type == 7:
raise PhueException(error_type,
'Unknown username')
def connect(self):
""" Connect to the Hue bridge """
logger.info('Attempting to connect to the bridge...')
# If the ip and username were provided at class init
if self.ip is not None and self.username is not None:
logger.info('Using ip: ' + self.ip)
logger.info('Using username: ' + self.username)
return
if self.ip is None or self.username is None:
try:
with open(self.config_file_path) as f:
config = json.loads(f.read())
if self.ip is None:
self.ip = list(config.keys())[0]
logger.info('Using ip from config: ' + self.ip)
else:
logger.info('Using ip: ' + self.ip)
if self.username is None:
self.username = config[self.ip]['username']
self.api = '/api/' + self.username
logger.info(
'Using username from config: ' + self.username)
else:
logger.info('Using username: ' + self.username)
except Exception as e:
logger.info(
'Error opening config file, will attempt bridge registration')
self.register_app()
def get_api(self):
""" Returns the full api dictionary """
return self.get('')
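# Illustrative usage sketch (the IP address and username are placeholders):
#   b = Bridge(ip='192.168.1.2', username='<token saved by register_app>')
#   print(b.name)                          # GET /api/<username>/config, read 'name'
#   b.put('/lights/1/state', {'on': True})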
```
#### File: JohnAdriaan/uPHue/schedule.py
```python
from uPHue import *
class Schedule(object):
""" This is merely a container for `Schedule.Bridge`"""
class Bridge(object):
def __init__(self, bridge):
self.bridge = bridge
# Schedules #####
def get_schedule(self, schedule_id=None, parameter=None):
if schedule_id is None:
return self.bridge.get('/schedules')
if parameter is None:
return self.bridge.get('/schedules/' + str(schedule_id))
def create_schedule(self, name, time, light_id, data, description=' '):
schedule = {
'name': name,
'localtime': time,
'description': description,
'command':
{
'method': 'PUT',
'address': (self.bridge.api +
'/lights/' + str(light_id) + '/state'),
'body': data
}
}
return self.bridge.post('/schedules', schedule)
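# Illustrative call (time, light ID and body are example values):
#   create_schedule('Wake up', '2021-06-01T07:00:00', 1, {'on': True, 'bri': 254})
# posts a schedule whose command targets <api>/lights/1/state with that body.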
def set_schedule_attributes(self, schedule_id, attributes):
"""
:param schedule_id: The ID of the schedule
:param attributes: Dictionary with attributes and their new values
"""
return self.bridge.put('/schedules/' + str(schedule_id), data=attributes)
def create_group_schedule(self, name, time, group_id, data, description=' '):
schedule = {
'name': name,
'localtime': time,
'description': description,
'command':
{
'method': 'PUT',
'address': (self.bridge.api +
'/groups/' + str(group_id) + '/action'),
'body': data
}
}
return self.bridge.post('/schedules', schedule)
def delete_schedule(self, schedule_id):
return self.bridge.delete('/schedules/' + str(schedule_id))
``` |
{
"source": "JohnAgapeyev/7402-ass2",
"score": 3
} |
#### File: JohnAgapeyev/7402-ass2/main.py
```python
import math
import sys
import enchant
import time
import itertools
from multiprocessing import Pool
def decryptMessage(key, message):
# Determine the number of columns
nCols = math.ceil(len(message) / key)
# Determine the number of rows
nRows = key
# Determine the unused cells
nUnused = (nCols * nRows) - len(message)
# Each string in plaintext represents a column in the grid.
plaintext = [''] * nCols
# row and col point to the location of the next character in the ciphertext
row = col = 0
for symbol in message:
plaintext[col] += symbol
col += 1 # point to next column
# If it reaches the last column in the row, or at an unused cell, start processing the next row
if (col == nCols) or (col == nCols - 1 and row >= nRows - nUnused):
col = 0
row += 1
return ''.join(plaintext)
def encryptMessage (key, message):
# Each string in ciphertext represents a column in the grid.
ciphertext = [''] * key
# Iterate through each column in ciphertext.
for col in range (key):
pointer = col
# process the complete length of the plaintext
while pointer < len (message):
# Place the character at pointer in message at the end of the
# current column in the ciphertext list.
ciphertext[col] += message[pointer]
# move pointer over
pointer += key
# Convert the ciphertext list into a single string value and return it.
return ''.join (ciphertext)
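# Worked example with key 2: encryptMessage(2, 'HELLO') reads the columns
# H,L,O and E,L to give 'HLOEL'; decryptMessage(2, 'HLOEL') rebuilds the
# 3-column grid (one unused cell) and returns 'HELLO'.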
def try_key(args):
key, ciphertext = args
count = 0
for word in decryptMessage(key, ciphertext).split():
if len(word) > 5 and dictionary.check(word):
count += 1
return count
if len(sys.argv) != 3:
print('Usage: ./main.py <plaintext_filename> <keysize>')
sys.exit(1)
data = open(sys.argv[1]).read()
keylen = int(sys.argv[2])
if keylen <= 0:
print('Key size must be greater than zero')
sys.exit(1)
if keylen >= len(data):
print('Key size must be smaller than the length of the plaintext')
sys.exit(1)
dictionary = enchant.Dict("en_US")
ciphertext = encryptMessage(keylen, data)
print("Length of the ciphertext: ", len(ciphertext))
res_list = Pool().map(try_key, zip(range(1, len(ciphertext)), itertools.repeat(ciphertext)))
best_key = res_list.index(max(res_list)) + 1
print(decryptMessage(best_key, ciphertext))
print("Best key was", best_key)
``` |
{
"source": "JohnAgapeyev/7402-ass5",
"score": 3
} |
#### File: JohnAgapeyev/7402-ass5/feistel.py
```python
import sys
import secrets
import random
import matplotlib.pyplot as plt
import numpy as np
from Crypto.Cipher import AES
from Crypto.Hash import HMAC
from Crypto.Hash import SHA256
round_count = 8
#Master secret key
#Keeping it fixed for simplicity, and to avoid having to pad/KDF the thing
K = bytearray("yellow submarine", 'utf8')
def pkcs7_pad(x):
padding = 16 - ((len(x) % 16 != 0) * (len(x) % 16))
return x + bytes([padding]) * padding
def pkcs7_strip(x):
for i in range(x[-1]):
if x[-(i + 1)] != x[-1]:
raise ValueError('Input is not padded or padding is corrupt')
return x[:-x[-1]]
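# Examples: pkcs7_pad(b'A' * 15) appends a single 0x01 byte, while a 16-byte
# input gains a full block of sixteen 0x10 bytes; pkcs7_strip() reverses both
# and raises ValueError if the trailing padding bytes disagree.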
#This is completely arbitrary, and bad
def easy(i, k, x):
x = bytearray(x)
for j in range(len(x)):
x[j] = (x[j] * i) & 0xff
x[j] = (x[j] << k[i]) & 0xff
return x
def rotate_byte(x, n):
return ((x << n) | (x >> (8 - n))) & 0xff;
#This is solidly amateur, but I obviously lack the capability to analyze/break it
def medium(i, k, x):
x = bytearray(x)
random.Random(i).shuffle(x)
#Since I know this will be 8 bytes, I can use it for bitslicing majority function
for j in range(len(x)):
for n in range(8):
count = 0
for elem in x:
count += (elem & (1 << j)) != 0
x[j] ^= (-(count >= 0) ^ x[j]) & (1 << n)
x[j] = (x[j] + x[i]) & 0xff
x[j] = rotate_byte(x[j], i)
x[j] = x[j] ^ k[j]
x[j] = rotate_byte(x[j], 3)
x[j] = (x[j] + k[j+8]) & 0xff
for kb in k:
x[j] = rotate_byte(((x[j] ^ kb) + 0x3a) & 0xff, 7)
random.Random(j).shuffle(x)
random.Random(-i).shuffle(x)
return x
#This is actually secure, just a waste of time
def hard(i, k, x):
return bytearray(AES.new(k, AES.MODE_CTR, nonce=bytes([i])*8).encrypt(bytearray(x)))
def easy_subkey(master):
k = []
for i in range(round_count):
x = bytearray([a ^ b for (a,b) in zip(master, [i]*16)])
k.append(x);
return k
def medium_subkey(master):
k = []
for i in range(round_count):
tmp_master = master
for j in range(len(tmp_master)):
random.Random(j).shuffle(tmp_master)
tmp_master[j] = rotate_byte(tmp_master[j], tmp_master[i] % 8)
tmp_master[j] = tmp_master[j] ^ 0xc3
random.Random(j).shuffle(tmp_master)
tmp_master[j] = rotate_byte(tmp_master[len(tmp_master) - j - 1], random.Random(sum(tmp_master)).getrandbits(3))
tmp_master[j] = (tmp_master[j] + (i * 176)) & 0xff
random.Random(i).shuffle(tmp_master)
k.append(bytearray(tmp_master));
return k
def hard_subkey(master):
k = []
for i in range(round_count):
h = HMAC.new(master, digestmod=SHA256)
h.update(bytearray([i]*16))
k.append(bytearray(h.digest()[:16]))
return k
subkey_generator = hard_subkey
round_function = hard
def round(i, k, L, R):
return R, [a ^ b for (a,b) in zip(L, round_function(i, k, R))]
def process_block(B, rounds, subkeys):
#Split the block
L, R = B[:8], B[8:]
for j in rounds:
L, R = round(j, subkeys[j], L, R)
return bytearray(R + L)
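# Classic Feistel layout: each round maps (L, R) to (R, L xor F(R)) and
# process_block() swaps the halves once at the end, so decryption reuses the
# same code with the round indices reversed (see ecb_decrypt below).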
def ecb_encrypt(plain, subkeys):
#i is block num
for i in range(len(plain) // 16):
start_block = i * 16
end_block = start_block + 16
#Grab the block
B = plain[start_block : end_block]
B = process_block(B, range(round_count), subkeys)
#Write the block back
plain[start_block : end_block] = B
return plain
def ecb_decrypt(plain, subkeys):
#i is block num
for i in range(len(plain) // 16):
start_block = i * 16
end_block = start_block + 16
#Grab the block
B = plain[start_block : end_block]
B = process_block(B, reversed(range(round_count)), subkeys)
#Write the block back
plain[start_block : end_block] = B
plain = pkcs7_strip(plain)
return plain
def cbc_encrypt(plain, subkeys, iv=None):
iv = iv if iv else bytearray(secrets.randbits(128).to_bytes(16, sys.byteorder))
plain = iv + plain
prev = iv
for i in range(1, len(plain) // 16):
start_block = i * 16
end_block = start_block + 16
#Grab the block
B = plain[start_block : end_block]
#Xor the iv in
B = [a ^ b for (a,b) in zip(B, prev)]
B = process_block(B, range(round_count), subkeys)
#Save the resulting block as the "new" iv
prev = B
#Write the block back
plain[start_block : end_block] = B
return plain, iv
def cbc_decrypt(plain, subkeys):
if len(plain) < 32:
raise ValueError('Input is not padded or does not contain an IV')
iv = plain[:16]
prev = iv
#i is block num
for i in range(1, len(plain) // 16):
start_block = i * 16
end_block = start_block + 16
#Grab the block
PB = plain[start_block : end_block]
B = process_block(PB, reversed(range(round_count)), subkeys)
#Xor the iv in
B = [a ^ b for (a,b) in zip(B, prev)]
#Save the resulting block as the "new" iv
prev = PB
#Write the block back
plain[start_block : end_block] = B
plain = pkcs7_strip(plain)
return plain[16:]
def ctr_encrypt(plain, subkeys, iv=None):
iv = iv if iv else secrets.randbits(128)
plain = bytearray(iv.to_bytes(16, sys.byteorder)) + plain
#i is block num
for i in range(1, len(plain) // 16):
start_block = i * 16
end_block = start_block + 16
#Grab the block
B = plain[start_block : end_block]
iv_block = bytearray((iv + i).to_bytes(16, sys.byteorder))
encrypted_block = process_block(iv_block, range(round_count), subkeys)
#Xor the ciphertext in
B = bytearray([a ^ b for (a,b) in zip(B, encrypted_block)])
#Write the block back
plain[start_block : end_block] = B
return plain, iv
def ctr_decrypt(plain, subkeys):
if len(plain) < 32:
raise ValueError('Input is not padded or does not contain an IV')
iv = int.from_bytes(plain[:16], byteorder=sys.byteorder, signed=False)
#i is block num
for i in range(1, len(plain) // 16):
start_block = i * 16
end_block = start_block + 16
#Grab the block
B = plain[start_block : end_block]
iv_block = bytearray((iv + i).to_bytes(16, sys.byteorder))
encrypted_block = process_block(iv_block, range(round_count), subkeys)
#Xor the ciphertext in
B = bytearray([a ^ b for (a,b) in zip(B, encrypted_block)])
#Write the block back
plain[start_block : end_block] = B
plain = pkcs7_strip(plain)
return plain[16:]
encrypt_function = ctr_encrypt
decrypt_function = ctr_decrypt
def run_test(mode, data):
original = data
#this is the base test case
K = bytearray("yellow submarine", 'utf8')
k = subkey_generator(K)
#if there is an iv use it to seed the next call
encrypt = None
iv = None
if mode in ['ctr','cbc']:
encrypt, iv = encrypt_function(bytearray(original), k)
else:
encrypt = encrypt_function(bytearray(original), k)
#flip one bit in the key and reencrypt
K = bytearray("yellow sucmarine", 'utf8')
k = subkey_generator(K)
encrypt_key1 = None
if iv:
encrypt_key1 = encrypt_function(bytearray(original), k, iv)
else:
encrypt_key1 = encrypt_function(bytearray(original), k)
def m1_n_avg(base, compare):
m1 = []
for index, byte in enumerate(base):
if byte in compare:
start = compare.index(byte)
matching = 0
for i in range(index, len(base)):
if base[i] == compare[i]:
matching += 1
else:
break
m1.append(matching)
aml1 = sum(m1)/len(m1) if m1 else 0
return m1, aml1
#how many preserved multi byte sequences are there
m1, aml1 = m1_n_avg(original, encrypt)
m2, aml2 = m1_n_avg(encrypt, encrypt_key1)
return m1, aml1, m2, aml2
funcmap ={
"ecb":(ecb_encrypt, ecb_decrypt),
"ctr":(ctr_encrypt, ctr_decrypt),
"cbc":(cbc_encrypt, cbc_decrypt),
"e":(easy_subkey, easy),
"m":(medium_subkey, medium),
"h":(hard_subkey, hard)
}
if __name__ == '__main__':
def print_help():
print('''usage:
./feistel.py [function] [mode] [quality] [input filename] [output filename]
function is 'e' for encrypt, 'd' for decrypt
mode is 'ecb' for ecb, 'cbc' for cbc, and 'ctr' for ctr
quality is 'e' for easy, 'm' for medium, 'h' for hard
./feistel.py t [input filename]
runs automated tests to compare the different modes using a given file''')
sys.exit(1)
if len(sys.argv) == 1:
print_help()
if sys.argv[1] == 't' and len(sys.argv) == 3:
data = pkcs7_pad(bytearray(open(sys.argv[2], 'rb').read()))
ms = []
mks = []
ms2 = []
mks2 = []
names=[]
for mode in ['ecb', 'ctr', 'cbc']:
for diff in ['e', 'm', 'h']:
encrypt_function, decrypt_function = funcmap[mode]
subkey_generator, round_function = funcmap[diff]
m1, aml1, m2, aml2 = run_test(mode, bytearray(data))
ms.append(len(m1))
mks.append(aml1)
ms2.append(len(m2))
mks2.append(aml2)
names.append(f'{mode}:{diff}')
print(f'''
__/{mode} {diff}\__
==>diffusion<==
matches: {len(m1)}
average match len: {aml1}
==>confusion<==
matches: {len(m2)}
average match len: {aml2}
''')
plt.style.use('dark_background')
ind = np.arange(9)
sp = plt.subplot(2,1,1)
plt.title("Matches Per Mode")
pm1 = plt.bar(ind, ms2, width=0.5, log=True, label='Diffusion')
pm2 = plt.bar(ind+0.5, ms, width=0.5, log=True, label='Confusion')
sp.set_xticks(ind+0.5)
sp.set_xticklabels(names)
plt.legend([pm1, pm2], ['Confusion', 'Diffusion'])
sp2 = plt.subplot(2,1,2)
plt.title("Average Match Length")
pl1 = plt.bar(ind, mks2, width=0.5, log=True, label='Diffusion')
pl2 = plt.bar(ind+0.5, mks, width=0.5, log=True, label='Confusion')
sp2.set_xticks(ind+0.5)
sp2.set_xticklabels(names)
plt.legend([pl1, pl2], ['Confusion', 'Diffusion'])
plt.show()
sys.exit(0)
if len(sys.argv[1:]) != 5:
print_help()
if sys.argv[2] in funcmap:
encrypt_function, decrypt_function = funcmap[sys.argv[2]]
else:
print_help()
if sys.argv[3] in funcmap:
subkey_generator, round_function = funcmap[sys.argv[3]]
else:
print_help()
k = subkey_generator(K)
if sys.argv[1] == 'e':
P = pkcs7_pad(bytearray(open(sys.argv[4], 'rb').read()))
if sys.argv[2] in ('cbc', 'ctr'):
P = encrypt_function(P, k)[0]
else:
P = encrypt_function(P, k)
with open(sys.argv[5], 'wb') as out:
out.write(P)
elif sys.argv[1] == 'd':
P = bytearray(open(sys.argv[4], 'rb').read())
if len(P) % 16 != 0:
raise ValueError('Ciphertext is not a valid length, it must be corrupted')
P = decrypt_function(P, k)
with open(sys.argv[5], 'wb') as out:
out.write(P)
else:
print_help()
``` |
{
"source": "JohnAgapeyev/7402-lab2",
"score": 3
} |
#### File: JohnAgapeyev/7402-lab2/lab.py
```python
def f(i, k, x):
    return ((2 * i * k)**x) % 15
L = 0b0010
R = 0b1000
print("Encryption plaintext {} {}".format(bin(L), bin(R)))
#Round 1 encrypt
X = f(1, 7, R)
print("Post round 1 f() output {}".format(bin(X)));
#Xor output
X = L ^ X
print("Post round 1 xor {} {}".format(bin(L), bin(R)));
#Swap
L = R
R = X
print("Post round 1 swap {} {}".format(bin(L), bin(R)));
#Round 2 encrypt
X = f(2, 7, R)
print("Post round 2 f() output {}".format(bin(X)));
#Xor output
L ^= X
print("Post round 2 xor {} {}".format(bin(L), bin(R)));
#No swap on last round
print("Encrypt Output {} {}".format(bin(L), bin(R)))
print("Decryption")
#Round 1 Decrypt
X = f(2, 7, R)
print("Post decrypt round 1 f() output {}".format(bin(X)));
#Xor output
X = L ^ X
print("Post decrypt round 1 xor {} {}".format(bin(L), bin(R)));
#Swap
L = R
R = X
print("Post decrypt round 1 swap {} {}".format(bin(L), bin(R)));
#Round 2 decrypt
X = f(1, 7, R)
print("Post decrypt round 2 f() output {}".format(bin(X)));
#Xor output
L ^= X
print("Decrypt output {} {}".format(bin(L), bin(R)))
``` |
{
"source": "JohnAgapeyev/binary_diff",
"score": 2
} |
#### File: JohnAgapeyev/binary_diff/diff.py
```python
import sys
import os
import getopt
import csv
import json
import itertools
import zipfile
import tarfile
import binwalk
import collections
from heapq import nsmallest
from collections import defaultdict
import tlsh
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing.dummy import Pool
from sklearn.cluster import *
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn.externals import joblib
pool = Pool()
def usage():
print("python3 ./diff.py [file directory] [metadata file]")
def from_matrix_to_vector(i, j, N):
if i <= j:
return i * N - (i - 1) * i / 2 + j - i
else:
return j * N - (j - 1) * j / 2 + i - j
def partition_hashes(hash_list, file_list):
output = {}
for h in hash_list:
filename = file_list[hash_list.index(h)]
quartile_range = int(h[8:10], 16)
if quartile_range not in output:
output[quartile_range] = [(filename, h)]
else:
output[quartile_range].append((filename, h))
return output
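# The bucket key is the byte at hex offsets 8:10 of the TLSH digest, so
# get_n_closest_partial() below only diffs hashes that share that byte,
# trading some recall for far fewer pairwise comparisons.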
#Values are from the TLSH paper
def convert_dist_to_confidence(d):
if d < 30:
return 99.99819
elif d < 40:
return 99.93
elif d < 50:
return 99.48
elif d < 60:
return 98.91
elif d < 70:
return 98.16
elif d < 80:
return 97.07
elif d < 90:
return 95.51
elif d < 100:
return 93.57
elif d < 150:
return 75.67
elif d < 200:
return 49.9
elif d < 250:
return 30.94
elif d < 300:
return 20.7
else:
return 0
def lsh_json(data):
filename = data[0]
meta = []
print(filename)
if not data[1] or data[1] == None:
pass
else:
stuff = [d for d in data[1] if d['filename'] == os.path.basename(filename)]
if stuff:
if len(stuff) >= 1:
stuff = stuff[0]
[meta.extend([k,v]) for k,v in stuff.items()]
[meta.extend([k,v]) for k,v in meta[3].items()]
del meta[3]
[meta.extend([k,v]) for k,v in meta[-1].items()]
del meta[-3]
[meta.extend([k,v]) for k,v in meta[-4].items()]
del meta[-6]
if os.path.getsize(filename) < 256:
raise ValueError("{} must be at least 256 bytes".format(filename))
if tarfile.is_tarfile(filename):
with tarfile.open(filename, 'r') as tar:
for member in tar.getmembers():
if not member or member.size < 256:
continue
try:
meta.append(tlsh.hash(tar.extractfile(member).read()))
if use_binwalk:
for module in binwalk.scan(tar.extractfile(member).read(), signature=True, quiet=True):
for result in module.results:
meta.append(str(result.file.path))
meta.append(str(result.offset))
meta.append(str(result.description))
except:
continue
elif zipfile.is_zipfile(filename):
try:
with zipfile.ZipFile(filename) as z:
for member in z.infolist():
if not member or member.file_size < 256:
continue
try:
with z.read(member) as zipdata:
meta.append(tlsh.hash(zipdata))
if use_binwalk:
for module in binwalk.scan(zipdata):
for result in module.results:
meta.append(str(result.file.path))
meta.append(str(result.offset))
meta.append(str(result.description))
except:
continue
except:
pass
if use_binwalk:
for module in binwalk.scan(filename, signature=True, quiet=True):
for result in module.results:
meta.append(str(result.file.path))
meta.append(str(result.offset))
meta.append(str(result.description))
file_hash = tlsh.hash(open(filename, 'rb').read())
if not meta:
return file_hash
else:
return tlsh.hash(str.encode(file_hash + ''.join(map(str, meta))))
def diff_hash(one, two):
return tlsh.diff(one, two)
def list_files(directory):
f = []
for (dirpath, _, filenames) in os.walk(directory):
for name in filenames:
f.append(os.path.join(dirpath, name))
return f
def parse_metadata(filename):
contents = []
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
#Remove the md5 and sha1 hashes since they're useless to me
contents.append(row[:-2])
return contents[1:]
def parse_metadata_json(filename):
with open(filename, 'r') as jsonfile:
metadata = json.load(jsonfile)
for obj in metadata:
del obj['MD5']
del obj['SHA1']
del obj['SHA256']
del obj['SHA512']
obj['filename'] = obj['Properties'].pop('FileName')
return metadata
def flatten(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def get_n_closest(n, filenames, adjacency):
closest = {}
for f in filenames:
elem = adjacency[filenames.index(f)]
smallest_dists = nsmallest(n + 1, elem)
smallest_files = []
old_dist = 0
for d in smallest_dists:
#Ignore the file listing itself
if d == 0:
continue
elif d == old_dist:
continue
old_dist = d
if smallest_dists.count(d) > 1:
prev = 0
for i in range(smallest_dists.count(d)):
dist_filename = smallest_dists.index(d, prev)
smallest_files.append((d, filenames[dist_filename]))
prev = dist_filename + 1
continue;
#Filename indices are analogous to adjacency indices
smallest_files.append((d, filenames[smallest_dists.index(d)]))
closest[f] = smallest_files
return closest
def get_partition_entry(partition_hashes, new_hash):
return partition_hashes[int(new_hash[8:10], 16)]
def get_n_closest_partial(n, hash_partition, hash_list):
closest = {}
for h in hash_list:
entry = get_partition_entry(hash_partition, h)
elem = []
filename = ""
for k,v in entry:
d = diff_hash(h, v)
if d > 0:
elem.append((d, k))
else:
filename = k
elem.sort(key=lambda tup: tup[0])
smallest_files = []
for i in range(len(elem)):
if i + 1 > n:
break
smallest_files.append(elem[i])
closest[filename] = smallest_files
return closest
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:m:bn:t", ["help", "directory", "metadata", "binwalk", "number", "test"])
except getopt.GetoptError as err:
print(err) # will print something like "option -a not recognized"
usage()
exit(2)
directory = ""
meta = ""
use_binwalk = False
n = 10
use_existing = False
for o, a in opts:
if o in ("-d", "--directory"):
directory = a
elif o in ("-h", "--help"):
usage()
exit()
elif o in ("-m", "--metadata"):
meta = a
elif o in ("-b", "--binwalk"):
use_binwalk = True
elif o in ("-n", "--number"):
n = int(a)
elif o in ("-t", "--test"):
use_existing = True
if not directory:
print("Program must be provided a file directory path")
exit(1)
file_list = list_files(directory)
hash_list = []
if meta:
meta_contents = parse_metadata_json(meta)
else:
meta_contents = None
hash_list = [lsh_json(x) for x in zip(file_list, itertools.repeat(meta_contents))]
if use_existing:
file_data = np.load(".tmp.npz")
#See https://stackoverflow.com/questions/22315595/saving-dictionary-of-header-information-using-numpy-savez for why this syntax is needed
clustered_files = file_data['clusters'][()]
cluster_hashes = file_data['hash_list']
ms = joblib.load('.tmp2.pkl')
adj = np.zeros((len(hash_list), len(cluster_hashes)), int)
#Compare new file hashes against saved data to get distances
for i in range(len(hash_list)):
for j in range(len(cluster_hashes)):
adj[i][j] = diff_hash(hash_list[i], cluster_hashes[j]);
cluster_labels = ms.predict(adj)
for f in file_list:
#Label of the predicted file cluster
lab = cluster_labels[file_list.index(f)]
if lab not in clustered_files:
print("{} does not belong to any existing cluster".format(f))
continue
clus = clustered_files[lab]
print("Target file {} is in cluster {}".format(f, lab))
for c in clus:
print(c)
#Empty line to separate cluster print outs
print()
exit()
else:
adj = np.zeros((len(hash_list), len(hash_list)), int)
for i in range(len(hash_list)):
for j in range(len(hash_list)):
d = diff_hash(hash_list[i], hash_list[j]);
adj[i][j] = d
adj[j][i] = d
best_cluster_count = 0
best_silhouette_score = -1.0
def cl(data):
i, adj = data
print("Trying cluster count {}".format(i))
return metrics.silhouette_score(adj, MiniBatchKMeans(n_clusters=i).fit_predict(adj))
#Calculate the best cluster count in parallel
silhouette_list = Pool().map(cl, zip(range(2, 16), itertools.repeat(adj)))
best_cluster_count = silhouette_list.index(max(silhouette_list)) + 2
ms = MiniBatchKMeans(n_clusters=best_cluster_count)
cluster_labels = ms.fit_predict(adj)
clustered_files = {}
for f in file_list:
lab = cluster_labels[file_list.index(f)]
if lab in clustered_files:
clustered_files[lab].append(f)
else:
clustered_files[lab] = [f]
print(clustered_files)
np.savez(".tmp", clusters=clustered_files, hash_list=hash_list)
joblib.dump(ms, '.tmp2.pkl')
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
plt.figure(1)
plt.clf()
colors = itertools.cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(adj[my_members, 0], adj[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
```
#### File: JohnAgapeyev/binary_diff/learn.py
```python
import sys
import os
import getopt
import math
import itertools
import collections
import numpy as np
import tensorflow as tf
from PIL import Image
from multiprocessing.dummy import Pool
#os.system("rm -rf /tmp/cnn_model")
def cnn_model_fn(features, labels, mode):
input_layer = tf.reshape(features["x"], [-1, 256, 256, 1])
#Turns 1024x1024 to 1024x1024
#Would be 1020x1020 if padding wasn't used
#Actually 256x256
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=5,
padding='same',
activation=tf.nn.relu
)
#Turns 1024x1024 to 512x512
#Actually 256x256 to 128x128
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2,2], strides=2)
#Would turn 512x512 into 508x508, but doesn't due to padding
#Actually 128x128
#conv2 = tf.layers.conv2d(
#inputs=pool1,
#filters=64,
#kernel_size=5,
#padding='same',
#activation=tf.nn.relu
#)
#Turns 512x512 to 256x256
#Actually 64x64
#pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2,2], strides=2)
#Turns 256x256 to 256x256
#Actually 64x64
#conv3 = tf.layers.conv2d(
#inputs=pool2,
#filters=128,
#kernel_size=5,
#padding='same',
#activation=tf.nn.relu
#)
#Turns 256x256 to 128x128
#Actually 32x32
#pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2,2], strides=2)
#pool3_flat = tf.reshape(pool1, [-1, 64*64*128])
pool3_flat = tf.reshape(pool1, [-1, 256*64*32])
#pool3_flat = tf.reshape(pool3, [-1, 32*32*128])
#pool3_flat = tf.reshape(pool2, [-1, 64*64*64])
dense = tf.layers.dense(inputs=pool3_flat, units=1024, activation=tf.nn.relu)
dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
logits = tf.layers.dense(inputs=dropout, units=3)
#logits = tf.layers.dense(inputs=dense, units=3)
predictions = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
labels = tf.squeeze(labels)
print(logits.shape)
print(labels.shape)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
#loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
#loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(labels=labels, predictions=predictions["classes"])
}
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
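# Shape walkthrough of the active layers above: input 256x256x1 -> conv1
# ('same' padding, 32 filters) 256x256x32 -> pool1 (2x2, stride 2)
# 128x128x32 -> flattened to 128*128*32 = 524288 values (written above as
# 256*64*32, which is the same number) -> dense 1024 -> logits over 3 classes.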
def get_all_files(directory):
f = []
for (dirpath, _, filenames) in os.walk(directory):
for name in filenames:
f.append(os.path.join(dirpath, name))
return f
def parser(record, label):
keys_to_features = {
"x": tf.VarLenFeature(tf.uint8),
}
#parsed = tf.parse_single_example(record, keys_to_features)
record = tf.cast(record, tf.float16)
print(record)
print(label)
# Perform additional preprocessing on the parsed data.
#image = tf.image.decode_jpeg(parsed["image_data"])
#label = tf.cast(parsed["label"], tf.int32)
return {"x": record}, label
tf.enable_eager_execution()
#ten = []
#lab = []
#for arg in get_all_files(sys.argv[1]):
#if "Schneider" in arg:
##lab.append("Schneider")
#lab.append(1)
#elif "Siemens" in arg:
##lab.append("Siemens")
#lab.append(2)
#else:
##lab.append("None")
#lab.append(0)
#data = np.fromfile(arg, np.uint8)
#file_width = math.ceil(math.sqrt(len(data)))
#data.resize((file_width, file_width))
#t = tf.convert_to_tensor(data)
#t = tf.expand_dims(t, -1)
#t = tf.image.resize_images(t, (1024,1024), tf.image.ResizeMethod.NEAREST_NEIGHBOR)
#ten.append(t)
#dataset = tf.data.Dataset.from_tensors((ten, lab))
#
#dataset = dataset.map(parser)
#
#dataset = dataset.shuffle(10000).batch(3)
#it = dataset.make_one_shot_iterator()
#for e in it:
#print(e)
#exit()
#ten = []
#for arg in sys.argv:
#data = np.fromfile(arg, np.uint8)
#file_width = math.ceil(math.sqrt(len(data)))
#data.resize((file_width, file_width))
#t = tf.convert_to_tensor(data)
#t = tf.expand_dims(t, -1)
#t = tf.image.resize_images(t, (1024,1024), tf.image.ResizeMethod.NEAREST_NEIGHBOR)
#ten.append(t)
#dataset = tf.data.Dataset.from_tensors(ten)
#for e in dataset.make_one_shot_iterator():
#print(e)
def data_input_fn():
ten = []
lab = []
for arg in get_all_files(sys.argv[1]):
if "Schneider" in arg:
#lab.append("Schneider")
lab.append(1)
elif "Siemens" in arg:
#lab.append("Siemens")
lab.append(2)
else:
#lab.append("None")
lab.append(0)
data = np.fromfile(arg, np.uint8)
file_width = math.ceil(math.sqrt(len(data)))
data.resize((file_width, file_width))
t = tf.convert_to_tensor(data)
t = tf.expand_dims(t, -1)
t = tf.image.resize_images(t, (256,256), tf.image.ResizeMethod.NEAREST_NEIGHBOR)
ten.append(t)
l = tf.convert_to_tensor(lab)
l = tf.expand_dims(l, -1)
#dataset = tf.data.Dataset.from_tensors((ten, lab))
dataset = tf.data.Dataset.from_tensors((ten, l))
dataset = dataset.map(parser)
#dataset = dataset.shuffle(10000)
dataset = dataset.batch(3)
it = dataset.make_one_shot_iterator()
features, labels = it.get_next()
print(features, labels)
return features, labels
def eval_fn():
ten = []
lab = []
for arg in get_all_files(sys.argv[2]):
if "Schneider" in arg:
#lab.append("Schneider")
lab.append(1)
elif "Siemens" in arg:
#lab.append("Siemens")
lab.append(2)
else:
#lab.append("None")
lab.append(0)
data = np.fromfile(arg, np.uint8)
file_width = math.ceil(math.sqrt(len(data)))
data.resize((file_width, file_width))
t = tf.convert_to_tensor(data)
t = tf.expand_dims(t, -1)
t = tf.image.resize_images(t, (256,256), tf.image.ResizeMethod.NEAREST_NEIGHBOR)
ten.append(t)
l = tf.convert_to_tensor(lab)
l = tf.expand_dims(l, -1)
#dataset = tf.data.Dataset.from_tensors((ten, lab))
dataset = tf.data.Dataset.from_tensors((ten, l))
dataset = dataset.map(parser)
#dataset = dataset.shuffle(10000)
dataset = dataset.batch(3)
it = dataset.make_one_shot_iterator()
features, labels = it.get_next()
return features, labels
#classify = tf.estimator.Estimator(model_fn=cnn_model_fn, model_dir="/tmp/cnn_model")
classify = tf.estimator.Estimator(model_fn=cnn_model_fn, model_dir="/home/john/cnn_model")
tensors_to_log = {"probabilities": "softmax_tensor"}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
#sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
#train_input_fn = tf.estimator.inputs.numpy_input_fn(
#x={"x": train_data},
#y=train_labels,
#batch_size=100,
#num_epochs=None,
#shuffle=True)
classify.train(
input_fn=data_input_fn,
steps=20000,
hooks=[logging_hook])
#eval_input_fn = tf.estimator.inputs.numpy_input_fn(
#x={"x": eval_data},
#y=eval_labels,
#num_epochs=1,
#shuffle=False)
eval_results = classify.evaluate(input_fn=eval_fn)
print(eval_results)
``` |
{
"source": "johnaheadley/core",
"score": 2
} |
#### File: scripts/dns/unbound_dhcpd.py
```python
import os
import sys
sys.path.insert(0, "/usr/local/opnsense/site-python")
import subprocess
import time
import tempfile
from daemonize import Daemonize
import watchers.dhcpd
import params
def unbound_control(commands, output_stream=None):
""" execute (chrooted) unbound-control command
:param commands: command list (parameters)
:param output_stream: (optional)output stream
:return: None
"""
if output_stream is None:
output_stream = open(os.devnull, 'w')
subprocess.check_call(['/usr/sbin/chroot', '-u', 'unbound', '-g', 'unbound', '/',
'/usr/local/sbin/unbound-control', '-c', '/var/unbound/unbound.conf'] + commands,
stdout=output_stream, stderr=subprocess.STDOUT)
output_stream.seek(0)
def unbound_known_addresses():
""" fetch known addresses
:return: list
"""
result = list()
with tempfile.NamedTemporaryFile() as output_stream:
unbound_control(['list_local_data'], output_stream)
for line in output_stream:
parts = line.decode().split()
if len(parts) > 4 and parts[3] == 'A':
result.append(parts[4])
return result
# parse input params
app_params = {'pid': '/var/run/unbound_dhcpd.pid',
'domain': 'local',
'target': '/var/unbound/dhcpleases.conf',
'background': '1'}
params.update_params(app_params)
def main():
# cleanup interval (seconds)
cleanup_interval = 60
# initiate lease watcher and setup cache
dhcpdleases = watchers.dhcpd.DHCPDLease()
cached_leases = dict()
known_addresses = unbound_known_addresses()
# start watching dhcp leases
last_cleanup = time.time()
while True:
dhcpd_changed = False
for lease in dhcpdleases.watch():
if 'ends' in lease and lease['ends'] > time.time() and 'client-hostname' in lease and 'address' in lease:
cached_leases[lease['address']] = lease
dhcpd_changed = True
if time.time() - last_cleanup > cleanup_interval:
# cleanup every x seconds
last_cleanup = time.time()
addresses = list(cached_leases.keys())  # copy keys so entries can be deleted while iterating
for address in addresses:
if cached_leases[address]['ends'] < time.time():
del cached_leases[address]
dhcpd_changed = True
if dhcpd_changed:
# dump dns output to target
with open(app_params['target'], 'w') as unbound_conf:
for address in cached_leases:
unbound_conf.write('local-data-ptr: "%s %s.%s"\n' % (
address, cached_leases[address]['client-hostname'], app_params['domain'])
)
unbound_conf.write('local-data: "%s.%s IN A %s"\n' % (
cached_leases[address]['client-hostname'], app_params['domain'], address)
)
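# Example of the lines written above (hostname and address are illustrative):
#   local-data-ptr: "192.168.1.50 laptop.local"
#   local-data: "laptop.local IN A 192.168.1.50"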
# signal unbound
for address in cached_leases:
if address not in known_addresses:
fqdn = '%s.%s' % (cached_leases[address]['client-hostname'], app_params['domain'])
unbound_control(['local_data', address, 'PTR', fqdn])
unbound_control(['local_data', fqdn, 'IN A', address])
known_addresses.append(address)
# wait for next cycle
time.sleep(1)
# startup
if app_params['background'] == '1':
daemon = Daemonize(app="unbound_dhcpd", pid=app_params['pid'], action=main)
daemon.start()
else:
main()
``` |
{
"source": "johnahjohn/ml_iris_prediction",
"score": 4
} |
#### File: johnahjohn/ml_iris_prediction/stream_iristest.py
```python
from pycaret.classification import load_model, predict_model
import streamlit as st
import pandas as pd
import numpy as np
model = load_model('knn_model')
def predict(model, input_df):
predictions_df = predict_model(estimator=model, data=input_df)
predictions = predictions_df['Label'][0]
return predictions
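# Assumes a pycaret 2.x pipeline, where predict_model() adds the predicted
# class as a 'Label' column; pycaret 3.x renamed it to 'prediction_label'.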
def run():
from PIL import Image
image = Image.open('iris1.jpg')
image_office = Image.open('iris2.jpg')
st.image(image,use_column_width=True)
add_selectbox = st.sidebar.selectbox(
"How would you like to predict?",
("Online", "Batch"))
st.sidebar.info('This app is created to predict the species of iris flower')
st.sidebar.success('https://www.pycaret.org')
st.sidebar.image(image_office)
st.title("Predicting iris species")
if add_selectbox == 'Online':
sepal_length=st.number_input('sepal_length' , min_value=0.1, max_value=8.0, value=0.1)
sepal_width =st.number_input('sepal_width',min_value=0.0, max_value=6.0, value=0.1)
petal_length = st.number_input('petal_length', min_value=0.0, max_value=8.0, value=0.1)
petal_width = st.number_input('petal_width', min_value=0.0, max_value=4.5, value=0.1)
output=""
input_dict={'sepal_length':sepal_length,'sepal_width':sepal_width,'petal_length':petal_length,'petal_width':petal_width}
input_df = pd.DataFrame([input_dict])
if st.button("Predict"):
output = predict(model=model, input_df=input_df)
output = str(output)
st.success('The output is {}'.format(output))
if add_selectbox == 'Batch':
file_upload = st.file_uploader("Upload csv file for predictions", type=["csv"])
if file_upload is not None:
data = pd.read_csv(file_upload)
predictions = predict_model(estimator=model,data=data)
st.write(predictions)
def main():
run()
if __name__ == "__main__":
main()
``` |
{
"source": "johnaho/Beacon8r",
"score": 3
} |
#### File: Beacon8r/MainUnit/plugNChug.py
```python
import os
# python 3X I believe.
# MIT license and all that. part of Beacon8r code. @Beacon8r and @dj_ir0ngruve on twitter.
# Apologies for potato code. Python's not my main language. Yes, this was developed on windows so separator slashes are wrong for linux. Ran out of time to test right now for cross platform.
#Basically this takes any big list of names in a file named "biglistGenAccessPoints.txt", big list get it?
#and chunks through them and spits out a complete Arduino folder/file set you can use to program an ESP8266
#It creates the new folders and files in: genDuinoFolder
#Change these variables before running
beaconpattern = r"FULLPATHTO\beacon8pattern.txt" # Top part of each file
listaccesspoints = r"FULLPATHTO\biglistGenAccessPoints.txt" # The list of access points you want to chunk out
genDuinoFolder = r"FULLPATHTO_FOLDER_YOU_WANT_RESULTS_IN" #Where you set the reformatted chunks to be created in.
spliton = 2980 #Number of beacons to advertise per ESP8266. 2980 is near upper limit of space on chip I think. 130k / 2980 = roughly 44 ish ESP8266 folder/.ino file sets. Note that Folder name and *.ino file name I think have to be the same.
#Note: 13-17 per ESP8266 should be used for stability of viewing on a phone's wifi list.
#You can have around 4-5 total esp8266's near each other powered by portable usb batteries no big deal.
#First year I put each esp8266 in a ziploc bag for a smaller project.. Kapton tape covering each unit is better.
#Heat can get up to 140F, not sure what that is in real temperature (about 60C).
#Finally... You can broadcast Emoji's. Arduino ide is fine with emojis but I haven't been able to put emojis in list here and not have python crash.
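#Example with hypothetical numbers: a 130,000 line biglistGenAccessPoints.txt and spliton = 2980
#gives ceil(130000 / 2980) = 44 output folders, each named beacon8rCluster_<n> with a matching
#beacon8rCluster_<n>.ino sketch inside it.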
read_dataBP = 'blank'
bcn8rPlaceName = "beacon8rCluster_"
if not os.path.exists(genDuinoFolder):
os.makedirs(genDuinoFolder)
with open(beaconpattern) as f:
read_dataBP = f.read()
f.closed
print(read_dataBP)
def write_file ():
with open(genDuinoFolder + "\\"+ currWkngName+ "\\"+ currWkngName +".ino", "w") as cf:
cf.write(fileInProgress+ "\r\n}\r\n")
cf.closed
counter =0
programs =0
first = 0
currWkngName = bcn8rPlaceName + str(programs)
fileInProgress = ""
with open(listaccesspoints) as bl:
for line in bl:
counter += 1
if( counter %spliton == 0 or first ==0):
if(len(fileInProgress) >0):
#write fileInProgress to disk.
if not os.path.exists(genDuinoFolder + "\\"+ currWkngName):
os.makedirs(genDuinoFolder + "\\"+ currWkngName)
#writeFile...
write_file()
programs +=1
first =1
currWkngName = bcn8rPlaceName + str(programs)
print(currWkngName +" and "+ currWkngName + r".ino")
# Create directory if doesn't exists
if not os.path.exists(genDuinoFolder + "\\"+ currWkngName):
os.makedirs(genDuinoFolder + "\\"+ currWkngName)
#set stored first part of program
fileInProgress = read_dataBP
fileInProgress += ' snBcn("'+line[:-1] + '");\r' #removes \r\n at end of line then adds \r where needed.
bl.closed
write_file()
print(counter) # How many things have you done?
print(programs) # Oh why, the humanity, oh why!!
#This is what I imagine python programmers reading my code to the end feel like: https://xkcd.com/1513/
``` |
{
"source": "johnallanach/snippets",
"score": 3
} |
#### File: python/walkscore/get_coords.py
```python
import requests
def get_coords(address_to_fetch):
url = 'https://nominatim.openstreetmap.org/search/' + address_to_fetch +'?format=json'
response = requests.get(url).json()
lat = response[0]["lat"]
lon = response[0]["lon"]
return lat, lon
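# Minimal usage sketch; the address below is only an illustrative assumption and the
# call needs network access to the public Nominatim endpoint.
if __name__ == '__main__':
    import urllib.parse
    example_address = urllib.parse.quote('1 Carden St, Guelph, Ontario')  # hypothetical address
    lat, lon = get_coords(example_address)
    print(lat, lon)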
```
#### File: python/walkscore/walkscore.py
```python
import csv
import os
import random
import requests
import time
import urllib
from get_coords import get_coords
from config import WALKSCORE_API
def append_to_csv_file(filename, rows):
with open(filename, 'a', encoding='utf-8', newline='') as file:
writer = csv.writer(file)
writer.writerows(rows)
def get_address_score(input_address):
address_to_fetch = urllib.parse.quote(input_address)
try:
lat, lon = get_coords(address_to_fetch)
data_url = 'https://api.walkscore.com/score?format=json&address=' + address_to_fetch + '&lat=' + lat + '&lon=' + lon + '&transit=1&bike=1&wsapikey=' + WALKSCORE_API
res = requests.get(url=data_url)
response_data = res.json()
status = response_data["status"]
try:
walkscore = response_data["walkscore"]
except:
walkscore = 'None'
try:
transitscore = response_data["transit"]['score']
except:
transitscore = 'None'
try:
bikescore = response_data["bike"]['score']
except:
bikescore = 'None'
except:
status = 1
walkscore = 'None'
transitscore = 'None'
bikescore = 'None'
return walkscore, transitscore, bikescore, status
def main():
#compile list of Guelph addresses
address_list = []
working_directory = os.getcwd()
file_path = os.path.join(working_directory, 'walkscore', 'data', 'guelphaddresses.csv')  # cross-platform path, avoids unescaped backslashes
with open(file_path, encoding='utf-8') as file:
addresses = csv.reader(file)
next(addresses, None)
for row in addresses:
address = row[0]
address_list.append(address)
processed_address_list = []
#for i in range(0, len(address_list)):
for i in range(0,100):
address_to_process = random.choice(address_list)
walkscore, transitscore, bikescore, status = get_address_score(address_to_process)
if status == 1:
entry = [address_to_process, walkscore, transitscore, bikescore]
processed_address_list.append(entry)
address_list.remove(address_to_process)
#time.sleep(20) #throttle requests to not exceed 5000 API calls per day
elif status == 40:
time.sleep(3600)
walkscore, transitscore, bikescore, status = get_address_score(address_to_process)
entry = [address_to_process, walkscore, transitscore, bikescore]
processed_address_list.append(entry)
address_list.remove(address_to_process)
append_to_csv_file('guelphaddresses_output.csv', processed_address_list)
if __name__ == '__main__':
main()
"""
HTTP Response Status Code Description
200 1 Walk Score successfully returned.
200 2 Score is being calculated and is not currently available.
404 30 Invalid latitude/longitude.
500 series 31 Walk Score API internal error.
200 40 Your WSAPIKEY is invalid.
200 41 Your daily API quota has been exceeded.
403 42 Your IP address has been blocked.
"""
``` |
{
"source": "JohnAllerdyce/Sovryn-smart-contracts",
"score": 2
} |
#### File: scripts/deployment/deploy_multisig_keyholders.py
```python
from brownie import *
import json
def main():
thisNetwork = network.show_active()
if thisNetwork == "development":
acct = accounts[0]
# configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "testnet" or thisNetwork == "rsk-mainnet":
acct = accounts.load("rskdeployer")
else:
raise Exception("network not supported")
if thisNetwork == "rsk-mainnet":
configFile = open('./scripts/contractInteraction/mainnet_contracts.json')
elif thisNetwork == "testnet":
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
contracts = json.load(configFile)
timelockOwnerAddress = contracts['timelockOwner']
multiSigKeyHolders= acct.deploy(MultiSigKeyHolders)
multiSigKeyHolders.transferOwnership(timelockOwnerAddress)
```
#### File: Sovryn-smart-contracts/tests/test_Protocol.py
```python
import pytest
def test_targetSetup(Constants, sovryn):
sig1 = "testFunction1(address,uint256,bytes)"
sig2 = "testFunction2(address[],uint256[],bytes[])"
sigs = [sig1,sig2]
targets = [Constants["ONE_ADDRESS"]] * len(sigs)
sovryn.setTargets(sigs, targets)
assert sovryn.getTarget(sig1) == Constants["ONE_ADDRESS"]
assert sovryn.getTarget(sig2) == Constants["ONE_ADDRESS"]
targets = [Constants["ZERO_ADDRESS"]] * len(sigs)
sovryn.setTargets(sigs, targets)
assert sovryn.getTarget(sig1) == Constants["ZERO_ADDRESS"]
assert sovryn.getTarget(sig2) == Constants["ZERO_ADDRESS"]
def test_replaceContract(Constants, sovryn, accounts, LoanSettings):
sig = "setupLoanParams((bytes32,bool,address,address,address,uint256,uint256,uint256)[])"
loanSettings = accounts[0].deploy(LoanSettings)
sovryn.setTargets([sig], [Constants["ZERO_ADDRESS"]])
assert sovryn.getTarget(sig) == Constants["ZERO_ADDRESS"]
sovryn.replaceContract(loanSettings.address)
assert sovryn.getTarget(sig) == loanSettings.address
def test_receivesEther(web3, sovryn, accounts):
assert(web3.eth.getBalance(sovryn.address) == 0)
web3.eth.sendTransaction({ "from": str(accounts[0]), "to": sovryn.address, "value": 10000, "gas": "5999" })
assert(web3.eth.getBalance(sovryn.address) == 10000)
``` |
{
"source": "John-alonso-debug/safe-transaction-service",
"score": 2
} |
#### File: safe_transaction_service/history/utils.py
```python
import logging
import time
from typing import Any, Dict, List, Optional, Union
from django.conf import settings
from django.core.signals import request_finished
from django.db import connection
from django.http import HttpRequest
from gunicorn import glogging
from redis import Redis
class IgnoreCheckUrl(logging.Filter):
def filter(self, record):
message = record.getMessage()
return not ('GET /check/' in message and '200' in message)
class CustomGunicornLogger(glogging.Logger):
def setup(self, cfg):
super().setup(cfg)
# Add filters to Gunicorn logger
logger = logging.getLogger("gunicorn.access")
logger.addFilter(IgnoreCheckUrl())
class LoggingMiddleware:
def __init__(self, get_response):
self.get_response = get_response
self.logger = logging.getLogger('LoggingMiddleware')
def get_milliseconds_now(self):
return int(time.time() * 1000)
def __call__(self, request: HttpRequest):
milliseconds = self.get_milliseconds_now()
response = self.get_response(request)
if request.resolver_match:
route = request.resolver_match.route if request.resolver_match else request.path
self.logger.info('MT::%s::%s::%s::%d::%s', request.method, route, self.get_milliseconds_now() - milliseconds,
response.status_code, request.path)
return response
def close_gevent_db_connection():
"""
Clean gevent db connections. Check `atomic block` to prevent breaking the tests (Django `TestCase` wraps tests
inside an atomic block that rolls back at the end of the test)
https://github.com/jneight/django-db-geventpool#using-orm-when-not-serving-requests
:return:
"""
if not connection.in_atomic_block:
request_finished.send(sender="greenlet")
def chunks(elements: List[Any], n: int):
"""
:param elements: List
:param n: Number of elements per chunk
:return: Yield successive n-sized chunks from l
"""
for i in range(0, len(elements), n):
yield elements[i:i + n]
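# Example: list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]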
def clean_receipt_log(receipt_log: Dict[str, Any]) -> Optional[Dict[str, Any]]:
"""
Clean a receipt log and make it JSON compliant
:param receipt_log:
:return:
"""
parsed_log = {'address': receipt_log['address'],
'data': receipt_log['data'],
'topics': [topic.hex() for topic in receipt_log['topics']]}
return parsed_log
def get_redis() -> Redis:
if not hasattr(get_redis, 'redis'):
get_redis.redis = Redis.from_url(settings.REDIS_URL)
return get_redis.redis
def parse_boolean_query_param(value: Union[bool, str]) -> bool:
if value in (True, 'True', 'true', '1'):
return True
else:
return False
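# Example: parse_boolean_query_param('true') -> True, parse_boolean_query_param('0') -> False;
# only the exact values listed above count as truthy.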
``` |
{
"source": "johnamcleod/agents",
"score": 2
} |
#### File: bandits/agents/utils.py
```python
from __future__ import absolute_import
from __future__ import division
# Using Type Annotations.
from __future__ import print_function
from typing import Tuple
import gin
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.policies import policy_utilities
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.typing import types
from tf_agents.utils import nest_utils
def sum_reward_weighted_observations(r: types.Tensor,
x: types.Tensor) -> types.Tensor:
"""Calculates an update used by some Bandit algorithms.
Given an observation `x` and corresponding reward `r`, the weighted
observations vector (denoted `b` here) should be updated as `b = b + r * x`.
This function calculates the sum of weighted rewards for batched
observations `x`.
Args:
r: a `Tensor` of shape [`batch_size`]. This is the rewards of the batched
observations.
x: a `Tensor` of shape [`batch_size`, `context_dim`]. This is the matrix
with the (batched) observations.
Returns:
The update that needs to be added to `b`. Has the same shape as `b`.
If the observation matrix `x` is empty, a zero vector is returned.
"""
batch_size = tf.shape(x)[0]
return tf.reduce_sum(tf.reshape(r, [batch_size, 1]) * x, axis=0)
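# Example: r = [1., 2.] with x = [[1., 0.], [0., 1.]] yields the update [1., 2.]:
# each observation row is scaled by its reward and the rows are summed.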
@gin.configurable
def build_laplacian_over_ordinal_integer_actions(
action_spec: types.BoundedTensorSpec) -> types.Tensor:
"""Build the unnormalized Laplacian matrix over ordinal integer actions.
Assuming integer actions, this function builds the (unnormalized) Laplacian
matrix of the graph implied over the action space. The graph vertices are the
integers {0...action_spec.maximum - 1}. Two vertices are adjacent if they
correspond to consecutive integer actions. The `action_spec` must specify
a scalar int32 or int64 with minimum zero.
Args:
action_spec: a `BoundedTensorSpec`.
Returns:
The graph Laplacian matrix (float tensor) of size equal to the number of
actions. Each diagonal element equals the degree of the corresponding action
(1 for the two endpoint actions, 2 for the rest); off-diagonal elements are
-1 for adjacent actions and 0 otherwise.
Raises:
ValueError: if `action_spec` is not a bounded scalar int32 or int64 spec
with minimum 0.
"""
num_actions = policy_utilities.get_num_actions_from_tensor_spec(action_spec)
adjacency_matrix = np.zeros([num_actions, num_actions])
for i in range(num_actions - 1):
adjacency_matrix[i, i + 1] = 1.0
adjacency_matrix[i + 1, i] = 1.0
laplacian_matrix = np.diag(np.sum(adjacency_matrix,
axis=0)) - adjacency_matrix
return laplacian_matrix
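# Example (num_actions = 3): the action graph is the path 0 - 1 - 2, so the returned
# Laplacian is
#   [[ 1., -1.,  0.],
#    [-1.,  2., -1.],
#    [ 0., -1.,  1.]]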
def compute_pairwise_distances(input_vecs: types.Tensor) -> types.Tensor:
"""Compute the pairwise distances matrix.
Given input embedding vectors, this utility computes the (squared) pairwise
distances matrix.
Args:
input_vecs: a `Tensor`. Input embedding vectors (one per row).
Returns:
The (squared) pairwise distances matrix. A dense float `Tensor` of shape
[`num_vectors`, `num_vectors`], where `num_vectors` is the number of input
embedding vectors.
"""
r = tf.reduce_sum(input_vecs * input_vecs, axis=1, keepdims=True)
pdistance_matrix = (
r - 2 * tf.matmul(input_vecs, input_vecs, transpose_b=True)
+ tf.transpose(r))
return tf.cast(pdistance_matrix, dtype=tf.float32)
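# Example: input_vecs = [[0., 0.], [3., 4.]] returns [[0., 25.], [25., 0.]], i.e. the
# squared Euclidean distances between the two embedding vectors.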
@gin.configurable
def build_laplacian_nearest_neighbor_graph(input_vecs: types.Tensor,
k: int = 1) -> types.Tensor:
"""Build the Laplacian matrix of a nearest neighbor graph.
Given input embedding vectors, this utility returns the Laplacian matrix of
the induced k-nearest-neighbor graph.
Args:
input_vecs: a `Tensor`. Input embedding vectors (one per row). Shaped
`[num_vectors, ...]`.
k : an integer. Number of nearest neighbors to use.
Returns:
The graph Laplacian matrix. A dense float `Tensor` of shape
`[num_vectors, num_vectors]`, where `num_vectors` is the number of input
embedding vectors (`Tensor`).
"""
num_actions = tf.shape(input_vecs)[0]
pdistance_matrix = compute_pairwise_distances(input_vecs)
sorted_indices = tf.argsort(values=pdistance_matrix)
selected_indices = tf.reshape(sorted_indices[:, 1 : k + 1], [-1, 1])
rng = tf.tile(
tf.expand_dims(tf.range(num_actions), axis=-1), [1, k])
rng = tf.reshape(rng, [-1, 1])
full_indices = tf.concat([rng, selected_indices], axis=1)
adjacency_matrix = tf.zeros([num_actions, num_actions], dtype=tf.float32)
adjacency_matrix = tf.tensor_scatter_nd_update(
tensor=adjacency_matrix,
indices=full_indices,
updates=tf.ones([k * num_actions], dtype=tf.float32))
# Symmetrize it.
adjacency_matrix = adjacency_matrix + tf.transpose(adjacency_matrix)
adjacency_matrix = tf.minimum(
adjacency_matrix, tf.ones_like(adjacency_matrix))
degree_matrix = tf.linalg.tensor_diag(tf.reduce_sum(adjacency_matrix, axis=1))
laplacian_matrix = degree_matrix - adjacency_matrix
return laplacian_matrix
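# Example (k = 1): input_vecs = [[0.], [1.], [10.]] makes 0 and 1 mutual nearest
# neighbours while 10's nearest neighbour is 1, so after symmetrization the Laplacian is
#   [[ 1., -1.,  0.],
#    [-1.,  2., -1.],
#    [ 0., -1.,  1.]]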
def process_experience_for_neural_agents(
experience: types.NestedTensor, accepts_per_arm_features: bool,
training_data_spec: types.NestedTensorSpec
) -> Tuple[types.NestedTensor, types.Tensor, types.Tensor]:
"""Processes the experience and prepares it for the network of the agent.
First the reward, the action, and the observation are flattened to have only
one batch dimension. Then, if the experience includes chosen action features
in the policy info, it gets copied in place of the per-arm observation.
Args:
experience: The experience coming from the replay buffer.
accepts_per_arm_features: Whether the agent accepts per-arm features.
training_data_spec: The data spec describing what the agent expects.
Returns:
A tuple of (observation, action, reward) tensors to be consumed by the train
function of the neural agent.
"""
flattened_experience, _ = nest_utils.flatten_multi_batched_nested_tensors(
experience, training_data_spec)
observation = flattened_experience.observation
action = flattened_experience.action
reward = flattened_experience.reward
if not accepts_per_arm_features:
return observation, action, reward
# The arm observation we train on needs to be copied from the respective
# policy info field to the per arm observation field. Pretending there was
# only one action, we fill the action field with zeros.
chosen_arm_features = flattened_experience.policy_info.chosen_arm_features
observation[bandit_spec_utils.PER_ARM_FEATURE_KEY] = tf.nest.map_structure(
lambda t: tf.expand_dims(t, axis=1), chosen_arm_features)
action = tf.zeros_like(action)
if bandit_spec_utils.NUM_ACTIONS_FEATURE_KEY in observation:
# This change is not crucial but since in training there will be only one
# action per sample, it's good to follow the convention that the feature
# value for `num_actions` be less than or equal to the maximum available
# number of actions.
observation[bandit_spec_utils.NUM_ACTIONS_FEATURE_KEY] = tf.ones_like(
observation[bandit_spec_utils.NUM_ACTIONS_FEATURE_KEY])
return observation, action, reward
```
#### File: bandits/environments/classification_environment.py
```python
from __future__ import absolute_import
from __future__ import division
# Using Type Annotations.
from __future__ import print_function
from typing import Optional, Text
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.bandits.environments import bandit_tf_environment as bte
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step
from tf_agents.typing import types
from tf_agents.utils import eager_utils
tfd = tfp.distributions
def _batched_table_lookup(tbl, row, col):
"""Mapped 2D table lookup.
Args:
tbl: a `Tensor` of shape `[r, s, t]`.
row: a `Tensor` of dtype `int32` with shape `[r]` and values in
the range `[0, s - 1]`.
col: a `Tensor` of dtype `int32` with shape `[r]` and values in
the range `[0, t - 1]`.
Returns:
A `Tensor` `x` with shape `[r]` where `x[i] = tbl[i, row[i], col[i]`.
"""
assert_correct_shapes = tf.group(
tf.assert_equal(tf.shape(row), tf.shape(col)),
tf.assert_equal(tf.shape(row)[0], tf.shape(tbl)[0]))
rng = tf.range(tf.shape(row)[0])
idx = tf.stack([rng, row, col], axis=-1)
with tf.control_dependencies([assert_correct_shapes]):
values = tf.gather_nd(tbl, idx)
return values
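# Example: tbl = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] (shape [2, 2, 2]), row = [0, 1],
# col = [1, 0] returns [tbl[0, 0, 1], tbl[1, 1, 0]] = [2, 7].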
@gin.configurable
class ClassificationBanditEnvironment(bte.BanditTFEnvironment):
"""An environment based on an arbitrary classification problem."""
def __init__(self,
dataset: tf.data.Dataset,
reward_distribution: types.Distribution,
batch_size: types.Int,
label_dtype_cast: Optional[tf.DType] = None,
shuffle_buffer_size: Optional[types.Int] = None,
repeat_dataset: Optional[bool] = True,
prefetch_size: Optional[types.Int] = None,
seed: Optional[types.Int] = None,
name: Optional[Text] = 'classification'):
"""Initialize `ClassificationBanditEnvironment`.
Args:
dataset: a `tf.data.Dataset` consisting of two `Tensor`s, [inputs, labels]
where inputs can be of any shape, while labels are integer class labels.
The label tensor can be of any rank as long as it has 1 element.
reward_distribution: a `tfd.Distribution` with event_shape
`[num_classes, num_actions]`. Entry `[i, j]` is the reward for taking
action `j` for an instance of class `i`.
batch_size: if `dataset` is batched, this is the size of the batches.
label_dtype_cast: if not None, casts dataset labels to this dtype.
shuffle_buffer_size: If None, do not shuffle. Otherwise, a shuffle buffer
of the specified size is used in the environment's `dataset`.
repeat_dataset: If True, repeats the `dataset` so that `OutOfRangeError:
End of sequence` errors are avoided when the environment is stepped past
the end of the `dataset`.
prefetch_size: If None, do not prefetch. Otherwise, a prefetch buffer
of the specified size is used in the environment's `dataset`.
seed: Used to make results deterministic.
name: The name of this environment instance.
Raises:
ValueError: if `reward_distribution` does not have an event shape with
rank 2.
"""
# Computing `action_spec`.
event_shape = reward_distribution.event_shape
if len(event_shape) != 2:
raise ValueError(
'reward_distribution must have event shape of rank 2; '
'got event shape {}'.format(event_shape))
_, num_actions = event_shape
action_spec = tensor_spec.BoundedTensorSpec(shape=(),
dtype=tf.int32,
minimum=0,
maximum=num_actions - 1,
name='action')
output_shapes = tf.compat.v1.data.get_output_shapes(dataset)
# Computing `time_step_spec`.
if len(output_shapes) != 2:
raise ValueError('Dataset must have exactly two outputs; got {}'.format(
len(output_shapes)))
context_shape = output_shapes[0]
context_dtype, lbl_dtype = tf.compat.v1.data.get_output_types(dataset)
if label_dtype_cast:
lbl_dtype = label_dtype_cast
observation_spec = tensor_spec.TensorSpec(
shape=context_shape, dtype=context_dtype)
time_step_spec = time_step.time_step_spec(observation_spec)
super(ClassificationBanditEnvironment, self).__init__(
action_spec=action_spec,
time_step_spec=time_step_spec,
batch_size=batch_size,
name=name)
if shuffle_buffer_size:
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size,
seed=seed,
reshuffle_each_iteration=True)
if repeat_dataset:
dataset = dataset.repeat()
dataset = dataset.batch(batch_size, drop_remainder=True)
if prefetch_size:
dataset = dataset.prefetch(prefetch_size)
self._data_iterator = eager_utils.dataset_iterator(dataset)
self._current_label = tf.compat.v2.Variable(
tf.zeros(batch_size, dtype=lbl_dtype))
self._previous_label = tf.compat.v2.Variable(
tf.zeros(batch_size, dtype=lbl_dtype))
self._reward_distribution = reward_distribution
self._label_dtype = lbl_dtype
reward_means = self._reward_distribution.mean()
self._optimal_action_table = tf.argmax(
reward_means, axis=1, output_type=self._action_spec.dtype)
self._optimal_reward_table = tf.reduce_max(reward_means, axis=1)
def _observe(self) -> types.NestedTensor:
context, lbl = eager_utils.get_next(self._data_iterator)
self._previous_label.assign(self._current_label)
self._current_label.assign(tf.reshape(
tf.cast(lbl, dtype=self._label_dtype), shape=[self._batch_size]))
return tf.reshape(
context,
shape=[self._batch_size] + self._time_step_spec.observation.shape)
def _apply_action(self, action: types.NestedTensor) -> types.NestedTensor:
action = tf.reshape(
action, shape=[self._batch_size] + self._action_spec.shape)
reward_samples = self._reward_distribution.sample(tf.shape(action))
return _batched_table_lookup(reward_samples, self._current_label, action)
def compute_optimal_action(self) -> types.NestedTensor:
return tf.gather(
params=self._optimal_action_table, indices=self._previous_label)
def compute_optimal_reward(self) -> types.NestedTensor:
return tf.gather(
params=self._optimal_reward_table, indices=self._previous_label)
```
#### File: bandits/environments/movielens_per_arm_py_environment.py
```python
from __future__ import absolute_import
# Using Type Annotations.
import random
from typing import Optional, Text
import gin
import numpy as np
from tf_agents.bandits.environments import bandit_py_environment
from tf_agents.bandits.environments import dataset_utilities
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.specs import array_spec
from tf_agents.trajectories import time_step as ts
GLOBAL_KEY = bandit_spec_utils.GLOBAL_FEATURE_KEY
PER_ARM_KEY = bandit_spec_utils.PER_ARM_FEATURE_KEY
@gin.configurable
class MovieLensPerArmPyEnvironment(bandit_py_environment.BanditPyEnvironment):
"""Implements the per-arm version of the MovieLens Bandit environment.
This environment implements the MovieLens 100K dataset, available at:
https://www.kaggle.com/prajitdatta/movielens-100k-dataset
This dataset contains 100K ratings from 943 users on 1682 items.
The data is a CSV list of:
user id | item id | rating | timestamp.
This environment computes a low-rank matrix factorization (using SVD) of the
data matrix `A`, such that: `A ~= U * Sigma * V^T`.
The environment uses the rows of `U` as global (or user) features, and the
rows of `V` as per-arm (or movie) features.
The reward of recommending movie `v` to user `u` is `u * Sigma * v^T`.
"""
def __init__(self,
data_dir: Text,
rank_k: int,
batch_size: int = 1,
num_actions: int = 50,
name: Optional[Text] = 'movielens_per_arm'):
"""Initializes the Per-arm MovieLens Bandit environment.
Args:
data_dir: (string) Directory where the data lies (in text form).
rank_k : (int) Which rank to use in the matrix factorization. This will
also be the feature dimension of both the user and the movie features.
batch_size: (int) Number of observations generated per call.
num_actions: (int) How many movies to choose from per round.
name: (string) The name of this environment instance.
"""
self._batch_size = batch_size
self._context_dim = rank_k
self._num_actions = num_actions
# Compute the matrix factorization.
self._data_matrix = dataset_utilities.load_movielens_data(data_dir)
self._num_users, self._num_movies = self._data_matrix.shape
# Compute the SVD.
u, s, vh = np.linalg.svd(self._data_matrix, full_matrices=False)
# Keep only the largest singular values.
self._u_hat = u[:, :rank_k].astype(np.float32)
self._s_hat = s[:rank_k].astype(np.float32)
self._v_hat = np.transpose(vh[:rank_k]).astype(np.float32)
self._approx_ratings_matrix = np.matmul(self._u_hat * self._s_hat,
np.transpose(self._v_hat))
self._action_spec = array_spec.BoundedArraySpec(
shape=(),
dtype=np.int32,
minimum=0,
maximum=num_actions - 1,
name='action')
observation_spec = {
GLOBAL_KEY:
array_spec.ArraySpec(shape=[rank_k], dtype=np.float32),
PER_ARM_KEY:
array_spec.ArraySpec(
shape=[num_actions, rank_k], dtype=np.float32),
}
self._time_step_spec = ts.time_step_spec(observation_spec)
self._current_user_indices = np.zeros(batch_size, dtype=np.int32)
self._previous_user_indices = np.zeros(batch_size, dtype=np.int32)
self._current_movie_indices = np.zeros([batch_size, num_actions],
dtype=np.int32)
self._previous_movie_indices = np.zeros([batch_size, num_actions],
dtype=np.int32)
self._observation = {
GLOBAL_KEY:
np.zeros([batch_size, rank_k]),
PER_ARM_KEY:
np.zeros([batch_size, num_actions, rank_k]),
}
super(MovieLensPerArmPyEnvironment, self).__init__(
observation_spec, self._action_spec, name=name)
@property
def batch_size(self):
return self._batch_size
@property
def batched(self):
return True
def _observe(self):
sampled_user_indices = np.random.randint(
self._num_users, size=self._batch_size)
self._previous_user_indices = self._current_user_indices
self._current_user_indices = sampled_user_indices
sampled_movie_indices = np.array([
random.sample(range(self._num_movies), self._num_actions)
for _ in range(self._batch_size)
])
movie_index_vector = sampled_movie_indices.reshape(-1)
flat_movie_list = self._v_hat[movie_index_vector]
current_movies = flat_movie_list.reshape(
[self._batch_size, self._num_actions, self._context_dim])
self._previous_movie_indices = self._current_movie_indices
self._current_movie_indices = sampled_movie_indices
batched_observations = {
GLOBAL_KEY:
self._u_hat[sampled_user_indices],
PER_ARM_KEY:
current_movies,
}
return batched_observations
def _apply_action(self, action):
chosen_arm_indices = self._current_movie_indices[range(self._batch_size),
action]
return self._approx_ratings_matrix[self._current_user_indices,
chosen_arm_indices]
def _rewards_for_all_actions(self):
rewards_matrix = self._approx_ratings_matrix[
np.expand_dims(self._previous_user_indices, axis=-1),
self._previous_movie_indices]
return rewards_matrix
def compute_optimal_action(self):
return np.argmax(self._rewards_for_all_actions(), axis=-1)
def compute_optimal_reward(self):
return np.max(self._rewards_for_all_actions(), axis=-1)
```
#### File: tf_agents/replay_buffers/replay_buffer.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.utils import common
from tensorflow.python.data.util import nest as data_nest # pylint:disable=g-direct-tensorflow-import # TF internal
from tensorflow.python.util import deprecation # pylint:disable=g-direct-tensorflow-import # TF internal
class ReplayBuffer(tf.Module):
"""Abstract base class for TF-Agents replay buffer.
In eager mode, methods modify the buffer or return values directly. In graph
mode, methods return ops that do so when executed.
"""
def __init__(self, data_spec, capacity, stateful_dataset=False):
"""Initializes the replay buffer.
Args:
data_spec: A spec or a list/tuple/nest of specs describing a single item
that can be stored in this buffer
capacity: number of elements that the replay buffer can hold.
stateful_dataset: whether the dataset contains stateful ops or not.
"""
super(ReplayBuffer, self).__init__()
common.check_tf1_allowed()
self._data_spec = data_spec
self._capacity = capacity
self._stateful_dataset = stateful_dataset
@property
def data_spec(self):
"""Returns the spec for items in the replay buffer."""
return self._data_spec
@property
def capacity(self):
"""Returns the capacity of the replay buffer."""
return self._capacity
@property
def stateful_dataset(self):
"""Returns whether the dataset of the replay buffer has stateful ops."""
return self._stateful_dataset
def num_frames(self):
"""Returns the number of frames in the replay buffer."""
return self._num_frames()
def add_batch(self, items):
"""Adds a batch of items to the replay buffer.
Args:
items: An item or list/tuple/nest of items to be added to the replay
buffer. `items` must match the data_spec of this class, with a
batch_size dimension added to the beginning of each tensor/array.
Returns:
Adds `items` to the replay buffer.
"""
return self._add_batch(items)
@deprecation.deprecated(
date=None,
instructions=(
'Use `as_dataset(..., single_deterministic_pass=False) instead.'
))
def get_next(self, sample_batch_size=None, num_steps=None, time_stacked=True):
"""Returns an item or batch of items from the buffer.
Args:
sample_batch_size: (Optional.) An optional batch_size to specify the
number of items to return. If None (default), a single item is returned
which matches the data_spec of this class (without a batch dimension).
Otherwise, a batch of sample_batch_size items is returned, where each
tensor in items will have its first dimension equal to sample_batch_size
and the rest of the dimensions match the corresponding data_spec. See
examples below.
num_steps: (Optional.) Optional way to specify that sub-episodes are
desired. If None (default), in non-episodic replay buffers, a batch of
single items is returned. In episodic buffers, full episodes are
returned (note that sample_batch_size must be None in that case).
Otherwise, a batch of sub-episodes is returned, where a sub-episode is a
sequence of consecutive items in the replay_buffer. The returned tensors
will have first dimension equal to sample_batch_size (if
sample_batch_size is not None), subsequent dimension equal to num_steps,
if time_stacked=True and remaining dimensions which match the data_spec
of this class. See examples below.
time_stacked: (Optional.) Boolean, when true and num_steps > 1 it returns
the items stacked on the time dimension. See examples below for details.
Examples of tensor shapes returned (B = batch size, T = timestep,
D = data spec):
  get_next(sample_batch_size=None, num_steps=None, time_stacked=True)
    return shape (non-episodic): [D]
    return shape (episodic): [T, D] (T = full length of the episode)
  get_next(sample_batch_size=B, num_steps=None, time_stacked=True)
    return shape (non-episodic): [B, D]
    return shape (episodic): Not supported
  get_next(sample_batch_size=B, num_steps=T, time_stacked=True)
    return shape: [B, T, D]
  get_next(sample_batch_size=None, num_steps=T, time_stacked=False)
    return shape: ([D], [D], ..) T tensors in the tuple
  get_next(sample_batch_size=B, num_steps=T, time_stacked=False)
    return shape: ([B, D], [B, D], ..) T tensors in the tuple
Returns:
A 2-tuple containing:
- An item or sequence of (optionally batched and stacked) items.
- Auxiliary info for the items (i.e. ids, probs).
"""
return self._get_next(sample_batch_size, num_steps, time_stacked)
def as_dataset(self,
sample_batch_size=None,
num_steps=None,
num_parallel_calls=None,
sequence_preprocess_fn=None,
single_deterministic_pass=False):
"""Creates and returns a dataset that returns entries from the buffer.
A single entry from the dataset is the result of the following pipeline:
* Sample sequences from the underlying data store
* (optionally) Process them with `sequence_preprocess_fn`,
* (optionally) Split them into subsequences of length `num_steps`
* (optionally) Batch them into batches of size `sample_batch_size`.
In practice, this pipeline is executed in parallel as much as possible
if `num_parallel_calls != 1`.
Some additional notes:
If `num_steps is None`, different replay buffers will behave differently.
For example, `TFUniformReplayBuffer` will return single time steps without
a time dimension. In contrast, e.g., `EpisodicReplayBuffer` will return
full sequences (since each sequence may be an episode of unknown length,
the outermost shape dimension will be `None`).
If `sample_batch_size is None`, no batching is performed; and there is no
outer batch dimension in the returned Dataset entries. This setting
is useful with variable episode lengths using e.g. `EpisodicReplayBuffer`,
because it allows the user to get full episodes back, and use `tf.data`
to build padded or truncated batches themselves.
If `single_deterministic_pass == True`, the replay buffer will make
every attempt to ensure every time step is visited once and exactly once
in a deterministic manner (though true determinism depends on the
underlying data store). Additional work may be done to ensure minibatches
do not have multiple rows from the same episode. In some cases, this
may mean arguments like `num_parallel_calls` are ignored.
Args:
sample_batch_size: (Optional.) An optional batch_size to specify the
number of items to return. If None (default), a single item is returned
which matches the data_spec of this class (without a batch dimension).
Otherwise, a batch of sample_batch_size items is returned, where each
tensor in items will have its first dimension equal to sample_batch_size
and the rest of the dimensions match the corresponding data_spec.
num_steps: (Optional.) Optional way to specify that sub-episodes are
desired. If None (default), a batch of single items is returned.
Otherwise, a batch of sub-episodes is returned, where a sub-episode is a
sequence of consecutive items in the replay_buffer. The returned tensors
will have first dimension equal to sample_batch_size (if
sample_batch_size is not None), subsequent dimension equal to num_steps,
and remaining dimensions which match the data_spec of this class.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process in parallel. If not
specified, elements will be processed sequentially.
sequence_preprocess_fn: (Optional) fn for preprocessing the collected
data before it is split into subsequences of length `num_steps`.
Defined in `TFAgent.preprocess_sequence`. Defaults to pass through.
single_deterministic_pass: Python boolean. If `True`, the dataset will
return a single deterministic pass through its underlying data.
**NOTE**: If the buffer is modified while a Dataset iterator is
iterating over this data, the iterator may miss any new data or
otherwise have subtly invalid data.
Returns:
A dataset of type tf.data.Dataset, elements of which are 2-tuples of:
- An item or sequence of items or batch thereof
- Auxiliary info for the items (i.e. ids, probs).
Raises:
NotImplementedError: If a non-default argument value is not supported.
ValueError: If the data spec contains lists that must be converted to
tuples.
"""
# data_tf.nest.flatten does not flatten python lists, nest.flatten does.
if tf.nest.flatten(self._data_spec) != data_nest.flatten(self._data_spec):
raise ValueError(
'Cannot perform gather; data spec contains lists and this conflicts '
'with gathering operator. Convert any lists to tuples. '
'For example, if your spec looks like [a, b, c], '
'change it to (a, b, c). Spec structure is:\n {}'.format(
tf.nest.map_structure(lambda spec: spec.dtype, self._data_spec)))
if single_deterministic_pass:
ds = self._single_deterministic_pass_dataset(
sample_batch_size=sample_batch_size,
num_steps=num_steps,
sequence_preprocess_fn=sequence_preprocess_fn,
num_parallel_calls=num_parallel_calls)
else:
ds = self._as_dataset(
sample_batch_size=sample_batch_size,
num_steps=num_steps,
sequence_preprocess_fn=sequence_preprocess_fn,
num_parallel_calls=num_parallel_calls)
if self._stateful_dataset:
options = tf.data.Options()
if hasattr(options, 'experimental_allow_stateful'):
options.experimental_allow_stateful = True
ds = ds.with_options(options)
return ds
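# Usage sketch (assumptions: a concrete subclass such as TFUniformReplayBuffer named
# `replay_buffer` that already holds data matching its data_spec; the argument values
# are illustrative):
#   dataset = replay_buffer.as_dataset(
#       sample_batch_size=32, num_steps=2, num_parallel_calls=3)
#   trajectories, buffer_info = next(iter(dataset))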
@deprecation.deprecated(
date=None,
instructions=(
'Use `as_dataset(..., single_deterministic_pass=True)` instead.'
))
def gather_all(self):
"""Returns all the items in buffer.
Returns:
Returns all the items currently in the buffer. Returns a tensor
of shape [B, T, ...] where B = batch size, T = timesteps,
and the remaining shape is the shape spec of the items in the buffer.
"""
return self._gather_all()
def clear(self):
"""Resets the contents of replay buffer.
Returns:
Clears the replay buffer contents.
"""
return self._clear()
# Subclasses must implement these methods.
@abc.abstractmethod
def _num_frames(self):
"""Returns the number of frames in the replay buffer."""
raise NotImplementedError
@abc.abstractmethod
def _add_batch(self, items):
"""Adds a batch of items to the replay buffer."""
raise NotImplementedError
@abc.abstractmethod
def _get_next(self, sample_batch_size, num_steps, time_stacked):
"""Returns an item or batch of items from the buffer."""
raise NotImplementedError
@abc.abstractmethod
def _as_dataset(self,
sample_batch_size,
num_steps,
sequence_preprocess_fn,
num_parallel_calls):
"""Creates and returns a dataset that returns entries from the buffer."""
raise NotImplementedError
@abc.abstractmethod
def _single_deterministic_pass_dataset(self,
sample_batch_size,
num_steps,
sequence_preprocess_fn,
num_parallel_calls):
"""Creates and returns a dataset that returns entries from the buffer."""
raise NotImplementedError
@abc.abstractmethod
def _gather_all(self):
"""Returns all the items in buffer."""
raise NotImplementedError
@abc.abstractmethod
def _clear(self):
"""Clears the replay buffer."""
raise NotImplementedError
``` |
{
"source": "johnamcleod/trieste",
"score": 3
} |
#### File: docs/notebooks/multi_objective_ehvi.pct.py
```python
import math
import gpflow
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from util.plotting import plot_bo_points, plot_function_2d, plot_mobo_history, plot_mobo_points_in_obj_space
# %%
import trieste
from trieste.acquisition.function import ExpectedHypervolumeImprovement
from trieste.data import Dataset
from trieste.models import create_model
from trieste.models.model_interfaces import ModelStack
from trieste.space import Box
from trieste.utils.multi_objectives import VLMOP2
from trieste.utils.pareto import Pareto, get_reference_point
from trieste.acquisition.rule import EfficientGlobalOptimization
np.random.seed(1793)
tf.random.set_seed(1793)
# %% [markdown]
# ## Describe the problem
#
# In this tutorial, we provide a multi-objective optimization example using the expected hypervolume improvement acquisition function.
# We consider the VLMOP2 function --- a synthetic benchmark problem with two objectives. We start by defining the problem parameters.
# %%
vlmop2 = VLMOP2().objective()
observer = trieste.utils.objectives.mk_observer(vlmop2)
# %%
mins = [-2, -2]
maxs = [2, 2]
search_space = Box(mins, maxs)
num_objective = 2
# %% [markdown]
# Let's randomly sample some initial data from the observer ...
# %%
num_initial_points = 20
initial_query_points = search_space.sample(num_initial_points)
initial_data = observer(initial_query_points)
# %% [markdown]
# ... and visualise the data across the design space: each figure contains the contour lines of each objective function.
# %%
_, ax = plot_function_2d(
vlmop2,
mins,
maxs,
grid_density=100,
contour=True,
title=["Obj 1", "Obj 2"],
figsize=(12, 6),
colorbar=True,
xlabel="$X_1$",
ylabel="$X_2$",
)
plot_bo_points(initial_query_points, ax=ax[0, 0], num_init=num_initial_points)
plot_bo_points(initial_query_points, ax=ax[0, 1], num_init=num_initial_points)
plt.show()
# %% [markdown]
# ... and in the objective space. The `plot_mobo_points_in_obj_space` function will automatically search for non-dominated points and colour them purple.
# %%
plot_mobo_points_in_obj_space(initial_data.observations)
plt.show()
# %% [markdown]
# ## Modelling the two functions
#
# In this example we model the two objective functions individually with their own Gaussian process models; for problems where the objective functions are similar, it may make sense to build a joint model instead.
#
# We use a model wrapper, `ModelStack`, to stack these two independent GPs into a single model that works as an (independent) multi-output model.
# %%
def build_stacked_independent_objectives_model(data: Dataset, num_output) -> ModelStack:
gprs =[]
for idx in range(num_output):
single_obj_data = Dataset(data.query_points, tf.gather(data.observations, [idx], axis=1))
variance = tf.math.reduce_variance(single_obj_data.observations)
kernel = gpflow.kernels.Matern52(variance)
gpr = gpflow.models.GPR((single_obj_data.query_points, single_obj_data.observations), kernel, noise_variance=1e-5)
gpflow.utilities.set_trainable(gpr.likelihood, False)
gprs.append((create_model({
"model": gpr,
"optimizer": gpflow.optimizers.Scipy(),
"optimizer_args": {
"minimize_args": {"options": dict(maxiter=100)}}}), 1))
return ModelStack(*gprs)
# %%
model = build_stacked_independent_objectives_model(initial_data, num_objective)
# %% [markdown]
# ## Define the acquisition function
# Here we utilize the [EHVI](https://link.springer.com/article/10.1007/s10898-019-00798-7) acquisition function, `ExpectedHypervolumeImprovement`:
# %%
ehvi = ExpectedHypervolumeImprovement()
rule = EfficientGlobalOptimization(builder=ehvi) # type: ignore
# %% [markdown]
# ## Run the optimization loop
#
# We can now run the optimization loop
# %%
num_steps = 30
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
result = bo.optimize(num_steps, initial_data, model, acquisition_rule=rule)
# %% [markdown]
# To conclude, we visualize the queried data across the design space.
# We represent the initial points as crosses and the points obtained by our optimization loop as dots.
# %%
dataset = result.try_get_final_dataset()
data_query_points = dataset.query_points
data_observations = dataset.observations
_, ax = plot_function_2d(
vlmop2,
mins,
maxs,
grid_density=100,
contour=True,
figsize=(12, 6),
title=["Obj 1", "Obj 2"],
xlabel="$X_1$",
ylabel="$X_2$",
colorbar=True,
)
plot_bo_points(data_query_points, ax=ax[0, 0], num_init=num_initial_points)
plot_bo_points(data_query_points, ax=ax[0, 1], num_init=num_initial_points)
plt.show()
# %% [markdown]
# Visualize in objective space. Purple dots denote the non-dominated points.
# %%
plot_mobo_points_in_obj_space(data_observations, num_init=num_initial_points)
plt.show()
# %% [markdown]
# We can also visualize how a performance metric evolved with respect to the number of BO iterations.
# First, we need to define a performance metric. Many metrics have been considered for multi-objective optimization. Here, we use the log hypervolume difference, defined as the log of the difference between the hypervolume of the actual Pareto front and the hypervolume of the approximate Pareto front based on the BO-obtained data.
# %% [markdown]
#
# $$
# \log_{10} \text{HV}_{\text{diff}} = \log_{10}(\text{HV}_{\text{actual}} - \text{HV}_{\text{bo-obtained}})
# $$
#
# %% [markdown]
# First we need to calculate the $\text{HV}_{\text{actual}}$ based on the actual Pareto front. For some multi-objective synthetic functions like VLMOP2, the actual Pareto front has a clear definition, so we can use `gen_pareto_optimal_points` to sample near-uniformly from the actual Pareto front, and then use these generated Pareto-optimal points to (approximately) calculate the hypervolume of the actual Pareto frontier:
# %%
actual_pf = VLMOP2().gen_pareto_optimal_points(100) # gen 100 pf points
ref_point = get_reference_point(data_observations)
idea_hv = Pareto(tf.cast(actual_pf, dtype=data_observations.dtype)).hypervolume_indicator(ref_point)
# %% [markdown]
# Then we define the metric function:
# %%
def log_hv(observations):
obs_hv = Pareto(observations).hypervolume_indicator(ref_point)
return math.log10(idea_hv - obs_hv)
# %% [markdown]
# Finally, we can plot the convergence of our performance metric over the course of the optimization.
# The blue vertical line in the figure denotes the time after which BO starts.
# %%
fig, ax = plot_mobo_history(data_observations, log_hv, num_init=num_initial_points)
ax.set_xlabel("Iterations")
ax.set_ylabel("log HV difference")
plt.show()
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
```
#### File: tests/integration/test_multi_objective_bayesian_optimization.py
```python
import gpflow
import pytest
import tensorflow as tf
from tests.util.misc import random_seed
from trieste.acquisition.function import ExpectedHypervolumeImprovement
from trieste.acquisition.rule import AcquisitionRule, EfficientGlobalOptimization
from trieste.bayesian_optimizer import BayesianOptimizer
from trieste.data import Dataset
from trieste.models import GaussianProcessRegression
from trieste.models.model_interfaces import ModelStack
from trieste.observer import OBJECTIVE
from trieste.space import Box
from trieste.utils.multi_objectives import VLMOP2
from trieste.utils.objectives import mk_observer
from trieste.utils.pareto import Pareto, get_reference_point
@random_seed
@pytest.mark.parametrize(
"num_steps, acquisition_rule",
[
(20, EfficientGlobalOptimization(ExpectedHypervolumeImprovement().using(OBJECTIVE))),
],
)
def test_multi_objective_optimizer_finds_pareto_front_of_the_VLMOP2_function(
num_steps: int, acquisition_rule: AcquisitionRule
) -> None:
search_space = Box([-2, -2], [2, 2])
def build_stacked_independent_objectives_model(data: Dataset) -> ModelStack:
gprs = []
for idx in range(2):
single_obj_data = Dataset(
data.query_points, tf.gather(data.observations, [idx], axis=1)
)
variance = tf.math.reduce_variance(single_obj_data.observations)
kernel = gpflow.kernels.Matern52(variance, tf.constant([0.2, 0.2], tf.float64))
gpr = gpflow.models.GPR(single_obj_data.astuple(), kernel, noise_variance=1e-5)
gpflow.utilities.set_trainable(gpr.likelihood, False)
gprs.append((GaussianProcessRegression(gpr), 1))
return ModelStack(*gprs)
observer = mk_observer(VLMOP2().objective(), OBJECTIVE)
initial_query_points = search_space.sample(10)
initial_data = observer(initial_query_points)
model = build_stacked_independent_objectives_model(initial_data[OBJECTIVE])
dataset = (
BayesianOptimizer(observer, search_space)
.optimize(num_steps, initial_data, {OBJECTIVE: model}, acquisition_rule)
.try_get_final_datasets()[OBJECTIVE]
)
# A small log hypervolume difference corresponds to a successful optimization.
ref_point = get_reference_point(dataset.observations)
obs_hv = Pareto(dataset.observations).hypervolume_indicator(ref_point)
ideal_pf = tf.cast(VLMOP2().gen_pareto_optimal_points(100), dtype=tf.float64)
ideal_hv = Pareto(ideal_pf).hypervolume_indicator(ref_point)
assert tf.math.log(ideal_hv - obs_hv) < -3.5
```
#### File: unit/acquisition/test_optimizer.py
```python
from __future__ import annotations
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import quadratic, random_seed
from trieste.acquisition import AcquisitionFunction
from trieste.acquisition.optimizer import (
AcquisitionOptimizer,
automatic_optimizer_selector,
batchify,
generate_random_search_optimizer,
optimize_continuous,
optimize_discrete,
)
from trieste.space import Box, DiscreteSearchSpace
from trieste.type import TensorType
def _quadratic_sum(shift: list[float]) -> AcquisitionFunction:
return lambda x: tf.reduce_sum(0.5 - quadratic(x - shift), axis=-2)
@random_seed
@pytest.mark.parametrize(
"search_space, shift, expected_maximizer, optimizers",
[
(
DiscreteSearchSpace(tf.constant([[-0.5], [0.2], [1.2], [1.7]])),
[1.0],
[[1.2]],
[optimize_discrete, generate_random_search_optimizer()],
), # 1D
( # 2D
DiscreteSearchSpace(tf.constant([[-0.5, -0.3], [-0.2, 0.3], [0.2, -0.3], [1.2, 0.4]])),
[0.3, -0.4],
[[0.2, -0.3]],
[optimize_discrete, generate_random_search_optimizer()],
),
(
Box([-1], [2]),
[1.0],
[[1.0]],
[optimize_continuous, generate_random_search_optimizer(10_000)],
), # 1D
(
Box([-1, -2], [1.5, 2.5]),
[0.3, -0.4],
[[0.3, -0.4]],
[optimize_continuous, generate_random_search_optimizer(10_000)],
), # 2D
(
Box([-1, -2], [1.5, 2.5]),
[1.0, 4],
[[1.0, 2.5]],
[optimize_continuous, generate_random_search_optimizer(10_000)],
), # 2D with maximum outside search space
(
Box([-1, -2, 1], [1.5, 2.5, 1.5]),
[0.3, -0.4, 0.5],
[[0.3, -0.4, 1.0]],
[optimize_continuous, generate_random_search_optimizer(100_000)],
), # 3D
],
)
def test_optimizer(
search_space: DiscreteSearchSpace,
shift: list[float],
expected_maximizer: list[list[float]],
optimizers: list[AcquisitionOptimizer],
) -> None:
for optimizer in optimizers:
maximizer = optimizer(search_space, _quadratic_sum(shift))
if optimizer is optimize_continuous:
npt.assert_allclose(maximizer, expected_maximizer, rtol=1e-3)
elif optimizer is optimize_discrete:
npt.assert_allclose(maximizer, expected_maximizer, rtol=1e-4)
else:
npt.assert_allclose(maximizer, expected_maximizer, rtol=1e-1)
def test_optimize_batch_raises_with_invalid_batch_size() -> None:
batch_size_one_optimizer = optimize_continuous
with pytest.raises(ValueError):
batchify(batch_size_one_optimizer, -5)
@random_seed
@pytest.mark.parametrize("batch_size", [1, 2, 3, 5])
@pytest.mark.parametrize(
"search_space, acquisition, maximizer",
[
(Box([-1], [1]), _quadratic_sum([0.5]), ([[0.5]])),
(Box([-1, -1, -1], [1, 1, 1]), _quadratic_sum([0.5, -0.5, 0.2]), ([[0.5, -0.5, 0.2]])),
],
)
def test_optimize_batch(
search_space: Box, acquisition: AcquisitionFunction, maximizer: TensorType, batch_size: int
) -> None:
batch_size_one_optimizer = optimize_continuous
batch_optimizer = batchify(batch_size_one_optimizer, batch_size)
points = batch_optimizer(search_space, acquisition)
assert points.shape == [batch_size] + search_space.lower.shape
for point in points:
print(point)
npt.assert_allclose(tf.expand_dims(point, 0), maximizer, rtol=2e-4)
@random_seed
@pytest.mark.parametrize(
"search_space, acquisition, maximizer",
[
(
DiscreteSearchSpace(tf.constant([[-0.5], [0.2], [1.2], [1.7]])),
_quadratic_sum([1.0]),
[[1.2]],
),
(Box([0], [1]), _quadratic_sum([0.5]), ([[0.5]])),
(Box([-1, -1, -1], [1, 1, 1]), _quadratic_sum([0.5, -0.5, 0.2]), ([[0.5, -0.5, 0.2]])),
],
)
def test_automatic_optimizer_selector(
search_space: Box,
acquisition: AcquisitionFunction,
maximizer: TensorType,
) -> None:
optimizer = automatic_optimizer_selector
point = optimizer(search_space, acquisition)
npt.assert_allclose(point, maximizer, rtol=2e-4)
def test_generate_random_search_optimizer_raises_with_invalid_sample_size() -> None:
with pytest.raises(ValueError):
generate_random_search_optimizer(num_samples=-5)
```
#### File: tests/unit/test_space.py
```python
from __future__ import annotations
import copy
import itertools
from collections.abc import Container
import numpy.testing as npt
import pytest
import tensorflow as tf
from typing_extensions import Final
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, ShapeLike, various_shapes
from trieste.space import Box, DiscreteSearchSpace, SearchSpace
class Integers(SearchSpace):
def __init__(self, exclusive_limit: int):
assert exclusive_limit > 0
self.limit: Final[int] = exclusive_limit
def sample(self, num_samples: int) -> tf.Tensor:
return tf.random.shuffle(tf.range(self.limit))[:num_samples]
def __contains__(self, point: tf.Tensor) -> tf.Tensor:
tf.debugging.assert_integer(point)
return 0 <= point < self.limit
def __mul__(self, other: Integers) -> Integers:
return Integers(self.limit * other.limit)
@pytest.mark.parametrize("exponent", [0, -2])
def test_search_space___pow___raises_for_non_positive_exponent(exponent: int) -> None:
space = Integers(3)
with pytest.raises(ValueError):
space ** exponent
def test_search_space___pow___multiplies_correct_number_of_search_spaces() -> None:
assert (Integers(5) ** 7).limit == 5 ** 7
def _points_in_2D_search_space() -> tf.Tensor:
return tf.constant([[-1.0, 0.4], [-1.0, 0.6], [0.0, 0.4], [0.0, 0.6], [1.0, 0.4], [1.0, 0.6]])
@pytest.mark.parametrize("shape", various_shapes(excluding_ranks=[2]))
def test_discrete_search_space_raises_for_invalid_shapes(shape: ShapeLike) -> None:
with pytest.raises(ValueError):
DiscreteSearchSpace(tf.random.uniform(shape))
def test_discrete_search_space_points() -> None:
space = DiscreteSearchSpace(_points_in_2D_search_space())
npt.assert_array_equal(space.points, _points_in_2D_search_space())
@pytest.mark.parametrize("point", list(_points_in_2D_search_space()))
def test_discrete_search_space_contains_all_its_points(point: tf.Tensor) -> None:
assert point in DiscreteSearchSpace(_points_in_2D_search_space())
@pytest.mark.parametrize(
"point",
[
tf.constant([-1.0, -0.4]),
tf.constant([-1.0, 0.5]),
tf.constant([-2.0, 0.4]),
tf.constant([-2.0, 0.7]),
],
)
def test_discrete_search_space_does_not_contain_other_points(point: tf.Tensor) -> None:
assert point not in DiscreteSearchSpace(_points_in_2D_search_space())
@pytest.mark.parametrize(
"points, test_point",
[
(tf.constant([[0.0]]), tf.constant([0.0, 0.0])),
(tf.constant([[0.0, 0.0]]), tf.constant(0.0)),
(tf.constant([[0.0, 0.0]]), tf.constant([0.0])),
(tf.constant([[0.0, 0.0]]), tf.constant([0.0, 0.0, 0.0])),
],
)
def test_discrete_search_space_contains_raises_for_invalid_shapes(
points: tf.Tensor, test_point: tf.Tensor
) -> None:
space = DiscreteSearchSpace(points)
with pytest.raises(ValueError):
_ = test_point in space
def _assert_correct_number_of_unique_constrained_samples(
num_samples: int, search_space: SearchSpace, samples: tf.Tensor
) -> None:
assert all(sample in search_space for sample in samples)
assert len(samples) == num_samples
unique_samples = set(tuple(sample.numpy().tolist()) for sample in samples)
assert len(unique_samples) == len(samples)
@pytest.mark.parametrize("num_samples", [0, 1, 3, 5, 6])
def test_discrete_search_space_sampling(num_samples: int) -> None:
search_space = DiscreteSearchSpace(_points_in_2D_search_space())
samples = search_space.sample(num_samples)
_assert_correct_number_of_unique_constrained_samples(num_samples, search_space, samples)
@pytest.mark.parametrize("num_samples", [7, 8, 10])
def test_discrete_search_space_sampling_raises_when_too_many_samples_are_requested(
num_samples: int,
) -> None:
search_space = DiscreteSearchSpace(_points_in_2D_search_space())
with pytest.raises(ValueError, match="samples"):
search_space.sample(num_samples)
def test_discrete_search_space___mul___points_is_the_concatenation_of_original_points() -> None:
dss1 = DiscreteSearchSpace(tf.constant([[-1.0, -1.4], [-1.5, -3.6], [-0.5, -0.6]]))
dss2 = DiscreteSearchSpace(tf.constant([[1.0, 1.4], [1.5, 3.6]]))
product = dss1 * dss2
all_expected_points = tf.constant(
[
[-1.0, -1.4, 1.0, 1.4],
[-1.0, -1.4, 1.5, 3.6],
[-1.5, -3.6, 1.0, 1.4],
[-1.5, -3.6, 1.5, 3.6],
[-0.5, -0.6, 1.0, 1.4],
[-0.5, -0.6, 1.5, 3.6],
]
)
assert len(product.points) == len(all_expected_points)
assert all(point in product for point in all_expected_points)
def test_discrete_search_space___mul___for_empty_search_space() -> None:
dss = DiscreteSearchSpace(tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]))
empty = DiscreteSearchSpace(tf.zeros([0, 1]))
npt.assert_array_equal((empty * dss).points, tf.zeros([0, 3]))
npt.assert_array_equal((dss * empty).points, tf.zeros([0, 3]))
def test_discrete_search_space___mul___for_identity_space() -> None:
dss = DiscreteSearchSpace(tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]))
identity = DiscreteSearchSpace(tf.zeros([1, 0]))
npt.assert_array_equal((dss * identity).points, dss.points)
npt.assert_array_equal((identity * dss).points, dss.points)
def test_discrete_search_space___mul___raises_if_points_have_different_types() -> None:
dss1 = DiscreteSearchSpace(_points_in_2D_search_space())
dss2 = DiscreteSearchSpace(tf.constant([[1.0, 1.4], [-1.5, 3.6]], tf.float64))
with pytest.raises(TypeError):
_ = dss1 * dss2
def test_discrete_search_space_deepcopy() -> None:
dss = DiscreteSearchSpace(_points_in_2D_search_space())
npt.assert_allclose(copy.deepcopy(dss).points, _points_in_2D_search_space())
@pytest.mark.parametrize(
"lower, upper",
[
pytest.param([0.0, 1.0], [1.0, 2.0], id="lists"),
pytest.param((0.0, 1.0), (1.0, 2.0), id="tuples"),
pytest.param(range(2), range(1, 3), id="ranges"),
],
)
def test_box_converts_sequences_to_float64_tensors(lower, upper) -> None:
box = Box(lower, upper)
assert tf.as_dtype(box.lower.dtype) is tf.float64
assert tf.as_dtype(box.upper.dtype) is tf.float64
npt.assert_array_equal(box.lower, [0.0, 1.0])
npt.assert_array_equal(box.upper, [1.0, 2.0])
def _pairs_of_shapes(
*, excluding_ranks: Container[int] = ()
) -> frozenset[tuple[ShapeLike, ShapeLike]]:
shapes = various_shapes(excluding_ranks=excluding_ranks)
return frozenset(itertools.product(shapes, shapes))
@pytest.mark.parametrize(
"lower_shape, upper_shape", _pairs_of_shapes(excluding_ranks={1}) | {((1,), (2,)), ((0,), (0,))}
)
def test_box_raises_if_bounds_have_invalid_shape(
lower_shape: ShapeLike, upper_shape: ShapeLike
) -> None:
lower, upper = tf.zeros(lower_shape), tf.ones(upper_shape)
with pytest.raises(ValueError):
Box(lower, upper)
@pytest.mark.parametrize(
"lower_dtype, upper_dtype",
[
(tf.uint32, tf.uint32), # same dtypes
(tf.int8, tf.uint16), # different dtypes ...
(tf.uint32, tf.float32),
(tf.float32, tf.float64),
(tf.float64, tf.bfloat16),
],
)
def test_box_raises_if_bounds_have_invalid_dtypes(
lower_dtype: tf.DType, upper_dtype: tf.DType
) -> None:
lower, upper = tf.zeros([3], dtype=lower_dtype), tf.ones([3], dtype=upper_dtype)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
Box(lower, upper)
@pytest.mark.parametrize(
"lower, upper",
[
(tf.ones((3,)), tf.ones((3,))), # all equal
(tf.ones((3,)) + 1, tf.ones((3,))), # lower all higher than upper
( # one lower higher than upper
tf.constant([2.3, -0.1, 8.0]),
tf.constant([3.0, -0.2, 8.0]),
),
(tf.constant([2.3, -0.1, 8.0]), tf.constant([3.0, -0.1, 8.0])), # one lower equal to upper
],
)
def test_box_raises_if_any_lower_bound_is_not_less_than_upper_bound(
lower: tf.Tensor, upper: tf.Tensor
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
Box(lower, upper)
def test_box_bounds_attributes() -> None:
lower, upper = tf.zeros([2]), tf.ones([2])
box = Box(lower, upper)
npt.assert_array_equal(box.lower, lower)
npt.assert_array_equal(box.upper, upper)
@pytest.mark.parametrize(
"point",
[
tf.constant([-1.0, 0.0, -2.0]), # lower bound
tf.constant([2.0, 1.0, -0.5]), # upper bound
tf.constant([0.5, 0.5, -1.5]), # approx centre
tf.constant([-1.0, 0.0, -1.9]), # near the edge
],
)
def test_box_contains_point(point: tf.Tensor) -> None:
assert point in Box(tf.constant([-1.0, 0.0, -2.0]), tf.constant([2.0, 1.0, -0.5]))
@pytest.mark.parametrize(
"point",
[
tf.constant([-1.1, 0.0, -2.0]), # just outside
tf.constant([-0.5, -0.5, 1.5]), # negative of a contained point
tf.constant([10.0, -10.0, 10.0]), # well outside
],
)
def test_box_does_not_contain_point(point: tf.Tensor) -> None:
assert point not in Box(tf.constant([-1.0, 0.0, -2.0]), tf.constant([2.0, 1.0, -0.5]))
@pytest.mark.parametrize(
"bound_shape, point_shape",
((bs, ps) for bs, ps in _pairs_of_shapes() if bs != ps and len(bs) == 1 and bs != (0,)),
)
def test_box_contains_raises_on_point_of_different_shape(
bound_shape: ShapeLike,
point_shape: ShapeLike,
) -> None:
box = Box(tf.zeros(bound_shape), tf.ones(bound_shape))
point = tf.zeros(point_shape)
with pytest.raises(ValueError):
_ = point in box
@pytest.mark.parametrize("num_samples", [0, 1, 10])
def test_box_sampling(num_samples: int) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)))
samples = box.sample(num_samples)
_assert_correct_number_of_unique_constrained_samples(num_samples, box, samples)
@pytest.mark.parametrize("num_samples", [0, 1, 10])
def test_box_discretize_returns_search_space_with_only_points_contained_within_box(
num_samples: int,
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)))
dss = box.discretize(num_samples)
samples = dss.sample(num_samples)
assert all(sample in box for sample in samples)
@pytest.mark.parametrize("num_samples", [0, 1, 10])
def test_box_discretize_returns_search_space_with_correct_number_of_points(
num_samples: int,
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)))
dss = box.discretize(num_samples)
samples = dss.sample(num_samples)
assert len(samples) == num_samples
with pytest.raises(ValueError):
dss.sample(num_samples + 1)
def test_box___mul___bounds_are_the_concatenation_of_original_bounds() -> None:
box1 = Box(tf.constant([0.0, 1.0]), tf.constant([2.0, 3.0]))
box2 = Box(tf.constant([4.1, 5.1, 6.1]), tf.constant([7.2, 8.2, 9.2]))
product = box1 * box2
npt.assert_allclose(product.lower, [0, 1, 4.1, 5.1, 6.1])
npt.assert_allclose(product.upper, [2, 3, 7.2, 8.2, 9.2])
def test_box___mul___raises_if_bounds_have_different_types() -> None:
box1 = Box(tf.constant([0.0, 1.0]), tf.constant([2.0, 3.0]))
box2 = Box(tf.constant([4.0, 5.0], tf.float64), tf.constant([6.0, 7.0], tf.float64))
with pytest.raises(TypeError):
_ = box1 * box2
def test_box_deepcopy() -> None:
box = Box(tf.constant([1.2, 3.4]), tf.constant([5.6, 7.8]))
box_copy = copy.deepcopy(box)
npt.assert_allclose(box.lower, box_copy.lower)
npt.assert_allclose(box.upper, box_copy.upper)
```
#### File: trieste/acquisition/function.py
```python
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Mapping
from itertools import product
from math import inf
from typing import Callable, Optional, Union
import tensorflow as tf
import tensorflow_probability as tfp
from ..data import Dataset
from ..models import ProbabilisticModel
from ..space import SearchSpace
from ..type import TensorType
from ..utils import DEFAULTS
from ..utils.pareto import Pareto, get_reference_point
from .sampler import BatchReparametrizationSampler, GumbelSampler
AcquisitionFunction = Callable[[TensorType], TensorType]
"""
Type alias for acquisition functions.
An :const:`AcquisitionFunction` maps a set of `B` query points (each of dimension `D`) to a single
value that describes how useful it would be evaluate all these points together (to our goal of
optimizing the objective function). Thus, with leading dimensions, an :const:`AcquisitionFunction`
takes input shape `[..., B, D]` and returns shape `[..., 1]`.
Note that :const:`AcquisitionFunction`s which do not support batch optimization still expect inputs
with a batch dimension, i.e. an input of shape `[..., 1, D]`.
"""
class AcquisitionFunctionBuilder(ABC):
"""An :class:`AcquisitionFunctionBuilder` builds an acquisition function."""
@abstractmethod
def prepare_acquisition_function(
self, datasets: Mapping[str, Dataset], models: Mapping[str, ProbabilisticModel]
) -> AcquisitionFunction:
"""
:param datasets: The data from the observer.
:param models: The models over each dataset in ``datasets``.
:return: An acquisition function.
"""
class SingleModelAcquisitionBuilder(ABC):
"""
Convenience acquisition function builder for an acquisition function (or component of a
composite acquisition function) that requires only one model, dataset pair.
"""
def using(self, tag: str) -> AcquisitionFunctionBuilder:
"""
:param tag: The tag for the model, dataset pair to use to build this acquisition function.
:return: An acquisition function builder that selects the model and dataset specified by
``tag``, as defined in :meth:`prepare_acquisition_function`.
"""
single_builder = self
class _Anon(AcquisitionFunctionBuilder):
def prepare_acquisition_function(
self, datasets: Mapping[str, Dataset], models: Mapping[str, ProbabilisticModel]
) -> AcquisitionFunction:
return single_builder.prepare_acquisition_function(datasets[tag], models[tag])
def __repr__(self) -> str:
return f"{single_builder!r} using tag {tag!r}"
return _Anon()
@abstractmethod
def prepare_acquisition_function(
self, dataset: Dataset, model: ProbabilisticModel
) -> AcquisitionFunction:
"""
:param dataset: The data to use to build the acquisition function.
:param model: The model over the specified ``dataset``.
:return: An acquisition function.
"""
class ExpectedImprovement(SingleModelAcquisitionBuilder):
"""
Builder for the expected improvement function where the "best" value is taken to be the minimum
of the posterior mean at observed points.
"""
def __repr__(self) -> str:
""""""
return "ExpectedImprovement()"
def prepare_acquisition_function(
self, dataset: Dataset, model: ProbabilisticModel
) -> AcquisitionFunction:
"""
:param dataset: The data from the observer. Must be populated.
:param model: The model over the specified ``dataset``.
:return: The expected improvement function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
:raise ValueError: If ``dataset`` is empty.
"""
if len(dataset.query_points) == 0:
raise ValueError("Dataset must be populated.")
mean, _ = model.predict(dataset.query_points)
eta = tf.reduce_min(mean, axis=0)
return expected_improvement(model, eta)
def expected_improvement(model: ProbabilisticModel, eta: TensorType) -> AcquisitionFunction:
r"""
Return the Expected Improvement (EI) acquisition function for single-objective global
optimization. Improvement is with respect to the current "best" observation ``eta``, where an
improvement moves towards the objective function's minimum, and the expectation is calculated
with respect to the ``model`` posterior. For model posterior :math:`f`, this is
.. math:: x \mapsto \mathbb E \left[ \max (\eta - f(x), 0) \right]
This function was introduced by Mockus et al, 1975. See :cite:`Jones:1998` for details.
:param model: The model of the objective function.
:param eta: The "best" observation.
:return: The expected improvement function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
def acquisition(x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
mean, variance = model.predict(tf.squeeze(x, -2))
normal = tfp.distributions.Normal(mean, tf.sqrt(variance))
return (eta - mean) * normal.cdf(eta) + variance * normal.prob(eta)
return acquisition
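# Illustrative usage sketch (assumes a hypothetical populated `dataset`, trained `model` and
# `search_space`; not part of the library source):
#
#     ei = ExpectedImprovement().prepare_acquisition_function(dataset, model)
#     candidates = search_space.sample(1000)
#     ei_values = ei(candidates[:, None, :])  # evaluated with a batch size of one per candidate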
class AugmentedExpectedImprovement(SingleModelAcquisitionBuilder):
"""
    Builder for the augmented expected improvement function for single-objective
optimization problems with high levels of observation noise.
"""
def __repr__(self) -> str:
""""""
return "AugmentedExpectedImprovement()"
def prepare_acquisition_function(
self, dataset: Dataset, model: ProbabilisticModel
) -> AcquisitionFunction:
"""
:param dataset: The data from the observer. Must be populated.
:param model: The model over the specified ``dataset``.
        :return: The augmented expected improvement function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
:raise ValueError: If ``dataset`` is empty.
"""
if len(dataset.query_points) == 0:
raise ValueError("Dataset must be populated.")
mean, _ = model.predict(dataset.query_points)
eta = tf.reduce_min(mean, axis=0)
return augmented_expected_improvement(model, eta)
def augmented_expected_improvement(
model: ProbabilisticModel, eta: TensorType
) -> AcquisitionFunction:
r"""
Return the Augmented Expected Improvement (AEI) acquisition function for single-objective global
optimization under homoscedastic observation noise.
Improvement is with respect to the current "best" observation ``eta``, where an
improvement moves towards the objective function's minimum, and the expectation is calculated
with respect to the ``model`` posterior. In contrast to standard EI, AEI has an additional
multiplicative factor that penalizes evaluations made in areas of the space with very small
    posterior predictive variance. Thus, unlike standard EI, AEI avoids getting trapped into
    repeatedly querying the same point when applied to noisy optimisation problems.
For model posterior :math:`f`, this is
    .. math:: x \mapsto EI(x) \cdot \left(1 - \frac{\tau}{\sqrt{s^2(x) + \tau^2}}\right),
    where :math:`s^2(x)` is the predictive variance and :math:`\tau^2` is the variance of the
    observation noise.
This function was introduced by Huang et al, 2006. See :cite:`Huang:2006` for details.
:param model: The model of the objective function.
:param eta: The "best" observation.
    :return: The augmented expected improvement function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
try:
noise_variance = model.get_observation_noise()
except NotImplementedError:
raise ValueError(
"""
Augmented expected improvement only currently supports homoscedastic gpflow models
with a likelihood.variance attribute.
"""
)
def acquisition(x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
mean, variance = model.predict(tf.squeeze(x, -2))
normal = tfp.distributions.Normal(mean, tf.sqrt(variance))
expected_improvement = (eta - mean) * normal.cdf(eta) + variance * normal.prob(eta)
augmentation = 1 - (tf.math.sqrt(noise_variance)) / (
tf.math.sqrt(noise_variance + variance)
)
return expected_improvement * augmentation
return acquisition
class MinValueEntropySearch(SingleModelAcquisitionBuilder):
r"""
Builder for the max-value entropy search acquisition function modified for objective
minimisation. :class:`MinValueEntropySearch` estimates the information in the distribution
of the objective minimum that would be gained by evaluating the objective at a given point.
This implementation largely follows :cite:`wang2017max` and samples the objective minimum
:math:`y^*` via a Gumbel sampler.
"""
def __init__(self, search_space: SearchSpace, num_samples: int = 10, grid_size: int = 5000):
"""
:param search_space: The global search space over which the optimisation is defined.
:param num_samples: Number of samples to draw from the distribution over the minimum of the
objective function.
:param grid_size: Size of the grid with which to fit the Gumbel distribution. We recommend
scaling this with search space dimension.
"""
self._search_space = search_space
if num_samples <= 0:
raise ValueError(f"num_samples must be positive, got {num_samples}")
self._num_samples = num_samples
if grid_size <= 0:
raise ValueError(f"grid_size must be positive, got {grid_size}")
self._grid_size = grid_size
def prepare_acquisition_function(
self, dataset: Dataset, model: ProbabilisticModel
) -> AcquisitionFunction:
"""
:param dataset: The data from the observer.
:param model: The model over the specified ``dataset``.
:return: The max-value entropy search acquisition function modified for objective
minimisation. This function will raise :exc:`ValueError` or
:exc:`~tf.errors.InvalidArgumentError` if used with a batch size greater than one.
"""
if len(dataset.query_points) == 0:
raise ValueError("Dataset must be populated.")
gumbel_sampler = GumbelSampler(self._num_samples, model)
query_points = self._search_space.sample(num_samples=self._grid_size)
tf.debugging.assert_same_float_dtype([dataset.query_points, query_points])
query_points = tf.concat([dataset.query_points, query_points], 0)
gumbel_samples = gumbel_sampler.sample(query_points)
return min_value_entropy_search(model, gumbel_samples)
def min_value_entropy_search(model: ProbabilisticModel, samples: TensorType) -> AcquisitionFunction:
r"""
Return the max-value entropy search acquisition function (adapted from :cite:`wang2017max`),
modified for objective minimisation. This function calculates the information gain (or change in
entropy) in the distribution over the objective minimum :math:`y^*`, if we were to evaluate the
objective at a given point.
:param model: The model of the objective function.
:param samples: Samples from the distribution over :math:`y^*`.
:return: The max-value entropy search acquisition function modified for objective
minimisation. This function will raise :exc:`ValueError` or
:exc:`~tf.errors.InvalidArgumentError` if used with a batch size greater than one.
"""
tf.debugging.assert_rank(samples, 2)
if len(samples) == 0:
raise ValueError("Gumbel samples must be populated.")
def acquisition(x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
fmean, fvar = model.predict(tf.squeeze(x, -2))
fsd = tf.math.sqrt(fvar)
fsd = tf.clip_by_value(
fsd, 1.0e-8, fmean.dtype.max
) # clip below to improve numerical stability
normal = tfp.distributions.Normal(tf.cast(0, fmean.dtype), tf.cast(1, fmean.dtype))
gamma = (tf.squeeze(samples) - fmean) / fsd
minus_cdf = 1 - normal.cdf(gamma)
minus_cdf = tf.clip_by_value(
minus_cdf, 1.0e-8, 1
) # clip below to improve numerical stability
f_acqu_x = -gamma * normal.prob(gamma) / (2 * minus_cdf) - tf.math.log(minus_cdf)
return tf.math.reduce_mean(f_acqu_x, axis=1, keepdims=True)
return acquisition
class NegativeLowerConfidenceBound(SingleModelAcquisitionBuilder):
"""
Builder for the negative of the lower confidence bound. The lower confidence bound is typically
minimised, so the negative is suitable for maximisation.
"""
def __init__(self, beta: float = 1.96):
"""
:param beta: Weighting given to the variance contribution to the lower confidence bound.
Must not be negative.
"""
self._beta = beta
def __repr__(self) -> str:
""""""
return f"NegativeLowerConfidenceBound({self._beta!r})"
def prepare_acquisition_function(
self, dataset: Dataset, model: ProbabilisticModel
) -> AcquisitionFunction:
"""
:param dataset: Unused.
:param model: The model over the specified ``dataset``.
:return: The negative lower confidence bound function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
:raise ValueError: If ``beta`` is negative.
"""
lcb = lower_confidence_bound(model, self._beta)
return lambda at: -lcb(at)
class NegativePredictiveMean(NegativeLowerConfidenceBound):
"""
    Builder for the negative of the predictive mean. The predictive mean is minimised when
    minimising the objective function, so the negative predictive mean is suitable for maximisation.
"""
def __init__(self):
super().__init__(beta=0.0)
def __repr__(self) -> str:
""""""
return "NegativePredictiveMean()"
def lower_confidence_bound(model: ProbabilisticModel, beta: float) -> AcquisitionFunction:
r"""
The lower confidence bound (LCB) acquisition function for single-objective global optimization.
.. math:: x^* \mapsto \mathbb{E} [f(x^*)|x, y] - \beta \sqrt{ \mathrm{Var}[f(x^*)|x, y] }
See :cite:`Srinivas:2010` for details.
:param model: The model of the objective function.
:param beta: The weight to give to the standard deviation contribution of the LCB. Must not be
negative.
:return: The lower confidence bound function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
:raise ValueError: If ``beta`` is negative.
"""
if beta < 0:
raise ValueError(
f"Standard deviation scaling parameter beta must not be negative, got {beta}"
)
def acquisition(x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
mean, variance = model.predict(tf.squeeze(x, -2))
return mean - beta * tf.sqrt(variance)
return acquisition
class ProbabilityOfFeasibility(SingleModelAcquisitionBuilder):
r"""
Builder for the :func:`probability_of_feasibility` acquisition function, defined in
:cite:`gardner14` as
.. math::
\int_{-\infty}^{\tau} p(c(\mathbf{x}) | \mathbf{x}, \mathcal{D}) \mathrm{d} c(\mathbf{x})
\qquad ,
where :math:`\tau` is a threshold. Values below the threshold are considered feasible by the
constraint function. See also :cite:`schonlau1998global` for details.
"""
def __init__(self, threshold: float | TensorType):
"""
:param threshold: The (scalar) probability of feasibility threshold.
:raise ValueError (or InvalidArgumentError): If ``threshold`` is not a scalar.
"""
tf.debugging.assert_scalar(threshold)
super().__init__()
self._threshold = threshold
def __repr__(self) -> str:
""""""
return f"ProbabilityOfFeasibility({self._threshold!r})"
@property
def threshold(self) -> float | TensorType:
"""The probability of feasibility threshold."""
return self._threshold
def prepare_acquisition_function(
self, dataset: Dataset, model: ProbabilisticModel
) -> AcquisitionFunction:
"""
:param dataset: Unused.
:param model: The model over the specified ``dataset``.
:return: The probability of feasibility function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
return probability_of_feasibility(model, self.threshold)
def probability_of_feasibility(
model: ProbabilisticModel, threshold: float | TensorType
) -> AcquisitionFunction:
r"""
The probability of feasibility acquisition function defined in :cite:`gardner14` as
.. math::
\int_{-\infty}^{\tau} p(c(\mathbf{x}) | \mathbf{x}, \mathcal{D}) \mathrm{d} c(\mathbf{x})
\qquad ,
where :math:`\tau` is a threshold. Values below the threshold are considered feasible by the
constraint function.
:param model: The model of the objective function.
:param threshold: The (scalar) probability of feasibility threshold.
:return: The probability of feasibility function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
:raise ValueError: If ``threshold`` is not a scalar.
"""
tf.debugging.assert_scalar(threshold)
def acquisition(x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
mean, var = model.predict(tf.squeeze(x, -2))
distr = tfp.distributions.Normal(mean, tf.sqrt(var))
return distr.cdf(tf.cast(threshold, x.dtype))
return acquisition
class ExpectedConstrainedImprovement(AcquisitionFunctionBuilder):
"""
Builder for the *expected constrained improvement* acquisition function defined in
:cite:`gardner14`. The acquisition function computes the expected improvement from the best
feasible point, where feasible points are those that (probably) satisfy some constraint. Where
there are no feasible points, this builder simply builds the constraint function.
"""
def __init__(
self,
objective_tag: str,
constraint_builder: AcquisitionFunctionBuilder,
min_feasibility_probability: float | TensorType = 0.5,
):
"""
:param objective_tag: The tag for the objective data and model.
:param constraint_builder: The builder for the constraint function.
:param min_feasibility_probability: The minimum probability of feasibility for a
"best point" to be considered feasible.
:raise ValueError (or InvalidArgumentError): If ``min_feasibility_probability`` is not a
scalar in the unit interval :math:`[0, 1]`.
"""
tf.debugging.assert_scalar(min_feasibility_probability)
if not 0 <= min_feasibility_probability <= 1:
raise ValueError(
f"Minimum feasibility probability must be between 0 and 1 inclusive,"
f" got {min_feasibility_probability}"
)
self._objective_tag = objective_tag
self._constraint_builder = constraint_builder
self._min_feasibility_probability = min_feasibility_probability
def __repr__(self) -> str:
""""""
return (
f"ExpectedConstrainedImprovement({self._objective_tag!r}, {self._constraint_builder!r},"
f" {self._min_feasibility_probability!r})"
)
def prepare_acquisition_function(
self, datasets: Mapping[str, Dataset], models: Mapping[str, ProbabilisticModel]
) -> AcquisitionFunction:
"""
:param datasets: The data from the observer.
:param models: The models over each dataset in ``datasets``.
:return: The expected constrained improvement acquisition function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
:raise KeyError: If `objective_tag` is not found in ``datasets`` and ``models``.
:raise ValueError: If the objective data is empty.
"""
objective_model = models[self._objective_tag]
objective_dataset = datasets[self._objective_tag]
if len(objective_dataset) == 0:
raise ValueError(
"Expected improvement is defined with respect to existing points in the objective"
" data, but the objective data is empty."
)
constraint_fn = self._constraint_builder.prepare_acquisition_function(datasets, models)
pof = constraint_fn(objective_dataset.query_points[:, None, ...])
is_feasible = pof >= self._min_feasibility_probability
if not tf.reduce_any(is_feasible):
return constraint_fn
mean, _ = objective_model.predict(objective_dataset.query_points)
eta = tf.reduce_min(tf.boolean_mask(mean, is_feasible), axis=0)
return lambda at: expected_improvement(objective_model, eta)(at) * constraint_fn(at)
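# Illustrative composition sketch (the tags "OBJECTIVE"/"CONSTRAINT" and the threshold are
# hypothetical; not part of the library source): pair an objective model with a constraint
# model via probability of feasibility.
#
#     pof = ProbabilityOfFeasibility(threshold=0.5).using("CONSTRAINT")
#     eci = ExpectedConstrainedImprovement("OBJECTIVE", pof)
#     acq = eci.prepare_acquisition_function(datasets, models)  # mappings keyed by the two tags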
class ExpectedHypervolumeImprovement(SingleModelAcquisitionBuilder):
"""
Builder for the expected hypervolume improvement acquisition function.
The implementation of the acquisition function largely
    follows :cite:`yang2019efficient`.
"""
def __repr__(self) -> str:
""""""
return "ExpectedHypervolumeImprovement()"
def prepare_acquisition_function(
self, dataset: Dataset, model: ProbabilisticModel
) -> AcquisitionFunction:
"""
:param dataset: The data from the observer. Must be populated.
:param model: The model over the specified ``dataset``.
:return: The expected hypervolume improvement acquisition function.
"""
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
mean, _ = model.predict(dataset.query_points)
_pf = Pareto(mean)
_reference_pt = get_reference_point(_pf.front)
return expected_hv_improvement(model, _pf, _reference_pt)
def expected_hv_improvement(
model: ProbabilisticModel,
pareto: Pareto,
reference_point: TensorType,
) -> AcquisitionFunction:
r"""
    Return the Expected Hypervolume Improvement (EHVI) acquisition function, calculated using
    Eq. 44 of :cite:`yang2019efficient`.
The expected hypervolume improvement calculation in the non-dominated region
can be decomposed into sub-calculations based on each partitioned cell.
For easier calculation, this sub-calculation can be reformulated as a combination
of two generalized expected improvements, corresponding to Psi (Eq. 44) and Nu (Eq. 45)
function calculations, respectively.
Note:
1. Since in Trieste we do not assume the use of a certain non-dominated region partition
algorithm, we do not assume the last dimension of the partitioned cell has only one
(lower) bound (i.e., minus infinity, which is used in the :cite:`yang2019efficient` paper).
    This is not as efficient as in the original paper, but is applicable to different non-dominated
    partition algorithms.
    2. As the Psi and nu functions in the original paper are defined for maximization problems,
    we invert our minimisation problem (to also be a maximisation), allowing use of the
original notation and equations.
:param model: The model of the objective function.
    :param pareto: Pareto class instance.
    :param reference_point: The reference point for calculating hypervolume.
:return: The expected_hv_improvement acquisition function modified for objective
minimisation. This function will raise :exc:`ValueError` or
:exc:`~tf.errors.InvalidArgumentError` if used with a batch size greater than one.
"""
def acquisition(x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
normal = tfp.distributions.Normal(
loc=tf.zeros(shape=1, dtype=x.dtype), scale=tf.ones(shape=1, dtype=x.dtype)
)
def Psi(a: TensorType, b: TensorType, mean: TensorType, std: TensorType) -> TensorType:
return std * normal.prob((b - mean) / std) + (mean - a) * (
1 - normal.cdf((b - mean) / std)
)
def nu(lb: TensorType, ub: TensorType, mean: TensorType, std: TensorType) -> TensorType:
return (ub - lb) * (1 - normal.cdf((ub - mean) / std))
def ehvi_based_on_partitioned_cell(
neg_pred_mean: TensorType, pred_std: TensorType
) -> TensorType:
r"""
Calculate the ehvi based on cell i.
"""
lb_points, ub_points = pareto.hypercell_bounds(
tf.constant([-inf] * neg_pred_mean.shape[-1], dtype=x.dtype), reference_point
)
neg_lb_points, neg_ub_points = -ub_points, -lb_points
neg_ub_points = tf.minimum(neg_ub_points, 1e10) # clip to improve numerical stability
psi_ub = Psi(
neg_lb_points, neg_ub_points, neg_pred_mean, pred_std
) # [..., num_cells, out_dim]
psi_lb = Psi(
neg_lb_points, neg_lb_points, neg_pred_mean, pred_std
) # [..., num_cells, out_dim]
psi_lb2ub = tf.maximum(psi_lb - psi_ub, 0.0) # [..., num_cells, out_dim]
nu_contrib = nu(neg_lb_points, neg_ub_points, neg_pred_mean, pred_std)
cross_index = tf.constant(
list(product(*[[0, 1]] * reference_point.shape[-1]))
) # [2^d, indices_at_dim]
stacked_factors = tf.concat(
[tf.expand_dims(psi_lb2ub, -2), tf.expand_dims(nu_contrib, -2)], axis=-2
) # Take the cross product of psi_diff and nu across all outcomes
# [..., num_cells, 2(operation_num, refer Eq. 45), num_obj]
factor_combinations = tf.linalg.diag_part(
tf.gather(stacked_factors, cross_index, axis=-2)
) # [..., num_cells, 2^d, 2(operation_num), num_obj]
return tf.reduce_sum(tf.reduce_prod(factor_combinations, axis=-1), axis=-1)
candidate_mean, candidate_var = model.predict(tf.squeeze(x, -2))
candidate_std = tf.sqrt(candidate_var)
neg_candidate_mean = -tf.expand_dims(candidate_mean, 1) # [..., 1, out_dim]
candidate_std = tf.expand_dims(candidate_std, 1) # [..., 1, out_dim]
ehvi_cells_based = ehvi_based_on_partitioned_cell(neg_candidate_mean, candidate_std)
return tf.reduce_sum(
ehvi_cells_based,
axis=-1,
keepdims=True,
)
return acquisition
class BatchMonteCarloExpectedImprovement(SingleModelAcquisitionBuilder):
"""
Expected improvement for batches of points (or :math:`q`-EI), approximated using Monte Carlo
estimation with the reparametrization trick. See :cite:`Ginsbourger2010` for details.
Improvement is measured with respect to the minimum predictive mean at observed query points.
This is calculated in :class:`BatchMonteCarloExpectedImprovement` by assuming observations
at new points are independent from those at known query points. This is faster, but is an
approximation for noisy observers.
"""
def __init__(self, sample_size: int, *, jitter: float = DEFAULTS.JITTER):
"""
:param sample_size: The number of samples for each batch of points.
:param jitter: The size of the jitter to use when stabilising the Cholesky decomposition of
the covariance matrix.
:raise ValueError (or InvalidArgumentError): If ``sample_size`` is not positive, or
``jitter`` is negative.
"""
tf.debugging.assert_positive(sample_size)
tf.debugging.assert_greater_equal(jitter, 0.0)
super().__init__()
self._sample_size = sample_size
self._jitter = jitter
def __repr__(self) -> str:
""""""
return f"BatchMonteCarloExpectedImprovement({self._sample_size!r}, jitter={self._jitter!r})"
def prepare_acquisition_function(
self, dataset: Dataset, model: ProbabilisticModel
) -> AcquisitionFunction:
"""
:param dataset: The data from the observer. Must be populated.
:param model: The model over the specified ``dataset``. Must have event shape [1].
:return: The batch *expected improvement* acquisition function.
:raise ValueError (or InvalidArgumentError): If ``dataset`` is not populated, or ``model``
does not have an event shape of [1].
"""
tf.debugging.assert_positive(len(dataset))
mean, _ = model.predict(dataset.query_points)
tf.debugging.assert_shapes(
[(mean, ["_", 1])], message="Expected model with event shape [1]."
)
eta = tf.reduce_min(mean, axis=0)
sampler = BatchReparametrizationSampler(self._sample_size, model)
def batch_ei(at: TensorType) -> TensorType:
samples = tf.squeeze(sampler.sample(at, jitter=self._jitter), axis=-1) # [..., S, B]
min_sample_per_batch = tf.reduce_min(samples, axis=-1) # [..., S]
batch_improvement = tf.maximum(eta - min_sample_per_batch, 0.0) # [..., S]
return tf.reduce_mean(batch_improvement, axis=-1, keepdims=True) # [..., 1]
return batch_ei
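# Illustrative usage sketch (sample size, batch size and dimensions are arbitrary; assumes a
# hypothetical `dataset` and `model`; not part of the library source):
#
#     qei = BatchMonteCarloExpectedImprovement(100).prepare_acquisition_function(dataset, model)
#     at = tf.random.uniform([50, 4, 2], dtype=tf.float64)  # 50 candidate batches of B=4, D=2
#     values = qei(at)                                      # shape [50, 1]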
class GreedyAcquisitionFunctionBuilder(ABC):
"""
A :class:`GreedyAcquisitionFunctionBuilder` builds an acquisition function
suitable for greedily building batches for batch Bayesian
Optimization. :class:`GreedyAcquisitionFunctionBuilder` differs
from :class:`AcquisitionFunctionBuilder` by requiring that a set
    of pending points is passed to the builder. Note that the builder is typically
    called `B` times during each Bayesian optimization step when building batches
    of size `B`.
"""
@abstractmethod
def prepare_acquisition_function(
self,
datasets: Mapping[str, Dataset],
models: Mapping[str, ProbabilisticModel],
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
"""
:param datasets: The data from the observer.
:param models: The models over each dataset in ``datasets``.
:param pending_points: Points already chosen to be in the current batch (of shape [M,D]),
where M is the number of pending points and D is the search space dimension.
:return: An acquisition function.
"""
class SingleModelGreedyAcquisitionBuilder(ABC):
"""
Convenience acquisition function builder for a greedy acquisition function (or component of a
composite greedy acquisition function) that requires only one model, dataset pair.
"""
def using(self, tag: str) -> GreedyAcquisitionFunctionBuilder:
"""
:param tag: The tag for the model, dataset pair to use to build this acquisition function.
:return: An acquisition function builder that selects the model and dataset specified by
``tag``, as defined in :meth:`prepare_acquisition_function`.
"""
single_builder = self
class _Anon(GreedyAcquisitionFunctionBuilder):
def prepare_acquisition_function(
self,
datasets: Mapping[str, Dataset],
models: Mapping[str, ProbabilisticModel],
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
return single_builder.prepare_acquisition_function(
datasets[tag], models[tag], pending_points=pending_points
)
def __repr__(self) -> str:
return f"{single_builder!r} using tag {tag!r}"
return _Anon()
@abstractmethod
def prepare_acquisition_function(
self,
dataset: Dataset,
model: ProbabilisticModel,
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
"""
:param dataset: The data to use to build the acquisition function.
:param model: The model over the specified ``dataset``.
:param pending_points: Points already chosen to be in the current batch (of shape [M,D]),
where M is the number of pending points and D is the search space dimension.
:return: An acquisition function.
"""
class LocalPenalizationAcquisitionFunction(SingleModelGreedyAcquisitionBuilder):
r"""
    Builder of an acquisition function for greedily collecting batches by local
    penalization. Its :meth:`prepare_acquisition_function` takes in a set of pending
    points and returns a base acquisition function penalized around those points.
An estimate of the objective function's Lipschitz constant is used to control the size
of penalization.
Local penalization allows us to perform batch Bayesian optimization with a standard (non-batch)
    acquisition function. All that we require is that the acquisition function takes strictly
    positive values. By iteratively building a batch of points through sequentially maximizing
    this acquisition function, down-weighted around locations close to the already
chosen (pending) points, local penalization provides diverse batches of candidate points.
Local penalization is applied to the acquisition function multiplicatively. However, to
    improve numerical stability, we perform additive penalization in log space.
The Lipschitz constant and additional penalization parameters are estimated once
when first preparing the acquisition function with no pending points. These estimates
are reused for all subsequent function calls.
"""
def __init__(
self,
search_space: SearchSpace,
num_samples: int = 500,
        penalizer: Optional[Callable[..., PenalizationFunction]] = None,
base_acquisition_function_builder: Optional[
Union[ExpectedImprovement, MinValueEntropySearch]
] = None,
):
"""
:param search_space: The global search space over which the optimisation is defined.
:param num_samples: Size of the random sample over which the Lipschitz constant
is estimated. We recommend scaling this with search space dimension.
:param penalizer: The chosen penalization method (defaults to soft penalization).
:param base_acquisition_function_builder: Base acquisition function to be
penalized (defaults to expected improvement). Local penalization only supports
strictly positive acquisition functions.
"""
self._search_space = search_space
if num_samples <= 0:
raise ValueError(f"num_samples must be positive, got {num_samples}")
self._num_samples = num_samples
self._lipschitz_penalizer = soft_local_penalizer if penalizer is None else penalizer
if base_acquisition_function_builder is None:
self._base_builder: SingleModelAcquisitionBuilder = ExpectedImprovement()
elif isinstance(
base_acquisition_function_builder, (ExpectedImprovement, MinValueEntropySearch)
):
self._base_builder = base_acquisition_function_builder
else:
raise ValueError(
f"""
Local penalization can only be applied to strictly positive acquisition functions,
we got {base_acquisition_function_builder}.
"""
)
self._lipschitz_constant = None
self._eta = None
self._base_acquisition_function: Optional[AcquisitionFunction] = None
def prepare_acquisition_function(
self,
dataset: Dataset,
model: ProbabilisticModel,
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
"""
:param dataset: The data from the observer.
:param model: The model over the specified ``dataset``.
:param pending_points: The points we penalize with respect to.
:return: The (log) expected improvement penalized with respect to the pending points.
:raise ValueError: if the first call does not have pending_points=None.
"""
if len(dataset.query_points) == 0:
raise ValueError("Dataset must be populated.")
if (
pending_points is None
): # compute penalization params and base acquisition once per optimization step
samples = self._search_space.sample(num_samples=self._num_samples)
samples = tf.concat([dataset.query_points, samples], 0)
def get_lipschitz_estimate(
sampled_points,
) -> tf.Tensor: # use max norm of posterior mean gradients
with tf.GradientTape() as g:
g.watch(sampled_points)
mean, _ = model.predict(sampled_points)
grads = g.gradient(mean, sampled_points)
grads_norm = tf.norm(grads, axis=1)
max_grads_norm = tf.reduce_max(grads_norm)
eta = tf.reduce_min(mean, axis=0)
return max_grads_norm, eta
lipschitz_constant, eta = get_lipschitz_estimate(samples)
if (
lipschitz_constant < 1e-5
): # threshold to improve numerical stability for 'flat' models
lipschitz_constant = 10
self._lipschitz_constant = lipschitz_constant
self._eta = eta
if isinstance(self._base_builder, ExpectedImprovement): # reuse eta estimate
self._base_acquisition_function = expected_improvement(model, self._eta)
else:
self._base_acquisition_function = self._base_builder.prepare_acquisition_function(
dataset, model
)
if (self._lipschitz_constant is None) or (self._base_acquisition_function is None):
raise ValueError("Local penalization must be first called with no pending_points.")
if pending_points is None:
return self._base_acquisition_function # no penalization required if no pending_points.
tf.debugging.assert_shapes(
[(pending_points, ["N", len(self._search_space.upper)])],
message="pending_points must be of shape [N,D]",
)
penalization = self._lipschitz_penalizer(
model, pending_points, self._lipschitz_constant, self._eta
)
def penalized_acquisition(x: TensorType) -> TensorType:
log_acq = tf.math.log(self._base_acquisition_function(x)) + tf.math.log(penalization(x))
return tf.math.exp(log_acq)
return penalized_acquisition
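# Illustrative greedy-batch sketch (the batch size and the `maximize` helper are hypothetical;
# not part of the library source). The first call must pass pending_points=None, which estimates
# the Lipschitz constant and base acquisition; later calls penalize around the chosen points.
#
#     builder = LocalPenalizationAcquisitionFunction(search_space)
#     batch, pending = [], None
#     for _ in range(4):
#         acq = builder.prepare_acquisition_function(dataset, model, pending_points=pending)
#         point = maximize(acq, search_space)  # hypothetical single-point optimizer, shape [1, D]
#         batch.append(point)
#         pending = tf.concat(batch, axis=0)   # shape [N, D], as asserted above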
PenalizationFunction = Callable[[TensorType], TensorType]
"""
A :const:`PenalizationFunction` maps a query point (of dimension `D`) to a single
value that describes how heavily it should be penalized (a positive quantity).
As penalization is applied multiplicatively to acquisition functions, small
penalization outputs correspond to a stronger penalization effect. Thus, with
leading dimensions, a :const:`PenalizationFunction` takes input
shape `[..., 1, D]` and returns shape `[..., 1]`.
"""
def soft_local_penalizer(
model: ProbabilisticModel,
pending_points: TensorType,
lipschitz_constant: TensorType,
eta: TensorType,
) -> PenalizationFunction:
r"""
Return the soft local penalization function used for single-objective greedy batch Bayesian
optimization in :cite:`Gonzalez:2016`.
Soft penalization returns the probability that a candidate point does not belong
in the exclusion zones of the pending points. For model posterior mean :math:`\mu`, model
posterior variance :math:`\sigma^2`, current "best" function value :math:`\eta`, and an
    estimated Lipschitz constant :math:`L`, the penalization from a pending point :math:`x'`
on a candidate point :math:`x` is given by
.. math:: \phi(x, x') = \frac{1}{2}\textrm{erfc}(-z)
where :math:`z = \frac{1}{\sqrt{2\sigma^2(x')}}(L||x'-x|| + \eta - \mu(x'))`.
    The penalization from a set of pending points is just the product of the individual penalizations.
See :cite:`Gonzalez:2016` for a full derivation.
:param model: The model over the specified ``dataset``.
:param pending_points: The points we penalize with respect to.
:param lipschitz_constant: The estimated Lipschitz constant of the objective function.
    :param eta: The estimated global minimum.
:return: The local penalization function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
mean_pending, variance_pending = model.predict(pending_points)
radius = tf.transpose((mean_pending - eta) / lipschitz_constant)
scale = tf.transpose(tf.sqrt(variance_pending) / lipschitz_constant)
def penalization_function(x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This penalization function cannot be calculated for batches of points.",
)
pairwise_distances = tf.norm(
tf.expand_dims(x, 1) - tf.expand_dims(pending_points, 0), axis=-1
)
standardised_distances = (pairwise_distances - radius) / scale
normal = tfp.distributions.Normal(tf.cast(0, x.dtype), tf.cast(1, x.dtype))
penalization = normal.cdf(standardised_distances)
return tf.reduce_prod(penalization, axis=-1)
return penalization_function
def hard_local_penalizer(
model: ProbabilisticModel,
pending_points: TensorType,
lipschitz_constant: TensorType,
eta: TensorType,
) -> PenalizationFunction:
r"""
Return the hard local penalization function used for single-objective greedy batch Bayesian
optimization in :cite:`Alvi:2019`.
    Hard penalization is a stronger penalizer than soft penalization and is sometimes more effective.
See :cite:`Alvi:2019` for details. Our implementation follows theirs, with the penalization from
a set of pending points being the product of the individual penalizations.
:param model: The model over the specified ``dataset``.
:param pending_points: The points we penalize with respect to.
:param lipschitz_constant: The estimated Lipschitz constant of the objective function.
    :param eta: The estimated global minimum.
:return: The local penalization function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
mean_pending, variance_pending = model.predict(pending_points)
radius = tf.transpose((mean_pending - eta) / lipschitz_constant)
scale = tf.transpose(tf.sqrt(variance_pending) / lipschitz_constant)
def penalization_function(x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This penalization function cannot be calculated for batches of points.",
)
pairwise_distances = tf.norm(
tf.expand_dims(x, 1) - tf.expand_dims(pending_points, 0), axis=-1
)
p = -5 # following experiments of :cite:`Alvi:2019`.
penalization = ((pairwise_distances / (radius + scale)) ** p + 1) ** (1 / p)
return tf.reduce_prod(penalization, axis=-1)
return penalization_function
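# Illustrative note (not part of the library source): either penalizer can be plugged into
# LocalPenalizationAcquisitionFunction via its `penalizer` argument (soft penalization is the
# default), e.g.
#
#     builder = LocalPenalizationAcquisitionFunction(search_space, penalizer=hard_local_penalizer)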
```
#### File: trieste/utils/multi_objectives.py
```python
from __future__ import annotations
import math
from abc import ABC, abstractmethod
from collections.abc import Callable
from functools import partial
import tensorflow as tf
from ..type import TensorType
class MultiObjectiveTestProblem(ABC):
"""
Base class for synthetic multi-objective test functions.
    Prepares the synthetic function and generates Pareto optimal points.
    The latter can be used as a reference when measuring the performance of certain
multi-objective optimization algorithms.
"""
@property
@abstractmethod
def dim(self) -> int:
"""
The input dimensionality of the test function
"""
@property
@abstractmethod
def bounds(self) -> list[list[float]]:
"""
The input space bounds of the test function
"""
@abstractmethod
def objective(self) -> Callable[[TensorType], TensorType]:
"""
Get the synthetic test function.
:return: A callable synthetic function
"""
@abstractmethod
def gen_pareto_optimal_points(self, n: int, seed=None) -> tf.Tensor:
"""
Generate `n` Pareto optimal points.
:param n: The number of pareto optimal points to be generated.
        :param seed: An integer used to create a random seed for the distributions
            that are used to generate the Pareto optimal points.
:return: The Pareto optimal points
"""
class VLMOP2(MultiObjectiveTestProblem):
"""
The VLMOP2 problem, typically evaluated over :math:`[-2, 2]^2`.
    The ideal Pareto-optimal inputs lie on the line x1 = x2, with x1 ranging from
    -1/sqrt(2) to 1/sqrt(2).
    See :cite:`van1999multiobjective` and :cite:`fonseca1995multiobjective`
    (the latter for a discussion of the Pareto front property) for details.
"""
bounds = [[-2.0] * 2, [2.0] * 2]
dim = 2
def objective(self):
return vlmop2
def gen_pareto_optimal_points(self, n: int, seed=None):
tf.debugging.assert_greater(n, 0)
_x = tf.linspace([-1 / tf.sqrt(2.0)], [1 / tf.sqrt(2.0)], n)
return vlmop2(tf.concat([_x, _x], axis=1))
def vlmop2(x: TensorType) -> TensorType:
"""
The VLMOP2 synthetic function.
:param x: The points at which to evaluate the function, with shape [..., 2].
:return: The function values at ``x``, with shape [..., 2].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
    tf.debugging.assert_shapes([(x, (..., 2))], message="vlmop2 only allows 2d input")
transl = 1 / math.sqrt(2.0)
y1 = 1 - tf.exp(-1 * tf.reduce_sum((x - transl) ** 2, axis=-1))
y2 = 1 - tf.exp(-1 * tf.reduce_sum((x + transl) ** 2, axis=-1))
return tf.stack([y1, y2], axis=-1)
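# Illustrative check (added for clarity, not part of the library source): the Pareto-optimal
# input x1 = x2 = 0 gives equal objective values of 1 - exp(-1) in both outputs.
#
#     vlmop2(tf.constant([[0.0, 0.0]]))  # approximately [[0.632, 0.632]]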
class DTLZ(MultiObjectiveTestProblem):
"""
DTLZ series multi-objective test problem.
    See :cite:`deb2002scalable` for details.
"""
def __init__(self, input_dim: int, num_objective: int):
tf.debugging.assert_greater(input_dim, 0)
tf.debugging.assert_greater(num_objective, 0)
tf.debugging.assert_greater(
input_dim,
num_objective,
f"input dimension {input_dim}"
f" must be greater than function objective numbers {num_objective}",
)
self._dim = input_dim
self.M = num_objective
self.k = self._dim - self.M + 1
self._bounds = [[0] * input_dim, [1] * input_dim]
@property
def dim(self):
return self._dim
@property
def bounds(self):
return self._bounds
class DTLZ1(DTLZ):
"""
    The DTLZ1 problem, whose ideal Pareto front lies on a linear hyperplane.
    See :cite:`deb2002scalable` for details.
"""
def objective(self):
return partial(dtlz1, m=self.M, k=self.k, d=self.dim)
def gen_pareto_optimal_points(self, n: int, seed=None):
tf.debugging.assert_greater_equal(self.M, 2)
rnd = tf.random.uniform([n, self.M - 1], minval=0, maxval=1, seed=seed)
strnd = tf.sort(rnd, axis=-1)
strnd = tf.concat([tf.zeros([n, 1]), strnd, tf.ones([n, 1])], axis=-1)
return 0.5 * (strnd[..., 1:] - strnd[..., :-1])
def dtlz1(x: TensorType, m: int, k: int, d: int) -> TensorType:
"""
The DTLZ1 synthetic function.
:param x: The points at which to evaluate the function, with shape [..., d].
:param m: The objective numbers.
:param k: The input dimensionality for g.
:param d: The dimensionality of the synthetic function.
:return: The function values at ``x``, with shape [..., m].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes(
[(x, (..., d))],
message=f"input x dim: {x.shape[-1]} is not align with pre-specified dim: {d}",
)
tf.debugging.assert_greater(m, 0, message=f"positive objective numbers expected but found {m}")
def g(xM):
return 100 * (
k
+ tf.reduce_sum(
(xM - 0.5) ** 2 - tf.cos(20 * math.pi * (xM - 0.5)), axis=-1, keepdims=True
)
)
ta = tf.TensorArray(x.dtype, size=m)
for i in range(m):
xM = x[..., m - 1 :]
y = 1 + g(xM)
y *= 1 / 2 * tf.reduce_prod(x[..., : m - 1 - i], axis=-1, keepdims=True)
if i > 0:
y *= 1 - x[..., m - i - 1, tf.newaxis]
ta = ta.write(i, y)
return tf.squeeze(tf.concat(tf.split(ta.stack(), m, axis=0), axis=-1), axis=0)
class DTLZ2(DTLZ):
"""
    The DTLZ2 problem, whose ideal Pareto front lies on (part of) a unit hypersphere.
    See :cite:`deb2002scalable` for details.
"""
def objective(self):
return partial(dtlz2, m=self.M, d=self.dim)
def gen_pareto_optimal_points(self, n: int, seed=None):
tf.debugging.assert_greater_equal(self.M, 2)
rnd = tf.random.normal([n, self.M], seed=seed)
samples = tf.abs(rnd / tf.norm(rnd, axis=-1, keepdims=True))
return samples
def dtlz2(x: TensorType, m: int, d: int) -> TensorType:
"""
The DTLZ2 synthetic function.
:param x: The points at which to evaluate the function, with shape [..., d].
:param m: The objective numbers.
:param d: The dimensionality of the synthetic function.
:return: The function values at ``x``, with shape [..., m].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes(
[(x, (..., d))],
message=f"input x dim: {x.shape[-1]} is not align with pre-specified dim: {d}",
)
tf.debugging.assert_greater(m, 0, message=f"positive objective numbers expected but found {m}")
def g(xM):
z = (xM - 0.5) ** 2
return tf.reduce_sum(z, axis=-1, keepdims=True)
ta = tf.TensorArray(x.dtype, size=m)
for i in tf.range(m):
y = 1 + g(x[..., m - 1 :])
for j in tf.range(m - 1 - i):
y *= tf.cos(math.pi / 2 * x[..., j, tf.newaxis])
if i > 0:
y *= tf.sin(math.pi / 2 * x[..., m - 1 - i, tf.newaxis])
ta = ta.write(i, y)
return tf.squeeze(tf.concat(tf.split(ta.stack(), m, axis=0), axis=-1), axis=0)
``` |
{
"source": "johnandrea/compare-trees",
"score": 3
} |
#### File: johnandrea/compare-trees/diff.py
```python
import sys
import os
import re
import difflib
import readgedcom
show_debug = False
# how much change for a structure/branch difference
branch_name_threshold = 0.88
branch_date_threshold = 750 #days
# how much change to report a person details difference
report_name_threshold = 0.92
report_date_threshold = 400 #days
def check_config( start_ok ):
ok = start_ok
def check_val( start_ok, wanted_type, x_name, x ):
ok = start_ok
if isinstance( x, wanted_type ):
if x < 0:
print( x_name, 'cannot be less than zero', file=sys.stderr )
ok = False
else:
print( x_name, 'must be a', wanted_type, file=sys.stderr )
ok = False
return ok
ok = check_val( ok, float, 'branch_name_threshold', branch_name_threshold )
ok = check_val( ok, float, 'report_name_threshold', report_name_threshold )
ok = check_val( ok, int, 'branch_date_threshold', branch_date_threshold )
ok = check_val( ok, int, 'report_date_threshold', report_date_threshold )
return ok
def days_between( d1, d2 ):
# expecting two dates as strings yyyymmdd
# return the approximate number of days between
# this is only for approximate comparisons, not date manipulations
# Assuming dates are C.E. Gregorian
def extract_parts( date ):
# yyyymmdd
# 01234567
return [ date[0:4], date[4:6], date[6:8] ]
def total_days( date ):
# [0]=year [1]=month [2]=day
return int(date[0]) * 365 + int(date[1]) * 30 + int(date[2])
parts = []
for date in [d1, d2]:
parts.append(extract_parts( date ))
return abs( total_days(parts[0]) - total_days(parts[1]) )
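# Illustrative check (added for clarity, not part of the original script): with the 365/30
# approximation above, days_between('20200101', '20201231') evaluates to 360 rather than the
# calendar-exact 365, which is fine because the result is only compared against coarse thresholds.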
def input_to_id( s ):
# might have been given as just a number
# convert to id style 'i58'
result = ''
if s:
result = s.replace('@','').replace('i','').replace('I','')
return 'i' + result
def get_name( t, p ):
result = trees[t][ikey][p]['name'][0]['value']
if readgedcom.UNKNOWN_NAME in result:
result = 'unknown'
else:
# remove any suffix after the end slash
result = re.sub( r'/[^/]*$', '', result ).replace('/','').strip()
return result
def get_best_date( t, p, date_name ):
best = None
if date_name in trees[t][ikey][p]['best-events']:
best = trees[t][ikey][p]['best-events'][date_name]
return best
def get_full_date( t, p, date_name ):
# return the yyyymmdd value for the person's dated event, or None
result = None
best = get_best_date( t, p, date_name )
if best is not None:
if trees[t][ikey][p][date_name][best]['date']['is_known']:
            # get the minimum date if it's a range; if not a range, min and max are equal
result = trees[t][ikey][p][date_name][best]['date']['min']['value']
return result
def get_a_date( t, p, date_name ):
# return the date as given in the input file (or an empty string),
# which might contain before/after/etc or be a range
# not appropriate for comparison
result = ''
best = get_best_date( t, p, date_name )
if best is not None:
if trees[t][ikey][p][date_name][best]['date']['is_known']:
result = trees[t][ikey][p][date_name][best]['date']['in']
return result
def get_dates( t, p ):
# for display purposes, not for comparison
return [ get_a_date(t, p, 'birt'), get_a_date(t, p, 'deat') ]
def show_indi( t, p ):
# show an person's important details
dates = get_dates(t,p)
return get_name(t,p) + ' (' + dates[0] + '-' + dates[1] + ')'
def get_other_partner( t, p, f ):
# in any given family, return the partner of the given person
result = None
for partner in ['wife','husb']:
if partner in trees[t][fkey][f]:
partner_id = trees[t][fkey][f][partner][0]
if partner_id != p:
result = partner_id
return result
def list_all_partners( t, p ):
# for the given person in all the families in which they are a partner
# return [family-id] = the-other-partner-id
# though partnerid might be unknown i.e. None
result = dict()
if 'fams' in trees[t][ikey][p]:
for fam in trees[t][ikey][p]['fams']:
result[fam] = get_other_partner( t, p, fam )
return result
def show_person_header( t, p ):
print( '' )
print( show_indi( t, p ) )
def get_name_match_value( n1, n2 ):
return difflib.SequenceMatcher(None, n1, n2).ratio()
def compare_a_person( p1, p2 ):
def compare_person_dates( p1, title, date1, date2 ):
if date1:
if date2:
if days_between( date1, date2 ) >= report_date_threshold:
show_person_header(1,p1)
print( title, 'difference', date1, ' vs ', date2 )
else:
show_person_header(1,p1)
print( title, 'not in second tree' )
else:
if date2:
show_person_header(1,p1)
print( title, 'not in first tree' )
name1 = get_name( 1, p1 )
name2 = get_name( 2, p2 )
if get_name_match_value(name1,name2) < report_name_threshold:
show_person_header(1,p1)
print( 'Name difference:', name1, ' vs ', name2 )
for d in ['birt','deat']:
# first check whats given in the input file
d1 = get_a_date( 1, p1, d )
d2 = get_a_date( 2, p2, d )
if d1 != d2:
# then look closer at the parsed values
d1 = get_full_date( 1, p1, d )
d2 = get_full_date( 2, p2, d )
compare_person_dates(p1, d, d1, d2 )
def person_match_value( t1, p1, t2, p2 ):
# should also check dates
name1 = get_name( t1, p1 )
name2 = get_name( t2, p2 )
value = get_name_match_value( name1, name2 )
if show_debug:
print( 'debug:sameness ', value, name1, ' vs ', name2 )
return value
def is_same_person( t1, p1, t2, p2 ):
return person_match_value( t1, p1, t2, p2 ) >= branch_name_threshold
def follow_parents( p1, p2 ):
if show_debug:
print( 'debug:follow parents of', get_name(1,p1) )
def get_famc( t, p ):
result = None
if 'famc' in trees[t][ikey][p]:
result = trees[t][ikey][p]['famc'][0]
return result
def get_partner( t, f, partner ):
result = None
if partner in trees[t][fkey][f]:
result = trees[t][fkey][f][partner][0]
return result
fam1 = get_famc( 1, p1 )
fam2 = get_famc( 2, p2 )
if fam1:
if fam2:
            # this is going to be trouble for same-sex couples
for partner in ['wife','husb']:
partner1 = get_partner( 1, fam1, partner )
partner2 = get_partner( 2, fam2, partner )
if partner1:
if partner2:
if is_same_person( 1, partner1, 2, partner2 ):
# now what, check details
if show_debug:
print( 'debug:matched parent', partner, get_name(1,partner1) )
follow_person( partner1, partner2 )
else:
show_person_header( 1, p1 )
print( 'Parent(', partner, ') different from first to second' )
else:
show_person_header( 1, p1 )
print( 'Parent(', partner, ') removed in second' )
else:
if partner2:
show_person_header( 1, p1 )
print( 'Parent(', partner, ') added in second' )
else:
show_person_header( 1, p1 )
print( 'Parent(s) removed in second' )
else:
if fam2:
show_person_header( 1, p1 )
print( 'Parent(s) added in second' )
def max_in_matrix( m ):
# should be a pythonic way to do this
x = -1
for i in m:
for j in m[i]:
x = max( x, m[i][j] )
return x
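# Illustrative note (added for clarity, not part of the original script): one "pythonic" form of
# the nested loop above is
#     max((v for row in m.values() for v in row.values()), default=-1)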
def follow_children( p1, partner1, f1, f2 ):
global visited_fam
if f1 in visited_fam:
return
visited_fam.append(f1)
def match_children( p1, partner_name, children1, children2 ):
        # make a matrix of matches to try getting the closest pairings
match_values = dict()
for c1 in children1:
match_values[c1] = dict()
for c2 in children2:
match_values[c1][c2] = person_match_value( 1, c1, 2, c2 )
matched1 = dict()
matched2 = dict()
best = max_in_matrix( match_values )
while best >= branch_name_threshold:
# where did it occur
# there might be a pythonic way to do this
for c1 in children1:
if c1 not in matched1:
for c2 in children2:
if c2 not in matched2:
if match_values[c1][c2] >= best:
match_values[c1][c2] = -1
matched1[c1] = c2
matched2[c2] = c1
best = max_in_matrix( match_values )
for c1 in children1:
if c1 in matched1:
follow_person( c1, matched1[c1] )
else:
show_person_header( 1, p1 )
print( 'with', partner_name, 'didnt match child', get_name(1,c1), 'first to second' )
partner_name = get_name(1,partner1)
if show_debug:
print( 'debug:follow children', get_name(1,p1),' and ', partner_name )
children1 = trees[1][fkey][f1]['chil']
children2 = trees[2][fkey][f2]['chil']
if children1:
if children2:
match_children( p1, partner_name, children1, children2 )
else:
show_person_header( 1, p1 )
print( 'All children with',partner_name,'removed in second' )
else:
if children2:
show_person_header( 1, p1 )
print( 'All children with',partner_name,'added in second' )
def follow_partners( p1, p2 ):
if show_debug:
print( 'debug:in follow partners', get_name(1,p1) )
def match_partners( p1, partners1, partners2 ):
# make a matrix of tree1 people to tree2 people
# in order to find the best matches
match_values = dict()
for fam1 in partners1:
partner1 = partners1[fam1]
match_values[fam1] = dict()
for fam2 in partners2:
partner2 = partners2[fam2]
match_values[fam1][fam2] = person_match_value( 1, partner1, 2, partner2 )
# find the best match for each,
# so long as it isn't a better match for someone else
matched1 = dict()
matched2 = dict()
best = max_in_matrix( match_values )
while best >= branch_name_threshold:
# where did it occur
# there might be a pythonic way to do this
for fam1 in partners1:
if fam1 not in matched1:
for fam2 in partners2:
if fam2 not in matched2:
if match_values[fam1][fam2] >= best:
match_values[fam1][fam2] = -1
matched1[fam1] = fam2
matched2[fam2] = fam1
best = max_in_matrix( match_values )
for fam1 in partners1:
partner1 = partners1[fam1]
if fam1 in matched1:
fam2 = matched1[fam1]
follow_person( partner1, partners2[fam2] )
# now that families are known, do children within the family
follow_children( p1, partner1, fam1, fam2 )
else:
show_person_header( 1, p1 )
print( 'Didnt match partner', get_name(1,partner1), 'first to second' )
# check all the partners that person 1 might share with person 2
partners1 = list_all_partners( 1, p1 )
partners2 = list_all_partners( 2, p2 )
if partners1:
if partners2:
match_partners( p1, partners1, partners2 )
else:
show_person_header( 1, p1 )
print( 'Partner(s) removed in second' )
else:
if partners2:
show_person_header( 1, p1 )
print( 'Partner(s) added in second' )
def follow_person( p1, p2 ):
global visited
if p1 in visited:
return
visited.append( p1 )
if show_debug:
print( 'debug:following person', show_indi( 1, p1 ) )
compare_a_person( p1, p2 )
follow_parents( p1, p2 )
follow_partners( p1, p2 )
def show_usage():
print( '' )
print( 'Arguments: tree1file person1xref tree2file person2xref' )
print( 'Output report to stdout' )
if len(sys.argv) != 5:
show_usage()
sys.exit(1)
ikey = readgedcom.PARSED_INDI
fkey = readgedcom.PARSED_FAM
# the tree data will be globals
trees = []
starts = []
file_names = []
# add an initial zero'th element so that the rest of the program uses 1 and 2
trees.append(0)
starts.append(0)
file_names.append(0)
# params 1,2 then 3,4
for i in [1,3]:
file_names.append( sys.argv[i] )
starts.append( input_to_id( sys.argv[i+1] ) )
ok = True
for i in [1,2]:
if os.path.isfile( file_names[i] ):
trees.append( readgedcom.read_file( file_names[i] ) )
else:
print( 'Data file does not exist:', file_names[i], file=sys.stderr )
ok = False
if file_names[1] == file_names[2]:
print( 'Identical files', file=sys.stderr )
ok = False
if not ok:
sys.exit(1)
print( 'Starting points' )
for i in [1,2]:
if isinstance( trees[i], dict ) and ikey in trees[i]:
if starts[i] in trees[i][ikey]:
print( i, '=', show_indi( i, starts[i] ) )
else:
print( 'Given key', starts[i], 'not in tree', i, file_names[i], file=sys.stderr )
ok = False
else:
print( 'Tree', i, 'not fully parsed data', file=sys.stderr )
ok = False
ok = check_config( ok )
if not ok:
sys.exit(1)
# match the trees
# prevent double visitations of the same person
visited = []
visited_fam = []
follow_person( starts[1], starts[2] )
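# Illustrative invocation (not part of the original script); the script and
# file names below are placeholders, and the exact xref form accepted for the
# person arguments depends on input_to_id:
# python compare-trees.py tree1.ged I1 tree2.ged I1 > report.txt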
``` |
{
"source": "johnandrea/draw-dna-matches",
"score": 3
} |
#### File: johnandrea/draw-dna-matches/draw-dna-matches.py
```python
import sys
import re
import readgedcom
# Output DNA matches in a tree view in Graphviz DOT format.
# Given a GEDCOM with people having an event of type defined below
# and an optional note containing the cM value.
# The person for whom the matches are compared should have a note or value
# beginning with 'Me,'
#
# Might not handle the situation where the closest shared family between
# two people doesn't exist in the data.
#
# This code is released under the MIT License: https://opensource.org/licenses/MIT
# Copyright (c) 2022 <NAME>
# v2.1
# This is the name of the event of value
EVENT_NAME = 'dnamatch'
# within the event, 'note' or 'value'
EVENT_ITEM = 'note'
# lines to ancestors
line_colors = ['orchid', 'tomato', 'lightseagreen']
line_colors.extend( ['gold', 'royalblue', 'coral'] )
line_colors.extend( ['yellowgreen', 'chocolate', 'salmon'] )
# box containing a match person
box_color = 'lightgreen'
# multiple marriage outline
multi_marr_color = 'orange'
def remove_numeric_comma( s ):
""" Given 1,234 1,234,567 and similar, remove the comma.
Does the same for all such patterns in the string.
Assuming anglo style numbers rather than euro style of 1.234,99 """
comma_pattern = re.compile( r'\d,\d' )
result = s
# global replace count not working on this expression,
# use a loop as a workaround
while comma_pattern.search( result ):
result = re.sub( r'^(.*\d),(\d.*)$', r'\1\2', result )
return result
def extract_dna_cm( note ):
""" Return the numeric cM value from the note which is either just
a number or a number followed by "cM" or "cm" """
a_number = re.compile( r'[0-9]' )
all_numbers = re.compile( r'^[0-9]+$' )
cm_count = re.compile( r'([0-9]+)\s*cm\W' )
s = note.strip()
result = ''
if a_number.search( s ):
s = remove_numeric_comma( s )
if all_numbers.search( s ):
result = s
else:
# append a space for a simpler regexp of the word ending
match = cm_count.search( s.lower() + ' ' )
if match:
result = match.group(1)
return result
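# Illustrative expected values for the helpers above (not from the original code):
# remove_numeric_comma( '1,234 cM' ) -> '1234 cM'
# extract_dna_cm( '1,234 cM shared' ) -> '1234'
# extract_dna_cm( '1234' ) -> '1234'
# extract_dna_cm( 'no number here' ) -> ''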
def get_name( individual ):
""" Return the name for the individual in the passed data section. """
name = individual['name'][0]['value']
# the standard unknown code is not good for svg output
if '?' in name and '[' in name and ']' in name:
name = 'unknown'
return name.replace( '/', '' ).replace('"','&quot;').replace("'","’")
def check_for_dna_event( individual ):
""" Does the person in data section contain the
desired dna event. Return a list with found or not. """
result = [ False, '' ]
if 'even' in individual:
for event in individual['even']:
if event['type'].lower() == EVENT_NAME.lower():
if EVENT_ITEM in event:
result[0] = True
result[1] = event[EVENT_ITEM].strip()
break
return result
def get_ancestor_families( indi, individuals, families ):
""" See the nested function. """
def all_ancestor_families_of( gen, start, indi ):
""" Return a dict of all the ancestors of the given individual.
Format is collection of [fam_id] = number of generations from start """
result = dict()
if 'famc' in individuals[indi]:
fam = individuals[indi]['famc'][0]
result[fam] = gen
for parent in ['wife','husb']:
if parent in families[fam]:
parent_id = families[fam][parent][0]
# prevent a loop
if parent_id != start:
result.update( all_ancestor_families_of( gen+1, start, parent_id ) )
return result
return all_ancestor_families_of( 1, indi, indi )
def find_ancestor_path( start, end_fam, individuals, families, path ):
""" Return a list of the family ids from the start person to the end family.
The list is in order of generations, i.e. the end family will be at the end
of the list.
Assuming that there are no loops in the families.
Returned is a list of [ found-flag, [ famid, famid, ..., end-famid ] ] """
if 'famc' in individuals[start]:
fam = individuals[start]['famc'][0]
for parent in ['wife','husb']:
if parent in families[fam]:
parent_id = families[fam][parent][0]
path.append( fam )
if fam == end_fam:
return [ True, path ]
# Try the next generation
found = find_ancestor_path( parent_id, end_fam, individuals, families, path )
if found[0]:
return found
# This family doesn't lead to that ancestor
path.pop()
return [ False, path ]
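# Illustrative return values (not from the original code), with family ids in
# whatever xref form readgedcom produces: a successful search returns
# [ True, [fam, fam, ..., end_fam] ], ordered from the start person's parents
# up to the shared family; an unsuccessful search returns [ False, path ]
# with the passed-in path restored by the pop() calls above.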
def does_fam_have_match( matches, family ):
# Does the family contain a person which is a match
result = False
for parent in ['husb','wife']:
if parent in family:
if family[parent][0] in matches:
result = True
break
return result
def make_dot_id( xref ):
return xref.lower().replace('@','').replace('i','').replace('f','').replace('.','')
def make_fam_dot_id( xref ):
return 'f' + make_dot_id( str(xref) )
def make_indi_dot_id( xref ):
return 'i' + make_dot_id( str(xref) )
def begin_dot():
""" Start of the DOT output file """
print( 'digraph family {' )
print( 'rankdir=LR;')
def end_dot():
""" End of the DOT output file """
print( '}' )
def dot_labels( individuals, families, matches, my_ancestors, fam_with_matches, people_in_paths ):
""" Output a label for each person who appears in the graphs. """
def output_label( dot_id, s, extra ):
print( dot_id, '[label=' + s.replace("'",'.') + extra + '];' )
def output_plain_family_label( fam, multiplied_marr ):
text = ''
sep = ''
for parent in ['wife','husb']:
if parent in families[fam]:
parent_id = families[fam][parent][0]
text += sep + get_name( individuals[parent_id] )
sep = '\\n+ '
options = ''
if multiplied_marr:
options = ', color=' + multi_marr_color
output_label( make_fam_dot_id(fam), '"'+ text +'"', options )
def output_match_family_label( fam, multiplied_marr ):
text = '<\n<table cellpadding="2" cellborder="0" cellspacing="0" border="0">'
sep = ''
for parent in ['wife','husb']:
if parent in families[fam]:
parent_id = families[fam][parent][0]
# add padding
name = ' ' + sep + get_name( individuals[parent_id] ) + ' '
tr = '\n<tr><td'
if parent_id in matches:
td = tr + ' bgcolor="' + box_color + '">'
text += td + name + '</td></tr>'
text += td + matches[parent_id]['note'] + '</td></tr>'
else:
text += tr + ' border="1">' + name + '</td></tr>'
sep = '+ '
text += '\n</table>\n>'
options = ', shape="none"'
if multiplied_marr:
options += ', color=' + multi_marr_color
output_label( make_fam_dot_id(fam), text, options )
def output_indi_label( indi ):
# note the escaped newlines
text = get_name( individuals[indi] ) + '\\n' + matches[indi]['note']
options = ', shape="record", style=filled, color=' + box_color
output_label( make_indi_dot_id(indi), '"'+ text +'"', options )
def find_families_of_multiple_marriages():
results = []
person_in_fams = dict()
for indi in people_in_paths:
person_in_fams[indi] = []
key = 'fams'
if key in individuals[indi]:
for fam in individuals[indi][key]:
if fam in fam_with_matches:
person_in_fams[indi].append( fam )
for indi in person_in_fams:
if len( person_in_fams[indi] ) > 1:
for fam in person_in_fams[indi]:
if fam not in results:
results.append( fam )
return results
multiple_marriages = find_families_of_multiple_marriages()
already_used = []
# names of the dna people
for indi in matches:
if indi not in already_used:
already_used.append( indi )
if indi not in people_in_paths:
output_indi_label( indi )
for fam in matches[indi]['path']:
if fam not in already_used:
already_used.append( fam )
multiple_marr = fam in multiple_marriages
if fam_with_matches[fam]:
output_match_family_label( fam, multiple_marr )
else:
output_plain_family_label( fam, multiple_marr )
# and for me
for shared_fam in my_ancestors:
for fam in my_ancestors[shared_fam]:
if fam not in already_used:
already_used.append( fam )
multiple_marr = fam in multiple_marriages
if fam_with_matches[fam]:
output_match_family_label( fam, multiple_marr )
else:
output_plain_family_label( fam, multiple_marr )
def dot_connect( me, matches, my_paths, people_in_paths ):
""" Output the links from one family to the next. """
# if this many or more incoming edges, set a color on the edges
n_to_color = 3
# keep the routes from one family to the next
# each one only once
routes = dict()
for indi in matches:
# again, don't draw a person if they are in someone else's path
if matches[indi]['path'] and indi not in people_in_paths:
previous = make_indi_dot_id( indi )
for ancestor in matches[indi]['path']:
target = make_fam_dot_id( ancestor )
route = (previous, target)
routes[route] = ancestor
previous = target
for shared_fam in my_paths:
# ok to route from me to first fam because it will be saved only once
previous = make_indi_dot_id( me )
for ancestor in my_paths[shared_fam]:
target = make_fam_dot_id( ancestor )
route = (previous, target)
routes[route] = ancestor
previous = target
# count the number of connections into each family
# so that the multiply connected can be coloured
counts = dict()
for route in routes:
ancestor = routes[route]
if ancestor not in counts:
counts[ancestor] = 0
counts[ancestor] += 1
# assign the same color to the connection lines for the families
# with multiple connections
# and
# loop through the colors to give a different color as each family matches
ancestor_color = dict()
n_colors = len( line_colors )
c = n_colors + 1
for ancestor in counts:
if counts[ancestor] >= n_to_color:
c = c + 1
if c >= n_colors:
c = 0
ancestor_color[ancestor] = line_colors[c]
# output the routes
for route in routes:
ancestor = routes[route]
extra = ''
if ancestor in ancestor_color:
extra = ' [color=' + ancestor_color[ancestor] + ']'
print( route[0] + ' -> ' + route[1] + extra + ';' )
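# Illustrative output of the loop above (not captured from a real run): each
# route prints as a Graphviz edge such as
# i14 -> f3 [color=orchid];
# where "i..." ids are people, "f..." ids are families, and the color attribute
# only appears for families with n_to_color (3) or more incoming connections.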
# these are keys into the parsed sections of the returned data structure
i_key = readgedcom.PARSED_INDI
f_key = readgedcom.PARSED_FAM
opts = dict()
opts['display-gedcom-warnings'] = False
data = readgedcom.read_file( sys.argv[1], opts )
# people who have the dna event
# matched[indi] = { note: the event text, shared: closest shared ancestor }
matched = dict()
# the id of the base dna match person
me = None
for indi in data[i_key]:
result = check_for_dna_event( data[i_key][indi] )
if result[0]:
matched[indi] = dict()
if result[1].lower().startswith( 'me,' ):
matched[indi]['note'] = 'me'
me = indi
else:
matched[indi]['note'] = extract_dna_cm( result[1] ) + ' cM'
if not me:
print( 'Didnt find base person', file=sys.stderr )
sys.exit()
ancestors = dict()
for indi in matched:
ancestors[indi] = get_ancestor_families( indi, data[i_key], data[f_key] )
# how many generations do I have
max_gen = max( ancestors[me].values() )
# For each of the dna matched people,
# find the youngest ancestor which matches with me.
#
# Both people exist in the same tree so there should be a shared ancestor
# unless the dna match is noise. In this case the shared ancestor will be None.
#
# Finding shared ancestor person rather than shared ancestor family
# in order to skip half-cousins. Then in the display show only the people
# rather than families.
for indi in matched:
if indi == me:
continue
found_match = None
found_gen = max_gen + 1
for ancestor_fam in ancestors[indi]:
if ancestor_fam in ancestors[me]:
my_generation = ancestors[me][ancestor_fam]
if my_generation < found_gen:
found_gen = my_generation
found_match = ancestor_fam
# the closest shared family
matched[indi]['shared'] = found_match
# Find the path from each match to the closest shared family.
# Assuming only one such path for each matched person
# Adding the list in matched[indi]['path']
for indi in matched:
matched[indi]['path'] = []
if indi == me:
continue
top_ancestor_fam = matched[indi]['shared']
if top_ancestor_fam:
family_path = find_ancestor_path( indi, top_ancestor_fam, data[i_key], data[f_key], [] )
if family_path[0]:
matched[indi]['path'] = family_path[1]
# Find the paths from me to each of those shared ancestors
my_paths = dict()
shared_ancestors = dict()
for indi in matched:
if indi == me:
continue
shared_ancestors[matched[indi]['shared']] = True
for ancestor_fam in shared_ancestors:
family_path = find_ancestor_path( me, ancestor_fam, data[i_key], data[f_key], [] )
if family_path[0]:
my_paths[ancestor_fam] = family_path[1]
# Find families along those paths which contain a matched person
# value for each family is true or false.
families_with_matches = dict()
for indi in matched:
if indi == me:
continue
for fam in matched[indi]['path']:
if fam not in families_with_matches:
families_with_matches[fam] = does_fam_have_match( matched, data[f_key][fam] )
for ancestor_fam in my_paths:
for fam in my_paths[ancestor_fam]:
if fam not in families_with_matches:
families_with_matches[fam] = does_fam_have_match( matched, data[f_key][fam] )
# Find the people who are in the families in the paths
all_people_in_paths = []
for fam in families_with_matches:
for parent in ['husb','wife']:
if parent in data[f_key][fam]:
all_people_in_paths.append( data[f_key][fam][parent][0] )
# Output to stdout
begin_dot()
dot_labels( data[i_key], data[f_key], matched, my_paths, families_with_matches, all_people_in_paths )
dot_connect( me, matched, my_paths, all_people_in_paths )
end_dot()
``` |
{
"source": "johnandrea/ns-covid-counts",
"score": 3
} |
#### File: common/tests/test-non-numbers.py
```python
import sys
from word2number import w2n
def fix_number( given ):
result = None
if given == 'null':
result = given
elif given == '':
result = 'null'
else:
try:
result = w2n.word_to_num( given )
except ValueError:
print( 'non-numeric found:', given, file=sys.stderr )
return result
def run_test( s ):
x = fix_number( s )
print( s, '=>', x )
run_test( '5' )
run_test( 'five' )
run_test( 'zero' )
run_test( '' )
run_test( 'null' )
run_test( 'nothing' )
run_test( 'twenty-two' )
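# Expected behaviour (illustrative, based on the word2number library, not
# output captured from a run): '5' and 'five' give 5, 'zero' gives 0,
# 'twenty-two' gives 22, '' maps to 'null', 'null' is passed through
# unchanged, and 'nothing' triggers the ValueError branch, printing a
# warning to stderr and returning None.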
``` |
{
"source": "johnandrea/readgedcom",
"score": 3
} |
#### File: playground/dates/t2.py
```python
import datetime
import arrow
# datetime fails on MS-Windows 7 - the native library can't handle the unix epoch and earlier
def fill( n, s ):
return s + ' ' * max( 0, n-len(s) )
def run_test( y, m, d ):
out = fill( 10, str(y) + '-' + str(m) + '-' + str(d) )
out += ' | '
try:
x = datetime.date( y, m, d )
out += fill( 10, str( x ) )
except:
out += fill( 10, 'err' )
out += ' | '
try:
x = arrow.get( y, m, d )
out += fill( 10, str( x.date() ) )
except:
out += fill( 10, 'err' )
print( out )
out = fill( 10, 'yyy-mm-dd' )
out += ' | '
out += fill( 10, 'datetime' )
out += ' | '
out += fill( 10, 'arrow' )
print( out )
run_test( 2020, 7, 9 )
run_test( 1970, 1, 2 )
run_test( 1970, 1, 1 )
run_test( 1969, 12, 31 )
run_test( 1960, 11, 30 )
run_test( 1909, 1, 2 )
run_test( 1808, 3, 4 )
run_test( 1707, 5, 6 )
run_test( 1606, 8, 9 )
```
#### File: playground/dates/t3.py
```python
import sys
import dateparser
import arrow
def fill( n, s ):
return s + ' ' * max( 0, n-len(s) )
def run_test( s ):
out = fill( 15, s )
out += ' | '
try:
x = dateparser.parse( s )
out += fill( 10, str( x.date() ) )
except:
#e = sys.exc_info()[0]
e = 'err'
out += fill( 10, str(e) )
out += ' | '
try:
x = arrow.get( s )
out += fill( 10, str( x.date() ) )
except:
#e = sys.exc_info()[0]
e = 'err'
out += fill( 10, str(e) )
print( out )
out = fill( 15, 'date' )
out += ' | '
out += fill( 10, 'dateparser' )
out += ' | '
out += fill( 10, 'arrow' )
print( out )
run_test( '10 mar 1982' )
run_test( '1982 mar 10' )
run_test( '1970 jan 1' )
run_test( '1969 dec 31' )
run_test( '3 oct 1860' )
run_test( '3-oct-1860' )
run_test( '1860-oct-3' )
run_test( '1860-10-3' )
run_test( '3-10-1860' )
run_test( 'wrong-1907' )
run_test( '1907-wrong' )
run_test( '1907 wrong' )
run_test( 'wrong 1907' )
``` |
{
"source": "JohnAndrewTaylor/Algorithms",
"score": 4
} |
#### File: Algorithms/Graphs/utilData.py
```python
import heapq
class Stack:
def __init__(self):
self.stack = []
def push(self,a):
self.stack.append(a)
def pop(self):
return self.stack.pop()
def isEmpty(self):
return not self.stack
class Queue:
def __init__(self):
self.queue = []
def push(self,a):
self.queue.insert(0,a)
def pop(self):
return self.queue.pop()
def isEmpty(self):
return not self.queue
class PriorityQueue:
def __init__(self):
self.pq = []
def push(self, a, order):
heapq.heappush(self.pq, (order, a))
def pop(self):
_, a = heapq.heappop(self.pq)
return a
def isEmpty(self):
return not self.pq
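# Minimal illustrative usage (not part of the original module); the item names
# and priority values below are made up for demonstration.
if __name__ == '__main__':
    s = Stack()
    q = Queue()
    pq = PriorityQueue()
    for item in ['a', 'b', 'c']:
        s.push(item)
        q.push(item)
    pq.push('low priority', 5)
    pq.push('high priority', 1)
    # Stack is LIFO, Queue is FIFO, PriorityQueue pops the lowest order first
    print(s.pop(), q.pop(), pq.pop())  # expected: c a high priority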
```
#### File: Algorithms/Graphs/utilGraph.py
```python
class Graph:
def __init__(self, vertices, edges):
self.vertexList = []
self.edgeList = []
for v in vertices:
self.vertexList.append(Vertex(v))
for e in edges:
self.edgeList.append(Edge(e))
# Initialize vertex neighbors dictionary
# Uses vertex label to return a list of tuples
# Tuple: neighboring vertex, weight of connecting edge
self.neighborsDict = {}
for v in self.vertexList:
neighbors = []
for e in self.edgeList:
if (e.getStart().getLabel() == v.getLabel()):
neighbors.append((e.getEnd(), e.getWeight()))
if (e.getEnd().getLabel() == v.getLabel()):
neighbors.append((e.getStart(), e.getWeight()))
self.neighborsDict[v.getLabel()] = neighbors
def __str__(self):
return "Vertices: %s ||| Edges: %s" % (self.vertexList, self.edgeList)
def __repr__(self):
return self.__str__()
def getEdges(self):
return self.edgeList
def getVertices(self):
return self.vertexList
def getNeighbors(self, v):
return self.neighborsDict.get(v.getLabel())
class Edge(Graph):
def __init__(self, tupleEdge):
self.startVertex = Vertex(tupleEdge[0])
self.endVertex = Vertex(tupleEdge[1])
if len(tupleEdge) == 3:
self.weight = tupleEdge[2]
else:
self.weight = 1
def __str__(self):
return "{(%s,%s) weight:%s}" % (self.startVertex.getLabel(),self.endVertex.getLabel(),self.getWeight())
def __repr__(self):
return self.__str__()
def __eq__(self, e):
return (self.startVertex.getLabel() == e.getStart().getLabel()) & (self.endVertex.getLabel() == e.getEnd().getLabel())
def getTuple(self):
return (self.startVertex, self.endVertex)
def getStart(self):
return self.startVertex
def getEnd(self):
return self.endVertex
def getWeight(self):
return self.weight
class Vertex(Graph):
def __init__(self, label):
self.vertexLabel = label
def __str__(self):
return "%s" % (self.vertexLabel)
def __repr__(self):
return self.__str__()
def __eq__(self, v):
return self.vertexLabel == v.getLabel()
def getLabel(self):
return self.vertexLabel
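# Minimal illustrative usage (not part of the original module): builds a small
# weighted triangle; the vertex labels and weights are made up for demonstration.
if __name__ == '__main__':
    g = Graph(['a', 'b', 'c'], [('a', 'b', 2), ('b', 'c', 1), ('a', 'c', 4)])
    print(g)
    # list the neighbors of 'a' along with the connecting edge weights
    for neighbor, weight in g.getNeighbors(Vertex('a')):
        print(neighbor.getLabel(), weight)  # expected: b 2, then c 4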
``` |
{
"source": "JohnAndrewTaylor/flowsa",
"score": 3
} |
#### File: flowsa/data_source_scripts/StatCan_IWS_MI.py
```python
import pandas as pd
import io
import zipfile
import pycountry
from flowsa.common import *
def sc_call(url, sc_response, args):
"""
Convert response for calling url to pandas dataframe, begin parsing df into FBA format
:param url: string, url
:param sc_response: df, response from url call
:param args: dictionary, arguments specified when running
flowbyactivity.py ('year' and 'source')
:return: pandas dataframe of original source data
"""
# Convert response to dataframe
# read all files in the stat canada zip
with zipfile.ZipFile(io.BytesIO(sc_response.content), "r") as f:
# read in file names
for name in f.namelist():
# if filename does not contain "MetaData", then create dataframe
if "MetaData" not in name:
data = f.open(name)
df = pd.read_csv(data, header=0)
return df
def sc_parse(dataframe_list, args):
"""
Functions to being parsing and formatting data into flowbyactivity format
:param dataframe_list: list of dataframes to concat and format
:param args: arguments as specified in flowbyactivity.py ('year' and 'source')
:return: dataframe parsed and partially formatted to flowbyactivity specifications
"""
# concat dataframes
df = pd.concat(dataframe_list, sort=False)
# drop columns
df = df.drop(columns=['COORDINATE', 'DECIMALS', 'DGUID', 'SYMBOL', 'TERMINATED', 'UOM_ID', 'SCALAR_ID', 'VECTOR'])
# rename columns
df = df.rename(columns={'GEO': 'Location',
'North American Industry Classification System (NAICS)': 'Description',
'REF_DATE': 'Year',
'STATUS': 'Spread',
'VALUE': "FlowAmount",
'Water use parameter': 'FlowName'})
# extract NAICS as activity column. rename activity based on flowname
df['Activity'] = df['Description'].str.extract(r'.*\[(.*)\].*')
df.loc[df['Description'] == 'Total, all industries', 'Activity'] = '31-33' # todo: change these activity names
df.loc[df['Description'] == 'Other manufacturing industries', 'Activity'] = 'Other'
df['FlowName'] = df['FlowName'].str.strip()
df.loc[df['FlowName'] == 'Water intake', 'ActivityConsumedBy'] = df['Activity']
df.loc[df['FlowName'].isin(['Water discharge', "Water recirculation"]), 'ActivityProducedBy'] = df['Activity']
# create "unit" column
df["Unit"] = "million " + df["UOM"] + "/year"
# drop columns used to create unit and activity columns
df = df.drop(columns=['SCALAR_FACTOR', 'UOM', 'Activity'])
# Modify the assigned RSD letter values to numeric value
df.loc[df['Spread'] == 'A', 'Spread'] = 2.5 # given range: 0.01 - 4.99%
df.loc[df['Spread'] == 'B', 'Spread'] = 7.5 # given range: 5 - 9.99%
df.loc[df['Spread'] == 'C', 'Spread'] = 12.5 # given range: 10 - 14.99%
df.loc[df['Spread'] == 'D', 'Spread'] = 20 # given range: 15 - 24.99%
df.loc[df['Spread'] == 'E', 'Spread'] = 37.5 # given range:25 - 49.99%
df.loc[df['Spread'] == 'F', 'Spread'] = 75 # given range: > 49.99%
df.loc[df['Spread'] == 'x', 'Spread'] = withdrawn_keyword
# hard code data
df['Class'] = 'Water'
df['SourceName'] = 'StatCan_IWS_MI'
# temp hardcode canada iso code
df['Location'] = call_country_code('Canada')
df['Year'] = df['Year'].astype(str)
df['LocationSystem'] = "ISO"
df["MeasureofSpread"] = 'RSD'
df["DataReliability"] = '3'
df["DataCollection"] = '4'
# subset based on year
df = df[df['Year'] == args['year']]
return df
def convert_statcan_data_to_US_water_use(df, attr):
"""
Use Canadian and US GDP data to convert 3-digit NAICS Canadian water use
into an estimate of US water use, based on:
- Canadian GDP (converted to USD)
- US GDP (gross output)
:param df: df of Canadian water use by 3-digit NAICS
:param attr: dictionary of method attributes, including 'allocation_source_year'
:return: df of estimated US water use
"""
import flowsa
from flowsa.values_from_literature import get_Canadian_to_USD_exchange_rate
from flowsa.flowbyfunctions import assign_fips_location_system, aggregator
from flowsa.common import fba_default_grouping_fields
from flowsa.dataclean import harmonize_units
from flowsa.common import US_FIPS, load_bea_crosswalk
# load Canadian GDP data
gdp = flowsa.getFlowByActivity(datasource='StatCan_GDP', year=attr['allocation_source_year'], flowclass='Money')
gdp = harmonize_units(gdp)
# drop 31-33
gdp = gdp[gdp['ActivityProducedBy'] != '31-33']
gdp = gdp.rename(columns={"FlowAmount": "CanDollar"})
# merge df
df_m = pd.merge(df, gdp[['CanDollar', 'ActivityProducedBy']], how='left', left_on='ActivityConsumedBy',
right_on='ActivityProducedBy')
df_m['CanDollar'] = df_m['CanDollar'].fillna(0)
df_m = df_m.drop(columns=["ActivityProducedBy_y"])
df_m = df_m.rename(columns={"ActivityProducedBy_x": "ActivityProducedBy"})
df_m = df_m[df_m['CanDollar'] != 0]
exchange_rate = get_Canadian_to_USD_exchange_rate(str(attr['allocation_source_year']))
exchange_rate = float(exchange_rate)
# convert to mgal/USD
df_m.loc[:, 'FlowAmount'] = df_m['FlowAmount'] / (df_m['CanDollar'] / exchange_rate)
df_m.loc[:, 'Unit'] = 'Mgal/USD'
df_m = df_m.drop(columns=["CanDollar"])
# convert Location to US
df_m.loc[:, 'Location'] = US_FIPS
df_m = assign_fips_location_system(df_m, str(attr['allocation_source_year']))
# load US GDP data
us_gdp_load = flowsa.getFlowByActivity(datasource='BEA_GDP_GrossOutput', year=attr['allocation_source_year'],
flowclass='Money')
us_gdp_load = harmonize_units(us_gdp_load)
# load bea crosswalk
cw_load = load_bea_crosswalk()
cw = cw_load[['BEA_2012_Detail_Code', 'NAICS_2012_Code']].drop_duplicates()
cw = cw[cw['NAICS_2012_Code'].apply(lambda x: len(str(x)) == 3)].drop_duplicates().reset_index(drop=True)
# merge
us_gdp = pd.merge(us_gdp_load, cw, how='left', left_on='ActivityProducedBy', right_on='BEA_2012_Detail_Code')
us_gdp = us_gdp.drop(columns=['ActivityProducedBy', 'BEA_2012_Detail_Code'])
# rename columns
us_gdp = us_gdp.rename(columns={'NAICS_2012_Code': 'ActivityProducedBy'})
# agg by naics
us_gdp = aggregator(us_gdp, fba_default_grouping_fields)
us_gdp = us_gdp.rename(columns={'FlowAmount': 'us_gdp'})
# determine annual us water use
df_m2 = pd.merge(df_m, us_gdp[['ActivityProducedBy', 'us_gdp']], how='left', left_on='ActivityConsumedBy',
right_on='ActivityProducedBy')
df_m2.loc[:, 'FlowAmount'] = df_m2['FlowAmount'] * (df_m2['us_gdp'])
df_m2.loc[:, 'Unit'] = 'Mgal'
df_m2 = df_m2.rename(columns={'ActivityProducedBy_x': 'ActivityProducedBy'})
df_m2 = df_m2.drop(columns=['ActivityProducedBy_y', 'us_gdp'])
return df_m2
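# Illustrative unit arithmetic for the function above (not from the original
# code): after unit harmonization, dividing the Canadian water flow by
# (CanDollar / exchange_rate), i.e. Canadian gross output expressed in USD,
# gives Mgal/USD; multiplying by the matching US gross output in USD then
# yields the estimated annual US water use in Mgal.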
# def disaggregate_statcan_to_naics_6(df):
# """
#
# :param df:
# :return:
# """
#
# return df
```
#### File: flowsa/flowsa/flowbyfunctions.py
```python
from flowsa.common import *
from flowsa.common import fbs_activity_fields
from flowsa.dataclean import clean_df, replace_strings_with_NoneType, replace_NoneType_with_empty_cells
def create_geoscale_list(df, geoscale, year='2015'):
"""
Create a list of FIPS associated with given geoscale
:param df: FlowBySector or FlowByActivity df
:param geoscale: 'national', 'state', or 'county'
:param year: string year used to select the FIPS codes
:return: list of relevant FIPS
"""
# filter by geoscale depends on Location System
fips = []
if geoscale == "national":
fips.append(US_FIPS)
elif df['LocationSystem'].str.contains('FIPS').any():
# all_FIPS = read_stored_FIPS()
if geoscale == "state":
state_FIPS = get_state_FIPS(year)
fips = list(state_FIPS['FIPS'])
elif geoscale == "county":
county_FIPS = get_county_FIPS(year)
fips = list(county_FIPS['FIPS'])
return fips
def filter_by_geoscale(df, geoscale):
"""
Filter flowbyactivity by FIPS at the given scale
:param df: Either flowbyactivity or flowbysector
:param geoscale: string, either 'national', 'state', or 'county'
:return: filtered flowbyactivity or flowbysector
"""
fips = create_geoscale_list(df, geoscale)
df = df[df['Location'].isin(fips)].reset_index(drop=True)
if len(df) == 0:
log.error("No flows found in the " + " flow dataset at the " + geoscale + " scale")
else:
return df
def agg_by_geoscale(df, from_scale, to_scale, groupbycols):
"""
:param df: flowbyactivity or flowbysector df
:param from_scale:
:param to_scale:
:param groupbycols: flowbyactivity or flowbysector default groupby columns
:return:
"""
# use from scale to filter by these values
df = filter_by_geoscale(df, from_scale).reset_index(drop=True)
df = update_geoscale(df, to_scale)
fba_agg = aggregator(df, groupbycols)
return fba_agg
def weighted_average(df, data_col, weight_col, by_col):
"""
Generates a weighted average result based on passed columns
Parameters
----------
df : DataFrame
Dataframe prior to aggregating from which a weighted average is calculated
data_col : str
Name of column to be averaged.
weight_col : str
Name of column to serve as the weighting.
by_col : list
List of columns on which the dataframe is aggregated.
Returns
-------
result : series
Series reflecting the weighted average values for the data_col,
at length consistent with the aggregated dataframe, to be reapplied
to the data_col in the aggregated dataframe.
"""
df = df.assign(_data_times_weight=df[data_col] * df[weight_col])
df = df.assign(_weight_where_notnull=df[weight_col] * pd.notnull(df[data_col]))
g = df.groupby(by_col)
result = g['_data_times_weight'].sum() / g['_weight_where_notnull'].sum()
del df['_data_times_weight'], df['_weight_where_notnull']
return result
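# Illustrative example (not from the original code): for one group with
# FlowAmount (the weight column) = [1, 3] and Spread (the data column) = [10, 30],
# the weighted average is (10*1 + 30*3) / (1 + 3) = 25.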
def aggregator(df, groupbycols):
"""
Aggregates flowbyactivity or flowbysector df by given groupbycols
:param df: Either flowbyactivity or flowbysector
:param groupbycols: Either flowbyactivity or flowbysector columns
:return:
"""
# reset index
df = df.reset_index(drop=True)
# tmp replace null values with empty cells
df = replace_NoneType_with_empty_cells(df)
# drop columns with flowamount = 0
df = df[df['FlowAmount'] != 0]
# list of column headers, that if exist in df, should be aggregated using the weighted avg fxn
possible_column_headers = ('Spread', 'Min', 'Max', 'DataReliability', 'TemporalCorrelation',
'GeographicalCorrelation', 'TechnologicalCorrelation',
'DataCollection')
# list of column headers that do exist in the df being aggregated
column_headers = [e for e in possible_column_headers if e in df.columns.values.tolist()]
df_dfg = df.groupby(groupbycols).agg({'FlowAmount': ['sum']})
# run through other columns creating weighted average
for e in column_headers:
df_dfg[e] = weighted_average(df, e, 'FlowAmount', groupbycols)
df_dfg = df_dfg.reset_index()
df_dfg.columns = df_dfg.columns.droplevel(level=1)
# if datatypes are strings, ensure that Null values remain NoneType
df_dfg = replace_strings_with_NoneType(df_dfg)
return df_dfg
def sector_ratios(df, sectorcolumn):
"""
Determine ratios of the less aggregated sectors within a more aggregated sector
:param df: A df with sector columns
:param sectorcolumn: 'SectorConsumedBy' or 'SectorProducedBy'
:return:
"""
# drop any null rows (can occur when activities are ranges)
df = df[~df[sectorcolumn].isnull()]
# find the longest length sector
length = max(df[sectorcolumn].apply(lambda x: len(str(x))).unique())
# for loop in reverse order longest length naics minus 1 to 2
# appends missing naics levels to df
sector_ratios = []
for i in range(length, 3, -1):
# subset df to sectors with length = i and length = i + 1
df_subset = df.loc[df[sectorcolumn].apply(lambda x: len(x) == i)]
# create column for sector grouping
df_subset = df_subset.assign(Sector_group=df_subset[sectorcolumn].apply(lambda x: x[0:i-1]))
# subset df to create denominator
df_denom = df_subset[['FlowAmount', 'Location', 'Sector_group']]
df_denom = df_denom.groupby(['Location', 'Sector_group'], as_index=False)[["FlowAmount"]].agg("sum")
df_denom = df_denom.rename(columns={"FlowAmount": "Denominator"})
# merge the denominator column with fba_w_sector df
ratio_df = df_subset.merge(df_denom, how='left')
# calculate ratio
ratio_df.loc[:, 'FlowAmountRatio'] = ratio_df['FlowAmount'] / ratio_df['Denominator']
ratio_df = ratio_df.drop(columns=['Denominator', 'Sector_group']).reset_index()
sector_ratios.append(ratio_df)
# concat list of dataframes (info on each page)
df_w_ratios = pd.concat(sector_ratios, sort=True).reset_index(drop=True)
return df_w_ratios
def sector_aggregation(df_load, group_cols):
"""
Function that checks if a sector length exists, and if not, sums the less aggregated sector
:param df_load: Either a flowbyactivity df with sectors or a flowbysector df
:param group_cols: columns by which to aggregate
:return:
"""
# determine if activities are sector-like, if aggregating a df with a 'SourceName'
sector_like_activities = False
if 'SourceName' in df_load.columns:
# load source catalog
cat = load_source_catalog()
# for s in pd.unique(flowbyactivity_df['SourceName']):
s = pd.unique(df_load['SourceName'])[0]
# load catalog info for source
src_info = cat[s]
sector_like_activities = src_info['sector-like_activities']
# ensure None values are not strings
df = replace_NoneType_with_empty_cells(df_load)
# if activities are sector-like, drop from df and group_cols, add back in later as copies of sector columns
# columns to keep
if sector_like_activities:
group_cols = [e for e in group_cols if e not in ('ActivityProducedBy', 'ActivityConsumedBy')]
# subset df
df_cols = [e for e in df.columns if e not in ('ActivityProducedBy', 'ActivityConsumedBy')]
df = df[df_cols]
# find the longest length sector
length = df[[fbs_activity_fields[0], fbs_activity_fields[1]]].apply(
lambda x: x.str.len()).max().max()
length = int(length)
# for loop in reverse order longest length naics minus 1 to 2
# appends missing naics levels to df
for i in range(length - 1, 1, -1):
# subset df to sectors with length = i and length = i + 1
df_subset = df.loc[df[fbs_activity_fields[0]].apply(lambda x: i + 1 >= len(x) >= i) |
df[fbs_activity_fields[1]].apply(lambda x: i + 1 >= len(x) >= i)]
# create a list of i digit sectors in df subset
sector_subset = df_subset[
['Location', fbs_activity_fields[0], fbs_activity_fields[1]]].drop_duplicates().reset_index(
drop=True)
df_sectors = sector_subset.copy()
df_sectors.loc[:, 'SectorProducedBy'] = df_sectors['SectorProducedBy'].apply(lambda x: x[0:i])
df_sectors.loc[:, 'SectorConsumedBy'] = df_sectors['SectorConsumedBy'].apply(lambda x: x[0:i])
sector_list = df_sectors.drop_duplicates().values.tolist()
# create a list of sectors that are exactly i digits long
# where either sector column is i digits in length
df_existing_1 = sector_subset.loc[(sector_subset['SectorProducedBy'].apply(lambda x: len(x) == i)) |
(sector_subset['SectorConsumedBy'].apply(lambda x: len(x) == i))]
# where both sector columns are i digits in length
df_existing_2 = sector_subset.loc[(sector_subset['SectorProducedBy'].apply(lambda x: len(x) == i)) &
(sector_subset['SectorConsumedBy'].apply(lambda x: len(x) == i))]
# concat existing dfs
df_existing = pd.concat([df_existing_1, df_existing_2], sort=False)
existing_sectors = df_existing.drop_duplicates().dropna().values.tolist()
# list of sectors of length i that are not in sector list
missing_sectors = [e for e in sector_list if e not in existing_sectors]
if len(missing_sectors) != 0:
# new df of sectors that start with missing sectors. drop last digit of the sector and sum flows
# set conditions
agg_sectors_list = []
for q, r, s in missing_sectors:
c1 = df_subset['Location'] == q
c2 = df_subset[fbs_activity_fields[0]].apply(lambda x: x[0:i] == r) #.str.startswith(y)
c3 = df_subset[fbs_activity_fields[1]].apply(lambda x: x[0:i] == s) #.str.startswith(z)
# subset data
agg_sectors_list.append(df_subset.loc[c1 & c2 & c3])
agg_sectors = pd.concat(agg_sectors_list, sort=False)
agg_sectors = agg_sectors.loc[
(agg_sectors[fbs_activity_fields[0]].apply(lambda x: len(x) > i)) |
(agg_sectors[fbs_activity_fields[1]].apply(lambda x: len(x) > i))]
agg_sectors.loc[:, fbs_activity_fields[0]] = agg_sectors[fbs_activity_fields[0]].apply(
lambda x: x[0:i])
agg_sectors.loc[:, fbs_activity_fields[1]] = agg_sectors[fbs_activity_fields[1]].apply(
lambda x: x[0:i])
# aggregate the new sector flow amounts
agg_sectors = aggregator(agg_sectors, group_cols)
# append to df
agg_sectors = replace_NoneType_with_empty_cells(agg_sectors)
df = df.append(agg_sectors, sort=False).reset_index(drop=True)
# manually modify non-NAICS codes that might exist in sector
df.loc[:, 'SectorConsumedBy'] = np.where(df['SectorConsumedBy'].isin(['F0', 'F01']),
'F010', df['SectorConsumedBy']) # domestic/household
df.loc[:, 'SectorProducedBy'] = np.where(df['SectorProducedBy'].isin(['F0', 'F01']),
'F010', df['SectorProducedBy']) # domestic/household
# drop any duplicates created by modifying sector codes
df = df.drop_duplicates()
# if activities are sector-like, set activity col values as copies of the sector columns
if sector_like_activities:
df = df.assign(ActivityProducedBy=df['SectorProducedBy'])
df = df.assign(ActivityConsumedBy=df['SectorConsumedBy'])
# reindex columns
df = df.reindex(df_load.columns, axis=1)
# replace null values
df = replace_strings_with_NoneType(df)
return df
def sector_disaggregation(df, group_cols):
"""
function to disaggregate sectors if there is only one naics at a lower level
works for lower than naics 4
:param df: A FBS df
:param group_cols:
:return: A FBS df with missing naics5 and naics6
"""
# ensure None values are not strings
df = replace_NoneType_with_empty_cells(df)
# load naics 2 to naics 6 crosswalk
cw_load = load_sector_length_crosswalk()
# for loop min length to 6 digits, where min length cannot be less than 2
length = df[[fbs_activity_fields[0], fbs_activity_fields[1]]].apply(
lambda x: x.str.len()).min().min()
if length < 2:
length = 2
# appends missing naics levels to df
for i in range(length, 6):
sector_merge = 'NAICS_' + str(i)
sector_add = 'NAICS_' + str(i+1)
# subset the df by naics length
cw = cw_load[[sector_merge, sector_add]]
# only keep the rows where there is only one value in sector_add for a value in sector_merge
cw = cw.drop_duplicates(subset=[sector_merge], keep=False).reset_index(drop=True)
sector_list = cw[sector_merge].values.tolist()
# subset df to sectors with length = i and length = i + 1
df_subset = df.loc[df[fbs_activity_fields[0]].apply(lambda x: i + 1 >= len(x) >= i) |
df[fbs_activity_fields[1]].apply(lambda x: i + 1 >= len(x) >= i)]
# create new columns that are length i
df_subset = df_subset.assign(SectorProduced_tmp=df_subset[fbs_activity_fields[0]].apply(lambda x: x[0:i]))
df_subset = df_subset.assign(SectorConsumed_tmp=df_subset[fbs_activity_fields[1]].apply(lambda x: x[0:i]))
# subset the df to the rows where the tmp sector columns are in naics list
df_subset_1 = df_subset.loc[(df_subset['SectorProduced_tmp'].isin(sector_list)) &
(df_subset['SectorConsumed_tmp'] == "")]
df_subset_2 = df_subset.loc[(df_subset['SectorProduced_tmp'] == "") &
(df_subset['SectorConsumed_tmp'].isin(sector_list))]
df_subset_3 = df_subset.loc[(df_subset['SectorProduced_tmp'].isin(sector_list)) &
(df_subset['SectorConsumed_tmp'].isin(sector_list))]
# concat existing dfs
df_subset = pd.concat([df_subset_1, df_subset_2, df_subset_3], sort=False)
# drop all rows with duplicate temp values, as a less aggregated naics exists
# list of column headers, that if exist in df, are used to identify duplicate rows
possible_column_headers = ('Flowable', 'FlowName', 'Unit', 'Context', 'Compartment', 'Location', 'Year',
'SectorProduced_tmp', 'SectorConsumed_tmp')
# list of column headers that do exist in the df being subset
cols_to_drop = [e for e in possible_column_headers if e in df_subset.columns.values.tolist()]
df_subset = df_subset.drop_duplicates(subset=cols_to_drop, keep=False).reset_index(drop=True)
# merge the naics cw
new_naics = pd.merge(df_subset, cw[[sector_merge, sector_add]],
how='left', left_on=['SectorProduced_tmp'], right_on=[sector_merge])
new_naics = new_naics.rename(columns={sector_add: "SPB"})
new_naics = new_naics.drop(columns=[sector_merge])
new_naics = pd.merge(new_naics, cw[[sector_merge, sector_add]],
how='left', left_on=['SectorConsumed_tmp'], right_on=[sector_merge])
new_naics = new_naics.rename(columns={sector_add: "SCB"})
new_naics = new_naics.drop(columns=[sector_merge])
# drop columns and rename new sector columns
new_naics = new_naics.drop(columns=["SectorProducedBy", "SectorConsumedBy", "SectorProduced_tmp",
"SectorConsumed_tmp"])
new_naics = new_naics.rename(columns={"SPB": "SectorProducedBy",
"SCB": "SectorConsumedBy"})
# append new naics to df
new_naics['SectorConsumedBy'] = new_naics['SectorConsumedBy'].replace({np.nan: ""})
new_naics['SectorProducedBy'] = new_naics['SectorProducedBy'].replace({np.nan: ""})
new_naics = replace_NoneType_with_empty_cells(new_naics)
df = pd.concat([df, new_naics], sort=True)
# replace blank strings with None
df = replace_strings_with_NoneType(df)
return df
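# Illustrative example (not from the original code): NAICS 22131 has a single
# 6-digit child, 221310, in the sector length crosswalk, so rows reported at
# 22131 are copied down to 221310 rather than being left missing at that level.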
def assign_fips_location_system(df, year_of_data):
"""
Add location system based on year of data. County level FIPS change over the years.
:param df: df with FIPS location system
:param year_of_data: year of data pulled
:return:
"""
if '2015' <= year_of_data:
df.loc[:, 'LocationSystem'] = 'FIPS_2015'
elif '2013' <= year_of_data < '2015':
df.loc[:, 'LocationSystem'] = 'FIPS_2013'
elif '2010' <= year_of_data < '2013':
df.loc[:, 'LocationSystem'] = 'FIPS_2010'
elif year_of_data < '2010':
log.warning(
"Missing FIPS codes from crosswalk for " + year_of_data + ". Temporarily assigning to FIPS_2010")
df.loc[:, 'LocationSystem'] = 'FIPS_2010'
return df
def collapse_fbs_sectors(fbs):
"""
Collapses the SectorProducedBy and SectorConsumedBy columns into a single column named "Sector"
:param fbs: a standard FlowBySector (format)
:return:
"""
# ensure correct datatypes and order
fbs = clean_df(fbs, flow_by_sector_fields, fbs_fill_na_dict)
# collapse the FBS sector columns into one column based on FlowType
fbs.loc[fbs["FlowType"] == 'TECHNOSPHERE_FLOW', 'Sector'] = fbs["SectorConsumedBy"]
fbs.loc[fbs["FlowType"] == 'WASTE_FLOW', 'Sector'] = fbs["SectorProducedBy"]
fbs.loc[(fbs["FlowType"] == 'WASTE_FLOW') & (fbs['SectorProducedBy'].isnull()), 'Sector'] = fbs["SectorConsumedBy"]
fbs.loc[(fbs["FlowType"] == 'ELEMENTARY_FLOW') & (fbs['SectorProducedBy'].isnull()), 'Sector'] = fbs["SectorConsumedBy"]
fbs.loc[(fbs["FlowType"] == 'ELEMENTARY_FLOW') & (fbs['SectorConsumedBy'].isnull()), 'Sector'] = fbs["SectorProducedBy"]
fbs.loc[(fbs["FlowType"] == 'ELEMENTARY_FLOW') & (fbs['SectorConsumedBy'].isin(['F010', 'F0100', 'F01000'])) &
(fbs['SectorProducedBy'].isin(['22', '221', '2213', '22131', '221310'])), 'Sector'] = fbs["SectorConsumedBy"]
# drop sector consumed/produced by columns
fbs_collapsed = fbs.drop(columns=['SectorProducedBy', 'SectorConsumedBy'])
# aggregate
fbs_collapsed = aggregator(fbs_collapsed, fbs_collapsed_default_grouping_fields)
# sort dataframe
fbs_collapsed = clean_df(fbs_collapsed, flow_by_sector_collapsed_fields, fbs_collapsed_fill_na_dict)
fbs_collapsed = fbs_collapsed.sort_values(['Sector', 'Flowable', 'Context', 'Location']).reset_index(drop=True)
return fbs_collapsed
def return_activity_from_scale(df, provided_from_scale):
"""
Determine the 'from scale' used for aggregation/df subsetting for each activity combo in a df
:param df: flowbyactivity df
:param provided_from_scale: The scale to use specified in method yaml
:return:
"""
# determine the unique combinations of activityproduced/consumedby
unique_activities = unique_activity_names(df)
# filter by geoscale
fips = create_geoscale_list(df, provided_from_scale)
df_sub = df[df['Location'].isin(fips)]
# determine unique activities after subsetting by geoscale
unique_activities_sub = unique_activity_names(df_sub)
# return df of the difference between unique_activities and unique_activities2
df_missing = dataframe_difference(unique_activities, unique_activities_sub, which='left_only')
# return df of the similarities between unique_activities and unique_activities2
df_existing = dataframe_difference(unique_activities, unique_activities_sub, which='both')
df_existing = df_existing.drop(columns='_merge')
df_existing['activity_from_scale'] = provided_from_scale
# for loop through geoscales until find data for each activity combo
if provided_from_scale == 'national':
geoscales = ['state', 'county']
elif provided_from_scale == 'state':
geoscales = ['county']
elif provided_from_scale == 'county':
# no finer geoscale exists to fall back on
geoscales = []
log.info('No data - skipping')
if len(df_missing) > 0:
for i in geoscales:
# filter by geoscale
fips_i = create_geoscale_list(df, i)
df_i = df[df['Location'].isin(fips_i)]
# determine unique activities after subsetting by geoscale
unique_activities_i = unique_activity_names(df_i)
# return df of the difference between unique_activities subset and unique_activities for geoscale
df_missing_i = dataframe_difference(unique_activities_sub, unique_activities_i, which='right_only')
df_missing_i = df_missing_i.drop(columns='_merge')
df_missing_i['activity_from_scale'] = i
# return df of the similarities between unique_activities and unique_activities2
df_existing_i = dataframe_difference(unique_activities_sub, unique_activities_i, which='both')
# append unique activities and df with defined activity_from_scale
unique_activities_sub = unique_activities_sub.append(df_missing_i[[fba_activity_fields[0],
fba_activity_fields[1]]])
df_existing = df_existing.append(df_missing_i)
df_missing = dataframe_difference(df_missing[[fba_activity_fields[0], fba_activity_fields[1]]],
df_existing_i[[fba_activity_fields[0], fba_activity_fields[1]]],
which=None)
return df_existing
def subset_df_by_geoscale(df, activity_from_scale, activity_to_scale):
"""
Subset a df by geoscale or agg to create data specified in method yaml
:param df: df of flowbyactivity data
:param activity_from_scale:
:param activity_to_scale:
:return:
"""
# method of subset dependent on LocationSystem
if df['LocationSystem'].str.contains('FIPS').any():
df = df[df['LocationSystem'].str.contains('FIPS')].reset_index(drop=True)
# determine 'activity_from_scale' for use in df geoscale subset, by activity
modified_from_scale = return_activity_from_scale(df, activity_from_scale)
# add 'activity_from_scale' column to df
df2 = pd.merge(df, modified_from_scale)
# list of unique 'from' geoscales
unique_geoscales = modified_from_scale['activity_from_scale'].drop_duplicates().values.tolist()
if len(unique_geoscales) > 1:
log.info('Dataframe has a mix of geographic levels: ' + ', '.join(unique_geoscales))
# to scale
if fips_number_key[activity_from_scale] > fips_number_key[activity_to_scale]:
to_scale = activity_to_scale
else:
to_scale = activity_from_scale
df_subset_list = []
# subset df based on activity 'from' scale
for i in unique_geoscales:
df3 = df2[df2['activity_from_scale'] == i]
# if desired geoscale doesn't exist, aggregate existing data
# if df is less aggregated than allocation df, aggregate fba activity to allocation geoscale
if fips_number_key[i] > fips_number_key[to_scale]:
log.info("Aggregating subset from " + i + " to " + to_scale)
df_sub = agg_by_geoscale(df3, i, to_scale, fba_default_grouping_fields)
# else filter relevant rows
else:
log.info("Subsetting " + i + " data")
df_sub = filter_by_geoscale(df3, i)
df_subset_list.append(df_sub)
df_subset = pd.concat(df_subset_list, ignore_index=True)
# only keep cols associated with FBA
df_subset = clean_df(df_subset, flow_by_activity_fields, fba_fill_na_dict, drop_description=False)
# right now, the only other location system is for Statistics Canada data
else:
df_subset = df.copy()
return df_subset
def unique_activity_names(fba_df):
"""
Determine the unique activity names in a df
:param fba_df: a flowbyactivity df
:return: df with ActivityProducedBy and ActivityConsumedBy columns
"""
activities = fba_df[[fba_activity_fields[0], fba_activity_fields[1]]]
unique_activities = activities.drop_duplicates().reset_index(drop=True)
return unique_activities
def dataframe_difference(df1, df2, which=None):
"""
Find rows which are different between two DataFrames
:param df1:
:param df2:
:param which: 'both', 'right_only', 'left_only'
:return:
"""
comparison_df = df1.merge(df2,
indicator=True,
how='outer')
if which is None:
diff_df = comparison_df[comparison_df['_merge'] != 'both']
else:
diff_df = comparison_df[comparison_df['_merge'] == which]
return diff_df
def estimate_suppressed_data(df, sector_column, naics_level):
"""
Estimate data suppression, by equally allocating parent NAICS values to child NAICS
:param df:
:param sector_column:
:param naics_level: numeric, indicates at what NAICS length to base estimated suppressed data off (2 - 5)
:return:
"""
# exclude nonsectors
df = replace_NoneType_with_empty_cells(df)
# find the longest length sector
max_length = max(df[sector_column].apply(lambda x: len(str(x))).unique())
# loop through starting at naics_level, use most detailed level possible to save time
for i in range(naics_level, max_length):
# create df of i length
df_x = df.loc[df[sector_column].apply(lambda x: len(x) == i)]
# create df of i + 1 length
df_y = df.loc[df[sector_column].apply(lambda x: len(x) == i + 1)]
# create temp sector columns in df y, that are i digits in length
df_y = df_y.assign(s_tmp=df_y[sector_column].apply(lambda x: x[0:i]))
# create list of location and temp activity combos that contain a 0
missing_sectors_df = df_y[df_y['FlowAmount'] == 0]
missing_sectors_list = missing_sectors_df[['Location', 's_tmp']].drop_duplicates().values.tolist()
# subset the y df
if len(missing_sectors_list) != 0:
# new df of sectors that start with missing sectors. drop last digit of the sector and sum flows
# set conditions
suppressed_list = []
for q, r, in missing_sectors_list:
c1 = df_y['Location'] == q
c2 = df_y['s_tmp'] == r
# subset data
suppressed_list.append(df_y.loc[c1 & c2])
suppressed_sectors = pd.concat(suppressed_list, sort=False, ignore_index=True)
# add column of existing allocated data for length of i
suppressed_sectors['alloc_flow'] = suppressed_sectors.groupby(['Location', 's_tmp'])['FlowAmount'].transform('sum')
# subset further so only keep rows of 0 value
suppressed_sectors_sub = suppressed_sectors[suppressed_sectors['FlowAmount'] == 0]
# add count
suppressed_sectors_sub = \
suppressed_sectors_sub.assign(sector_count=
suppressed_sectors_sub.groupby(['Location',
's_tmp'])['s_tmp'].transform('count'))
# merge suppressed sector subset with df x
df_m = pd.merge(df_x,
suppressed_sectors_sub[['Class', 'Compartment', 'FlowType', 'FlowName', 'Location', 'LocationSystem', 'Unit',
'Year', sector_column, 's_tmp', 'alloc_flow', 'sector_count']],
left_on=['Class', 'Compartment', 'FlowType', 'FlowName', 'Location', 'LocationSystem', 'Unit',
'Year', sector_column],
right_on=['Class', 'Compartment', 'FlowType', 'FlowName', 'Location', 'LocationSystem', 'Unit',
'Year', 's_tmp'],
how='right')
# calculate estimated flows by subtracting the flow amount already allocated from the total flow of the
# sector one level up, then dividing by the number of sectors with suppressed data
df_m.loc[:, 'FlowAmount'] = (df_m['FlowAmount'] - df_m['alloc_flow']) / df_m['sector_count']
# only keep the suppressed sector subset activity columns
df_m = df_m.drop(columns=[sector_column + '_x', 's_tmp', 'alloc_flow', 'sector_count'])
df_m = df_m.rename(columns={sector_column + '_y': sector_column})
# reset activity columns #todo: modify so next 2 lines only run if activities are sector-like
df_m = df_m.assign(ActivityProducedBy=df_m['SectorProducedBy'])
df_m = df_m.assign(ActivityConsumedBy=df_m['SectorConsumedBy'])
# drop the existing rows with suppressed data and append the new estimates from fba df
modified_df = pd.merge(df, df_m[['FlowName', 'Location', sector_column]], indicator=True, how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
df = pd.concat([modified_df, df_m], ignore_index=True)
df_w_estimated_data = replace_strings_with_NoneType(df)
return df_w_estimated_data
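# Illustrative example (not from the original code): if a parent sector one
# level up reports a FlowAmount of 100, its child sectors sum to 60, and two
# of those children are suppressed (FlowAmount 0), each suppressed child is
# estimated as (100 - 60) / 2 = 20.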
def collapse_activity_fields(df):
"""
The 'activityconsumedby' and 'activityproducedby' columns from the allocation dataset do not always align with
the water use dataframe. Generalize the allocation activity column.
:param df: df with activity and sector columns to generalize
:return:
"""
df = replace_strings_with_NoneType(df)
activity_consumed_list = df['ActivityConsumedBy'].drop_duplicates().values.tolist()
activity_produced_list = df['ActivityProducedBy'].drop_duplicates().values.tolist()
# if an activity field column is all 'none', drop that column and rename the remaining activity columns to generalize
if all(v is None for v in activity_consumed_list):
df = df.drop(columns=['ActivityConsumedBy', 'SectorConsumedBy'])
df = df.rename(columns={'ActivityProducedBy': 'Activity',
'SectorProducedBy': 'Sector'})
elif all(v is None for v in activity_produced_list):
df = df.drop(columns=['ActivityProducedBy', 'SectorProducedBy'])
df = df.rename(columns={'ActivityConsumedBy': 'Activity',
'SectorConsumedBy': 'Sector'})
else:
log.error('Cannot generalize dataframe')
# drop other columns
df = df.drop(columns=['ProducedBySectorType', 'ConsumedBySectorType'])
return df
def allocate_by_sector(df_w_sectors, source_name, allocation_source, allocation_method, group_cols, **kwargs):
"""
Create an allocation ratio for df
:param df_w_sectors: df with column of sectors
:param source_name: the name of the FBA df being allocated
:param allocation_source: The name of the FBA allocation dataframe
:param allocation_method: currently written for 'proportional'
:param group_cols: columns on which to base aggregation and disaggregation
:return: df with FlowAmountRatio for each sector
"""
# first determine if there is a special case with how the allocation ratios are created
if allocation_method == 'proportional-flagged':
# if the allocation method is flagged, subset sectors that are flagged/nonflagged, where nonflagged sectors
# have flowamountratio=1
if kwargs != {}:
if 'flowSubsetMapped' in kwargs:
fsm = kwargs['flowSubsetMapped']
flagged = fsm[fsm['disaggregate_flag'] == 1]
# todo: change col to be generalized, not SectorConsumedBy specific
flagged_names = flagged['SectorConsumedBy'].tolist()
nonflagged = fsm[fsm['disaggregate_flag'] == 0]
nonflagged_names = nonflagged['SectorConsumedBy'].tolist()
# subset the original df so the rows of data that run through the proportional allocation process are
# those whose sectors are included in the flagged list
df_w_sectors_nonflagged = df_w_sectors.loc[
(df_w_sectors[fbs_activity_fields[0]].isin(nonflagged_names)) |
(df_w_sectors[fbs_activity_fields[1]].isin(nonflagged_names))
].reset_index(drop=True)
df_w_sectors_nonflagged = df_w_sectors_nonflagged.assign(FlowAmountRatio=1)
df_w_sectors = df_w_sectors.loc[(df_w_sectors[fbs_activity_fields[0]].isin(flagged_names)) |
(df_w_sectors[fbs_activity_fields[1]].isin(flagged_names))
].reset_index(drop=True)
else:
log.error('The proportional-flagged allocation method requires a column "disaggregate_flag" in the '
'flow_subset_mapped df')
# run sector aggregation fxn to determine total flowamount for each level of sector
if len(df_w_sectors) == 0:
allocation_df = df_w_sectors_nonflagged.copy()
else:
df1 = sector_aggregation(df_w_sectors, group_cols)
# run sector disaggregation to capture one-to-one naics4/5/6 relationships
df2 = sector_disaggregation(df1, group_cols)
# if statements for method of allocation
# either 'proportional' or 'proportional-flagged'
allocation_df = []
if allocation_method == 'proportional' or allocation_method == 'proportional-flagged':
allocation_df = proportional_allocation_by_location(df2)
else:
log.error('Must create function for specified method of allocation')
if allocation_method == 'proportional-flagged':
# drop rows where values are not in flagged names
allocation_df = allocation_df.loc[(allocation_df[fbs_activity_fields[0]].isin(flagged_names)) |
(allocation_df[fbs_activity_fields[1]].isin(flagged_names))
].reset_index(drop=True)
# concat the flagged and nonflagged dfs
allocation_df = pd.concat([allocation_df, df_w_sectors_nonflagged],
ignore_index=True).sort_values(['SectorProducedBy', 'SectorConsumedBy'])
return allocation_df
def proportional_allocation_by_location(df):
"""
Creates a proportional allocation based on all the most aggregated sectors within a location
Ensure that sectors are at 2 digit level - can run sector_aggregation() prior to using this function
    :param df: df with sector columns
    :return: df with a FlowAmountRatio column
"""
# tmp drop NoneType
df = replace_NoneType_with_empty_cells(df)
# find the shortest length sector
denom_df = df.loc[(df['SectorProducedBy'].apply(lambda x: len(x) == 2)) |
(df['SectorConsumedBy'].apply(lambda x: len(x) == 2))]
denom_df = denom_df.assign(Denominator=denom_df['FlowAmount'].groupby(
denom_df['Location']).transform('sum'))
denom_df_2 = denom_df[['Location', 'LocationSystem', 'Year', 'Denominator']].drop_duplicates()
# merge the denominator column with fba_w_sector df
allocation_df = df.merge(denom_df_2, how='left')
# calculate ratio
allocation_df.loc[:, 'FlowAmountRatio'] = allocation_df['FlowAmount'] / allocation_df[
'Denominator']
allocation_df = allocation_df.drop(columns=['Denominator']).reset_index()
# add nonetypes
allocation_df = replace_strings_with_NoneType(allocation_df)
return allocation_df
def proportional_allocation_by_location_and_activity(df, sectorcolumn):
"""
Creates a proportional allocation within each aggregated sector within a location
    :param df: df with sector columns and a HelperFlow column
    :param sectorcolumn: name of the sector column used to create the denominator
    :return: df with a FlowAmountRatio column
"""
# tmp replace NoneTypes with empty cells
df = replace_NoneType_with_empty_cells(df)
# denominator summed from highest level of sector grouped by location
short_length = min(df[sectorcolumn].apply(lambda x: len(str(x))).unique())
# want to create denominator based on short_length
denom_df = df.loc[df[sectorcolumn].apply(lambda x: len(x) == short_length)].reset_index(drop=True)
grouping_cols = [e for e in ['FlowName', 'Location', 'Activity', 'ActivityConsumedBy', 'ActivityProducedBy']
if e in denom_df.columns.values.tolist()]
denom_df.loc[:, 'Denominator'] = denom_df.groupby(grouping_cols)['HelperFlow'].transform('sum')
# list of column headers, that if exist in df, should be aggregated using the weighted avg fxn
possible_column_headers = ('Location', 'LocationSystem', 'Year', 'Activity', 'ActivityConsumedBy', 'ActivityProducedBy')
# list of column headers that do exist in the df being aggregated
column_headers = [e for e in possible_column_headers if e in denom_df.columns.values.tolist()]
merge_headers = column_headers.copy()
column_headers.append('Denominator')
# create subset of denominator values based on Locations and Activities
denom_df_2 = denom_df[column_headers].drop_duplicates().reset_index(drop=True)
# merge the denominator column with fba_w_sector df
allocation_df = df.merge(denom_df_2,
how='left',
left_on=merge_headers,
right_on=merge_headers)
# calculate ratio
allocation_df.loc[:, 'FlowAmountRatio'] = allocation_df['HelperFlow'] / allocation_df['Denominator']
allocation_df = allocation_df.drop(columns=['Denominator']).reset_index(drop=True)
# fill empty cols with NoneType
allocation_df = replace_strings_with_NoneType(allocation_df)
# fill na values with 0
allocation_df['HelperFlow'] = allocation_df['HelperFlow'].fillna(0)
return allocation_df
# def proportional_allocation_by_location_and_sector(df, sectorcolumn):
# """
# Creates a proportional allocation within each aggregated sector within a location
# :param df:
# :param sectorcolumn:
# :return:
# """
# from flowsa.common import load_source_catalog
#
# cat = load_source_catalog()
# src_info = cat[pd.unique(df['SourceName'])[0]]
# # load source catalog to determine the level of sector aggregation associated with a crosswalk
# level_of_aggregation = src_info['sector_aggregation_level']
#
# # denominator summed from highest level of sector grouped by location
# short_length = min(df[sectorcolumn].apply(lambda x: len(str(x))).unique())
# # want to create denominator based on short_length - 1, unless short_length = 2
# denom_df = df.loc[df[sectorcolumn].apply(lambda x: len(x) == short_length)]
# if (level_of_aggregation == 'disaggregated') & (short_length != 2):
# short_length = short_length - 1
# denom_df.loc[:, 'sec_tmp'] = denom_df[sectorcolumn].apply(lambda x: x[0:short_length])
# denom_df.loc[:, 'Denominator'] = denom_df.groupby(['Location', 'sec_tmp'])['FlowAmount'].transform('sum')
# else: # short_length == 2:]
# denom_df.loc[:, 'Denominator'] = denom_df['FlowAmount']
# denom_df.loc[:, 'sec_tmp'] = denom_df[sectorcolumn]
# # if short_length == 2:
# # denom_df.loc[:, 'Denominator'] = denom_df['FlowAmount']
# # denom_df.loc[:, 'sec_tmp'] = denom_df[sectorcolumn]
# # else:
# # short_length = short_length - 1
# # denom_df.loc[:, 'sec_tmp'] = denom_df[sectorcolumn].apply(lambda x: x[0:short_length])
# # denom_df.loc[:, 'Denominator'] = denom_df.groupby(['Location', 'sec_tmp'])['FlowAmount'].transform('sum')
#
# denom_df_2 = denom_df[['Location', 'LocationSystem', 'Year', 'sec_tmp', 'Denominator']].drop_duplicates()
# # merge the denominator column with fba_w_sector df
# df.loc[:, 'sec_tmp'] = df[sectorcolumn].apply(lambda x: x[0:short_length])
# allocation_df = df.merge(denom_df_2, how='left', left_on=['Location', 'LocationSystem', 'Year', 'sec_tmp'],
# right_on=['Location', 'LocationSystem', 'Year', 'sec_tmp'])
# # calculate ratio
# allocation_df.loc[:, 'FlowAmountRatio'] = allocation_df['FlowAmount'] / allocation_df[
# 'Denominator']
# allocation_df = allocation_df.drop(columns=['Denominator', 'sec_tmp']).reset_index(drop=True)
#
# return allocation_df
```
#### File: flowsa/flowsa/flowbysector.py
```python
import sys
import argparse
import yaml
import pandas as pd
from esupy.processed_data_mgmt import write_df_to_file
import flowsa
from flowsa.common import log, flowbysectormethodpath, flow_by_sector_fields, \
fips_number_key, flow_by_activity_fields, load_source_catalog, \
flowbysectoractivitysetspath, flow_by_sector_fields_w_activity,\
set_fb_meta, paths, fba_activity_fields, \
fbs_activity_fields, fba_fill_na_dict, fbs_fill_na_dict, fbs_default_grouping_fields, \
fbs_grouping_fields_w_activities
from flowsa.fbs_allocation import direct_allocation_method, function_allocation_method, \
dataset_allocation_method
from flowsa.mapping import add_sectors_to_flowbyactivity, map_elementary_flows, \
get_sector_list
from flowsa.flowbyfunctions import agg_by_geoscale, sector_aggregation, \
aggregator, subset_df_by_geoscale, sector_disaggregation
from flowsa.dataclean import clean_df, harmonize_FBS_columns, reset_fbs_dq_scores
from flowsa.datachecks import check_if_losing_sector_data,\
check_for_differences_between_fba_load_and_fbs_output, \
compare_fba_load_and_fbs_output_totals, compare_geographic_totals,\
replace_naics_w_naics_from_another_year
# import specific functions
from flowsa.data_source_scripts.BEA import subset_BEA_Use
from flowsa.data_source_scripts.Blackhurst_IO import convert_blackhurst_data_to_gal_per_year,\
convert_blackhurst_data_to_gal_per_employee
from flowsa.data_source_scripts.BLS_QCEW import clean_bls_qcew_fba,\
clean_bls_qcew_fba_for_employment_sat_table, \
bls_clean_allocation_fba_w_sec
from flowsa.data_source_scripts.EIA_CBECS_Land import cbecs_land_fba_cleanup
from flowsa.data_source_scripts.EIA_MECS import mecs_energy_fba_cleanup,\
eia_mecs_energy_clean_allocation_fba_w_sec, \
mecs_land_fba_cleanup, mecs_land_fba_cleanup_for_land_2012_fbs,\
mecs_land_clean_allocation_mapped_fba_w_sec
from flowsa.data_source_scripts.EPA_NEI import clean_NEI_fba, clean_NEI_fba_no_pesticides
from flowsa.data_source_scripts.StatCan_IWS_MI import convert_statcan_data_to_US_water_use
from flowsa.data_source_scripts.stewiFBS import stewicombo_to_sector, stewi_to_sector
from flowsa.data_source_scripts.USDA_CoA_Cropland import \
disaggregate_coa_cropland_to_6_digit_naics,\
coa_irrigated_cropland_fba_cleanup, coa_nonirrigated_cropland_fba_cleanup
from flowsa.data_source_scripts.USDA_ERS_MLU import allocate_usda_ers_mlu_land_in_urban_areas,\
allocate_usda_ers_mlu_other_land,\
allocate_usda_ers_mlu_land_in_rural_transportation_areas
from flowsa.data_source_scripts.USDA_IWMS import disaggregate_iwms_to_6_digit_naics
from flowsa.data_source_scripts.USGS_NWIS_WU import usgs_fba_data_cleanup,\
usgs_fba_w_sectors_data_cleanup
def parse_args():
"""Make year and source script parameters"""
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--method",
required=True, help="Method for flow by sector file. "
"A valid method config file must exist with this name.")
args = vars(ap.parse_args())
return args
def load_method(method_name):
"""
Loads a flowbysector method from a YAML
    :param method_name: name of the flowbysector method yaml file
    :return: dictionary of the loaded method yaml
"""
sfile = flowbysectormethodpath + method_name + '.yaml'
try:
with open(sfile, 'r') as f:
method = yaml.safe_load(f)
except IOError:
log.error("FlowBySector method file not found.")
return method
def load_source_dataframe(k, v):
"""
Load the source dataframe. Data can be a FlowbyActivity or
FlowBySector parquet stored in flowsa, or a FlowBySector
formatted dataframe from another package.
:param k: The datasource name
:param v: The datasource parameters
    :return: df of the loaded source data
"""
if v['data_format'] == 'FBA':
# if yaml specifies a geoscale to load, use parameter to filter dataframe
if 'source_fba_load_scale' in v:
geo_level = v['source_fba_load_scale']
else:
geo_level = None
log.info("Retrieving flowbyactivity for datasource " + k + " in year " + str(v['year']))
flows_df = flowsa.getFlowByActivity(datasource=k, year=v['year'], flowclass=v['class'],
geographic_level=geo_level)
elif v['data_format'] == 'FBS':
log.info("Retrieving flowbysector for datasource " + k)
flows_df = flowsa.getFlowBySector(k)
elif v['data_format'] == 'FBS_outside_flowsa':
log.info("Retrieving flowbysector for datasource " + k)
flows_df = getattr(sys.modules[__name__], v["FBS_datapull_fxn"])(v)
else:
log.error("Data format not specified in method file for datasource " + k)
return flows_df
def main(**kwargs):
"""
Creates a flowbysector dataset
    :param kwargs: dictionary containing 'method', the name of the flowbysector method yaml
:return: flowbysector
"""
if len(kwargs) == 0:
kwargs = parse_args()
method_name = kwargs['method']
# assign arguments
log.info("Initiating flowbysector creation for " + method_name)
# call on method
method = load_method(method_name)
# create dictionary of data and allocation datasets
fb = method['source_names']
# Create empty list for storing fbs files
fbs_list = []
for k, v in fb.items():
# pull fba data for allocation
flows = load_source_dataframe(k, v)
if v['data_format'] == 'FBA':
# ensure correct datatypes and that all fields exist
flows = clean_df(flows, flow_by_activity_fields,
fba_fill_na_dict, drop_description=False)
# clean up fba, if specified in yaml
if v["clean_fba_df_fxn"] != 'None':
log.info("Cleaning up " + k + " FlowByActivity")
flows = getattr(sys.modules[__name__], v["clean_fba_df_fxn"])(flows)
# if activity_sets are specified in a file, call them here
if 'activity_set_file' in v:
aset_names = pd.read_csv(flowbysectoractivitysetspath +
v['activity_set_file'], dtype=str)
else:
aset_names = None
# create dictionary of allocation datasets for different activities
activities = v['activity_sets']
# subset activity data and allocate to sector
for aset, attr in activities.items():
# subset by named activities
if 'activity_set_file' in v:
names = aset_names[aset_names['activity_set'] == aset]['name']
else:
names = attr['names']
log.info("Preparing to handle " + aset + " in " + k)
log.debug("Preparing to handle subset of activities: " + ', '.join(map(str, names)))
# subset fba data by activity
# if activities are sector-like, check sectors are valid
if load_source_catalog()[k]['sector-like_activities']:
flows = replace_naics_w_naics_from_another_year(flows,
method['target_sector_source'])
flows_subset =\
flows[(flows[fba_activity_fields[0]].isin(names)) |
(flows[fba_activity_fields[1]].isin(names))].reset_index(drop=True)
# extract relevant geoscale data or aggregate existing data
flows_subset_geo = subset_df_by_geoscale(flows_subset, v['geoscale_to_use'],
attr['allocation_from_scale'])
# if loading data subnational geoscale, check for data loss
if attr['allocation_from_scale'] != 'national':
compare_geographic_totals(flows_subset_geo, flows_subset, k, method_name, aset)
# Add sectors to df activity, depending on level of specified sector aggregation
log.info("Adding sectors to " + k)
flow_subset_wsec =\
add_sectors_to_flowbyactivity(flows_subset_geo,
sectorsourcename=method['target_sector_source'],
allocationmethod=attr['allocation_method'])
# clean up fba with sectors, if specified in yaml
if v["clean_fba_w_sec_df_fxn"] != 'None':
log.info("Cleaning up " + k + " FlowByActivity with sectors")
flow_subset_wsec = getattr(sys.modules[__name__],
v["clean_fba_w_sec_df_fxn"])(flow_subset_wsec,
attr=attr)
# map df to elementary flows
log.info("Mapping flows in " + k + ' to federal elementary flow list')
if 'fedefl_mapping' in v:
mapping_files = v['fedefl_mapping']
else:
mapping_files = k
flow_subset_mapped = map_elementary_flows(flow_subset_wsec, mapping_files)
# clean up mapped fba with sectors, if specified in yaml
if "clean_mapped_fba_w_sec_df_fxn" in v:
log.info("Cleaning up " + k + " FlowByActivity with sectors")
flow_subset_mapped =\
getattr(sys.modules[__name__],
v["clean_mapped_fba_w_sec_df_fxn"])\
(flow_subset_mapped, attr, method)
# rename SourceName to MetaSources
flow_subset_mapped = flow_subset_mapped.\
rename(columns={'SourceName': 'MetaSources'})
# if allocation method is "direct", then no need to create alloc ratios,
# else need to use allocation
# dataframe to create sector allocation ratios
if attr['allocation_method'] == 'direct':
fbs = direct_allocation_method(flow_subset_mapped, k, names, method)
# if allocation method for an activity set requires a specific
# function due to the complicated nature
# of the allocation, call on function here
elif attr['allocation_method'] == 'allocation_function':
fbs = function_allocation_method(flow_subset_mapped, names, attr, fbs_list)
else:
fbs =\
dataset_allocation_method(flow_subset_mapped, attr,
names, method, k, v, aset,
method_name, aset_names)
# drop rows where flowamount = 0 (although this includes dropping suppressed data)
fbs = fbs[fbs['FlowAmount'] != 0].reset_index(drop=True)
# define grouping columns dependent on sectors being activity-like or not
if load_source_catalog()[k]['sector-like_activities'] is False:
groupingcols = fbs_grouping_fields_w_activities
groupingdict = flow_by_sector_fields_w_activity
else:
groupingcols = fbs_default_grouping_fields
groupingdict = flow_by_sector_fields
# clean df
fbs = clean_df(fbs, groupingdict, fbs_fill_na_dict)
# aggregate df geographically, if necessary
log.info("Aggregating flowbysector to " + method['target_geoscale'] + " level")
# determine from scale
if fips_number_key[v['geoscale_to_use']] <\
fips_number_key[attr['allocation_from_scale']]:
from_scale = v['geoscale_to_use']
else:
from_scale = attr['allocation_from_scale']
fbs_geo_agg = agg_by_geoscale(fbs, from_scale,
method['target_geoscale'], groupingcols)
# aggregate data to every sector level
log.info("Aggregating flowbysector to all sector levels")
fbs_sec_agg = sector_aggregation(fbs_geo_agg, groupingcols)
# add missing naics5/6 when only one naics5/6 associated with a naics4
fbs_agg = sector_disaggregation(fbs_sec_agg, groupingdict)
# check if any sector information is lost before reaching
# the target sector length, if so,
# allocate values equally to disaggregated sectors
log.debug('Checking for data at ' + method['target_sector_level'])
fbs_agg_2 = check_if_losing_sector_data(fbs_agg, method['target_sector_level'])
# compare flowbysector with flowbyactivity
# todo: modify fxn to work if activities are sector like in df being allocated
if load_source_catalog()[k]['sector-like_activities'] is False:
check_for_differences_between_fba_load_and_fbs_output(
flow_subset_mapped, fbs_agg_2, aset, k, method_name)
# return sector level specified in method yaml
# load the crosswalk linking sector lengths
sector_list = get_sector_list(method['target_sector_level'])
# subset df, necessary because not all of the sectors are
# NAICS and can get duplicate rows
fbs_1 = fbs_agg_2.loc[(fbs_agg_2[fbs_activity_fields[0]].isin(sector_list)) &
(fbs_agg_2[fbs_activity_fields[1]].isin(sector_list))].\
reset_index(drop=True)
fbs_2 = fbs_agg_2.loc[(fbs_agg_2[fbs_activity_fields[0]].isin(sector_list)) &
(fbs_agg_2[fbs_activity_fields[1]].isnull())].\
reset_index(drop=True)
fbs_3 = fbs_agg_2.loc[(fbs_agg_2[fbs_activity_fields[0]].isnull()) &
(fbs_agg_2[fbs_activity_fields[1]].isin(sector_list))].\
reset_index(drop=True)
fbs_sector_subset = pd.concat([fbs_1, fbs_2, fbs_3])
# drop activity columns
fbs_sector_subset = fbs_sector_subset.drop(['ActivityProducedBy',
'ActivityConsumedBy'],
axis=1, errors='ignore')
# save comparison of FBA total to FBS total for an activity set
compare_fba_load_and_fbs_output_totals(flows_subset_geo, fbs_sector_subset, aset, k,
method_name, attr, method, mapping_files)
log.info("Completed flowbysector for " + aset)
fbs_list.append(fbs_sector_subset)
else:
            # if the loaded flow df is already in FBS format, append directly to list of FBS
log.info("Append " + k + " to FBS list")
# ensure correct field datatypes and add any missing fields
flows = clean_df(flows, flow_by_sector_fields, fbs_fill_na_dict)
fbs_list.append(flows)
# create single df of all activities
log.info("Concat data for all activities")
fbss = pd.concat(fbs_list, ignore_index=True, sort=False)
log.info("Clean final dataframe")
# add missing fields, ensure correct data type, add missing columns, reorder columns
fbss = clean_df(fbss, flow_by_sector_fields, fbs_fill_na_dict)
# prior to aggregating, replace MetaSources string with all sources
# that share context/flowable/sector values
fbss = harmonize_FBS_columns(fbss)
# aggregate df as activities might have data for the same specified sector length
fbss = aggregator(fbss, fbs_default_grouping_fields)
# sort df
log.info("Sort and store dataframe")
# ensure correct data types/order of columns
fbss = clean_df(fbss, flow_by_sector_fields, fbs_fill_na_dict)
fbss = fbss.sort_values(
['SectorProducedBy', 'SectorConsumedBy', 'Flowable', 'Context']).reset_index(drop=True)
# tmp reset data quality scores
fbss = reset_fbs_dq_scores(fbss)
# save parquet file
meta = set_fb_meta(method_name, "FlowBySector")
    write_df_to_file(fbss, paths, meta)
if __name__ == '__main__':
main()
```
#### File: scripts/FlowByActivity_Crosswalks/write_Crosswalk_BLM_PLS.py
```python
import pandas as pd
from flowsa.common import datapath
from scripts.common_scripts import unique_activity_names, order_crosswalk
def assign_naics(df):
"""manually assign each ERS activity to a NAICS_2012 code"""
df.loc[df['Activity'] == 'Asphalt Competitive Leases', 'Sector'] = '212399' # All Other Nonmetallic Mineral Mining
df.loc[df['Activity'] == 'Class III Reinstatement Leases, Public Domain', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Coal Licenses, Exploration Licenses', 'Sector'] = '21211' # Coal Mining
df.loc[df['Activity'] == 'Coal Licenses, Licenses To Mine', 'Sector'] = '21211' # Coal Mining
df.loc[df['Activity'] == 'Combined Hydrocarbon Leases', 'Sector'] = '211112' # Natural Gas Liquid Extraction
df.loc[df['Activity'] == 'Competitive General Services Administration (GSA) Oil and Gas Leases, Public Domain', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Competitive National Petroleum Reserve-Alaska Leases, Public Domain', 'Sector'] = '211111' # Crude Petroleum and Natural Gas Extraction
df.loc[df['Activity'] == 'Competitive Naval Oil Shale Reserve Leases, Public Domain', 'Sector'] = '211111' # Crude Petroleum and Natural Gas Extraction
df.loc[df['Activity'] == 'Competitive Protective Leases, Public Domain and Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Competitive Reform Act Leases, Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Competitive Reform Act Leases, Public Domain', 'Sector'] = '21111' # Oil and Gas Extraction # todo: added 1/13 - check on results
df.loc[df['Activity'] == 'EPAct Competitive Geothermal Leases, Public Domain and Acquired Lands', 'Sector'] = '221116' # Power generation, geothermal
df.loc[df['Activity'] == 'Exchange Leases, Public Domain', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Federal Coal Leases, Competitive Nonregional Lease-by-Application Leases', 'Sector'] = '21211' # Coal Mining
df.loc[df['Activity'] == 'Federal Coal Leases, Competitive Pre-Federal Coal Leasing Amendment Act (FCLAA) Leases', 'Sector'] = '21211' # Coal Mining
df.loc[df['Activity'] == 'Federal Coal Leases, Competitive Regional Emergency/Bypass Leases', 'Sector'] = '21211' # Coal Mining
df.loc[df['Activity'] == 'Federal Coal Leases, Competitive Regional Leases', 'Sector'] = '21211' # Coal Mining
df.loc[df['Activity'] == 'Federal Coal Leases, Exchange Leases', 'Sector'] = '21211' # Coal Mining
df.loc[df['Activity'] == 'Federal Coal Leases, Preference Right Leases', 'Sector'] = '21211' # Coal Mining
df.loc[df['Activity'] == 'Geothermal Leases, Public Domain and Acquired Lands', 'Sector'] = '221116' # Power generation, geothermal
df.loc[df['Activity'] == 'Gilsonite Leases, Gilsonite Competitive Leases', 'Sector'] = '212399' # Gilsonite mining and/or beneficiating
df.loc[df['Activity'] == 'Gilsonite Leases, Gilsonite Fringe Acreage Noncompetitive Leases', 'Sector'] = '212399' # Gilsonite mining and/or beneficiating
df.loc[df['Activity'] == 'Gilsonite Leases, Gilsonite Preference Right Leases', 'Sector'] = '212399' # Gilsonite mining and/or beneficiating
df.loc[df['Activity'] == 'Hardrock - Acquired Lands Leases, Hardrock Preference Right Leases', 'Sector'] = '2122' # Metal Ore Mining
df = df.append(pd.DataFrame([['BLM_PLS', 'Hardrock - Acquired Lands Leases, Hardrock Preference Right Leases', '2123']], # Nonmetallic Mineral Mining and Quarrying
columns=['ActivitySourceName', 'Activity', 'Sector']
), ignore_index=True, sort=True)
# todo: check definition
df.loc[df['Activity'] == 'Logical Mining Units', 'Sector'] = '21211' # Coal Mining
df.loc[df['Activity'] == 'Noncompetitive Pre-Reform Act Future Interest Leases, Public Domain and Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Competitive Pre-Reform Act Future Interest Leases, Public Domain and Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction #todo: added 1/13 - check on output
df.loc[df['Activity'] == 'Noncompetitive Reform Act Future Interest Leases, Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Noncompetitive Reform Act Future Interest Leases, Public Domain and Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Noncompetitive Reform Act Leases, Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Noncompetitive Reform Act Leases, Public Domain', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Oil Shale Leases, Oil Shale R, D&D Leases', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Oil Shale RD&D Leases', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Oil and Gas Pre-Reform Act Leases, Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Oil and Gas Pre-Reform Act Leases, Public Domain', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Oil and Gas Pre-Reform Act Over-the-Counter Leases, Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Oil and Gas Pre-Reform Act Over-the-Counter Leases, Public Domain', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Oil and Gas Special Act - Federal Farm Mortgage Corporation Act of 1934, Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Oil and Gas Special Act - Rights-of-Way of 1930, Public Domain', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Oil and Gas Special Act - Texas Relinquishment Act of 1919, Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Phosphate Leases, Phosphate Competitive Leases', 'Sector'] = '212392' # Phosphate Rock Mining
df.loc[df['Activity'] == 'Phosphate Leases, Phosphate Fringe Acreage Noncompetitive Leases', 'Sector'] = '212392' # Phosphate Rock Mining
df.loc[df['Activity'] == 'Phosphate Leases, Phosphate Preference Right Leases', 'Sector'] = '212392' # Phosphate Rock Mining
df.loc[df['Activity'] == 'Phosphate Use Permits', 'Sector'] = '212392' # Phosphate Rock Mining
df.loc[df['Activity'] == 'Potassium Leases, Potassium Competitive Leases', 'Sector'] = '212391' # Potash, Soda, and Borate Mineral Mining
df.loc[df['Activity'] == 'Potassium Leases, Potassium Fringe Acreage Noncompetitive Leases', 'Sector'] = '212391' # Potash, Soda, and Borate Mineral Mining
df.loc[df['Activity'] == 'Potassium Leases, Potassium Preference Right Leases', 'Sector'] = '212391' # Potash, Soda, and Borate Mineral Mining
df.loc[df['Activity'] == 'Pre-EPAct Competitive Geothermal Leases, Public Domain and Acquired Lands', 'Sector'] = '221116' # Power generation, geothermal
df.loc[df['Activity'] == 'Pre-Reform Act Simultaneous Leases, Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Pre-Reform Act Simultaneous Leases, Public Domain', 'Sector'] = '21111' # Oil and Gas Extraction #todo: added 1/13 check if needed
df.loc[df['Activity'] == 'Private Leases, Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Reform Act Leases, Acquired Lands', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Renewal Leases, Public Domain', 'Sector'] = '21111' # Oil and Gas Extraction
df.loc[df['Activity'] == 'Sodium Leases, Sodium Competitive Leases', 'Sector'] = '212391' # Potash, Soda, and Borate Mineral Mining
df.loc[df['Activity'] == 'Sodium Leases, Sodium Fringe Acreage Noncompetitive Leases', 'Sector'] = '212391' # Potash, Soda, and Borate Mineral Mining
df.loc[df['Activity'] == 'Sodium Leases, Sodium Preference Right Leases', 'Sector'] = '212391' # Potash, Soda, and Borate Mineral Mining
df.loc[df['Activity'] == 'Sodium Use Permit', 'Sector'] = '212391' # Potash, Soda, and Borate Mineral Mining
# todo: look over data - check next activity is double counting
#df.loc[df['Activity'] == 'Summary: Pre-Reform Act Simultaneous Leases, Public Domain and Acquired Lands', 'Sector'] = ''
return df
if __name__ == '__main__':
# select years to pull unique activity names
years = ['2007', '2011', '2012']
# assign flowclass
    flowclass = ['Land']
# assign datasource
datasource = 'BLM_PLS'
# df of unique ers activity names
    df = unique_activity_names(flowclass, years, datasource)
# add manual naics 2012 assignments
df = assign_naics(df)
# assign sector source name
df['SectorSourceName'] = 'NAICS_2012_Code'
# drop any rows where naics12 is 'nan' (because level of detail not needed or to prevent double counting)
df.dropna(subset=["Sector"], inplace=True)
# assign sector type
df['SectorType'] = "I"
# sort df
df = order_crosswalk(df)
# save as csv
df.to_csv(datapath + "activitytosectormapping/" + "Crosswalk_" + datasource + "_toNAICS.csv", index=False)
```
#### File: scripts/FlowByActivity_Crosswalks/write_Crosswalk_Census_AHS.py
```python
from flowsa.common import datapath
from scripts.common_scripts import unique_activity_names, order_crosswalk
def assign_naics(df):
"""manually assign each ERS activity to a NAICS_2012 code"""
# assign sector source name
df['SectorSourceName'] = 'NAICS_2012_Code'
df.loc[df['Activity'] == 'Asphalt Competitive Leases', 'Sector'] = ''
df.loc[df['Activity'] == 'Class III Reinstatement Leases, Public Domain', 'Sector'] = ''
df.loc[df['Activity'] == 'Coal Licenses, Exploration Licenses', 'Sector'] = ''
return df
if __name__ == '__main__':
# select years to pull unique activity names
years = ['2011', '2013', '2015', '2017']
# assign flowclass
    flowclass = ['Land']
# datasource
datasource = 'Census_AHS'
# df of unique ers activity names
    df = unique_activity_names(flowclass, years, datasource)
# add manual naics 2012 assignments
df = assign_naics(df)
# drop any rows where naics12 is 'nan' (because level of detail not needed or to prevent double counting)
df.dropna(subset=["Sector"], inplace=True)
# assign sector type
df['SectorType'] = None
# sort df
df = order_crosswalk(df)
# save as csv
df.to_csv(datapath + "activitytosectormapping/" + "Crosswalk_" + datasource + "_toNAICS.csv", index=False)
```
#### File: scripts/FlowByActivity_Crosswalks/write_Crosswalk_USGS_NWIS_WU.py
```python
import pandas as pd
from flowsa.common import datapath
from scripts.common_scripts import unique_activity_names, order_crosswalk
def assign_naics(df):
"""manually assign each ERS activity to a NAICS_2012 code"""
df.loc[df['Activity'] == 'Aquaculture', 'Sector'] = '1125'
# df.loc[df['Activity'] == 'Commercial', 'Sector'] = ''
df.loc[df['Activity'] == 'Domestic', 'Sector'] = 'F01000'
df.loc[df['Activity'] == 'Hydroelectric Power', 'Sector'] = '221111'
df.loc[df['Activity'] == 'Industrial', 'Sector'] = '1133'
df = df.append(pd.DataFrame([['Industrial', '23']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '31']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '32']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '33']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '48839']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '5111']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '51222']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '51223']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '54171']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '56291']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Industrial', '81149']], columns=['Activity', 'Sector']), sort=True)
df.loc[df['Activity'] == 'Irrigation', 'Sector'] = '111'
df = df.append(pd.DataFrame([['Irrigation', '112']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Irrigation', '71391']], columns=['Activity', 'Sector']), sort=True)
df.loc[df['Activity'] == 'Irrigation Crop', 'Sector'] = '111'
df = df.append(pd.DataFrame([['Irrigation Crop', '112']], columns=['Activity', 'Sector']), sort=True)
df.loc[df['Activity'] == 'Irrigation Golf Courses', 'Sector'] = '71391'
df.loc[df['Activity'] == 'Irrigation Total', 'Sector'] = '111'
df = df.append(pd.DataFrame([['Irrigation Total', '71391']], columns=['Activity', 'Sector']), sort=True)
df.loc[df['Activity'] == 'Livestock', 'Sector'] = '1121'
df = df.append(pd.DataFrame([['Livestock', '1122']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Livestock', '1123']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Livestock', '1124']], columns=['Activity', 'Sector']), sort=True)
df = df.append(pd.DataFrame([['Livestock', '1129']], columns=['Activity', 'Sector']), sort=True)
df.loc[df['Activity'] == 'Mining', 'Sector'] = '21'
df = df.append(pd.DataFrame([['Mining', '54136']], columns=['Activity', 'Sector']), sort=True)
df.loc[df['Activity'] == 'Public', 'Sector'] = '221310'
df.loc[df['Activity'] == 'Public Supply', 'Sector'] = '221310'
df = df.append(pd.DataFrame([['Thermoelectric Power', '2211']], columns=['Activity', 'Sector']), sort=True)
df.loc[df['Activity'] == 'Thermoelectric Power Closed-loop cooling', 'Sector'] = '221100A'
df.loc[df['Activity'] == 'Thermoelectric Power Once-through cooling', 'Sector'] = '221100B'
# df.loc[df['Activity'] == 'Total', 'Sector'] = ''
# df.loc[df['Activity'] == 'Total Groundwater', 'Sector'] = ''
# df.loc[df['Activity'] == 'Total Surface', 'Sector'] = ''
# assign sector source name
df['SectorSourceName'] = 'NAICS_2012_Code'
return df
if __name__ == '__main__':
# select years to pull unique activity names
years = ['2010', '2015']
# flowclass
flowclass = ['Water']
# datasource
datasource = 'USGS_NWIS_WU'
# df of unique ers activity names
df = unique_activity_names(flowclass, years, datasource)
# add manual naics 2012 assignments
df = assign_naics(df)
# drop any rows where naics12 is 'nan' (because level of detail not needed or to prevent double counting)
df.dropna(subset=["Sector"], inplace=True)
# assign sector type
df['SectorType'] = 'I'
    # assign activity source name
df['ActivitySourceName'] = 'USGS_NWIS_WU'
# sort df
df = order_crosswalk(df)
# save as csv
df.to_csv(datapath + "activitytosectormapping/" + "Crosswalk_" + datasource + "_toNAICS.csv", index=False)
``` |
{
"source": "JohNan/homeassistant-volkswagencarnet",
"score": 2
} |
#### File: custom_components/volkswagencarnet/sensor.py
```python
import logging
from . import DATA_KEY, VolkswagenEntity, DOMAIN
from .const import DATA
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Volkswagen sensors."""
if discovery_info is None:
return
async_add_entities([VolkswagenSensor(hass.data[DATA_KEY], *discovery_info)])
async def async_setup_entry(hass, entry, async_add_devices):
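    """Set up the Volkswagen sensors from a config entry."""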
data = hass.data[DOMAIN][entry.entry_id][DATA]
coordinator = data.coordinator
if coordinator.data is not None:
async_add_devices(
VolkswagenSensor(data, coordinator.vin, instrument.component, instrument.attr)
for instrument in (instrument for instrument in data.instruments if instrument.component == 'sensor')
)
return True
class VolkswagenSensor(VolkswagenEntity):
"""Representation of a Volkswagen Carnet Sensor."""
@property
def state(self):
"""Return the state of the sensor."""
_LOGGER.debug("Getting state of %s" % self.instrument.attr)
return self.instrument.state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self.instrument.unit
``` |
{
"source": "JohNan/homeassistant-wellbeing",
"score": 2
} |
#### File: custom_components/wellbeing/api.py
```python
import json
from datetime import datetime, timedelta
import asyncio
import logging
import socket
from enum import Enum
from typing import Union
import aiohttp
import async_timeout
from custom_components.wellbeing.const import SENSOR, FAN, BINARY_SENSOR
from homeassistant.components.binary_sensor import DEVICE_CLASS_CONNECTIVITY
from homeassistant.const import TEMP_CELSIUS, PERCENTAGE, DEVICE_CLASS_TEMPERATURE, DEVICE_CLASS_CO2, \
DEVICE_CLASS_HUMIDITY, CONCENTRATION_PARTS_PER_MILLION, CONCENTRATION_PARTS_PER_BILLION
TIMEOUT = 10
RETRIES = 3
BASE_URL = "https://api.delta.electrolux.com/api"
TOKEN_URL = "https://electrolux-wellbeing-client.vercel.app/api/mu<PASSWORD>"
LOGIN_URL = f"{BASE_URL}/Users/Login"
APPLIANCES_URL = f"{BASE_URL}/Domains/Appliances"
APPLIANCE_INFO_URL = f"{BASE_URL}/AppliancesInfo"
APPLIANCE_DATA_URL = f"{BASE_URL}/Appliances"
FILTER_TYPE = {
48: "Particle filter",
64: "Breeze 360 filter",
96: "Breeze 360 filter",
99: "Breeze 360 filter",
192: "Odor filter",
0: "Filter"
}
_LOGGER: logging.Logger = logging.getLogger(__package__)
HEADERS = {"Content-type": "application/json; charset=UTF-8"}
class Mode(str, Enum):
OFF = "PowerOff"
AUTO = "Auto"
MANUAL = "Manual"
UNDEFINED = "Undefined"
class ApplianceEntity:
entity_type: int = None
def __init__(self, name, attr, device_class=None) -> None:
self.attr = attr
self.name = name
self.device_class = device_class
self._state = None
def setup(self, data):
self._state = data[self.attr]
return self
def clear_state(self):
self._state = None
@property
def state(self):
return self._state
class ApplianceSensor(ApplianceEntity):
entity_type: int = SENSOR
def __init__(self, name, attr, unit="", device_class=None) -> None:
super().__init__(name, attr, device_class)
self.unit = unit
class ApplianceFan(ApplianceEntity):
entity_type: int = FAN
def __init__(self, name, attr) -> None:
super().__init__(name, attr)
class ApplianceBinary(ApplianceEntity):
entity_type: int = BINARY_SENSOR
def __init__(self, name, attr, device_class=None) -> None:
super().__init__(name, attr, device_class)
@property
def state(self):
return self._state in ['enabled', True, 'Connected']
class Appliance:
serialNumber: str
brand: str
device: str
firmware: str
mode: Mode
entities: []
def __init__(self, name, pnc_id, model) -> None:
self.model = model
self.pnc_id = pnc_id
self.name = name
@staticmethod
def _create_entities(data):
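        """Build the A7- and A9-specific entities plus the entities common to both purifier models."""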
a7_entities = [
ApplianceSensor(
name="eCO2",
attr='ECO2',
unit=CONCENTRATION_PARTS_PER_MILLION,
device_class=DEVICE_CLASS_CO2
),
ApplianceSensor(
name=f"{FILTER_TYPE[data.get('FilterType_1', 0)]} Life",
attr='FilterLife_1',
unit=PERCENTAGE
),
ApplianceSensor(
name=f"{FILTER_TYPE[data.get('FilterType_2', 0)]} Life",
attr='FilterLife_2',
unit=PERCENTAGE
),
ApplianceSensor(
name='State',
attr='State'
),
ApplianceBinary(
name='PM Sensor State',
attr='PMSensState'
)
]
a9_entities = [
ApplianceSensor(
name=f"{FILTER_TYPE.get(data.get('FilterType', 0), 'Filter')} Life",
attr='FilterLife',
unit=PERCENTAGE
),
ApplianceSensor(
name="CO2",
attr='CO2',
unit=CONCENTRATION_PARTS_PER_MILLION,
device_class=DEVICE_CLASS_CO2
),
]
common_entities = [
ApplianceFan(
name="Fan Speed",
attr='Fanspeed'
),
ApplianceSensor(
name="Temperature",
attr='Temp',
unit=TEMP_CELSIUS,
device_class=DEVICE_CLASS_TEMPERATURE
),
ApplianceSensor(
name="TVOC",
attr='TVOC',
unit=CONCENTRATION_PARTS_PER_BILLION
),
ApplianceSensor(
name="PM1",
attr='PM1'
),
ApplianceSensor(
name="PM2.5",
attr='PM2_5'
),
ApplianceSensor(
name="PM10",
attr='PM10'
),
ApplianceSensor(
name="Humidity",
attr='Humidity',
unit=PERCENTAGE,
device_class=DEVICE_CLASS_HUMIDITY
),
ApplianceSensor(
name="Mode",
attr='Workmode'
),
ApplianceBinary(
name="Ionizer",
attr='Ionizer'
),
ApplianceBinary(
name="UI Light",
attr='UILight'
),
ApplianceBinary(
name="Connection State",
attr='connectionState',
device_class=DEVICE_CLASS_CONNECTIVITY
),
ApplianceBinary(
name="Status",
attr='status'
),
ApplianceBinary(
name="Safety Lock",
attr='SafetyLock'
)
]
return common_entities + a9_entities + a7_entities
def get_entity(self, entity_type, entity_attr):
return next(
entity
for entity in self.entities
if entity.attr == entity_attr and entity.entity_type == entity_type
)
def clear_mode(self):
self.mode = Mode.UNDEFINED
def setup(self, data):
self.firmware = data.get('FrmVer_NIU')
self.mode = Mode(data.get('Workmode'))
self.entities = [
entity.setup(data)
for entity in Appliance._create_entities(data) if entity.attr in data
]
@property
def speed_range(self):
if self.model == "WELLA7":
return 1, 5
if self.model == "PUREA9":
return 1, 9
return 0
class Appliances:
def __init__(self, appliances) -> None:
self.appliances = appliances
def get_appliance(self, pnc_id):
return self.appliances.get(pnc_id, None)
class WellbeingApiClient:
def __init__(self, username: str, password: str, session: aiohttp.ClientSession) -> None:
"""Sample API Client."""
self._username = username
self._password = password
self._session = session
self._access_token = None
self._token = None
self._current_access_token = None
self._token_expires = datetime.now()
self.appliances = None
async def _get_token(self) -> dict:
return await self.api_wrapper("get", TOKEN_URL)
async def _login(self, access_token: str) -> dict:
credentials = {
"Username": self._username,
"Password": self._password
}
headers = {
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"Accept": "application/json"
}
return await self.api_wrapper("post", LOGIN_URL, credentials, headers)
async def _get_appliances(self, access_token: str) -> dict:
headers = {
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"Accept": "application/json"
}
return await self.api_wrapper("get", APPLIANCES_URL, headers=headers)
async def _get_appliance_info(self, access_token: str, pnc_id: str) -> dict:
headers = {
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"Accept": "application/json"
}
url = f"{APPLIANCE_INFO_URL}/{pnc_id}"
return await self.api_wrapper("get", url, headers=headers)
async def _get_appliance_data(self, access_token: str, pnc_id: str) -> dict:
headers = {
"Authorization": f"Bearer {access_token}"
}
return await self.api_wrapper("get", f"{APPLIANCE_DATA_URL}/{pnc_id}", headers=headers)
async def async_login(self) -> bool:
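        """Log in to the API, reusing the current access token if it has not yet expired."""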
if self._current_access_token is not None and self._token_expires > datetime.now():
return True
_LOGGER.debug("Current token is not set or expired")
self._token = None
self._current_access_token = None
access_token = await self._get_token()
if 'accessToken' not in access_token:
self._access_token = None
self._current_access_token = None
_LOGGER.debug("AccessToken 1 is missing")
return False
token = await self._login(access_token['accessToken'])
if 'accessToken' not in token:
self._current_access_token = None
_LOGGER.debug("AccessToken 2 is missing")
return False
self._token_expires = datetime.now() + timedelta(seconds=token['expiresIn'])
self._current_access_token = token['accessToken']
return True
async def async_get_data(self) -> Appliances:
"""Get data from the API."""
n = 0
while not await self.async_login() and n < RETRIES:
_LOGGER.debug(f"Re-trying login. Attempt {n + 1} / {RETRIES}")
n += 1
if self._current_access_token is None:
raise Exception("Unable to login")
access_token = self._current_access_token
appliances = await self._get_appliances(access_token)
_LOGGER.debug(f"Fetched data: {appliances}")
found_appliances = {}
for appliance in (appliance for appliance in appliances if 'pncId' in appliance):
app = Appliance(appliance['applianceName'], appliance['pncId'], appliance['modelName'])
appliance_info = await self._get_appliance_info(access_token, appliance['pncId'])
_LOGGER.debug(f"Fetched data: {appliance_info}")
app.brand = appliance_info['brand']
app.serialNumber = appliance_info['serialNumber']
app.device = appliance_info['device']
if app.device != 'AIR_PURIFIER':
continue
appliance_data = await self._get_appliance_data(access_token, appliance['pncId'])
_LOGGER.debug(f"{appliance_data.get('applianceData', {}).get('applianceName', 'N/A')}: {appliance_data}")
data = appliance_data.get('twin', {}).get('properties', {}).get('reported', {})
data['connectionState'] = appliance_data.get('twin', {}).get('connectionState')
data['status'] = appliance_data.get('twin', {}).get('connectionState')
app.setup(data)
found_appliances[app.pnc_id] = app
return Appliances(found_appliances)
async def set_fan_speed(self, pnc_id: str, level: int):
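        """Set the fan speed of the appliance identified by pnc_id."""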
data = {
"Fanspeed": level
}
result = await self._send_command(self._current_access_token, pnc_id, data)
_LOGGER.debug(f"Set Fan Speed: {result}")
async def set_work_mode(self, pnc_id: str, mode: Mode):
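        """Set the work mode of the appliance identified by pnc_id."""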
data = {
"WorkMode": mode
}
result = await self._send_command(self._current_access_token, pnc_id, data)
_LOGGER.debug(f"Set Fan Speed: {result}")
async def _send_command(self, access_token: str, pnc_id: str, command: dict) -> None:
"""Get data from the API."""
headers = {
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"Accept": "application/json"
}
await self.api_wrapper("put", f"{APPLIANCE_DATA_URL}/{pnc_id}/Commands", data=command, headers=headers)
async def api_wrapper(self, method: str, url: str, data: dict = {}, headers: dict = {}) -> dict:
"""Get information from the API."""
try:
async with async_timeout.timeout(TIMEOUT):
if method == "get":
response = await self._session.get(url, headers=headers)
return await response.json()
elif method == "put":
response = await self._session.put(url, headers=headers, json=data)
return await response.json()
elif method == "patch":
await self._session.patch(url, headers=headers, json=data)
elif method == "post":
response = await self._session.post(url, headers=headers, json=data)
return await response.json()
except asyncio.TimeoutError as exception:
_LOGGER.error(
"Timeout error fetching information from %s - %s",
url,
exception,
)
except (KeyError, TypeError) as exception:
_LOGGER.error(
"Error parsing information from %s - %s",
url,
exception,
)
except (aiohttp.ClientError, socket.gaierror) as exception:
_LOGGER.error(
"Error fetching information from %s - %s",
url,
exception,
)
except Exception as exception: # pylint: disable=broad-except
_LOGGER.error("Something really wrong happened! - %s", exception)
``` |
{
"source": "JohnAnih/BMW-used-car-prediction",
"score": 3
} |
#### File: app/frontend/utils.py
```python
from collections import OrderedDict
import dash_bootstrap_components as dbc
import dash_html_components as html
def get_results(mileage: int, transmission: str,
fuel_type: str, car_year: int, mpg: int,
tax: int, engine_class: str, model_type: str, prediction: str):
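    """Build the Dash components that summarize the entered details and the estimated price."""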
features = OrderedDict({
"Mileage:": mileage,
"Transmission:": transmission,
"Fuel Type:": fuel_type,
"Car Year:": car_year,
"MPG:": mpg,
"Tax:": tax,
"Engine Class:": engine_class,
"Car Model:": model_type,
})
results = [html.H3("You entered the following details:",
className="h3-additional-style", style={"margin-bottom": "2%"}),
]
for feature, user_input in features.items():
results.append(html.B(feature),)
results.append(html.P(f"{user_input}"),)
results.append(dbc.Badge(f"Estimated price: ${prediction}", className="price-style"),)
return results
def show_errror_msg():
return "You must fill in all questions to make a predictions"
``` |
{
"source": "JohNan/pyplaato",
"score": 3
} |
#### File: pyplaato/models/keg.py
```python
from datetime import datetime
from enum import Enum
import dateutil.parser
from .device import PlaatoDevice, PlaatoDeviceType
from .pins import PinsBase
from ..const import UNIT_TEMP_CELSIUS, UNIT_TEMP_FAHRENHEIT, UNIT_PERCENTAGE, \
METRIC, UNIT_OZ, UNIT_ML
class PlaatoKeg(PlaatoDevice):
"""Class for holding a Plaato Keg"""
device_type = PlaatoDeviceType.Keg
def __init__(self, attrs):
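        """Extract the keg attributes from the raw pin/value mapping."""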
self.beer_left_unit = attrs.get(self.Pins.BEER_LEFT_UNIT, None)
self.volume_unit = attrs.get(self.Pins.VOLUME_UNIT, None)
self.mass_unit = attrs.get(self.Pins.MASS_UNIT, None)
self.measure_unit = attrs.get(self.Pins.MEASURE_UNIT, None)
self.og = attrs.get(self.Pins.OG, None)
self.fg = attrs.get(self.Pins.FG, None)
self.__mode = attrs.get(self.Pins.MODE, None)
self.__firmware_version = attrs.get(self.Pins.FIRMWARE_VERSION, None)
self.__leak_detection = attrs.get(self.Pins.LEAK_DETECTION, None)
self.__abv = attrs.get(self.Pins.ABV, None)
self.__name = attrs.get(self.Pins.BEER_NAME, "Beer")
self.__percent_beer_left = attrs.get(self.Pins.PERCENT_BEER_LEFT, None)
self.__pouring = attrs.get(self.Pins.POURING, False)
self.__beer_left = attrs.get(self.Pins.BEER_LEFT, None)
self.__temperature = attrs.get(self.Pins.TEMPERATURE, None)
self.__unit_type = attrs.get(self.Pins.UNIT_TYPE, None)
self.__last_pour = attrs.get(self.Pins.LAST_POUR, None)
self.__date = attrs.get(self.Pins.DATE, None)
def __repr__(self):
return f"{self.__class__.__name__} -> " \
f"Bear Left: {self.beer_left}, " \
f"Temp: {self.temperature}, " \
f"Pouring: {self.pouring}"
@property
def date(self) -> float:
        if self.__date is not None and self.__date:
date = dateutil.parser.parse(self.__date)
return datetime.timestamp(date)
return super().date
@property
def temperature(self):
if self.__temperature is not None:
return round(float(self.__temperature), 1)
@property
def temperature_unit(self):
if self.__unit_type == METRIC:
return UNIT_TEMP_CELSIUS
return UNIT_TEMP_FAHRENHEIT
@property
def beer_left(self):
if self.__beer_left is not None:
return round(float(self.__beer_left), 2)
@property
def percent_beer_left(self):
if self.__percent_beer_left is not None:
return round(self.__percent_beer_left, 2)
@property
def last_pour(self):
if self.__last_pour is not None:
return round(float(self.__last_pour), 2)
@property
def last_pour_unit(self):
if self.__unit_type == METRIC:
return UNIT_ML
return UNIT_OZ
@property
def abv(self):
        if self.__abv is not None and self.__abv:
return round(float(self.__abv), 2)
@property
def pouring(self):
"""
0 = Not Pouring
255 = Pouring
:return: True if 255 = Pouring else False
"""
return self.__pouring == "255"
@property
def leak_detection(self):
"""
1 = Leaking
0 = Not Leaking
:return: True if 1 = Leaking else False
"""
return self.__leak_detection == "1"
@property
def mode(self):
"""
1 = Beer
2 = Co2
"""
return "Beer" if self.__mode == "1" else "Co2"
@property
def name(self) -> str:
return self.__name
@property
def firmware_version(self) -> str:
return self.__firmware_version
def get_sensor_name(self, pin: PinsBase) -> str:
names = {
self.Pins.PERCENT_BEER_LEFT: "Percent Beer Left",
self.Pins.POURING: "Pouring",
self.Pins.BEER_LEFT: "Beer Left",
self.Pins.TEMPERATURE: "Temperature",
self.Pins.LAST_POUR: "Last Pour Amount",
self.Pins.OG: "Original Gravity",
self.Pins.FG: "Final Gravity",
self.Pins.ABV: "Alcohol by Volume",
self.Pins.LEAK_DETECTION: "Leaking",
self.Pins.MODE: "Mode",
self.Pins.DATE: "Keg Date",
self.Pins.BEER_NAME: "Beer Name"
}
return names.get(pin, pin.name)
@property
def sensors(self) -> dict:
return {
self.Pins.PERCENT_BEER_LEFT: self.percent_beer_left,
self.Pins.BEER_LEFT: self.beer_left,
self.Pins.TEMPERATURE: self.temperature,
self.Pins.LAST_POUR: self.last_pour
}
@property
def binary_sensors(self) -> dict:
return {
self.Pins.LEAK_DETECTION: self.leak_detection,
self.Pins.POURING: self.pouring,
}
@property
def attributes(self) -> dict:
return {
self.get_sensor_name(self.Pins.BEER_NAME): self.name,
self.get_sensor_name(self.Pins.DATE): datetime.fromtimestamp(self.date).strftime('%x'),
self.get_sensor_name(self.Pins.MODE): self.mode,
self.get_sensor_name(self.Pins.OG): self.og,
self.get_sensor_name(self.Pins.FG): self.fg,
self.get_sensor_name(self.Pins.ABV): self.abv
}
def get_unit_of_measurement(self, pin: PinsBase):
if pin == self.Pins.BEER_LEFT:
return self.beer_left_unit
if pin == self.Pins.TEMPERATURE:
return self.temperature_unit
if pin == self.Pins.LAST_POUR:
return self.last_pour_unit
if pin == self.Pins.ABV or pin == self.Pins.PERCENT_BEER_LEFT:
return UNIT_PERCENTAGE
return ""
# noinspection PyTypeChecker
@staticmethod
def pins():
return list(PlaatoKeg.Pins)
class Pins(PinsBase, Enum):
BEER_NAME = "v64"
PERCENT_BEER_LEFT = "v48"
POURING = "v49"
BEER_LEFT = "v51"
BEER_LEFT_UNIT = "v74"
TEMPERATURE = "v56"
UNIT_TYPE = "v71"
MEASURE_UNIT = "v75"
MASS_UNIT = "v73"
VOLUME_UNIT = "v82"
LAST_POUR = "v59"
DATE = "v67"
OG = "v65"
FG = "v66"
ABV = "v68"
FIRMWARE_VERSION = "v93"
LEAK_DETECTION = "v83"
MODE = "v88"
``` |
{
"source": "johnanthonyjose/fvcore",
"score": 3
} |
#### File: tests/param_scheduler/test_scheduler_exponential.py
```python
import unittest
from fvcore.common.param_scheduler import ExponentialParamScheduler
class TestExponentialScheduler(unittest.TestCase):
_num_epochs = 10
def _get_valid_config(self):
return {"start_value": 2.0, "decay": 0.1}
def _get_valid_intermediate_values(self):
return [1.5887, 1.2619, 1.0024, 0.7962, 0.6325, 0.5024, 0.3991, 0.3170, 0.2518]
def test_scheduler(self):
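        """Verify the generated exponential schedule matches the expected values."""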
config = self._get_valid_config()
scheduler = ExponentialParamScheduler(**config)
schedule = [
round(scheduler(epoch_num / self._num_epochs), 4)
for epoch_num in range(self._num_epochs)
]
expected_schedule = [
config["start_value"]
] + self._get_valid_intermediate_values()
self.assertEqual(schedule, expected_schedule)
``` |
{
"source": "johnanthonyowens/ctds",
"score": 3
} |
#### File: ctds/tests/test_cursor_rowlist.py
```python
import ctds
from .base import TestExternalDatabase
class TestCursorRowList(TestExternalDatabase):
def test___doc__(self):
self.assertEqual(
ctds.RowList.__doc__,
'''\
A :ref:`sequence <python:sequence>` object which buffers result set rows
in a lightweight manner. Python objects wrapping the columnar data are
only created when the data is actually accessed.
'''
)
def test_indexerror(self):
with self.connect() as connection:
with connection.cursor() as cursor:
cursor.execute(
'''
DECLARE @{0} TABLE(i INT);
INSERT INTO @{0}(i) VALUES (1),(2),(3),(4),(5),(6);
SELECT * FROM @{0};
'''.format(self.test_indexerror.__name__)
)
rows = cursor.fetchall()
for index in (6, 7):
try:
rows[index]
except IndexError as ex:
self.assertEqual('index is out of range', str(ex))
else:
self.fail('IndexError was not raised for index {0}'.format(index)) # pragma: nocover
def test_rowlist(self):
with self.connect() as connection:
with connection.cursor() as cursor:
cursor.execute(
'''
DECLARE @{0} TABLE(i INT);
INSERT INTO @{0}(i) VALUES (1),(2),(3),(4),(5),(6);
SELECT * FROM @{0};
'''.format(self.test_rowlist.__name__)
)
description = cursor.description
rows = cursor.fetchall()
# The rowlist should be accessible after closing the cursor
# and connection.
self.assertTrue(isinstance(rows, ctds.RowList))
self.assertEqual(len(rows), 6)
self.assertTrue(rows.description is description)
for index, row in enumerate(rows):
# The row object should always be the same instance.
self.assertTrue(isinstance(row, ctds.Row))
self.assertEqual(id(row), id(rows[index]))
``` |
{
"source": "JohnAntonios/slackthespot",
"score": 2
} |
#### File: JohnAntonios/slackthespot/app.py
```python
from time import time, sleep
from pathlib import Path
from dotenv import load_dotenv, set_key
from os import getenv
from slack import WebClient
from slack.errors import SlackApiError
from json import dumps
from tekore.scope import every
from tekore.util import prompt_for_user_token, refresh_user_token
from tekore import Spotify
from tekore.sender import PersistentSender
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
redirect_uri = 'http://localhost/oauth_code'
spotify_client_id = getenv("SPOTIFY_CLIENT_ID")
spotify_client_secret = getenv("SPOTIFY_CLIENT_SECRET")
spotify_refresh_token = getenv("SPOTIFY_CURRENT_USER_TOKEN")
token = None
if spotify_refresh_token:
token = refresh_user_token(
spotify_client_id,
spotify_client_secret,
spotify_refresh_token
)
else:
# This only needs to be done once, or when you would like to switch the Spotify user to connect to.
token = prompt_for_user_token(
spotify_client_id,
spotify_client_secret,
redirect_uri,
every
)
set_key(env_path, "SPOTIFY_CURRENT_USER_TOKEN", token.refresh_token, "n")
spotify = Spotify(token, sender=PersistentSender())
def set_status_message(spotifyInst):
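    """Build a Slack status message for the currently playing Spotify track, or return False if nothing is playing."""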
track = spotifyInst.playback_currently_playing()
if track is None:
return False
if track.is_playing == False:
return False
    track_name = track.item.name
    track_artists = ', '.join([artist.name for artist in track.item.artists])
    message = f"'{track_name}' - {track_artists} github.com/JohnAntonios/slackthespot"
    # Slack has a limit of 100 characters for the status message.
    if len(message) > 100:
        # Omitting the artist names
        message = f"'{track_name}' github.com/JohnAntonios/slackthespot"
        # Remove the github link, if the message is still too long
        if len(message) > 100:
            message = track_name
            # If it's still too long, then rip
            if len(message) > 100:
                message = "The current song is too long to show in Slack"
return message
slack_token = input("Slack Token: ")
client = WebClient(token=slack_token)
def do_it():
print("Keep this terminal window open!")
status_message = set_status_message(spotify)
if status_message != False:
try:
new_status = dumps({
"status_text": status_message,
"status_emoji": ":musical_note:"
})
res = client.users_profile_set(profile=new_status)
            if res is not None and res.status_code == 200:
                print(f"Success! {status_message}")
        except SlackApiError as e:
            print(f"something broke: {e}")
else:
print("Not playing anything on Spotify or Paused or Buffering a track")
start_time = time()
while True:
do_it()
sleep(60.0 - ((time() - start_time) % 60.0))
``` |
{
"source": "johnantonn/cord-19",
"score": 4
} |
#### File: cord-19/preprocessing/JSONProcessor.py
```python
class JSONProcessor:
""" Class that contains methods to process json data. """
def convert_dictionaries_to_list(self, json_input, dictionaries_to_convert):
""" Function that converts dictionaries inside the given json to list of dictionaries.
Args:
json_input (dict): The input json
dictionaries_to_convert (list): A list with all the dictionaries to convert along with
the required information
Returns:
(dict): The converted json
"""
for dictionary_to_convert in dictionaries_to_convert:
dict_data = json_input[dictionary_to_convert["external_key"]]
if type(dict_data) != dict:
continue
internal_key = dictionary_to_convert["internal_key"]
list_of_dicts = list()
for key, value in dict_data.items():
value[internal_key] = key
list_of_dicts.append(value)
json_input[dictionary_to_convert["external_key"]] = list_of_dicts
return json_input
def convert_list_to_dictionaries(self, json_input, lists_to_convert):
""" Function that converts lists of objects to dictionaries and adds them to new key-value.
Args:
json_input (dict): The input json
lists_to_convert: A list with all the lists to make them dictionaries
Returns:
(dict): The converted json
"""
for list_to_convert in lists_to_convert:
list_data = json_input[list_to_convert["external_key"]]
if type(list_data) != list:
continue
internal_key = list_to_convert["internal_key"]
new_external_key = list_to_convert["new_external_key"]
dictionary = dict()
for i in range(len(list_data)):
dictionary[new_external_key + str(i)] = list_data[i]
json_input[list_to_convert["new_external_key"]] = dictionary
return json_input
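# Hedged usage sketch (the keys below are illustrative only, not a claim about the dataset schema):
if __name__ == "__main__":
    processor = JSONProcessor()
    doc = {"bib_entries": {"BIBREF0": {"title": "A"}, "BIBREF1": {"title": "B"}}}
    converted = processor.convert_dictionaries_to_list(
        doc, [{"external_key": "bib_entries", "internal_key": "ref_id"}])
    # converted["bib_entries"] is now a list:
    # [{"title": "A", "ref_id": "BIBREF0"}, {"title": "B", "ref_id": "BIBREF1"}]
    print(converted)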
``` |
{
"source": "johnaparker/fields",
"score": 2
} |
#### File: tests/drude/compare.py
```python
import numpy as np
import matplotlib.pyplot as plt
import h5py
from subprocess import call
def L2_norm(y1,y2):
return np.linalg.norm(y1-y2)
def sup_norm(y1,y2):
i = np.argmax(np.abs(y1-y2))
return np.abs(y1-y2)[i], wav[-i]
freq = np.linspace(1/30, 3/30, 200)
omega = 2*np.pi*freq
wav = 1/freq
# qbox result
with h5py.File("scat.h5", 'r') as f:
inc = f["sources/tfsf/flux"][...]
scat = f["monitors/box_monitor/scat/flux"][...]
omega_0 = f['materials/material_0/omega_0'][...]
gamma = f['materials/material_0/gamma'][...]
eps_inf = f['materials/material_0/eps_inf'][...]
scat_qbox = scat/inc
plt.plot(wav, scat_qbox, label = "qbox")
# meep result
with h5py.File("meep/test2.h5", 'r') as f:
inc = f["inc"][...]/40
scat = f["box_scat"][...]
scat_meep = scat/inc
plt.plot(wav, scat_meep, label = "meep")
# mie theory result
filename = "eps.txt"
eps = eps_inf - omega_0**2/(omega**2 - 1j*omega*gamma)
print(gamma.shape)
to_file = np.array([wav[::-1], eps.real[::-1], np.abs(eps.imag[::-1])])
np.savetxt(filename, to_file.T)
call(['../../bench/mie-theory-cylinder/a.out'], stdin=open('cyl.in'))
theory = np.loadtxt("mie.out").T
plt.plot(theory[0], theory[1], label="exact")
# print L2 errors
print("L2 norm (meep): {0:.2f}".format(L2_norm(theory[1], scat_meep[::-1])))
print("sup norm (meep): {0:.2f} at {1:.2f}".format(*sup_norm(theory[1], scat_meep[::-1])))
print("L2 norm (qbox): {0:.2f}".format(L2_norm(theory[1], scat_qbox[::-1])))
print("sup norm (qbox): {0:.2f} at {1:.2f}".format(*sup_norm(theory[1], scat_qbox[::-1])))
plt.legend()
plt.show()
```
#### File: dynamics/simple/animation.py
```python
import h5py
import numpy as np
import matplotlib.pyplot as plt
import my_pytools.my_matplotlib.colors as colors
import matplotlib.animation as animation
filename = "metal_sphere"
filename = "metal_ellipse"
filename = "dielectric_block"
filename = "metal_block"
filename = "out"
with h5py.File('{}.h5'.format(filename), 'r') as f:
d = f["monitors/volume_monitor/monitor_0/Ez"][...]
Er = d['real']
Ei = d['imag']
I = Er**2 + Ei**2
I = I.squeeze()
vmax = np.max(I)
vmin = 0
plt.gca().set_aspect('equal')
im = plt.pcolormesh(I.T, cmap=colors.cmap['parula'], vmax = vmax, vmin = vmin)
def updatefig(frame):
d = f["monitors/volume_monitor/monitor_{0}/Ez".format(frame)][...]
Er = d['real']
Ei = d['imag']
I = Er**2 + Ei**2
I = I.squeeze()
im.set_array(np.ravel(I.T))
return im,
writer = animation.FFMpegWriter(fps=30, bitrate=4000)
ani = animation.FuncAnimation(plt.gcf(), updatefig, np.arange(0,100), interval=30, blit=False)
# ani.save("{}.mp4".format(filename), writer=writer)
plt.show()
``` |
{
"source": "johnaparker/johnaparker.github.io",
"score": 2
} |
#### File: johnaparker.github.io/_code/test.py
```python
import numpy as np
import miepy
from topics.photonic_clusters.create_lattice import hexagonal_lattice_particles
import matplotlib.pyplot as plt
from numpipe import scheduler, pbar
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
from my_pytools.my_matplotlib.layout import alpha_labels
from scipy import constants
from scipy.integrate import trapz
from my_pytools.my_matplotlib.plots import colorbar
mpl.rc('font', size=12, family='arial')
mpl.rc('mathtext', default='regular')
job = scheduler()
nm = 1e-9
um = 1e-6
Ag = miepy.materials.Ag()
radius = 75*nm
width = 2500*nm
wavelengths = np.linspace(470*nm, 880*nm, 1000)
energy = constants.h*constants.c/constants.e/wavelengths
separation = 600*nm
source = miepy.sources.gaussian_beam(width=width, polarization=[1,1j], power=1)
source = miepy.sources.plane_wave(polarization=[1,1j], amplitude=1e7)
lmax = 2
water = miepy.materials.water()
@job.cache
def fields():
pos = lattice[:]*600*nm
# pos -= np.average(pos, axis=0)[np.newaxis]
xmax = 100*nm
pos = [[-xmax,-xmax,0], [xmax,xmax,0]]
cluster = miepy.sphere_cluster(position=pos,
radius=75*nm,
material=Ag,
source=source,
wavelength=800*nm,
lmax=lmax,
medium=water)
xmax = 500*nm
x = np.linspace(-xmax, xmax, 250)
y = np.linspace(-xmax, xmax, 250)
X, Y = np.meshgrid(x, y)
Z = np.zeros_like(X)
E = cluster.E_field(X, Y, Z)
Esrc = cluster.E_source(X, Y, Z)
# enhance = np.linalg.norm(E, axis=0)/np.linalg.norm(Esrc, axis=0)
enhance = np.linalg.norm(E, axis=0)**2
return dict(enhance=enhance, X=X, Y=Y, E=E)
lattice = hexagonal_lattice_particles(37)
pos = 600*nm*lattice
@job.plots
def vis():
from my_pytools.my_matplotlib.colors import cmap
cmap = cmap['parula']
fig, ax = plt.subplots(figsize=(6,6))
vm = 13
var = job.load(fields)
vmax = np.max(var.enhance)/3
vmin = np.min(var.enhance)*80
for j in range(len(lattice)):
circle = plt.Circle(pos[j,:2]/nm, 90, color='C3', fill=False, lw=2)
# ax.add_patch(circle)
im = ax.pcolormesh(var.X/nm, var.Y/nm, var.enhance, rasterized=True, cmap=cmap, vmax=vmax, vmin=vmin)
skip = 15
idx = np.s_[::skip,::skip]
ax.quiver(var.X[idx]/nm, var.Y[idx]/nm, var.E.real[0][idx], var.E.real[1][idx], pivot='mid')
# im = ax.contourf(var.X/nm, var.Y/nm, var.enhance, rasterized=True, cmap=cmap, vmax=vmax)
# plt.colorbar(im, ax=ax, label='field enhancement')
ax.set_aspect('equal')
ax.axis('off')
ax.set_xlim([-500,500])
plt.tight_layout(pad=0)
plt.savefig('temp.png', bbox_inches=0)
job.run()
``` |
{
"source": "johnaparker/meep_ext",
"score": 2
} |
#### File: meep_ext/templates/separation_forces.py
```python
import numpy as np
from scipy import constants
import matplotlib.pyplot as plt
import matplotlib as mpl
import meep
import meep_ext
import pinboard
job = pinboard.pinboard()
### transformation optics
nb = 1.33
scale = nb
nm = 1e-9*scale
um = 1e-6*scale
### geometry
radius = 75*nm
gold = meep_ext.material.Au(multiplier=1/scale**2)
# gold = meep.Medium(index=3.5/scale)
### source
wavelength = 550*nm/scale
fcen = 1/wavelength
src_time = meep.GaussianSource(frequency=1.3*scale/um, fwidth=4.0*scale/um)
polarization = meep.Ex # used in convergence check 'decay_by'
source = lambda sim: meep_ext.rhc_polarized_plane_wave(sim, src_time)
### monitor info
pml_monitor_gap = 50*nm
particle_monitor_gap = 50*nm
### grid
resolution = 1/(10*nm)
pml = meep.PML(100*nm)
@job.cache
def norm_sim():
"""perform normalization simulation"""
L = 2*radius + 2*pml_monitor_gap + 2*particle_monitor_gap + 2*pml.thickness
cell = meep.Vector3(L,L,L)
norm = meep.Simulation(cell_size=cell,
boundary_layers=[pml],
geometry=[],
resolution=resolution)
norm.init_fields()
source(norm)
flux_inc = meep_ext.add_flux_plane(norm, fcen, 0, 1, [0,0,0], [2*radius, 2*radius, 0])
norm.run(until_after_sources=meep.stop_when_fields_decayed(.5*um, polarization,
pt=meep.Vector3(0,0,0), decay_by=1e-3))
return {'area': (2*radius)**2, 'norm': np.asarray(meep.get_fluxes(flux_inc))}
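# vis() below divides the forces from sim() by this incident flux and multiplies by the
# monitor area and epsilon_0/2, expressing them per unit incident intensity.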
@job.cache
def sim(separation):
"""perform scattering simulation"""
L = separation + 2*radius + 2*pml_monitor_gap + 2*particle_monitor_gap + 2*pml.thickness
cell = meep.Vector3(L,L,L)
p1 = meep.Vector3(-separation/2, 0, 0)
p2 = meep.Vector3(separation/2, 0, 0)
geometry = [meep.Sphere(center=p1,
radius=radius,
material=gold),
meep.Sphere(center=p2,
radius=radius,
material=gold)]
scat = meep.Simulation(cell_size=cell,
boundary_layers=[pml],
geometry=geometry,
resolution=resolution)
scat.init_fields()
source(scat)
L = 2*radius + 2*particle_monitor_gap
Fx = meep_ext.add_force_box(scat, fcen, 0, 1, p2, [L,L,L], meep.X)
Fy = meep_ext.add_force_box(scat, fcen, 0, 1, p2, [L,L,L], meep.Y)
Fz = meep_ext.add_force_box(scat, fcen, 0, 1, p2, [L,L,L], meep.Z)
# scat.run(until_after_sources=8*um)
scat.run(until_after_sources=meep.stop_when_fields_decayed(.5*um, polarization,
pt=p2-meep.Vector3(0,0,L/2), decay_by=1e-4))
return {'Fx': np.array(meep.get_forces(Fx))[0], 'Fy': np.array(meep.get_forces(Fy))[0], 'Fz': np.array(meep.get_forces(Fz))[0]}
@job.at_end
def vis():
### forces
fig, ax = plt.subplots()
force = np.zeros([3,len(separations)])
for i,separation in enumerate(separations):
var = job.load(sim, f'p{i}')
force[0,i] = var.Fx
force[1,i] = var.Fy
force[2,i] = var.Fz
norm = job.load(norm_sim)
ax.axhline(0, linestyle='--', color='black')
ax.plot(separations/nm, force[0]/norm.norm*norm.area*constants.epsilon_0/2, 'o', color='C0', label='Fx (FDTD)')
ax.plot(separations/nm, force[1]/norm.norm*norm.area*constants.epsilon_0/2, 'o', color='C1', label='Fy (FDTD)')
ax.plot(separations/nm, force[2]/norm.norm*norm.area*constants.epsilon_0/2, 'o', color='C2', label='Fz (FDTD)')
import miepy
eps = meep_ext.get_eps(gold)(wavelength)
Au = miepy.constant_material(eps*scale**2)
water = miepy.constant_material(nb**2)
source = miepy.sources.rhc_polarized_plane_wave()
seps = np.linspace(300*nm/scale, 900*nm/scale, 100)
force = np.zeros([3,len(seps)])
for i,sep in enumerate(seps):
spheres = miepy.spheres([[-sep/2,0,0],[sep/2,0,0]], radius/scale, Au)
sol = miepy.gmt(spheres, source, wavelength, 2, medium=water)
F = sol.force_on_particle(1)
force[:,i] = F.squeeze()
ax.plot(seps*scale/nm, force[0], color='C0', label='Fx (GMT)')
ax.plot(seps*scale/nm, force[1], color='C1', label='Fy (GMT)')
ax.plot(seps*scale/nm, force[2], color='C2', label='Fz (GMT)')
ax.set(xlabel='separation (nm)', ylabel='force')
ax.legend()
plt.show()
separations = np.linspace(300*nm, 900*nm, 10)
for i,separation in enumerate(separations):
job.add_instance(sim, f'p{i}', separation=separation)
job.execute()
```
#### File: meep_ext/templates/wavelength_forces.py
```python
import numpy as np
from scipy import constants
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib as mpl
import meep
import meep_ext
import pinboard
job = pinboard.pinboard()
### transformation optics
nb = 1.33
scale = nb
nm = 1e-9*scale
um = 1e-6*scale
### geometry
radius = 75*nm
gold = meep_ext.material.Au(multiplier=1/scale**2)
# gold = meep.Medium(index=3.5/scale)
sep = 400*nm
p1 = meep.Vector3(-sep/2, 0, 0)
p2 = meep.Vector3(sep/2, 0, 0)
geometry = [meep.Sphere(center=p1,
radius=radius,
material=gold),
meep.Sphere(center=p2,
radius=radius,
material=gold)]
### source
fcen, df = meep_ext.freq_data(scale/(400*nm), scale/(1000*nm))
nfreq = 40
src_time = meep.GaussianSource(frequency=1.3*scale/um, fwidth=4.0*scale/um)
polarization = meep.Ex # used in convergence check 'decay_by'
source = lambda sim: meep_ext.rhc_polarized_plane_wave(sim, src_time)
### monitor info
particle_monitor_gap = 50*nm
pml_monitor_gap = 50*nm
### grid
resolution = 1/(7.5*nm)
pml = meep.PML(100*nm)
lx = sep + 2*radius + 2*particle_monitor_gap + 2*pml_monitor_gap + 2*pml.thickness
ly = lz = 2*radius + 2*particle_monitor_gap + 2*pml_monitor_gap + 2*pml.thickness
cell = meep.Vector3(lx,ly,lz)
Nx, Ny, Nz = map(round, cell*resolution)
@job.cache
def dimer_fields():
"""capture the Ex fields in the xz plane"""
sim = meep.Simulation(cell_size=cell,
boundary_layers=[pml],
geometry=geometry,
resolution=resolution)
sim.init_fields()
source(sim)
while sim.fields.time() < 3*um:
sim.fields.step()
if sim.fields.t % 10 == 0:
yield {'E': sim.get_array(meep.Vector3(0,0,0), meep.Vector3(cell[0], 0, cell[2]), polarization)}
@job.cache
def dimer_norm():
"""perform normalization simulation"""
norm = meep.Simulation(cell_size=cell,
boundary_layers=[pml],
geometry=[],
resolution=resolution)
norm.init_fields()
source(norm)
flux_inc = meep_ext.add_flux_plane(norm, fcen, df, nfreq, [0,0,0], [2*radius, 2*radius, 0])
norm.run(until_after_sources=meep.stop_when_fields_decayed(.5*um, polarization,
pt=meep.Vector3(0,0,0), decay_by=1e-3))
return {'frequency': np.array(meep.get_flux_freqs(flux_inc)), 'area': (2*radius)**2,
'incident': np.asarray(meep.get_fluxes(flux_inc))}
@job.cache
def dimer_scat():
"""perform scattering simulation"""
scat = meep.Simulation(cell_size=cell,
boundary_layers=[pml],
geometry=geometry,
resolution=resolution)
scat.init_fields()
source(scat)
L = 2*radius + 2*particle_monitor_gap
Fx = meep_ext.add_force_box(scat, fcen, df, nfreq, p2, [L,L,L], meep.X)
Fy = meep_ext.add_force_box(scat, fcen, df, nfreq, p2, [L,L,L], meep.Y)
Fz = meep_ext.add_force_box(scat, fcen, df, nfreq, p2, [L,L,L], meep.Z)
# scat.run(until_after_sources=8*um)
scat.run(until_after_sources=meep.stop_when_fields_decayed(.5*um, polarization,
pt=p2-meep.Vector3(0,0,L/2), decay_by=1e-4))
return {'Fx': np.array(meep.get_forces(Fx)), 'Fy': np.array(meep.get_forces(Fy)), 'Fz': np.array(meep.get_forces(Fz)),
'frequency': np.array(meep.get_force_freqs(Fx))}
@job.at_end
def vis():
nm = 1e-9
### forces
fig, axes = plt.subplots(nrows=2, figsize=(7,6), sharex=True,
gridspec_kw=dict(height_ratios=[2,1], hspace=0.05))
norm = job.load(dimer_norm)
scat = job.load(dimer_scat)
for ax in axes:
ax.plot((1/nm)/norm.frequency, scat.Fx/norm.incident*norm.area*constants.epsilon_0/2*1e25, 'o', color='C0', label='Fx (FDTD)')
ax.plot((1/nm)/norm.frequency, scat.Fy/norm.incident*norm.area*constants.epsilon_0/2*1e25, 'o', color='C1', label='Fy (FDTD)')
ax.plot((1/nm)/norm.frequency, scat.Fz/norm.incident*norm.area*constants.epsilon_0/2*1e25, 'o', color='C2', label='Fz (FDTD)')
import miepy
wavelengths = np.linspace(400*nm, 1000*nm, 100)
eps = meep_ext.get_eps(gold)(wavelengths)
Au = miepy.data_material(wavelengths, eps*scale**2)
water = miepy.constant_material(nb**2)
# Au = miepy.constant_material(3.5**2)
spheres = miepy.spheres([[-sep/2/scale,0,0],[sep/2/scale,0,0]], radius/scale, Au)
source = miepy.sources.rhc_polarized_plane_wave()
sol = miepy.gmt(spheres, source, wavelengths, 2, medium=water)
F = sol.force_on_particle(1)
for ax in axes:
ax.axhline(0, linestyle='--', color='black')
ax.plot(wavelengths/nm, F[0]*1e25, color='C0', label='Fx (GMT)')
ax.plot(wavelengths/nm, F[1]*1e25, color='C1', label='Fy (GMT)')
ax.plot(wavelengths/nm, F[2]*1e25, color='C2', label='Fz (GMT)')
axes[0].legend()
axes[0].set(ylabel='force')
axes[1].set(xlabel='wavelength (nm)', ylabel='force', ylim=[-0.035,0.01])
### field animation
fig, ax = plt.subplots()
x = np.linspace(0, cell[0]/nm, Nx)
    z = np.linspace(0, cell[2]/nm, Nz)
X,Z = np.meshgrid(x,z, indexing='ij')
var = job.load(dimer_fields)
idx = np.s_[10:-10,10:-10]
E = var.E[:,10:-10,10:-10]
# E = var.E
vmax = np.max(np.abs(E))/2
im = ax.pcolormesh(X[idx], Z[idx], E[0], cmap='RdBu', animated=True, vmax=vmax, vmin=-vmax)
ax.set_aspect('equal')
def update(i):
im.set_array(np.ravel(E[i][:-1,:-1]))
return [im]
ani = animation.FuncAnimation(fig, update, range(E.shape[0]), interval=50, blit=True)
plt.show()
job.execute()
``` |
{
"source": "johnaparker/MiePy",
"score": 2
} |
#### File: benchmarks/parallel/main.py
```python
import numpy as np
import matplotlib.pyplot as plt
import miepy
from tqdm import tqdm
from functools import partial
from scipy.sparse.linalg import bicg, bicgstab
from miepy.interactions import solve_linear_system
from timer import time_function
nm = 1e-9
Ag = miepy.materials.Ag()
radius = 75*nm
source = miepy.sources.plane_wave.from_string(polarization='rhc')
separation = 250*nm
def tests(Nmax, step=1):
Nparticles = np.arange(1, Nmax+1, step)
names = ['build', 'solve', 'flux', 'force']
ftimer = {name: np.zeros_like(Nparticles, dtype=float) for name in names}
for i,N in enumerate(Nparticles):
print(N, Nmax)
positions = [[n*separation, 0, 0] for n in range(N)]
mie = miepy.sphere_cluster(position=positions,
radius=radius,
material=Ag,
source=source,
wavelength=600*nm,
lmax=2)
ftimer['force'][i] = time_function(mie.force)
ftimer['flux'][i] = time_function(mie.cross_sections)
ftimer['build'][i] = time_function(partial(miepy.interactions.sphere_aggregate_tmatrix,
mie.position, mie.mie_scat, mie.material_data.k_b))
A = miepy.interactions.sphere_aggregate_tmatrix(mie.position, mie.mie_scat, k=mie.material_data.k_b)
ftimer['solve'][i] = time_function(partial(solve_linear_system, A, mie.p_src, method=miepy.solver.bicgstab))
return ftimer, Nparticles
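# Timings are written to out_<OMP_NUM_THREADS>.h5 below, so the script is intended to be
# launched once per thread count being benchmarked.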
import h5py
import os
OMP = os.environ['OMP_NUM_THREADS']
ftimer, Nparticles = tests(300, step=10)
with h5py.File(f'out_{OMP}.h5', 'w') as f:
for name, time in ftimer.items():
f[name] = time
f.attrs['Nparticles'] = Nparticles
```
#### File: examples/fdtd_compare/cylinder_grid_scattering.py
```python
import numpy as np
from scipy import constants
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib as mpl
import meep
import meep_ext
from numpipe import scheduler, pbar
import miepy
job = scheduler()
nm = 1e-9
um = 1e-6
### Parameters
radius = 75*nm
height = 110*nm
material = meep_ext.material.Au()
x = np.linspace(-220*nm, 220*nm, 3)
y = np.linspace(-220*nm, 220*nm, 3)
z = np.linspace(-40*nm, 40*nm, 9)
X, Y = np.meshgrid(x, y)
x = X.flatten()
y = Y.flatten()
theta = np.linspace(0, np.pi/2, 9)
phi = np.linspace(0, 2*np.pi, 9)
geometry = []
for i in range(9):
axis = meep.Vector3(np.sin(theta[i])*np.cos(phi[i]), np.sin(theta[i])*np.sin(phi[i]), np.cos(theta[i]))
geometry.append(meep.Cylinder(center=meep.Vector3(x[i],y[i],z[i]),
radius=radius,
height=height,
material=material,
axis=axis))
box = [2*radius + 2*np.max(x)]*2 + [2*radius + 2*np.max(z)]
resolution = 1/(8*nm)
medium = meep.Medium(index=1)
fcen, df = meep_ext.freq_data(1/(400*nm), 1/(1000*nm))
nfreq = 40
polarization = 'x'
src_time = meep.GaussianSource(frequency=1.3/um, fwidth=4.0/um)
if polarization == 'x':
source = lambda sim: meep_ext.x_polarized_plane_wave(sim, src_time)
decay = meep.Ex
else:
source = lambda sim: meep_ext.y_polarized_plane_wave(sim, src_time)
decay = meep.Ey
### monitor info
particle_monitor_gap = 50*nm
pml_monitor_gap = 50*nm
norm_file_ext = 'norm_box'
monitor_size = [box[0] + 2*particle_monitor_gap, box[1] + 2*particle_monitor_gap,
box[2] + 2*particle_monitor_gap]
### grid
pml = meep.PML(15/resolution)
lx = box[0] + 2*particle_monitor_gap + 2*pml_monitor_gap + 2*pml.thickness
ly = box[1] + 2*particle_monitor_gap + 2*pml_monitor_gap + 2*pml.thickness
lz = box[2] + 2*particle_monitor_gap + 2*pml_monitor_gap + 2*pml.thickness
cell = meep.Vector3(lx,ly,lz)
Nx, Ny, Nz = map(round, cell*resolution)
@job.cache
def norm_sim():
"""perform normalization simulation"""
norm = meep.Simulation(cell_size=cell,
boundary_layers=[pml],
geometry=[],
default_material=medium,
resolution=resolution)
norm.init_fields()
source(norm)
flux_box_inc = meep_ext.add_flux_box(norm, fcen, df, nfreq, [0,0,0], monitor_size)
flux_inc = meep_ext.add_flux_plane(norm, fcen, df, nfreq, [0,0,0], [box[0], box[1], 0])
norm.run(until_after_sources=meep.stop_when_fields_decayed(.5*um, decay,
pt=meep.Vector3(0,0,monitor_size[2]/2), decay_by=1e-3))
norm.save_flux(norm_file_ext, flux_box_inc)
return {'frequency': np.array(meep.get_flux_freqs(flux_inc)), 'area': box[0]*box[1],
'incident': np.asarray(meep.get_fluxes(flux_inc))}
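# The flux box saved above under norm_file_ext is subtracted in scat_sim() via load_minus_flux,
# so the scattering monitor there records only the scattered power.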
@job.cache
def scat_sim():
"""perform scattering simulation"""
scat = meep.Simulation(cell_size=cell,
boundary_layers=[pml],
geometry=geometry,
default_material=medium,
resolution=resolution)
scat.init_fields()
source(scat)
flux_box_absorb = meep_ext.add_flux_box(scat, fcen, df, nfreq, [0,0,0], monitor_size)
flux_box_scat = meep_ext.add_flux_box(scat, fcen, df, nfreq, [0,0,0], monitor_size)
scat.load_minus_flux(norm_file_ext, flux_box_scat)
scat.run(until_after_sources=meep.stop_when_fields_decayed(.5*um, decay,
pt=meep.Vector3(0,0,monitor_size[2]/2), decay_by=1e-5))
return {'scattering': np.array(meep.get_fluxes(flux_box_scat)),'absorption': -np.array(meep.get_fluxes(flux_box_absorb)),
'frequency': np.array(meep.get_flux_freqs(flux_box_scat))}
@job.plots
def vis():
fig, ax = plt.subplots()
norm = job.load(norm_sim)
scat = job.load(scat_sim)
ax.plot((1/nm)/norm.frequency, scat.scattering/norm.incident*norm.area, 'o', color='C0', label='scattering (FDTD)')
ax.plot((1/nm)/norm.frequency, scat.absorption/norm.incident*norm.area, 'o', color='C1', label='absorption (FDTD)')
ax.plot((1/nm)/norm.frequency, (scat.scattering + scat.absorption)/norm.incident*norm.area,
'o', color='C2', label='extinction (FDTD)')
wavelengths = np.linspace(400*nm, 1000*nm, 100)
eps = meep_ext.get_eps(material)(wavelengths)
Au = miepy.data_material(wavelengths, eps)
# Au = miepy.constant_material(3.5**2)
C, A, E = [np.zeros_like(wavelengths) for i in range(3)]
particles = []
for i in range(9):
orientation = miepy.quaternion.from_spherical_coords(theta[i], phi[i])
particles.append(miepy.cylinder(position=[x[i], y[i], z[i]], radius=radius, height=height, material=Au, orientation=orientation))
for i, wavelength in enumerate(pbar(wavelengths)):
sol = miepy.cluster(particles=particles,
source=miepy.sources.plane_wave([1,0]),
wavelength=wavelength,
lmax=3)
C[i], A[i], E[i] = sol.cross_sections()
ax.axhline(0, linestyle='--', color='black')
ax.plot(wavelengths/nm, C, color='C0', label='scattering (GMT)')
ax.plot(wavelengths/nm, A, color='C1', label='absorption (GMT)')
ax.plot(wavelengths/nm, E, color='C2', label='extinction (GMT)')
ax.legend()
ax.set(xlabel='wavelength (nm)', ylabel='cross-section')
plt.show()
job.run()
```
#### File: examples/tests/cruzan.py
```python
import numpy as np
import miepy
from sympy.physics.wigner import wigner_3j, gaunt
from scipy import special
import math
from functools import lru_cache
# w = miepy.vsh.wigner_3j(1,1,1, 1,-1,0)
# print(w)
# w = wigner_3j(1,1,1,1,-1,0)
# print(w)
# from IPython import embed; embed()
@lru_cache(None)
def gaunt(m,n,u,v,p):
"""gaunt coefficient"""
f = lambda n: special.gamma(n+1)
numerator = f(n+m)*f(v+u)*f(p-m-u)
denominator = f(n-m)*f(v-u)*f(p+m+u)
factor = (-1)**(m+u)*(2*p+1)*(numerator/denominator)**0.5
w1 = miepy.vsh.wigner_3j(n,v,p,0,0,0)
w2 = miepy.vsh.wigner_3j(n,v,p,m,u,-m-u)
return factor*w1*w2
@lru_cache(None)
def b_func(m,n,u,v,p):
"""b function"""
f = lambda n: special.gamma(n+1)
numerator = f(n+m)*f(v+u)*f(p-m-u+1)
denominator = f(n-m)*f(v-u)*f(p+m+u+1)
factor = (-1)**(m+u)*(2*p+3)*(numerator/denominator)**0.5
w1 = miepy.vsh.wigner_3j(n,v,p,0,0,0)
w2 = miepy.vsh.wigner_3j(n,v,p+1,m,u,-m-u)
return factor*w1*w2
# g = gaunt(1,2,0,1,1)
# print(g)
# g = miepy.vsh.a_func(1,2,0,1,1)
# print(g)
# from IPython import embed; embed()
def A_translation(m, n, u, v, r, theta, phi, k):
m *= -1
f = lambda n: special.gamma(n+1)
numerator = (2*v+1)*f(n-m)*f(v-u)
denominator = 2*n*(n+1)*f(n+m)*f(v+u)
factor = (-1)**m * numerator/denominator*np.exp(1j*(u+m)*phi)
qmax = min(n, v, (n+v - abs(m+u))//2)
sum_term = 0
for q in range(0, qmax+1):
p = n + v - 2*q
aq = gaunt(m,n,u,v,p)
A = 1j**p*(n*(n+1) + v*(v+1) - p*(p+1))*aq
Pnm = miepy.vsh.associated_legendre(p,u+m)
sum_term += A*miepy.vsh.spherical_hn(p, k*r)*Pnm(np.cos(theta))
return factor*sum_term
def B_translation(m, n, u, v, r, theta, phi, k):
m *= -1
f = lambda n: special.gamma(n+1)
numerator = (2*v+1)*f(n-m)*f(v-u)
denominator = 2*n*(n+1)*f(n+m)*f(v+u)
factor = (-1)**(m+1) * numerator/denominator*np.exp(1j*(u+m)*phi)
qmax = min(n, v, (n+v+1 - abs(m+u))//2)
sum_term = 0
for q in range(1, qmax+1):
p = n + v - 2*q
bq = b_func(m,n,u,v,p)
A = 1j**(p+1)*(((p+1)**2 - (n-v)**2)*((n+v+1)**2 - (p+1)**2))**0.5*bq
Pnm = miepy.vsh.associated_legendre(p+1,u+m)
sum_term += A*miepy.vsh.spherical_hn(p+1, k*r)*Pnm(np.cos(theta))
return factor*sum_term
A = A_translation(8,10,-9,12, 2, 0.5, 0.5, 1)
B = B_translation(8,10,-9,12, 2, 0.5, 0.5, 1)
print(f'A: {A:.10e}' , f'B: {B:.10e}', '', sep='\n')
A = A_translation(0,10,0,10, 2, 0.5, 0.5, 1)
B = B_translation(0,10,0,10, 2, 0.5, 0.5, 1)
print(f'A: {A:.10e}' , f'B: {B:.10e}', '', sep='\n')
A = A_translation(-2, 11, 3, 9, 2, 0.5, 0.5, 1)
B = B_translation(-2, 11, 3, 9, 2, 0.5, 0.5, 1)
print(f'A: {A:.10e}' , f'B: {B:.10e}', '', sep='\n')
A = A_translation(-2, 6, -2, 10, 2, 0.5, 0.5, 1)
B = B_translation(-2, 6, -2, 10, 2, 0.5, 0.5, 1)
print(f'A: {A:.10e}' , f'B: {B:.10e}', '', sep='\n')
# A = A_translation(0, 1, 1, 1, 2, 0.5, 0.5, 1)
# print(A)
# A = miepy.vsh.A_translation(0, 1, 1, 1, 2, 0.5, 0.5, 1)
# print(A)
from IPython import embed; embed()
```
#### File: MiePy/miepy/coordinates.py
```python
import miepy
import numpy as np
def sph_to_cart(r, theta, phi, origin=None):
"""convert spherical coordinates (r, theta, phi) centered at origin to cartesian coordinates (x, y, z)"""
if origin is None:
origin = np.zeros(3, dtype=float)
x = origin[0] + r*np.sin(theta)*np.cos(phi)
y = origin[1] + r*np.sin(theta)*np.sin(phi)
z = origin[2] + r*np.cos(theta)
return x,y,z
def cart_to_sph(x, y, z, origin=None):
"""convert cartesian coordinates (x, y, z) to spherical coordinates (r, theta, phi) centered at origin"""
if origin is None:
origin = np.zeros(3, dtype=float)
x0,y0,z0 = origin
r = ((x - x0)**2 + (y - y0)**2 + (z - z0)**2)**0.5
theta = np.arccos((z - z0)/r)
phi = np.arctan2(y - y0, x - x0)
return r, theta, phi
def cyl_to_cart(r, phi, z, origin=None):
"""convert cylindrical coordinates (r, phi, z) centered at origin to cartesian coordinates (x, y, z)"""
if origin is None:
origin = np.zeros(3, dtype=float)
x = origin[0] + r*np.cos(phi)
y = origin[1] + r*np.sin(phi)
return x, y, z + origin[2]
def cart_to_cyl(x, y, z, origin=None):
"""convert cartesian coordinates (x, y, z) to cylindrical coordinates (r, phi, z) centered at origin"""
if origin is None:
origin = np.zeros(3, dtype=float)
x0,y0,z0 = origin
r = ((x - x0)**2 + (y - y0)**2)**0.5
phi = np.arctan2(y - y0, x - x0)
return r, phi, z - z0
#TODO: if theta is scalar, or phi is scalar... same with other functions here
#TODO: implement origin
def sph_basis_vectors(theta, phi, origin=None):
"""obtain the spherical basis vectors (r_hat, theta_hat, phi_hat) for given theta, phi"""
if origin is None:
origin = np.zeros(3, dtype=float)
r_hat = np.array([np.sin(theta)*np.cos(phi), np.sin(theta)*np.sin(phi), np.cos(theta)])
theta_hat = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -1*np.sin(theta)])
phi_hat = np.array([-1*np.sin(phi), np.cos(phi), np.zeros_like(phi)])
return r_hat, theta_hat, phi_hat
#TODO: implement origin
def vec_cart_to_sph(F, theta, phi, origin=None):
"""convert a vector field F from cartesian to spherical coordinates
Arguments:
F[3,...] vector field values
theta theta coordinates
phi phi coordinates
"""
if origin is None:
origin = np.zeros(3, dtype=float)
Fsph = np.zeros_like(F)
r_hat, theta_hat, phi_hat = sph_basis_vectors(theta, phi)
Fsph[0] = np.sum(F*r_hat, axis=0)
Fsph[1] = np.sum(F*theta_hat, axis=0)
Fsph[2] = np.sum(F*phi_hat, axis=0)
return Fsph
#TODO: implement origin
def vec_sph_to_cart(F, theta, phi, origin=None):
"""convert a vector field F from spherical to cartesian coordinates
Arguments:
F[3,...] vector field values
theta theta coordinates
phi phi coordinates
"""
if origin is None:
origin = np.zeros(3, dtype=float)
Fcart = np.zeros_like(F)
r_hat, theta_hat, phi_hat = sph_basis_vectors(theta, phi)
for i in range(3):
Fcart[i] = F[0]*r_hat[i] + F[1]*theta_hat[i] + F[2]*phi_hat[i]
return Fcart
def sphere_mesh(sampling):
"""
Obtain a THETA,PHI mesh for discretizing the surface of the sphere, consistent
with the format required by the project and decompose functions
Returns (THETA,PHI) meshgrids
Arguments:
sampling number of points to sample between 0 and pi
"""
phi = np.linspace(0, 2*np.pi, 2*sampling)
tau = np.linspace(-1, 1, sampling)
theta = np.arccos(tau)
THETA,PHI = np.meshgrid(theta, phi, indexing='ij')
return THETA, PHI
def cart_sphere_mesh(radius, origin, sampling):
"""Given a radius, origin and sampling, return the
X,Y,Z,THETA,PHI,tau,phi coordinates of a discretized sphere"""
r = np.array([radius])
tau = np.linspace(-1,1, sampling)
theta = np.arccos(tau)
phi = np.linspace(0, 2*np.pi, 2*sampling)
R, THETA, PHI = np.meshgrid(r,theta,phi, indexing='ij')
X = origin[0] + R*np.sin(THETA)*np.cos(PHI)
Y = origin[1] + R*np.sin(THETA)*np.sin(PHI)
Z = origin[2] + R*np.cos(THETA)
return map(np.squeeze, (X,Y,Z,THETA,PHI,tau,phi))
def rotate(x, y, z, quat, origin=None):
"""Rotate the points (x, y, z) around an origin using a quaternion"""
if origin is None:
origin = np.zeros(3, dtype=float)
R = miepy.quaternion.as_rotation_matrix(quat)
p = np.asarray(translate(x, y, z, -origin))
p_rotated = np.einsum('ij,j...->i...', R, p)
p_final = translate(*p_rotated, origin)
return p_final
def rotate_sph(theta, phi, quat):
"""Rotate the spherical coordinates (theta, phi) to rotated spherical coordinates"""
q1 = miepy.quaternion.from_spherical_coords(theta, phi)
q2 = quat*q1
theta_r, phi_r = np.moveaxis(miepy.quaternion.as_spherical_coords(q2), -1, 0)
theta_r = np.asarray(theta_r)
phi_r = np.asarray(phi_r)
# Final step: if theta = 0, then above conversion turns phi -> phi_r/2, so this is corrected
idx = (theta == 0)
phi_r[idx] *= 2
return theta_r, phi_r
def rotate_vec(F, quat):
"""Rotate the vector F using a quaternion"""
R = miepy.quaternion.as_rotation_matrix(quat)
F_rotated = np.einsum('ij,j...->i...', R, F)
return F_rotated
def translate(x, y, z, dr):
"""Translate the points (x, y, z) by dr"""
xp = x + dr[0]
yp = y + dr[1]
zp = z + dr[2]
return np.asarray([xp, yp, zp])
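# Minimal self-check sketch (illustrative values): round-trip the Cartesian <-> spherical
# and Cartesian <-> cylindrical conversions defined above with an arbitrary point.
if __name__ == '__main__':
    pt = np.array([1.0, -2.0, 0.5])
    r, theta, phi = cart_to_sph(*pt)
    assert np.allclose(sph_to_cart(r, theta, phi), pt)
    rho, ang, height = cart_to_cyl(*pt)
    assert np.allclose(cyl_to_cart(rho, ang, height), pt)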
```
#### File: miepy/mie_single/scattering.py
```python
import numpy as np
import miepy
import scipy.constants as constants
from miepy.special_functions import riccati_1,riccati_2,vector_spherical_harmonics
def scattering_per_multipole(an, bn, k):
"""Scattering cross-section per multipole. Returns scat[Nfreq,2,lmax].
an[N] an scattering coefficients
bn[N] bn scattering coefficients
k[N] wavenumbers
"""
Nfreq, lmax = an.shape
flux = np.zeros([Nfreq,2,lmax])
nvals = np.arange(1, lmax+1)
flux[:,0,:] = 2*np.pi*(2*nvals+1)*np.abs(an)**2/k[:,np.newaxis]**2
flux[:,1,:] = 2*np.pi*(2*nvals+1)*np.abs(bn)**2/k[:,np.newaxis]**2
return flux
def extinction_per_multipole(an, bn, k):
"""Extinction cross-section per multipole. Returns extinct[Nfreq,2,lmax].
an[N] an scattering coefficients
bn[N] bn scattering coefficients
k[N] wavenumbers
"""
Nfreq, lmax = an.shape
flux = np.zeros([Nfreq,2,lmax])
nvals = np.arange(1, lmax+1)
flux[:,0,:] = 2*np.pi*(2*nvals+1)*np.real(an)/k[:,np.newaxis]**2
flux[:,1,:] = 2*np.pi*(2*nvals+1)*np.real(bn)/k[:,np.newaxis]**2
return flux
def absorbption_per_multipole(an, bn, k):
"""Absorbption cross-section per multipole. Returns absorb[Nfreq,2,lmax].
an[N] an scattering coefficients
bn[N] bn scattering coefficients
k[N] wavenumbers
"""
return extinction_per_multipole(an, bn, k) - scattering_per_multipole(an, bn, k)
def cross_sections(an, bn, k):
"""Return the 3 cross-sections, (Scattering, Absorbption, Extinction)
an[N] an scattering coefficients
bn[N] bn scattering coefficients
k[N] wavenumbers
"""
scat_flux = scattering_per_multipole(an, bn, k)
extinct_flux = extinction_per_multipole(an, bn, k)
abs_flux = extinct_flux - scat_flux
return miepy.flux.cross_sections(*[np.sum(arr, axis=(1,2)) for arr in [scat_flux, abs_flux, extinct_flux]])
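# Shape note: with an, bn of shape [Nfreq, lmax], each per-multipole array above is
# [Nfreq, 2, lmax] (axis 1 separates the electric an and magnetic bn contributions);
# summing over the last two axes gives the per-frequency totals that cross_sections() returns.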
def multipole_label(T,L):
"""Get multipole label.
T = 0 (electric), 1(magnetic)
L = 0,1,2... (order)
"""
first = ['e', 'm'][T]
if L <= 3:
last = ['D', 'Q', 'O', 'H'][L]
else:
last = " (L = {L})".format(L=L)
return first + last
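# e.g. multipole_label(0, 0) == 'eD' (electric dipole) and multipole_label(1, 1) == 'mQ' (magnetic quadrupole)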
def scattered_E(an, bn, k):
"""For a given an, bn, k, return the scattered electric field function E(r,theta,phi)
an[L] an coefficients
       bn[L]    bn coefficients
k wavenumber in the medium
"""
lmax = an.shape[0]
def E_func(r, theta, phi):
E = np.zeros(shape = [3] + list(r.shape), dtype=complex)
for L in range(1,lmax+1):
En = 1j**L*(2*L+1)/(L*(L+1))
VSH = vector_spherical_harmonics(L,3)
E += En*(1j*an[L-1]*VSH.N_e1n(k)(r,theta,phi) \
- bn[L-1]*VSH.M_o1n(k)(r,theta,phi))
return -E
return E_func
def interior_E(cn, dn, k):
"""For a given cn, dn, k, return the interior electric field function E(r,theta,phi) for a sphere
cn[L] cn coefficients
dn[L] dn coefficients
k wavenumber inside the sphere
"""
lmax = cn.shape[0]
def E_func(r, theta, phi):
E = np.zeros(shape = [3] + list(r.shape), dtype=complex)
for L in range(1,lmax+1):
En = 1j**L*(2*L+1)/(L*(L+1))
VSH = vector_spherical_harmonics(L,1)
E += En*(cn[L-1]*VSH.M_o1n(k)(r,theta,phi) \
- 1j*dn[L-1]*VSH.N_e1n(k)(r,theta,phi))
return -E
return E_func
def scattered_H(an, bn, k, n_b, mu_b):
"""For a given an, bn, k, return the scattered electric field function H(r,theta,phi)
an[L] an coefficients
       bn[L]    bn coefficients
k wavenumber in the medium
n_b index of refraction of the medium
mu_b permeability of the medium
"""
lmax = an.shape[0]
def H_func(r, theta, phi):
H = np.zeros(shape = [3] + list(r.shape), dtype=complex)
for L in range(1,lmax+1):
En = 1j**L*(2*L+1)/(L*(L+1))
VSH = vector_spherical_harmonics(L,3)
H += n_b*En/mu_b*(1j*bn[L-1]*VSH.N_o1n(k)(r,theta,phi) \
+ an[L-1]*VSH.M_e1n(k)(r,theta,phi))
return -H
return H_func
def interior_H(cn, dn, k, n, mu):
"""For a given cn, dn, k, return the interior electric field function H(r,theta,phi) for a sphere
cn[L] cn coefficients
dn[L] dn coefficients
k wavenumber inside the sphere
n index of refraction of the sphere
mu permeability of the sphere
"""
lmax = cn.shape[0]
def H_func(r, theta, phi):
H = np.zeros(shape = [3] + list(r.shape), dtype=complex)
for L in range(1,lmax+1):
En = 1j**L*(2*L+1)/(L*(L+1))
VSH = vector_spherical_harmonics(L,1)
H += -n*En/mu*(dn[L-1]*VSH.M_e1n(k)(r,theta,phi) \
+ 1j*cn[L-1]*VSH.N_o1n(k)(r,theta,phi))
return -H
return H_func
```
#### File: miepy/particles/core_shell.py
```python
import miepy
from .particle_base import particle
class core_shell(particle):
def __init__(self, position, core_radius, shell_thickness, core_material, shell_material):
"""A sphere object
Arguments:
position[3] x,y,z position of particle
radius sphere radius
material particle material (miepy.material object)
"""
super().__init__(position, None, core_material)
self.core_radius = core_radius
self.shell_thickness = shell_thickness
self.core_material = core_material
self.shell_material = shell_material
def __repr__(self):
        return f'''{self.__class__.__name__}:
    position = {self.position} m
    core radius = {self.core_radius:.2e} m
    shell thickness = {self.shell_thickness:.2e} m
    core material = {self.core_material}
    shell material = {self.shell_material}'''
def is_inside(self, pos):
pass
def compute_tmatrix(self, lmax, wavelength, eps_m, **kwargs):
self.tmatrix = miepy.tmatrix.tmatrix_core_shell(self.core_radius, self.shell_thickness, wavelength,
self.core_material.eps(wavelength), self.shell_material.eps(wavelength), eps_m, lmax)
self.tmatrix_fixed = self.tmatrix
return self.tmatrix
def enclosed_radius(self):
return self.core_radius + self.shell_thickness
def _dict_key(self, wavelength):
return (core_shell, self.core_radius, self.shell_thickness,
self.core_material.eps(wavelength).item(), self.core_material.mu(wavelength).item(),
self.shell_material.eps(wavelength).item(), self.shell_material.mu(wavelength).item())
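# Hedged usage sketch (illustrative values; materials constructed as elsewhere in miepy):
#   particle = core_shell(position=[0, 0, 0], core_radius=60e-9, shell_thickness=15e-9,
#                         core_material=miepy.materials.Ag(), shell_material=miepy.dielectric(eps=2.25))
#   particle.compute_tmatrix(lmax=2, wavelength=600e-9, eps_m=1.0)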
```
#### File: miepy/particles/particle_base.py
```python
import numpy as np
import miepy
#TODO: lmax per particle
#TODO: position and orientation should be properties
class particle:
def __init__(self, position, orientation, material):
"""A particle consists of a position, orientation, material, and a lazily evaluated T-matrix
Arguments:
position[3] x,y,z position of particle
orientation particle orientation
material particle material (miepy.material object)
"""
self._position = np.asarray(position, dtype=float)
if orientation is None:
self._orientation = miepy.quaternion.one
else:
self._orientation = orientation
self.material = material
self.tmatrix_fixed = None
self.tmatrix = None
if self.material is not None and self.material.name == 'metal':
self.conducting = True
else:
self.conducting = False
@property
def position(self):
return self._position
@position.setter
def position(self, p):
self.position[...] = p
@property
def orientation(self):
return self._orientation
@orientation.setter
def orientation(self, n):
self._orientation = n
self._rotate_fixed_tmatrix()
def is_inside(self, pos):
"""Return true if pos is inside the particle"""
pass
def enclosed_radius(self):
"""Return the radius of the smallest circumscribing sphere"""
pass
def compute_tmatrix(self, lmax, wavelength, eps_m, **kwargs):
"""Compute the T-matrix of the particle
Arguments:
lmax maximum number of multipoles
wavelength incident wavelength
            eps_m        permittivity of the medium
kwargs additional kwargs to pass
"""
pass
def _rotate_fixed_tmatrix(self):
if self.tmatrix_fixed is not None:
self.tmatrix = miepy.tmatrix.rotate_tmatrix(self.tmatrix_fixed, self.orientation)
def _dict_key(self, wavelength):
raise NotImplementedError('particle must implement _dict_key')
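# Subclasses (e.g. sphere and core_shell in miepy.particles) are expected to set tmatrix and
# tmatrix_fixed inside compute_tmatrix() and to implement enclosed_radius() and _dict_key().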
```
#### File: miepy/particles/sphere.py
```python
import miepy
from .particle_base import particle
class sphere(particle):
def __init__(self, position, radius, material):
"""A sphere object
Arguments:
position[3] x,y,z position of particle
radius sphere radius
material particle material (miepy.material object)
"""
super().__init__(position, None, material)
self.radius = radius
def __repr__(self):
return f'''{self.__class__.__name__}:
position = {self.position} m
radius = {self.radius:.2e} m
material = {self.material}'''
def is_inside(self, pos):
pass
def compute_tmatrix(self, lmax, wavelength, eps_m, **kwargs):
self.tmatrix = miepy.tmatrix.tmatrix_sphere(self.radius, wavelength,
self.material.eps(wavelength), eps_m, lmax, conducting=self.conducting)
self.tmatrix_fixed = self.tmatrix
return self.tmatrix
def enclosed_radius(self):
return self.radius
def _rotate_fixed_tmatrix(self):
self.tmatrix = self.tmatrix_fixed
def _dict_key(self, wavelength):
return (sphere, self.radius, self.material.eps(wavelength).item(), self.material.mu(wavelength).item())
```
#### File: miepy/sources/plane_waves.py
```python
import numpy as np
import miepy
from miepy.vsh.special import pi_func, tau_func
from miepy.sources import polarized_propagating_source
class plane_wave(polarized_propagating_source):
def __init__(self, polarization, amplitude=1, phase=0, theta=0, phi=0, standing=False):
"""
        Create a plane-wave source. Default arguments provide a unit-amplitude wave propagating in the +z direction
Arguments:
polarization[2] (TM, TE) values representing the polarization
theta theta spherical angle of k-vector
phi phi spherical angle of k-vector
amplitude electric field amplitude E0
phase phase factor
"""
polarized_propagating_source.__init__(self, polarization=polarization,
amplitude=amplitude, phase=phase, origin=None, theta=theta, phi=phi, standing=standing)
def __repr__(self):
return f'plane_wave(polarization={self.polarization}, amplitude={self.amplitude}, theta={self.theta}, phi={self.phi})'
@classmethod
def from_string(cls, polarization, direction='z', amplitude=1, phase=0, standing=False):
"""Create a plane wave from string values for the polarization and direction
Arguments:
            polarization     x, y, z, rhc, or lhc
direction x, y, z, -x, -y, or -z
amplitude electric field amplitude E0
phase phase factor
"""
if direction in ['z', '+z']:
theta = 0
phi = 0
elif direction == '-z':
theta = np.pi
phi = 0
elif direction in ['x', '+x']:
theta = np.pi/2
phi = 0
elif direction == '-x':
theta = np.pi/2
phi = np.pi
elif direction in ['y', '+y']:
theta = np.pi/2
phi = np.pi/2
elif direction == '-y':
theta = np.pi/2
phi = 3*np.pi/2
else:
raise ValueError("'{direction}' is not a valid direction of propagation. Use one of ['x', 'y', 'z', '-x', '-y', '-z']".format(direction=direction))
if polarization == direction[-1]:
raise ValueError('polarization cannot be the same as the direction of propagation')
if polarization == 'x':
if direction[-1] == 'z':
pol = [1, 0]
else:
pol = [0, -1]
elif polarization == 'y':
if direction[-1] == 'x':
pol = [0, 1]
else:
pol = [0, 1]
elif polarization == 'z':
pol = [-1, 0]
elif polarization == 'rhc':
pol = [1, 1j]
elif polarization == 'lhc':
pol = [1, -1j]
else:
raise ValueError("'{polarization}' is not a valid polarization. Use one of ['x', 'y', 'z', 'rhc', 'lhc']".format(polarization=polarization))
return cls(polarization=pol, theta=theta, phi=phi, amplitude=amplitude, phase=phase, standing=standing)
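    # e.g. plane_wave.from_string(polarization='x', direction='-z') gives an x-polarized wave
    # travelling along -z, and from_string(polarization='rhc') gives the circular polarization
    # used elsewhere in the miepy examples.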
def E_field(self, x1, x2, x3, k, far=False, spherical=False):
if spherical:
x1, x2, x3 = miepy.coordinates.sph_to_cart(x1, x2, x3)
amp = self.amplitude*np.exp(1j*k*(self.k_hat[0]*x1 + self.k_hat[1]*x2 + self.k_hat[2]*x3))*np.exp(1j*self.phase)
pol = self.n_tm*self.polarization[0] + self.n_te*self.polarization[1]
E = np.einsum('i...,...->i...', pol, amp)
if spherical:
E = miepy.coordinates.cart_to_sph(*E)
return E
def H_field(self, x1, x2, x3, k, far=False, spherical=False):
if spherical:
x1, x2, x3 = miepy.coordinates.sph_to_cart(x1, x2, x3)
amp = self.amplitude*np.exp(1j*k*(self.k_hat[0]*x1 + self.k_hat[1]*x2 + self.k_hat[2]*x3))*np.exp(1j*self.phase)
pol = self.n_te*self.polarization[0] - self.n_tm*self.polarization[1]
H = np.einsum('i...,...->i...', pol, amp)
if spherical:
H = miepy.coordinates.cart_to_sph(*H)
return H
def structure(self, position, k, lmax):
position = np.asarray(position)
Nparticles = len(position)
rmax = miepy.vsh.lmax_to_rmax(lmax)
p_src = np.empty([Nparticles, 2, rmax], dtype=complex)
for j in range(Nparticles):
phase = k*(self.k_hat[0]*position[j,0] + self.k_hat[1]*position[j,1] + self.k_hat[2]*position[j, 2]) + self.phase
for i,n,m in miepy.mode_indices(lmax):
pi_value = pi_func(n, m, self.theta)
tau_value = tau_func(n, m, self.theta)
Emn = np.abs(miepy.vsh.Emn(m, n))
factor = self.amplitude*np.exp(1j*(phase - m*self.phi))*Emn
p_src[j,0,i] = factor*(tau_value*self.polarization[0] - 1j*pi_value*self.polarization[1])
p_src[j,1,i] = factor*(pi_value*self.polarization[0] - 1j*tau_value*self.polarization[1])
return p_src
def angular_spectrum(self, theta, phi, k):
if theta == self.theta and phi == self.phi:
return np.inf
else:
return 0
#TODO: implement origin
def E_angular(self, theta, phi, k, radius=None, origin=None):
        return self.angular_spectrum(theta, phi, k)
#TODO: implement origin
def H_angular(self, theta, phi, k, radius=None, origin=None):
        return self.angular_spectrum(theta, phi, k)
def reflect(self, interface, medium, wavelength):
theta = np.pi - self.theta
phi = self.phi
k = 2*np.pi*medium.index(wavelength)/wavelength
phase = self.phase - 2*k*self.k_hat[2]*interface.z
r_parallel, r_perp = interface.reflection_coefficients(self.theta, wavelength, medium)
a_theta = r_parallel*self.polarization[0]
a_phi = r_perp*self.polarization[1]
polarization = [a_theta, a_phi]
amplitude = np.linalg.norm(polarization)*self.amplitude
if amplitude == 0:
polarization = self.polarization
return plane_wave(polarization=polarization, theta=theta, phi=phi, amplitude=amplitude, phase=phase)
def transmit(self, interface, medium, wavelength):
m = interface.get_relative_index(wavelength, medium)
theta = np.arcsin(np.sin(self.theta)/m)
phi = self.phi
phase = self.phase
t_parallel, t_perp = interface.transmission_coefficients(self.theta, wavelength, medium)
a_theta = t_parallel*self.polarization[0]
a_phi = t_perp*self.polarization[1]
polarization = [a_theta, a_phi]
amplitude = np.linalg.norm(polarization)*self.amplitude
if amplitude == 0:
polarization = self.polarization
return plane_wave(polarization=polarization, theta=theta, phi=phi, amplitude=amplitude, phase=phase)
```
#### File: miepy/sources/vsh_sources.py
```python
import numpy as np
import miepy
from miepy.sources import source
class vsh_source(source):
def __init__(self, n, m, ftype='electric', center=None, mode=None, amplitude=1, phase=0):
"""
Arguments:
n n value of VSH mode
m m value of VSH mode
ftype 'electric' or 'magnetic' dipole (default: electric)
center center position of vsh_source
mode type of vsh_mode (default: incident)
amplitude amplitude of the source (default: 1)
phase additional phase factor (default: 0)
"""
self.n = n
self.m = m
self.ftype = ftype
self.mode = mode
if mode is None:
self.mode = miepy.vsh_mode.incident
if self.ftype == 'electric':
self.N, self.M = miepy.vsh.VSH(self.n, self.m, self.mode)
self.N_far, self.M_far = miepy.vsh.VSH_far(self.n, self.m, self.mode)
elif self.ftype == 'magnetic':
self.M, self.N = miepy.vsh.VSH(self.n, self.m, self.mode)
self.M_far, self.N_far = miepy.vsh.VSH_far(self.n, self.m, self.mode)
else:
raise ValueError("ftype must be either 'electric' or 'magnetic'")
if center is None:
self.center = np.array([0,0,0], dtype=float)
else:
self.center = np.asarray(center, dtype=float)
self.amplitude = amplitude
self.phase = phase
def E_field(self, x1, x2, x3, k, far=False, spherical=False):
factor = self.amplitude*np.exp(1j*self.phase)
if not spherical:
x1, x2, x3 = miepy.coordinates.cart_to_sph(x1, x2, x3, origin=self.center)
if far:
E = self.N_far(x1, x2, x3, k)
else:
E = self.N(x1, x2, x3, k)
if not spherical:
return factor*miepy.coordinates.vec_sph_to_cart(E, x2, x3)
else:
return factor*E
def H_field(self, x1, x2, x3, k, far=False, spherical=False):
factor = self.amplitude*np.exp(1j*self.phase)
if not spherical:
x1, x2, x3 = miepy.coordinates.cart_to_sph(x1, x2, x3, origin=self.center)
if far:
E = self.M_far(x1, x2, x3, k)
else:
E = self.M(x1, x2, x3, k)
if not spherical:
return factor*miepy.coordinates.vec_sph_to_cart(E, x2, x3)
else:
return factor*E
def structure(self, position, k, lmax):
position = np.asarray(position)
Nparticles = len(position)
rmax = miepy.vsh.lmax_to_rmax(lmax)
p_src = np.zeros([Nparticles, 2, rmax], dtype=complex)
factor = self.amplitude*np.exp(1j*self.phase)
for i in range(Nparticles):
dr = position[i] - self.center
if not np.any(dr):
for r,n,m in miepy.mode_indices(lmax):
if n == self.n and m == self.m:
Emn = miepy.vsh.Emn(m, n)
p_src[i,0,r] = 1/(-1j*Emn)
else:
rad, theta, phi = miepy.coordinates.cart_to_sph(*dr)
for r,n,m in miepy.mode_indices(lmax):
Emn = miepy.vsh.Emn(self.m, self.n)
Euv = miepy.vsh.Emn(m, n)
A, B = miepy.cpp.vsh_translation.vsh_translation(m, n, self.m, self.n, rad, theta, phi, k, self.mode)
p_src[i,0,r] = A/(-1j*Emn)
p_src[i,1,r] = B/(-1j*Emn)
if self.ftype == 'magnetic':
p_src = p_src[:, ::-1]
return factor*p_src
def H_angular(self, theta, phi, k, radius=None, origin=None):
if radius is None:
radius = 1e6*2*np.pi/k
        return self.H_field(radius, theta, phi, k, far=True, spherical=True)[1:]
def E_angular(self, theta, phi, k, radius=None, origin=None):
if radius is None:
radius = 1e6*2*np.pi/k
        return self.E_field(radius, theta, phi, k, far=True, spherical=True)[1:]
def angular_spectrum(self, theta, phi, k):
return self.E_angular(theta, phi, k)
```
#### File: miepy/tmatrix/axisymmetric_file.py
```python
def axisymmetric_file(geometry_type, geometry_parameters, Nrank, wavelength,
index, index_m, kb=None, conducting=False, Nparam=1,
use_ds=True, complex_plane=True, eps_z_re_im=0.95, Nint=200):
"""Create input file for axisymmetric particles
Arguments:
geometry_type (int) choose from 1 (spheroid), 2 (cylinder), 3 (rounded oblate cylinder)
geometry_parameters (list) geometric parameters ([radius along symmetry axius, radius along other axes])
Nrank (int) maximum number of multipoles
wavelength (float) wavelength of incident light
index (complex) index of refraction of the particle
index_m (float) index of refraction of the medium
kb (float) parameter of chirality (default: None [no chirality])
conducting (bool) if True, particle is conducting (default: False)
Nparam (int) number of smooth curves used in approximate surface (default: 1)
use_ds (bool) if True, use discrete sources (default: True)
complex_plane (bool) if True, distribute discrete sources in complex plane (default: True)
eps_z_re_im (float) parameter used to distribute discrete sources (default: 0.95)
Nint (int) number of points used in integration (default: 200)
"""
geometry_xy = geometry_parameters[0]/wavelength
geometry_z = geometry_parameters[1]/wavelength
wavelength = 1
if kb is None:
chiral = False
kb = 1
else:
chiral = True
file_str_template = """OptProp
{wavelength}
{index_m.real}
({index.real}, {index.imag})
Variables:
- wavelength - wavelength of the incident light in vacuo.
- ind_refMed - refractive index of the ambient medium.
- ind_refRel - relative refractive index of the particle.
MatProp
.{conducting}.
.{chiral}.
{kb}
Variables:
- perfectcond - if perfectcond = t, the particle is perfectly conducting.
- chiral - if chiral = t, the particle is optical active (chiral).
- kb - parameter of chirality.
GeomProp
.false.
'../GEOMFILES/prolate.fem'
{geometry_type}
2
{geometry_xy}
{geometry_z}
{Nparam}
1.0
1.0
.false.
Variables:
- FileGeom - if FileGeom = t, the particle geometry is supplied by the
input file FileFEM.
- FileFEM - name of the file containing the particle geometry.
- TypeGeom - parameter specifying the type of the particle geometry.
- Nsurf - number of surface parameters.
- surf(1) - surface parameter.
- ...
- surf(Nsurf
- Nparam - number of smooth curves forming the generatrix curve.
- anorm - characteristic length of the particle which is used to
normalize the differential scattering cross sections.
- Rcirc - characteristic length of the particle for computing Nrank.
- miror - if miror = t, the particle is mirror symmetric.
NOTE: FOR CHIRAL PARTICLES AND DISTRIBUTED SOURCES SET miror = f.
ConvTest
.false.
.false.
Variables:
- DoConvTest - if DoConvTest = t, the interactive convergence tests
over Nint and Nrank are performed.
- MishConvTest - if MishConvTest = t, estimates of Nint and Nrank are
computed with the convergence criterion proposed by
Mishchenko.
NOTE: IF THE PARTICLE IS OPTICAL ACTIVE (chiral = t) OR THE PARTICLE
GEOMETRY IS SUPPLIED BY THE FILE FileFEM (FileGeom = t), THE CODE SETS
MishConvTest = f. IN FACT, MISHCHENKOS CONVERGENCE TEST WILL BE
PERFORMED IF (DS = f AND DoConvTest = t AND chiral = f AND FileGeom = f),
OR (DS = t AND autGenDS = t AND DoConvTest = t AND chiral = f AND
FileGeom = f).
Sources
.{use_ds}.
.true.
Variables:
- DS - if DS = t, distributed sources are used for T-matrix
calculation.
- autGenDS - if autGenDS = t, the coordinates of the distributed sources
are generated by the code.
NOTE: IF THE PARTICLE GEOMETRY IS READ FROM FILE (FileGeom = t),
THE CODE SETS autgenDS = f.
SourcePosAut
.{complex_plane}.
{eps_z_re_im}
Variables:
- ComplexPlane - if ComplexPlane = t, the distributed sources are placed
in the complex plane.
- EpsZReIm - parameter controlling the distribution of the discrete
sources.
NOTE: THESE VARIABLES MUST BE PROVIDED IF (DS = t AND autgenDS = t).
NintNrank
{Nint}
{Nrank}
Variables:
- Nint - number of integration points in computing integrals over the
generatrix curve.
- Nrank - maximum expansion order.
NOTE: THESE VARIABLES MUST BE PROVIDED IF ((DoConvTest = f) OR
(DS = t AND autgenDS = f)).
Errors
5.e-2
5.e-2
1.e-2
4
50
Variables:
- epsNint - error tolerance for the integration test.
- epsNrank - error tolerance for the expansion order test.
- epsMrank - error tolerance for the azimuthal order test.
- dNint - number of division points for the integration test
and Mishchenkos convergence test.
- dNintMrank - number of division points for azimuthal mode
calculation.
Tmat
'../TMATFILES/tmatrix.dat'
Variable:
- FileTmat - name of the file to which the T matrix is written.
PrintProgress
.false.
Variable:
- PrnProgress - if PrnProgress = t, the progress of calculation
is printed.
"""
return file_str_template.format(geometry_type=geometry_type, geometry_xy=geometry_xy, geometry_z=geometry_z,
Nrank=Nrank, wavelength=wavelength, index=index/index_m, index_m=index_m, chiral=str(chiral).lower(),
kb=kb, conducting=str(conducting).lower(), Nparam=Nparam, use_ds=str(use_ds).lower(),
complex_plane=str(complex_plane).lower(), eps_z_re_im=eps_z_re_im, Nint=Nint)
```
#### File: miepy/tmatrix/common.py
```python
import miepy
import numpy as np
from .get_tmatrix import nfmds_solver, tmatrix_solvers
def tmatrix_sphere(radius, wavelength, eps, eps_m, lmax, conducting=False):
"""Compute the T-matrix of a sphere, using regular Mie theory
Arguments:
radius sphere radius
wavelength incident wavelength
eps particle permittivity
eps_m medium permittivity
lmax maximum number of multipoles
conducting if True, calculate for conducting sphere (default: False)
"""
rmax = miepy.vsh.lmax_to_rmax(lmax)
tmatrix = np.zeros([2,rmax,2,rmax], dtype=complex)
k_medium = 2*np.pi*eps_m**0.5/wavelength
for i, n, m in miepy.mode_indices(lmax):
an, bn = miepy.mie_single.mie_sphere_scattering_coefficients(radius,
n, eps, 1, eps_m, 1, k_medium, conducting=conducting)
tmatrix[0,i,0,i] = an
tmatrix[1,i,1,i] = bn
return tmatrix
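# Shape note: rmax = lmax*(lmax + 2), so e.g. lmax = 2 gives a [2, 8, 2, 8] T-matrix that is
# diagonal in the mode index, holding the Mie an/bn coefficients computed above.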
def tmatrix_core_shell(radius, thickness, wavelength, eps_core, eps_shell, eps_m, lmax):
"""Compute the T-matrix of a core-shell, using regular Mie theory
Arguments:
radius core radius
wavelength incident wavelength
eps_core particle permittivity
eps_shell shell permittivity
eps_m medium permittivity
lmax maximum number of multipoles
"""
rmax = miepy.vsh.lmax_to_rmax(lmax)
tmatrix = np.zeros([2,rmax,2,rmax], dtype=complex)
k_medium = 2*np.pi*eps_m**0.5/wavelength
particle = miepy.single_mie_core_shell(radius, radius + thickness,
material_in=miepy.dielectric(eps=eps_core),
material_out=miepy.dielectric(eps=eps_shell),
medium=miepy.dielectric(eps=eps_m),
lmax=lmax,
wavelength=wavelength)
particle.solve()
for i, n, m in miepy.mode_indices(lmax):
tmatrix[0,i,0,i] = -1j*particle.an[0,n-1]
tmatrix[1,i,1,i] = -1j*particle.bn[0,n-1]
return tmatrix
def tmatrix_spheroid(axis_xy, axis_z, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
"""Compute the T-matrix of a spheroid
Arguments:
axis_xy length of semiaxes perpendicular to the axis of symmetry
axis_z length of semiaxis along axis of symmetry
wavelength incident wavelength
eps particle permittivity
eps_m medium permittivity
lmax maximum number of multipoles
extended_precision (bool) whether to use extended precision (default: False)
kwargs additional keywords passed to axisymmetric_file function
"""
complex_plane = True if axis_xy > axis_z else False
parameters = dict(geometry_type=1, geometry_parameters=[axis_z, axis_xy], wavelength=wavelength,
index=eps**.5, index_m=eps_m**0.5, complex_plane=complex_plane, Nparam=1)
parameters.update(kwargs)
return nfmds_solver(lmax, parameters, extended_precision=extended_precision)
def tmatrix_cylinder(radius, height, wavelength, eps, eps_m, lmax, rounded=False, extended_precision=False, **kwargs):
"""Compute the T-matrix of a cylinder, with sharp or rounded (if oblate) edges
Arguments:
radius radius of cylinder
height height of cylinder
wavelength incident wavelength
eps particle permittivity
eps_m medium permittivity
lmax maximum number of multipoles
rounded (bool) if True, and cylinder is oblate, the cylinder's edges are rounded (default: False)
extended_precision (bool) whether to use extended precision (default: False)
kwargs additional keywords passed to axisymmetric_file function
"""
complex_plane = True if 2*radius > height else False
geometry_type = 3 if rounded else 2
if height >= 2*radius and rounded:
raise ValueError('prolate cylinders (height >= diameter) cannot be rounded')
parameters = dict(geometry_type=geometry_type, geometry_parameters=[height/2, radius], wavelength=wavelength,
index=eps**0.5, index_m=eps_m**0.5, complex_plane=complex_plane, Nparam=3)
parameters.update(kwargs)
return nfmds_solver(lmax, parameters, extended_precision=extended_precision)
def tmatrix_ellipsoid(rx, ry, rz, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
"""Compute the T-matrix of a spheroid
Arguments:
rx,ry,rz radii of the 3 axes
wavelength incident wavelength
eps particle permittivity
eps_m medium permittivity
lmax maximum number of multipoles
extended_precision (bool) whether to use extended precision (default: False)
kwargs additional keywords passed to axisymmetric_file function
"""
parameters = dict(geometry_type=1, geometry_parameters=[rx, ry, rz], wavelength=wavelength,
index=eps**0.5, index_m=eps_m**0.5, Nparam=1, Mrank=lmax, R_symmetry=0)
parameters.update(kwargs)
return nfmds_solver(lmax, parameters, solver=tmatrix_solvers.non_axisymmetric,
extended_precision=extended_precision)
def tmatrix_square_prism(side, height, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
"""Compute the T-matrix of a spheroid
Arguments:
width side width of the prism
height height of the prism
eps particle permittivity
eps_m medium permittivity
lmax maximum number of multipoles
extended_precision (bool) whether to use extended precision (default: False)
kwargs additional keywords passed to axisymmetric_file function
"""
parameters = dict(geometry_type=2, geometry_parameters=[side/2, height/2], wavelength=wavelength,
index=eps**0.5, index_m=eps_m**0.5, Nparam=6, Mrank=lmax, R_symmetry=0)
parameters.update(kwargs)
return nfmds_solver(lmax, parameters, solver=tmatrix_solvers.non_axisymmetric,
extended_precision=extended_precision)
def tmatrix_regular_prism(N, side, height, wavelength, eps, eps_m, lmax, extended_precision=False, **kwargs):
"""Compute the T-matrix of a spheroid
Arguments:
N number of vertices
width side width of the prism
height height of the prism
eps particle permittivity
eps_m medium permittivity
lmax maximum number of multipoles
extended_precision (bool) whether to use extended precision (default: False)
kwargs additional keywords passed to axisymmetric_file function
"""
parameters = dict(geometry_type=3, geometry_parameters=[side/2, height/2], wavelength=wavelength,
index=eps**0.5, index_m=eps_m**0.5, Nparam=2, Mrank=lmax, R_symmetry=N)
parameters.update(kwargs)
return nfmds_solver(lmax, parameters, solver=tmatrix_solvers.non_axisymmetric,
extended_precision=extended_precision)
def tmatrix_sphere_cluster(pos, radii, lmax, lmax_cluster, wavelength, eps, eps_m, extended_precision=False, **kwargs):
    """Compute the T-matrix of a cluster of spheres
    Arguments:
        pos            sphere positions
        radii          sphere radii
        lmax           maximum number of multipoles per sphere
        lmax_cluster   maximum number of multipoles for the cluster T-matrix
        wavelength     incident wavelength
        eps            particle permittivity
        eps_m          medium permittivity
        extended_precision    (bool) whether to use extended precision (default: False)
        kwargs         additional keywords passed to the sphere-cluster solver
    """
parameters = dict(pos=pos, radii=radii, Nrank_particles=lmax,
wavelength=wavelength, index=eps**0.5, index_m=eps_m**0.5)
parameters.update(kwargs)
return nfmds_solver(lmax_cluster, parameters, solver=tmatrix_solvers.sphere_cluster,
extended_precision=extended_precision)
```
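The shape helpers above all reduce to a call to `nfmds_solver` with shape-specific geometry parameters. Below is a minimal, hedged usage sketch; the import path and the return convention of `nfmds_solver` are assumptions, since neither is pinned down by this file alone.
```python
# Hedged sketch: building T-matrices for a prolate spheroid and a rounded oblate cylinder.
# The import path below is an assumption; adjust it to wherever these helpers live in miepy.
import numpy as np
from miepy.tmatrix import tmatrix_spheroid, tmatrix_cylinder  # assumed path

nm = 1e-9
wavelength = 600*nm
eps, eps_m = 2.5 + 0.1j, 1.33**2      # particle / medium permittivities
lmax = 4

# prolate spheroid: semiaxis along z longer than the transverse semiaxes
T_spheroid = tmatrix_spheroid(axis_xy=40*nm, axis_z=80*nm, wavelength=wavelength,
                              eps=eps, eps_m=eps_m, lmax=lmax)

# oblate cylinder (diameter > height), so rounded edges are allowed
T_cylinder = tmatrix_cylinder(radius=60*nm, height=40*nm, wavelength=wavelength,
                              eps=eps, eps_m=eps_m, lmax=lmax, rounded=True)
# both calls run the external NFM-DS solver; the returned arrays follow its T-matrix layout
```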
#### File: miepy/vsh/cluster_coefficients.py
```python
import numpy as np
import miepy.coordinates as coordinates
from miepy import vsh
#TODO: equations for rmax, r, lmax (here and elsewhere) should be a function call
#TODO: iteration over (n,m,r) could be simplified through a generator call (see all interactions)
def cluster_coefficients(positions, p_scat, k, origin, lmax=None):
"""Solve for the cluster scattering coefficients of N particles around an origin
Arguments:
positions[N,3] particle positions
p_scat[N,2,rmax] scattering coefficients
k medium wavenumber
origin position around which to calculate the cluster coefficients
lmax (optional) compute scattering for up to lmax terms (default: lmax of input p/q)
"""
Nparticles = positions.shape[0]
rmax_in = p_scat.shape[-1]
lmax_in = vsh.rmax_to_lmax(rmax_in)
if lmax is None:
lmax = lmax_in
rmax = vsh.lmax_to_rmax(lmax)
p_cluster = np.zeros([2,rmax], dtype=complex)
for i in range(Nparticles):
if np.all(positions[i] == origin):
p_cluster[0,:rmax_in] += p_scat[i,0,:rmax_in]
p_cluster[1,:rmax_in] += p_scat[i,1,:rmax_in]
continue
rij = origin - positions[i]
rad, theta, phi = coordinates.cart_to_sph(*rij)
for r,n,m in vsh.mode_indices(lmax):
for rp,v,u in vsh.mode_indices(lmax_in):
a = p_scat[i,0,rp]
b = p_scat[i,1,rp]
A, B = vsh.vsh_translation(m, n, u, v, rad, theta, phi, k, vsh.vsh_mode.incident)
p_cluster[0,r] += a*A + b*B
p_cluster[1,r] += a*B + b*A
return p_cluster
```
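A hedged sketch of how `cluster_coefficients` can be exercised on its own follows; the per-particle coefficients are random placeholders rather than the output of a real GMMT solve, and the import paths assume miepy is installed with the module layout shown above.
```python
# Hedged sketch: merge per-particle scattering coefficients into cluster coefficients
# about the geometric center. p_scat is dummy data standing in for a real GMMT solution.
import numpy as np
from miepy import vsh
from miepy.vsh.cluster_coefficients import cluster_coefficients  # assumed import path

nm = 1e-9
lmax = 2
rmax = vsh.lmax_to_rmax(lmax)

positions = np.array([[-100*nm, 0, 0], [100*nm, 0, 0]], dtype=float)
rng = np.random.default_rng(0)
p_scat = rng.normal(size=(2, 2, rmax)) + 1j*rng.normal(size=(2, 2, rmax))

k = 2*np.pi/(600*nm)
origin = positions.mean(axis=0)
p_cluster = cluster_coefficients(positions, p_scat, k, origin)
print(p_cluster.shape)   # (2, rmax)
```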
#### File: miepy/vsh/vsh_rotation.py
```python
import numpy as np
import miepy
def vsh_rotation_matrix(n, quat):
"""Rotation matrix for a given multipole order
Arguments:
n multipole order
quat quaternion representing the rotation
Returns:
Rotation matrix R[2n+1,2n+1], such that p' = R*p
"""
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import spherical
l = 2*n + 1
R = spherical.wigner_D(quat.components, n, n).reshape((l,l))
m = np.arange(-n, n+1)
R *= np.power(-1.0, np.subtract.outer(m, m))
return np.conj(R)
def rotate_expansion_coefficients(p_exp, quat):
"""Rotate a set of expansion coefficients to a new reference frame
Arguments:
p_exp[2,rmax] expansion coefficients
quat quaternion representing the rotation
Returns:
The rotated expansion coefficients, p_rot[2,rmax]
"""
p_rot = np.empty_like(p_exp)
rmax = p_exp.shape[-1]
lmax = miepy.vsh.rmax_to_lmax(rmax)
for n in range(1,lmax+1):
R = vsh_rotation_matrix(n, quat)
rmax = miepy.vsh.lmax_to_rmax(n)
idx = np.s_[rmax-(2*n+1):rmax]
if p_rot.ndim == 3:
p_rot[...,idx] = np.einsum('ab,Npb->Npa', R, p_exp[...,idx])
else:
p_rot[:,idx] = np.einsum('ab,pb->pa', R, p_exp[:,idx])
return p_rot
```
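Since each Wigner-D block is unitary (up to the ±1 phase factors), rotating a coefficient vector should leave its norm unchanged; the hedged sketch below checks exactly that. The direct module import and the availability of the numpy-quaternion package are assumptions.
```python
# Hedged sketch: rotate random expansion coefficients and verify the norm is preserved.
import numpy as np
import quaternion                      # numpy-quaternion package (listed in setup.py)
from miepy import vsh
from miepy.vsh.vsh_rotation import rotate_expansion_coefficients  # assumed import path

lmax = 3
rmax = vsh.lmax_to_rmax(lmax)
rng = np.random.default_rng(1)
p = rng.normal(size=(2, rmax)) + 1j*rng.normal(size=(2, rmax))

theta = 0.7
quat = np.quaternion(np.cos(theta/2), 0.0, np.sin(theta/2), 0.0)   # rotation by theta about y

p_rot = rotate_expansion_coefficients(p, quat)
assert np.isclose(np.linalg.norm(p), np.linalg.norm(p_rot))        # block-unitary rotation
```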
#### File: johnaparker/MiePy/setup.py
```python
import os
import re
import sys
import platform
import subprocess
from setuptools import setup, find_packages, Extension
from distutils.command.build import build
from setuptools.command.build_ext import build_ext
from setuptools import Command
from distutils.version import LooseVersion
NAME = 'miepy'
DESCRIPTION = "Solve Maxwell's equations for a cluster of particles using the generalized multiparticle Mie theory (GMMT)"
URL = ''
EMAIL = '<EMAIL>'
AUTHOR = '<NAME>'
KEYWORDS = 'electrodynamics mie scattering'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.5.0'
LICENSE = 'GPLv3'
REQUIRED = [
'numpy',
'scipy',
'matplotlib',
'tqdm',
'sympy',
'pandas',
'pyyaml',
'numpy_quaternion',
'spherical',
'vpython',
]
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
def unzip_material_database():
import zipfile
path = 'miepy/materials/database.zip'
with zipfile.ZipFile(path, 'r') as zip_ref:
zip_ref.extractall('miepy/materials')
def build_nfmds(build_direc, lib_dir):
import pathlib
if platform.system() == "Windows":
build_direc = build_direc.replace('\\', r'/')
lib_dir = lib_dir.replace('\\', r'/')
src_dir = 'miepy/tmatrix/nfmds'
obj_dir = '../../../{direc}/nfmds'.format(direc=build_direc)
exe_dir = '../../../{direc}/miepy/bin'.format(direc=lib_dir)
exe_dir = '../../bin'
exe_dir_root = 'miepy/bin'
obj_dir_root = '{build_direc}/nfmds'.format(build_direc=build_direc)
pathlib.Path(exe_dir_root).mkdir(exist_ok=True)
pathlib.Path(obj_dir_root).mkdir(exist_ok=True)
command = ['make', 'objdir={obj_dir}'.format(obj_dir=obj_dir),
'exedir={exe_dir}'.format(exe_dir=exe_dir)]
subprocess.check_call(' '.join(command), cwd=src_dir, shell=True)
class builder(build):
def run(self):
if not os.path.isdir('miepy/materials/database'):
unzip_material_database()
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
build_nfmds(self.build_temp, self.build_lib)
super().run()
class builder_ext(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description=DESCRIPTION,
license=LICENSE,
keywords=KEYWORDS,
url=URL,
packages=find_packages(),
long_description=read('README.md'),
long_description_content_type='text/markdown',
install_requires=REQUIRED,
python_requires=REQUIRES_PYTHON,
include_package_data = True,
ext_modules=[CMakeExtension('miepy/cpp', './cpp')],
cmdclass={
'build': builder,
'build_ext': builder_ext,
},
classifiers=[
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python',
'Programming Language :: C++',
'Programming Language :: Fortran',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Development Status :: 3 - Alpha',
'Topic :: Scientific/Engineering :: Physics',
'Intended Audience :: Science/Research',
],
zip_safe=False,
)
```
#### File: MiePy/tests/test_decomposition.py
```python
import numpy as np
import miepy
import pytest
def test_vsh_source_decomposition():
"""verify the decomposition of the vsh_source"""
x, y, z = (-.1, .1, .1)
k = 2*np.pi
lmax = 7
p = np.array([[0.1,0,-.1]])
R, THETA, PHI = miepy.coordinates.cart_to_sph(x - p[0,0], y - p[0,1], z - p[0,2])
source = miepy.sources.vsh_source(2, 2, ftype='electric', center=(0.3,.2,.1))
E1 = source.E_field(x, y, z, k)
p_src = source.structure(p, k, lmax)[0]
Efunc = miepy.vsh.expand_E(p_src, k, miepy.vsh_mode.incident)
E2 = Efunc(R, THETA, PHI)
E2 = miepy.coordinates.vec_sph_to_cart(E2, THETA, PHI)
assert np.allclose(E1[:2], E2[:2], atol=0, rtol=1e-7), 'x,y components equal (electric mode)'
assert np.allclose(E1[2], 0, atol=1e-15), 'z components of E1 goes to zero (electric mode)'
assert np.allclose(E2[2], 0, atol=1e-7), 'z component of E2 goes to zero (electric mode)'
source = miepy.sources.vsh_source(2, 2, ftype='magnetic', center=(0.3,.2,.1))
E1 = source.E_field(x, y, z, k)
p_src = source.structure(p, k, lmax)[0]
Efunc = miepy.vsh.expand_E(p_src, k, miepy.vsh_mode.incident)
E2 = Efunc(R, THETA, PHI)
E2 = miepy.coordinates.vec_sph_to_cart(E2, THETA, PHI)
assert np.allclose(E1[2], E2[2], atol=0, rtol=1e-7), 'z components equal (magnetic mode)'
    assert np.allclose(E1[:2], 0, atol=1e-15), 'x,y components of E1 go to zero (magnetic mode)'
assert np.allclose(E2[:2], 0, atol=1e-7), 'x,y component of E2 go to zero (magnetic mode)'
```
#### File: MiePy/tests/test_ellipsoid.py
```python
import miepy
import numpy as np
nm = 1e-9
rx = ry = 40*nm
rz = 70*nm
material = miepy.materials.Ag()
wavelength = 700*nm
lmax = 3
def test_ellipsoid_equal_spheroid_z_oriented():
"""An (rx, ry, rz) ellipsoid should equal a z-oriented (rxy, rz) spheroid"""
e = miepy.spheroid([0,0,0], rx, rz, material)
T1 = e.compute_tmatrix(lmax, wavelength, 1.0)
e = miepy.ellipsoid([0,0,0], rx, ry, rz, material)
T2 = e.compute_tmatrix(lmax, wavelength, 1.0)
assert np.allclose(T1, T2, atol=2e-5)
def test_ellipsoid_equal_spheroid_x_oriented():
"""An (rz, rx, ry) ellipsoid should equal an x-oriented (rxy, rz) spheroid"""
q = miepy.quaternion.from_spherical_coords(np.pi/2, 0)
e = miepy.spheroid([0,0,0], rx, rz, material, orientation=q)
T1 = e.compute_tmatrix(lmax, wavelength, 1.0)
e = miepy.ellipsoid([0,0,0], rz, rx, ry, material)
T2 = e.compute_tmatrix(lmax, wavelength, 1.0)
assert np.allclose(T1, T2, atol=5e-5)
def test_ellipsoid_equal_spheroid_y_oriented():
"""An (rx, rz, ry) ellipsoid should equal a y-oriented (rxy, rz) spheroid"""
q = miepy.quaternion.from_spherical_coords(np.pi/2, np.pi/2)
e = miepy.spheroid([0,0,0], rx, rz, material, orientation=q)
T1 = e.compute_tmatrix(lmax, wavelength, 1.0)
e = miepy.ellipsoid([0,0,0], rx, rz, ry, material)
T2 = e.compute_tmatrix(lmax, wavelength, 1.0)
assert np.allclose(T1, T2, atol=5e-5)
```
#### File: MiePy/tests/test_far_field.py
```python
import numpy as np
import miepy
nm = 1e-9
dimer = miepy.sphere_cluster(position=[[-100*nm,0,0], [100*nm, 0, 0]],
radius=75*nm,
material=miepy.constant_material(3.6**2),
source=miepy.sources.plane_wave.from_string(polarization='y'),
wavelength=600*nm,
lmax=2)
theta = np.linspace(0, np.pi, 5)
phi = np.linspace(0, 2*np.pi, 5)
THETA, PHI = np.meshgrid(theta, phi)
R = 1e6
X, Y, Z = miepy.coordinates.sph_to_cart(R, THETA, PHI)
E_exact = dimer.E_field(R, THETA, PHI, source=False, interior=False, spherical=True)
def test_far_field_convergence():
"""far-field E and H field should agree with exact field in the large radius limit"""
E_far = dimer.E_field(R, THETA, PHI, far=True, source=False, interior=False, spherical=True)
np.testing.assert_allclose(E_exact, E_far, rtol=0, atol=1e-16)
def test_far_field_cluster_coefficient():
"""far-fields calculated from the cluster coefficients should be the same as the sum-over particle coefficients"""
dimer.solve_cluster_coefficients(lmax=4)
E_func = miepy.vsh.expand_E_far(dimer.p_cluster, dimer.material_data.k_b)
E_far = E_func(R, THETA, PHI)
np.testing.assert_allclose(E_exact, E_far, rtol=0, atol=1e-15)
def test_far_field_directly():
"""far-field function compared directly to total field function for n=2, m=-1"""
x = 1e6
n = 2
m = -1
Nfunc, Mfunc = miepy.vsh.VSH(n, m)
rad, theta, phi = 1e6, 0.9, -0.6
k = 1
N = Nfunc(rad, theta, phi, k)
M = Mfunc(rad, theta, phi, k)
tau = miepy.vsh.special.tau_func(n, m, theta)
pi = miepy.vsh.special.pi_func(n, m, theta)
E_theta_1 = 1j*N[1]
factor = np.exp(1j*k*rad)/(k*rad)
E_theta_2 = 1j*factor*(-1j)**n*tau*np.exp(1j*m*phi)
np.testing.assert_allclose(E_theta_1, E_theta_2, rtol=0, atol=1e-12)
E_theta_1 = 1j*M[1]
factor = np.exp(1j*k*rad)/(k*rad)
E_theta_2 = 1j*factor*(-1j)**n*pi*np.exp(1j*m*phi)
np.testing.assert_allclose(E_theta_1, E_theta_2, rtol=0, atol=1e-12)
```
#### File: MiePy/tests/test_fields.py
```python
import numpy as np
import miepy
nm = 1e-9
def test_boundary_conditions():
"""verifies the continunity of tangential components of E and H at the surface of a particle"""
radius = 50*nm
cluster = miepy.sphere_cluster(position=[[0,0,0]],
radius=radius,
                                   material=miepy.materials.Ag(),
lmax=2,
wavelength=600*nm,
source=miepy.sources.plane_wave.from_string(polarization='y'),
medium=miepy.constant_material(1.2**2))
theta = 0.3
phi = 0.3
eps = .1*nm
E_out = cluster.E_field(radius + eps, theta, phi, spherical=True)
E_in = cluster.E_field(radius - eps, theta, phi, spherical=True)
H_out = cluster.H_field(radius + eps, theta, phi, spherical=True)
H_in = cluster.H_field(radius - eps, theta, phi, spherical=True)
assert np.allclose(E_out[1:], E_in[1:], atol=4e-2, rtol=0)
assert np.allclose(H_out[1:], H_in[1:], atol=4e-2, rtol=0)
```
#### File: MiePy/tests/test_interface.py
```python
import numpy as np
import miepy
import pytest
nm = 1e-9
wavelength = 600*nm
k = 2*np.pi/wavelength
radius = 75*nm
medium = miepy.materials.water()
material = miepy.materials.Ag()
width = 200*nm
polarization = [1,0]
zpos = 400*nm
@pytest.mark.parametrize("s1,s2,rtol", [
(miepy.sources.gaussian_beam(width=width, polarization=polarization, center=[0,0,-zpos]),
miepy.sources.gaussian_beam(width=width, polarization=polarization), 0),
(miepy.sources.plane_wave(polarization=polarization),
miepy.sources.plane_wave(polarization=polarization), 1e-4)
])
def test_interface_z_translation(s1, s2, rtol):
"""
Moving the source and particle is identical to moving the interface (cross-section comparison)
"""
interface = miepy.interface(miepy.constant_material(index=1.7))
cluster = miepy.sphere_cluster(position=[0,0,-zpos],
radius=radius,
material=material,
medium=medium,
lmax=2,
source=s1,
interface=interface,
wavelength=wavelength)
C1 = np.array(cluster.cross_sections())
interface = miepy.interface(miepy.constant_material(index=1.7), z=zpos)
cluster = miepy.sphere_cluster(position=[0,0,0],
radius=radius,
material=material,
medium=medium,
lmax=2,
source=s2,
interface=interface,
wavelength=wavelength)
C2 = np.array(cluster.cross_sections())
assert np.allclose(C1, C2, atol=0, rtol=rtol)
@pytest.mark.parametrize("source,rtol", [
(miepy.sources.gaussian_beam(width=width, polarization=polarization), 1e-15),
(miepy.sources.plane_wave(polarization=polarization), 0)
])
def test_index_matched_interface(source, rtol):
"""
An interface that is index-matched with the medium is identical to not having an interface (cross-section comparison)
"""
interface = miepy.interface(medium, z=zpos)
cluster = miepy.sphere_cluster(position=[0,0,0],
radius=radius,
material=material,
medium=medium,
lmax=2,
source=source,
interface=interface,
wavelength=wavelength)
C1 = np.array(cluster.cross_sections())
cluster = miepy.sphere_cluster(position=[0,0,0],
radius=radius,
material=material,
medium=medium,
lmax=2,
source=source,
wavelength=wavelength)
C2 = np.array(cluster.cross_sections())
    assert np.allclose(C1, C2, atol=0, rtol=rtol)
```
#### File: MiePy/tests/test_near_to_far.py
```python
import numpy as np
import miepy
import pytest
### parameters
nm = 1e-9
wav = 600*nm
k = 2*np.pi/wav
width = 100*nm
polarization = [1,1j]
### angular grid
radius = 150.3*wav
theta = np.linspace(0., np.pi, 4)
phi = np.linspace(0, 2*np.pi, 5)[:-1]
THETA, PHI = np.meshgrid(theta, phi, indexing='ij')
X, Y, Z = miepy.coordinates.sph_to_cart(radius, THETA, PHI)
@pytest.mark.parametrize("source,atol,rtol", [
(miepy.sources.gaussian_beam(width=width, polarization=polarization), 0, 2e-2),
(miepy.sources.hermite_gaussian_beam(2, 0, width=width, polarization=polarization), 0, 2e-2),
(miepy.sources.laguerre_gaussian_beam(1, 1, width=width, polarization=polarization), 1, 5e-2),
])
def test_source_electric_field_near_to_far(source, atol, rtol):
"""
Compare E-field of source in far field using near and far field expressions
Expressions are expected to converge in the limit r -> infinity
"""
E1 = source.E_field(X, Y, Z, k, sampling=300)
E1 = miepy.coordinates.vec_cart_to_sph(E1, THETA, PHI)[1:]
E2 = source.E_angular(THETA, PHI, k, radius=radius)
assert np.allclose(E1, E2, atol=atol, rtol=rtol)
@pytest.mark.parametrize("source,atol,rtol", [
(miepy.sources.gaussian_beam(width=width, polarization=polarization), 0, 2e-2),
(miepy.sources.hermite_gaussian_beam(2, 0, width=width, polarization=polarization), 0, 2e-2),
(miepy.sources.laguerre_gaussian_beam(1, 1, width=width, polarization=polarization), 1, 5e-2),
])
def test_source_magnetic_field_near_to_far(source, atol, rtol):
"""
Compare H-field of source in far field using near and far field expressions
Expressions are expected to converge in the limit r -> infinity
"""
H1 = source.H_field(X, Y, Z, k, sampling=300)
H1 = miepy.coordinates.vec_cart_to_sph(H1, THETA, PHI)[1:]
H2 = source.H_angular(THETA, PHI, k, radius=radius)
assert np.allclose(H1, H2, atol=atol, rtol=rtol)
def test_cluster_field_near_to_far():
"""
Compare scattered E/H-field of a cluster in far field using near and far field expressions
Expressions are expected to converge in the limit r -> infinity
"""
x = np.linspace(-600*nm, 600*nm, 3)
y = np.linspace(-600*nm, 600*nm, 3)
cluster = miepy.sphere_cluster(position=[[xv, yv, 0] for xv in x for yv in y],
radius=100*nm,
material=miepy.constant_material(index=2),
wavelength=wav,
source=miepy.sources.plane_wave([1,1]),
lmax=3)
theta = np.linspace(0, np.pi, 5)
phi = np.linspace(0, 2*np.pi, 5)
THETA, PHI = np.meshgrid(theta, phi)
radius = np.ones_like(THETA)
E1 = cluster.E_field(radius, THETA, PHI, spherical=True, source=False)
E2 = cluster.E_angular(THETA, PHI, radius=radius, source=False)
H1 = cluster.H_field(radius, THETA, PHI, spherical=True, source=False)
H2 = cluster.H_angular(THETA, PHI, radius=radius, source=False)
assert np.allclose(E1[0], 0, atol=1e-10), 'radial component of E goes to 0'
assert np.allclose(E1[1:], E2, atol=0, rtol=1e-6), 'E converges'
assert np.allclose(H1[0], 0, atol=1e-10), 'radial component of H goes to 0'
assert np.allclose(H1[1:], H2, atol=0, rtol=1e-6), 'H converges'
```
#### File: MiePy/tests/test_single_gmt.py
```python
import numpy as np
import miepy
from tqdm import tqdm
nm = 1e-9
# wavelength from 400nm to 1000nm
wavelengths = np.linspace(400*nm,1000*nm,10)
# create a material with n = 3.7 (eps = n^2) at all wavelengths
dielectric = miepy.constant_material(3.7**2 + .1j)
# calculate scattering coefficients
radius = 100*nm # 100 nm radius
# water medium
medium = miepy.materials.water()
# Single Mie Theory
lmax = 5 # Use up to 5 multipoles
sphere = miepy.single_mie_sphere(radius, dielectric, wavelengths, lmax, medium=medium)
S,A,E = sphere.cross_sections()
Fz = sphere.radiation_force()
# Generalized Mie Theory (GMT)
source = miepy.sources.plane_wave.from_string(polarization='x')
scat = np.zeros_like(wavelengths)
absorb = np.zeros_like(wavelengths)
extinct = np.zeros_like(wavelengths)
force = np.zeros((3,) + wavelengths.shape)
for i,wavelength in enumerate(wavelengths):
system = miepy.sphere_cluster(position=[0,0,0],
radius=radius,
material=dielectric,
source=source,
wavelength=wavelength,
lmax=lmax,
medium=medium)
scat[i],absorb[i],extinct[i] = system.cross_sections()
force[:,i] = system.force_on_particle(0)
def test_scattering():
"""compare scattering cross-section of GMT and single Mie theory"""
L2 = np.linalg.norm(scat - S)/scat.shape[0]
avg = np.average(np.abs(S) + np.abs(scat))/2
assert np.all(L2 < 1e-15*avg)
def test_absorption():
    """compare absorption cross-section of GMT and single Mie theory"""
L2 = np.linalg.norm(absorb - A)/scat.shape[0]
avg = np.average(np.abs(A) + np.abs(absorb))/2
assert np.all(L2 < 2e-15*avg)
def test_force():
"""compare radiation force of GMT and single Mie theory"""
L2 = np.linalg.norm(force[2] - Fz)/Fz.shape[0]
avg = np.average(np.abs(force[2]) + np.abs(Fz))/2
assert np.all(L2 < 1e-6*avg)
if __name__ == '__main__':
import matplotlib.pyplot as plt
plt.figure()
plt.plot(wavelengths/nm, scat, color='C0', label='GMT scattering')
plt.plot(wavelengths/nm, S, 'o', color='C0', label="Single Mie theory", linewidth=2)
plt.plot(wavelengths/nm, absorb, color='C1', label='GMT absorption')
plt.plot(wavelengths/nm, A, 'o', color='C1', label="Single Mie theory", linewidth=2)
plt.plot(wavelengths/nm, extinct, color='C2', label='GMT extinction')
plt.plot(wavelengths/nm, E, 'o', color='C2', label="Single Mie theory", linewidth=2)
plt.xlabel("wavelength (nm)")
plt.ylabel("Scattering cross-section")
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.legend()
plt.figure()
plt.plot(wavelengths/nm, force[2], color='C1', label='GMT force')
plt.plot(wavelengths/nm, Fz, 'o', color='C1', label="Single Mie theory", linewidth=2)
plt.xlabel("wavelength (nm)")
plt.ylabel("force")
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.legend()
plt.show()
``` |
{
"source": "johnaparker/numpipe",
"score": 3
} |
#### File: numpipe/examples/basics.py
```python
from numpipe import scheduler, once
import numpy as np
import matplotlib.pyplot as plt
### Setup
job = scheduler()
### Fast, shared code goes here
x = np.linspace(0,1,10)
### Slow, sim-only code goes here and relevant data is written to file
@job.cache
def sim1():
"""compute the square of x"""
y = x**2
return {'y': y}
@job.cache
def sim2():
"""compute the cube of x"""
z = x**3
return {'z': z}
@job.cache
def sim3():
"""construct a time-series"""
for i in range(5):
z = x*i + 1
yield {'time_series': z, 'time': i}
yield once(xavg=np.average(x))
@job.cache
def sim4(param):
"""sim depends on parameter"""
x = np.array([1,2,3])
return {'y': param*x}
@job.cache
def sim5():
pass
job.add(sim4, 'A', param=2)
job.add(sim4, 'A', param=3)
job.add(sim4, 'A', param=4)
job.add(sim4, 'B', param=4)
@job.plots
def vis():
"""visualize the data"""
cache = job.load(sim1)
plt.plot(x, cache.y)
cache = job.load(sim2)
plt.plot(x, cache.z)
for name, cache in job.load(sim4):
print(f'{name} instance has y = {cache.y} with param = {cache.args.param}')
# with job.load(sim4, defer=True) as cache:
plt.show()
### execute
if __name__ == '__main__':
job.run()
```
#### File: numpipe/examples/dependency.py
```python
from numpipe import scheduler
from time import sleep
job = scheduler()
@job.cache()
def A():
sleep(1)
return dict(x=2)
@job.cache(depends=A)
def B():
sleep(1)
return dict(x=2)
@job.cache(depends=A)
def C(i):
sleep(1)
return dict(x=2)
@job.cache(depends=C)
def D():
sleep(1)
return dict(x=2)
job.add(C, i=0)
job.add(C, i=1).depends(B)
if __name__ == '__main__':
job.run()
```
#### File: numpipe/examples/exception.py
```python
from numpipe import scheduler
from time import sleep
### Setup
job = scheduler()
N = 10
T = .5
@job.cache()
def first():
for i in range(N):
sleep(T/N)
return {}
@job.cache
def second():
x = 1/0
return {}
@job.cache()
def third():
for i in range(N):
sleep(T/N)
return {}
if __name__ == '__main__':
job.run()
```
#### File: numpipe/examples/progress.py
```python
import numpipe
from numpipe import pbar
from time import sleep
### Setup
job = numpipe.scheduler()
import numpy as np
N = 100
T = 5
@job.cache
def progress(i):
progress = 0
for j in pbar(range(N)):
if i in (2,5,8) and j == 40:
raise RuntimeError
sleep(T/N)
yield dict()
for i in range(10):
job.add(progress, i=i)
### execute
if __name__ == '__main__':
job.run()
```
#### File: numpipe/numpipe/notify.py
```python
import socket
from datetime import datetime
from time import time, sleep
from numpipe import config
import matplotlib.pyplot as plt
import matplotlib as mpl
DEFAULT_DELAY = config.get_config()['notifications']['delay_default']
def get_bot_token():
"""get bot token from config file"""
return config.get_config()['notifications']['telegram']['token']
def get_chat_id():
"""get chat ID from config file"""
return config.get_config()['notifications']['telegram']['chat_id']
def notifications_active():
"""check whether notifications can be sent based on the config file"""
return get_bot_token() and get_chat_id()
def generate_time_str(time):
"""convert time (in seconds) to a text string"""
hours = int(time // 60**2)
minutes = int((time - hours*60**2) // 60)
seconds = int((time - hours*60**2 - minutes*60) // 1)
ret = ''
if hours:
unit = 'hr' if hours == 1 else 'hrs'
ret += f'{hours} {unit} '
if minutes:
unit = 'min' if minutes == 1 else 'mins'
ret += f'{minutes} {unit} '
if not (hours and minutes):
unit = 'sec' if seconds == 1 else 'secs'
ret += f'{seconds} {unit}'
return ret.strip()
def check_idle_matplotlib(delay=DEFAULT_DELAY, check_every=.5):
"""
Check if the user is idle based on matplotlib plot interactions (mouse move, key press, window interactions)
Arguments:
delay time (in seconds) to check before declaring idle
check_every time (in seconds) between interaction checks
"""
nfigures_before = len(plt.get_fignums())
if not nfigures_before:
raise RuntimeError('cannot check for user idleness if there are no figures')
mouse_moved = False
key_pressed = False
def on_mouse_movement(event):
nonlocal mouse_moved
mouse_moved = True
def on_key_press(event):
nonlocal key_pressed
key_pressed = True
fig = plt.figure(nfigures_before)
cid = fig.canvas.mpl_connect('motion_notify_event', on_mouse_movement)
cid = fig.canvas.mpl_connect('key_press_event', on_key_press)
t_start = time()
while time() - t_start < 1:
sleep(.01)
if not plt.get_fignums():
return
backend = mpl.get_backend()
qt_backends = ('Qt4Agg', 'Qt5Agg')
if backend in qt_backends:
x0 = fig.canvas.manager.window.x()
y0 = fig.canvas.manager.window.y()
w0 = fig.canvas.manager.window.size()
t_start = time()
while time() - t_start < delay:
sleep(check_every)
if len(plt.get_fignums()) != nfigures_before:
return False
if mouse_moved:
return False
if key_pressed:
return False
if backend in qt_backends:
if not fig.canvas.manager.window.isActiveWindow():
return False
x = fig.canvas.manager.window.x()
y = fig.canvas.manager.window.y()
w = fig.canvas.manager.window.size()
if x != x0 or y != y0:
return False
if w != w0:
return False
return True
def send_message(message):
"""send a text message"""
from telegram import Bot, ParseMode
bot = Bot(token=get_bot_token())
bot.send_message(chat_id=get_chat_id(), text=message, parse_mode=ParseMode.MARKDOWN)
def send_message_from(message, filename):
"""send a text message with """
from telegram import Bot, ParseMode
host = socket.gethostname()
message = f'*{message}*' + f'\n_{host}:{filename}_'
bot = Bot(token=get_bot_token())
bot.send_message(chat_id=get_chat_id(), text=message, parse_mode=ParseMode.MARKDOWN)
def send_finish_message(filename, njobs, time, num_exceptions):
"""send a text message summarizing the jobs that ran
Arguments:
filename name of the python file
njobs number of jobs ran
time runtime of the jobs
num_exceptions number of jobs that threw exceptions
"""
host = socket.gethostname()
time_str = generate_time_str(time)
tab = ' '
if num_exceptions:
status = f'{num_exceptions}/{njobs} failures'
else:
status = 'success'
date = datetime.now().strftime("%H:%M %d-%m-%Y")
text = f'''`Simulation finished:
{tab}filename___{filename}.py
{tab}status_____{status}
{tab}host_______{host}
{tab}njobs______{njobs}
{tab}runtime____{time_str}
{tab}date_______{date}`
'''
send_message(text)
def send_images(filename, exempt=[]):
"""send images (from matplotlib)
Arguments:
filename name of the python file (without .py extension)
exempt (optional) a list of figure numbers to not send
"""
if not plt.get_fignums():
return
from telegram import Bot, ChatAction, InputMediaPhoto
from io import BytesIO
bot = Bot(token=get_bot_token())
chat_id = get_chat_id()
send_action = lambda: bot.send_chat_action(chat_id=chat_id, action=ChatAction.UPLOAD_PHOTO)
send_action()
media = []
num_figs = 0
t_start = time()
for i in plt.get_fignums():
fig = plt.figure(i)
if fig.number in exempt:
continue
caption = f'{filename}-fig{i}'
bio = BytesIO()
bio.name = f'fig{i}.png'
fig.savefig(bio, format='png')
bio.seek(0)
media.append(InputMediaPhoto(bio, caption=caption))
plt.close(fig)
num_figs += 1
if num_figs == 10:
num_figs = 0
bot.send_media_group(chat_id, media=media)
media = []
if time() - t_start > 7:
send_action()
t_start = time()
if num_figs:
bot.send_media_group(chat_id, media=media)
def send_videos(anims):
"""send a set of animations
Arguments:
anims list of lists of the animations (each embedded list must share the same figure)
"""
if not anims:
return
from telegram import Bot
import tempfile
plt.close('all')
bot = Bot(token=get_bot_token())
chat_id = get_chat_id()
with tempfile.TemporaryDirectory() as direc:
for i,anim_list in enumerate(anims):
plt.close(anim_list[0]._fig)
filepath = f'{direc}/vid{i}.mp4'
send_animation(bot, chat_id, anim_list, filepath)
bot.send_video(chat_id, video=open(filepath, 'rb'))
def send_animation(bot, chat_id, anim_list, filepath, *args, **kwargs):
"""send a single animation for a given bot and chat_id
Arguments:
bot telegram bot
chat_id telgram chat id
anim_list list of animations (belonging to the same figure)
filepath filepath the animation will be saved to
*args additional arguments to pass to anim.save
**kwargs additional key-word arguments to pass to anim.save
"""
from telegram import ChatAction
anim = anim_list[0]
send_action = lambda: bot.send_chat_action(chat_id=chat_id, action=ChatAction.UPLOAD_VIDEO)
store_func = anim._func
t_start = time()
send_action()
def wrapper(*args):
nonlocal t_start
if time() - t_start > 7:
send_action()
t_start = time()
return store_func(*args)
anim._func = wrapper
anim.save(filepath, extra_anim=anim_list[1:], *args, **kwargs)
anim._func = store_func
def send_notifications(notifications, delay=DEFAULT_DELAY, check_idle=True, idle=False):
"""send a collection of notifications
Arguments:
notifications list of functions to call that send notifications (no arguments)
delay time (in seconds) to check if the user is idle before notifying
check_idle whether or not to check if the user is idle (default: True)
idle whether or not the user is idle now (default: False)
"""
if not notifications_active():
return
if check_idle:
idle = check_idle_matplotlib(delay=delay)
if idle:
for notification in notifications:
notification()
```
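Most of this module needs a configured Telegram bot, but `generate_time_str` is a pure helper and can be sanity-checked on its own. The sketch below assumes `numpipe` is installed and that importing `numpipe.notify` succeeds (it reads the config file and imports matplotlib at import time).
```python
# Hedged sketch: runtime formatting with generate_time_str.
from numpipe.notify import generate_time_str

print(generate_time_str(42))              # "42 secs"
print(generate_time_str(61))              # "1 min 1 sec"
print(generate_time_str(3*60**2 + 125))   # "3 hrs 2 mins" (seconds omitted once hours and minutes are shown)
```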
#### File: numpipe/numpipe/parser.py
```python
import argparse
from numpipe import config
notifications_default_delay = config.get_config()['notifications']['delay_default']
processes_default = None if config.get_config()['execution']['parallel_default'] else 1
mininterval = config.get_config()['progress']['mininterval']
def run_parser():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="action")
display_parser = subparsers.add_parser('display', help='display available functions and descriptions')
display_parser = subparsers.add_parser('clean', help='remove all h5files that are no longer cache functions')
slurm_parse = subparsers.add_parser('slurm', help='run on a system with the Slurm Workload Manager')
for p in [parser, slurm_parse]:
p.add_argument('-r', '--rerun', nargs='*', type=str, default=None, help='re-run specific cached functions by name')
p.add_argument('-f', '--force', action='store_true', help='force over-write any existing cached data')
p.add_argument('-d', '--delete', nargs='*', type=str, default=None, help='delete specified cached data')
p.add_argument('-e', '--exclude', nargs='+', type=str, default=[], help='exclude cached function from being re-run')
p.add_argument('--at-end', action='store_true', default=False, help="only run at_end functions")
p.add_argument('--no-at-end', action='store_true', default=False, help="don't run at_end functions")
p.add_argument('-p', '--processes', nargs='?', default=processes_default, type=int, help='number of processes to use in parallel execution (default: cpu_count)')
        p.add_argument('-ct', '--cache_time', type=float, default=300, help='time (in seconds) until cached data is flushed to file')
p.add_argument('--no-deps', action='store_true', default=False, help='do not rerun functions that depend on other reran functions')
p.add_argument('--mininterval', type=float, default=mininterval, help='time (in seconds) for progress bar mininterval argument')
p.add_argument('--notify', action='store_true', default=False, help='send notifications without delay')
p.add_argument('--notify-message', type=str, default=None, help='send a custom message with other notifications')
p.add_argument('--notify-delay', type=float, default=notifications_default_delay, help='time (in seconds) before notifications will be sent')
p.add_argument('--theme', default='normal', type=str, help='matplotlib plot theme')
p.add_argument('--figures', nargs='+', type=int, help='which figure numbers to display')
p.add_argument('--save', nargs='?', default='', type=str, help='save figures and animations')
p.add_argument('--save-format', nargs='+', default=['png'], type=str, help='file format for figures')
p.add_argument('--save-figs', nargs='?', default='', type=str, help='save figures')
p.add_argument('--save-anims', nargs='?', default='', type=str, help='save animations')
# p.add_argument('--theme', choices=['classic', 'dark'], default='classic', help='matplotlib plot theme')
parser.add_argument('--debug', action='store_true', default=False, help='run in debug mode (single process)')
slurm_parse.add_argument('-t', '--time', type=str, default='36', help='maximum run-time for the Slurm job, formated as {hours}:{minutes}:{seconds} (minutes and seconds optional)')
slurm_parse.add_argument('-m', '--memory', type=float, default=2, help='maximum memory per cpu for the Slurm job in GB')
slurm_parse.add_argument('--batch', action='store_true', help='submit the Slurm job in batches')
slurm_parse.add_argument('--no-submit', action='store_true', help="don't submit the Slurm job after creating sbatch files")
return parser.parse_args()
```
#### File: numpipe/numpipe/utility.py
```python
from functools import wraps
from multiprocessing import Value
import sys
import traceback
import numpy as np
class once(dict):
"""identical to dict; used to yield something only once"""
pass
class Bunch:
"""convert a dictionary into a class with data members equal to the dictionary keys"""
def __init__(self, adict):
self.__dict__.update(adict)
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, value):
self.__dict__[key] = value
def doublewrap(func):
"""
a decorator decorator, can be used as @decorator(...) or @decorator
"""
@wraps(func)
def new_func(*args, **kwargs):
if len(args) == 2 and len(kwargs) == 0 and callable(args[1]):
return func(*args)
else:
return lambda f: func(args[0], f, *args[1:], **kwargs)
return new_func
def doublewrap_1(func):
"""
a decorator decorator, can be used as @decorator(...) or @decorator
"""
@wraps(func)
def new_func(*args, **kwargs):
        if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return func(*args)
else:
return lambda f: func(args[0], f, *args[1:], **kwargs)
return new_func
def yield_traceback(func):
    """decorator to properly yield the traceback of a function in a parallel environment"""
    @wraps(func)
    def new_func(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except:
            raise Exception("".join(traceback.format_exception(*sys.exc_info())))
    return new_func
current_iteration = {}   # shared iteration counters, keyed by task name

class first_argument:
    def __init__(self, name, num_iterations=None):
        self.name = name
        self.num_iterations = num_iterations
        if num_iterations is not None:
            current_iteration[name] = Value('i', 0)
def iterations(self):
def gen():
for i in range(self.num_iterations):
yield current_iteration[self.name].value
current_iteration[self.name].value += 1
return gen()
def flatten_along(arr, axis=None):
    """flatten an array along a given axis / axes"""
    if axis is None:
        return arr.flatten()
    if np.isscalar(axis):
        axis = (axis,)
    axis = sorted(axis)
    for i, ax in enumerate(axis):
        arr = np.moveaxis(arr, ax, i)
    shape = arr.shape[len(axis):]
    arr = arr.reshape((-1,) + shape)
    return arr
``` |
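A quick, hedged illustration of `flatten_along`: the named axes are moved to the front and merged into a single leading axis (note the scalar-axis normalization added above).
```python
# Hedged sketch of flatten_along semantics; the import assumes the module layout shown above.
import numpy as np
from numpipe.utility import flatten_along

arr = np.arange(2*3*4).reshape(2, 3, 4)

print(flatten_along(arr).shape)               # (24,)     fully flattened
print(flatten_along(arr, axis=1).shape)       # (3, 2, 4) axis 1 brought to the front
print(flatten_along(arr, axis=(0, 1)).shape)  # (6, 4)    axes 0 and 1 merged into one leading axis
```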
{
"source": "johnaparker/Qpost",
"score": 3
} |
#### File: Qpost/qpost/materials.py
```python
import h5py
import numpy as np
import qpost.vec as vec
class simple_material:
def __init__(self, eps, mu, conduc, material_type = "simple", name = None):
self.eps = eps
self.mu = mu
self.conduc = conduc
def perimitivitty(self, freq):
"""Return the complex permitvitty at freq"""
omega = 2*np.pi*freq
return self.eps + 1j*self.conduc/omega
class debye:
def __init__(self, eps_inf, delta_epsilon, tau, material_type = "debye", name = None):
self.eps_inf = eps_inf
self.delta_epsilon = delta_epsilon
self.tau = tau
self.material_type = material_type
self.name = name
def perimitivitty(self, freq):
"""Return the complex permitvitty at freq"""
omega = 2*np.pi*freq
return self.eps_inf + np.sum(self.delta_epsilon[:,np.newaxis]/(1 - 1j*omega*self.tau[:,np.newaxis]), axis=0)
class drude:
def __init__(self, eps_inf, omega_0, gamma, material_type = "drude", name = None):
self.eps_inf = eps_inf
self.omega_0 = omega_0
self.gamma = gamma
self.material_type = material_type
self.name = name
def perimitivitty(self, freq):
"""Return the complex permitvitty at freq"""
omega = 2*np.pi*freq
return self.eps_inf - np.sum(self.omega_0[:,np.newaxis]**2/(omega**2 + 1j*omega*self.gamma[:,np.newaxis]), axis=0)
class lorentz:
def __init__(self, eps_inf, delta_epsilon, omega_0, gamma, material_type = "lorentz", name = None):
self.eps_inf = eps_inf
self.delta_epsilon = delta_epsilon
self.omega_0 = omega_0
self.gamma = gamma
self.material_type = material_type
self.name = name
def perimitivitty(self, freq):
"""Return the complex permitvitty at freq"""
omega = 2*np.pi*freq
return self.eps_inf - np.sum(self.delta_epsilon[:,np.newaxis]*self.omega_0[:,np.newaxis]**2/(self.omega_0[:,np.newaxis]**2 - omega**2 - 2j*omega*self.gamma[:,np.newaxis]), axis=0)
def load_material(filename, material_name):
"""Load a material from a file of name material_name"""
kwargs = {}
path = "materials/{0}".format(material_name)
with h5py.File(filename, 'r') as f:
g = f[path]
for item in g:
kwargs[item] = g[item][...]
mat_type = kwargs["material_type"].tolist().decode()
mat_map = {"simple": simple_material,
"lorentz": lorentz,
"drude": drude,
"debye": debye }
return mat_map[mat_type](**kwargs)
def load_all_materials(filename):
"""Load all materials in file. Returns a dictionary"""
materials = {}
with h5py.File(filename, 'r') as f:
g = f["materials"]
for material_name in g:
materials[material_name] = load_material(filename, material_name)
return materials
```
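A hedged sketch of evaluating a dispersive model directly; the parameter values are arbitrary illustration rather than a fitted material, and the method name is spelled `perimitivitty` exactly as defined in this module.
```python
# Hedged sketch: evaluate a single-pole Drude permittivity over a frequency range.
# The numbers below are placeholders, not real material data.
import numpy as np
from qpost.materials import drude   # assumed import path

material = drude(eps_inf=1.0,
                 omega_0=np.array([1.0]),    # plasma frequency (simulation units)
                 gamma=np.array([0.05]))     # damping rate

freq = np.linspace(0.05, 0.5, 200)
eps = material.perimitivitty(freq)           # method name as defined above
print(eps.dtype, eps.shape)                  # complex128 (200,)
```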
#### File: Qpost/qpost/objects.py
```python
import h5py
import numpy as np
import qpost
class object:
def __init__(self, filename, group_name, name):
self.filename = filename
self.group_name = group_name
self.name = name
self.path = "/objects/{0}/{1}".format(group_name, name)
self.position = qpost.vec.load_vec(filename, self.path, "position")
self.theta = qpost.vec.load_scalar(filename, self.path, "theta")
with h5py.File(filename, 'r') as f:
ref = f[self.path + "/material"][...].tolist()
material_path = f[ref].name
material_name = material_path[material_path.rfind("/")+1:]
self.material = qpost.materials.load_material(filename, material_name)
class cylinder(object):
def __init__(self, filename, name):
super().__init__(filename, "cylinders", name)
with h5py.File(filename, 'r') as f:
self.radius = f[self.path]["radius"][...]
class ellipse(object):
def __init__(self, filename, name):
super().__init__(filename, "ellipses", name)
with h5py.File(filename, 'r') as f:
self.rx = f[self.path]["rx"][...]
self.ry = f[self.path]["ry"][...]
class block(object):
def __init__(self, filename, name):
super().__init__(filename, "blocks", name)
with h5py.File(filename, 'r') as f:
self.dimensions = f[self.path]["dimensions"][...]
```
#### File: Qpost/qpost/sources.py
```python
import numpy as np
import h5py
import qpost.vec as vec
class source:
def __init__(self, filename, group_name, source_name):
self.filename = filename
self.group_name = group_name
self.path = "/sources/{0}/{1}".format(group_name, source_name)
class point_source(source):
def __init__(self, filename, name):
super().__init__(filename, "point", name)
self.position = vec.load_vec(filename, self.path, "position")
class line_source(source):
def __init__(self, filename, name):
super().__init__(filename, "line", name)
self.surface = vec.load_surface(filename, self.path)
class tfsf:
def __init__(self, filename):
self.path = "/sources/tfsf"
self.volume = vec.load_volume(filename, self.path)
with h5py.File(filename, 'r') as f:
g = f[self.path]
try:
self.frequency = g["dft_frequency"][...]
self.flux = g["flux"][...]
except KeyError:
self.frequency = None
self.flux = None
```
#### File: Qpost/qpost/vec.py
```python
import h5py
import numpy as np
def load_scalar(filename, path, scalar_name):
with h5py.File(filename, 'r') as f:
return f[path][scalar_name][...]
def load_vec(filename, path, vec_name):
with h5py.File(filename, 'r') as f:
return f[path][vec_name][...]
def load_surface(filename, path):
D = {}
with h5py.File(filename, 'r') as f:
D["p1"] = f[path]["p1"][...]
D["p2"] = f[path]["p2"][...]
return D
def load_volume(filename, path):
return load_surface(filename, path)
def load_cylinder_surface(filename, path):
D = {}
with h5py.File(filename, 'r') as f:
D["center"] = f[path]["center"][...]
D["radius"] = f[path]["radius"][...]
return D
def load_grid(filename):
D = {}
with h5py.File(filename, 'r') as f:
g = f['grid']
for dset_name in g:
dset = g[dset_name]
D[dset_name] = dset[...]
return D
```
#### File: qpost/viz/material.py
```python
import matplotlib.pyplot as plt
import numpy as np
import qpost
def plot_materials(h5file):
materials = qpost.materials.load_all_materials(h5file)
f_min, f_max = qpost.monitors.get_freq_range(h5file)
freq = np.linspace(f_min, f_max, 1000)
for name,material in materials.items():
eps = material.perimitivitty(freq)
plt.figure()
plt.plot(freq, eps.real, label = r'Re($\varepsilon_r$)')
plt.plot(freq, eps.imag, label = r'Im($\varepsilon_r$)')
plt.title(name)
plt.xlabel("frequency")
plt.ylabel("permitvitty")
plt.legend()
plt.show()
``` |
{
"source": "johnaparker/stoked",
"score": 2
} |
#### File: johnaparker/stoked/setup.py
```python
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
NAME = 'stoked'
DESCRIPTION = "Simulation and visualization of Stokesian dynamics for N interacting particles"
URL = ''
EMAIL = '<EMAIL>'
AUTHOR = '<NAME>'
KEYWORDS = 'stokesian dynamics brownian'
# REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.3.5'
LICENSE = 'MIT'
REQUIRED = [
'numpy',
'scipy',
'matplotlib',
'tqdm',
'numpy_quaternion',
]
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description=DESCRIPTION,
license=LICENSE,
keywords=KEYWORDS,
url=URL,
packages=find_packages(),
long_description=read('README.md'),
long_description_content_type='text/markdown',
install_requires=REQUIRED,
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Development Status :: 3 - Alpha',
'Topic :: Scientific/Engineering :: Physics',
'Intended Audience :: Science/Research',
],
zip_safe=False,
)
```
#### File: stoked/stoked/collisions.py
```python
import numpy as np
from stoked import interactions
from stoked.forces import pairwise_central_force
def collisions_sphere(radius, kn):
def F(r):
nonlocal radius
if np.isscalar(radius):
Nparticles = len(r)
radius = np.full(Nparticles, radius, dtype=float)
T1 = np.add.outer(radius, radius)
overlap = T1 - r
overlap[overlap<0] = 0
return kn*overlap**1.5
return pairwise_central_force(F)
class collisions_sphere_interface(interactions):
"""
Collision between spheres and a planar surface using the force model F = kn*overlap^1.5
"""
def __init__(self, radii, kn, zpos=0):
"""
Arguments:
radii particle radii
kn force constant
zpos z-position of the plane (default: 0)
"""
self.radii = radii
self.kn = kn
self.zpos = zpos
def force(self):
Nparticles = len(self.position)
if np.isscalar(self.radii):
rad = np.full(Nparticles, self.radii, dtype=float)
else:
rad = np.asarray(self.radii, dtype=float)
F = np.zeros_like(self.position)
dz = self.position[:,2] - self.zpos
overlap = dz - rad
idx = overlap < 0
F[idx,2] = np.sign(dz[idx])*self.kn*np.sqrt(-overlap[idx])**3
return F
def torque(self):
return np.zeros_like(self.position)
```
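Both collision classes use the same Hertzian-style contact law, F = kn · overlap^1.5, applied only when the surfaces overlap. A self-contained numpy sketch of that force law (independent of the solver machinery):
```python
# Hedged sketch of the contact law used above: F = kn * overlap**1.5, zero when not touching.
import numpy as np

def contact_force_magnitude(center_distance, radius_a, radius_b, kn):
    """Hertzian-style normal force magnitude between two spheres."""
    overlap = (radius_a + radius_b) - center_distance
    return kn*np.clip(overlap, 0, None)**1.5

r = np.linspace(1.8e-6, 2.2e-6, 5)    # center-to-center distances for two 1 um spheres
print(contact_force_magnitude(r, 1e-6, 1e-6, kn=1e7))
```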
#### File: stoked/stoked/forces.py
```python
import numpy as np
from stoked import interactions
class pairwise_force(interactions):
"""
Pair-wise interaction force
"""
def __init__(self, force_func):
"""
Arguments:
force_func force function of the form F(r[dim]) -> [dim]
"""
self.force_func = force_func
def force(self):
Nparticles = len(self.position)
r_ij = self.position[:,np.newaxis] - self.position[np.newaxis] # N x N x 3
with np.errstate(divide='ignore'):
F_ij = self.force_func(r_ij) # N x N x 3
np.einsum('iix->ix', F_ij)[...] = 0
F_i = np.sum(F_ij, axis=1)
return F_i
def torque(self):
T_i = np.zeros_like(self.position)
return T_i
def __add__(self, other):
def new_func(r):
return self.force_func(r) + other.force_func(r)
return pairwise_force(new_func)
class pairwise_central_force(interactions):
def __init__(self, force_func):
"""
Arguments:
force_func force function of the form F(r[dim]) -> [dim]
"""
self.force_func = force_func
def force(self):
Nparticles = len(self.position)
r_ij = self.position[:,np.newaxis] - self.position[np.newaxis] # N x N x 3
r = np.linalg.norm(r_ij, axis=-1)
with np.errstate(divide='ignore'):
F = self.force_func(r)
r_inv = 1/r
F_ij = np.einsum('ij,ijx,ij->ijx', F, r_ij, r_inv)
np.einsum('iix->ix', F_ij)[...] = 0
F_i = np.sum(F_ij, axis=1)
return F_i
def torque(self):
T_i = np.zeros_like(self.position)
return T_i
def __add__(self, other):
def new_func(r):
return self.force_func(r) + other.force_func(r)
return pairwise_central_force(new_func)
def pairwise_potential(potential_func):
pass
# def F(rvec):
# eps = 1e-15*np.ones_like(rvec)
# U1 = potential_func(rvec)
# U2 = potential_func(rvec + eps)
# = -(U2 - U1)/eps
# return pairwise_force(F)
def pairwise_central_potential():
pass
```
#### File: stoked/stoked/hydrodynamics.py
```python
import numpy as np
class interface:
"""A no-slip interface"""
def __init__(self, z=0):
"""
Arguments:
z z-position of the interface
"""
self.z = z
def levi_civita():
"""return the levi-civita symbol"""
eijk = np.zeros((3, 3, 3), dtype=float)
eijk[0, 1, 2] = eijk[1, 2, 0] = eijk[2, 0, 1] = 1
eijk[0, 2, 1] = eijk[2, 1, 0] = eijk[1, 0, 2] = -1
return eijk
def particle_wall_self_mobility(position, interface, viscosity, radius):
"""
Construct the particle wall self-mobility matrix for a single particle
Arguments:
position[3] position of particle
interface interface object
viscosity dynamic viscosity µ of surrounding fluid
radius particle radius
"""
M = np.zeros([2, 2, 3, 3], dtype=float)
h = (position[2] - interface.z)/radius
gamma_T = 6*np.pi*viscosity*radius
gamma_R = 6*np.pi*viscosity*radius**3
a = 1/(16*gamma_T)*(9/h - 2/h**3 + 1/h**5)
b = 1/(8*gamma_T)*(9/h - 4/h**3 + 1/h**5)
M[0,0] = np.diag([a,a,b])
a = 15/(64*gamma_R)*(1/h**3)
b = 3/(32*gamma_R)*(1/h**3)
M[1,1] = np.diag([a,a,b])
return M
def grand_mobility_matrix(position, drag_T, drag_R, viscosity):
"""
Construct the grand mobility matrix for a given cluster
Arguments:
position[N,3] position of N particles
drag_T[N,3,3] 3 by 3 translational drag tensors of N particles
drag_R[N,3,3] 3 by 3 rotational drag tensors of N particles
viscosity dynamic viscosity µ of surrounding fluid
"""
Nparticles = len(position)
M = np.zeros([2, 3*Nparticles, 2, 3*Nparticles], dtype=float)
### block-diagonal components
for i in range(Nparticles):
idx = np.s_[0,3*i:3*i+3,0,3*i:3*i+3]
M[idx] = drag_T[i]
idx = np.s_[1,3*i:3*i+3,1,3*i:3*i+3]
M[idx] = drag_R[i]
### Off block-diagonal components
factor = 1/(8*np.pi*viscosity)
eps = levi_civita()
for i in range(Nparticles):
for j in range(i+1, Nparticles):
r_ijx = position[i] - position[j]
r_ij = np.linalg.norm(r_ijx)
I = np.identity(3, dtype=float)
T = np.outer(r_ijx, r_ijx)/r_ij**2
K = np.einsum('ijk,k->ij', eps, r_ijx)/r_ij
### TT coupling
idx = np.s_[0,3*i:3*i+3,0,3*j:3*j+3]
M[idx] = factor/r_ij*(I + T)
idx2 = np.s_[0,3*j:3*j+3,0,3*i:3*i+3]
M[idx2] = M[idx]
### RR coupling
idx = np.s_[1,3*i:3*i+3,1,3*j:3*j+3]
M[idx] = factor/(2*r_ij**3)*(3*T - I)
idx2 = np.s_[1,3*j:3*j+3,1,3*i:3*i+3]
M[idx2] = M[idx]
### RT coupling
idx = np.s_[1,3*i:3*i+3,0,3*j:3*j+3]
M[idx] = -factor/r_ij**2*(K)
idx2 = np.s_[1,3*j:3*j+3,0,3*i:3*i+3]
M[idx2] = -M[idx]
### TR coupling
idx3 = np.s_[0,3*i:3*i+3,1,3*j:3*j+3]
M[idx3] = -M[idx]
idx4 = np.s_[0,3*j:3*j+3,1,3*i:3*i+3]
M[idx4] = -M[idx2]
return M.reshape([6*Nparticles, 6*Nparticles])
```
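A hedged sketch of assembling the grand mobility matrix for two particles. Identity 3×3 tensors are used purely to exercise the bookkeeping; whether `drag_T`/`drag_R` should be drag or mobility (inverse drag) tensors is a convention of the calling code and is not asserted here.
```python
# Hedged sketch: build the grand mobility matrix for two particles with identity tensors.
import numpy as np
from stoked.hydrodynamics import grand_mobility_matrix   # assumed import path

position = np.array([[0.0, 0.0, 0.0],
                     [2.0e-6, 0.0, 0.0]])
drag_T = np.array([np.eye(3), np.eye(3)])   # placeholder 3x3 tensors per particle
drag_R = np.array([np.eye(3), np.eye(3)])

M = grand_mobility_matrix(position, drag_T, drag_R, viscosity=8.9e-4)
print(M.shape)   # (12, 12): translational block followed by rotational block for 2 particles
```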
#### File: stoked/stoked/inertia.py
```python
import numpy as np
from abc import ABCMeta, abstractmethod
class inertia(metaclass=ABCMeta):
    """
    Abstract base class for particle inertia (mass and moment of inertia)
    """
def __init__(self, density, isotropic=False):
"""
Arguments:
density mass density of the object
isotropic (bool) True if the moment of inertia is isotropic
"""
self.density = density
self.isotropic = isotropic
@abstractmethod
def _mass(self):
raise NotImplementedError('mass has not been implemented for this type of particle')
@abstractmethod
def _moment(self):
raise NotImplementedError('moment has not been implemented for this type of particle')
@property
def mass(self):
"""particle mass"""
return self._mass()
@property
def moment(self):
"""particle moment of inertia"""
return self._moment()
class inertia_sphere(inertia):
"""
Inertia values for a sphere
"""
def __init__(self, radius, density):
"""
Arguments:
            radius       sphere radius (scalar or array of radii)
density mass density of the object
"""
super().__init__(density, isotropic=True)
self.radius = np.asarray(radius, dtype=float)
def _mass(self):
return 4/3*np.pi*self.radius**3*self.density
def _moment(self):
M = self.mass
return 2/5*M*self.radius**2
class inertia_ellipsoid(inertia):
"""
Inertia values for an ellipsoid
"""
def __init__(self, radii, density):
"""
Arguments:
radii[N,3] ellipsoid radii
density mass density of the object
"""
        super().__init__(density, isotropic=False)
self.radii = np.atleast_2d(np.asarray(radii, dtype=float))
def _mass(self):
V = 4/3*np.pi*np.product(self.radii, axis=1)
return V*self.density
def _moment(self):
M = self.mass
Ix = 1/5*M*(self.radii[:,1]**2 + self.radii[:,2]**2)
Iy = 1/5*M*(self.radii[:,0]**2 + self.radii[:,2]**2)
Iz = 1/5*M*(self.radii[:,0]**2 + self.radii[:,1]**2)
return np.array([Ix, Iy, Iz]).T
```
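A hedged check of `inertia_sphere` against the textbook formulas m = (4/3)πρa³ and I = (2/5)ma²; the import path assumes the package layout shown above.
```python
# Hedged sketch: verify inertia_sphere against the analytic sphere formulas.
import numpy as np
from stoked.inertia import inertia_sphere   # assumed import path

radius = 1e-6          # 1 micron sphere
density = 1050.0       # kg/m^3, roughly polystyrene

sphere = inertia_sphere(radius, density)
mass_expected = 4/3*np.pi*radius**3*density
moment_expected = 2/5*mass_expected*radius**2

assert np.isclose(sphere.mass, mass_expected)
assert np.isclose(sphere.moment, moment_expected)
```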
#### File: stoked/integrators/predictor_corrector_integrator.py
```python
from . import integrator
import numpy as np
class predictor_corrector_integrator(integrator):
def __init__(self, grand_mobility_interval=10):
super().__init__(grand_mobility_interval)
def bd_step(self):
r0 = np.copy(self.solver.position)
o0 = np.copy(self.solver.orientation)
F = self.solve_forces()
Fr = self.random_force()
F += Fr
v1 = self.alpha_T*F
self.solver.velocity = v1
self.solver.position += self.dt*v1
if self.solver.rotating:
T = self.solve_torques()
w1 = self.alpha_R*T
w1_q = np.array([np.quaternion(*omega) for omega in w1])
w0 = np.copy(self.solver.angular_velocity)
o0 = np.copy(self.solver.orientation)
self.solver.angular_velocity = w1
self.solver.orientation = (1 + w1_q*self.dt/2)*self.solver.orientation
self.solver.time += self.dt
self.perform_constraints()
self.pre_step()
F = self.solve_forces()
F += Fr
v2 = self.alpha_T*F
self.solver.velocity = (v1 + v2)/2
self.solver.position = r0 + self.dt*self.solver.velocity
if self.solver.rotating:
T = self.solve_torques()
w2 = self.alpha_R*T
            w2_q = np.array([np.quaternion(*omega) for omega in w2])
            w_q = (w1_q + w2_q)/2
            self.solver.angular_velocity = (w1 + w2)/2
            self.solver.orientation = (1 + w_q*self.dt/2)*o0
self.solver.time -= self.dt
```
#### File: stoked/stoked/utility.py
```python
import numpy as np
import quaternion
from scipy.integrate import cumtrapz
def quaternion_to_angles(quat, reference=None):
"""
Convert a quaternion array to an angle representation
Arguments:
quat [T,...] quaternion trajectory of T time-steps
reference reference frame (as quaternion) around which to compute angles (default: z-axis)
"""
if reference is not None:
quat = np.invert(reference)*quat
    ### calculate the differential angle at each time-step, and cumtrapz to obtain the angle
quat_s = np.roll(quat, 1, axis=0)
Q = quat*np.invert(quat_s)
axis_angle = quaternion.as_rotation_vector(Q)
d_angle = axis_angle[...,2]
d_angle[0] = 0 # first entry is unphysical, so set to 0
### obtain the initial angles; multiply phi by 2 if theta = 0 for proper conversion
theta, phi = np.moveaxis(quaternion.as_spherical_coords(quat[0]), -1, 0)
idx = (theta == 0)
phi[idx] *= 2
angle = phi + cumtrapz(d_angle, axis=0, initial=0)
return angle
```
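A hedged sketch of `quaternion_to_angles`: a trajectory of pure z-rotations for a single particle should come back as the unwrapped rotation angle, up to the small trapezoid-rule error of the integration. The (T, 1) trajectory shape and the import path are assumptions.
```python
# Hedged sketch: recover an unwrapped z-rotation angle from a quaternion trajectory.
import numpy as np
import quaternion                                  # numpy-quaternion package
from stoked.utility import quaternion_to_angles    # assumed import path

angles_in = np.linspace(0, 4*np.pi, 2001)          # two full turns about z
rotvec = np.zeros((angles_in.size, 1, 3))          # one particle
rotvec[:, 0, 2] = angles_in
quats = quaternion.from_rotation_vector(rotvec)    # shape (T, 1)

angles_out = quaternion_to_angles(quats)[:, 0]
print(np.max(np.abs(angles_out - angles_in)))      # small trapezoid-rule error
```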
#### File: stoked/vis/animate_3d.py
```python
import numpy as np
import matplotlib as mpl
from itertools import cycle
import quaternion
import stoked
from collections.abc import Iterable
import matplotlib.colors as mcolors
from stoked.vis._internal import patches
from multiprocessing import Process
def sphere_patches(radius):
import vpython
return patches(vpython.sphere, dict(radius=radius))
def ellipsoid_patches(rx, ry, rz):
import vpython
return patches(vpython.ellipsoid, dict(length=2*rz, height=2*ry, width=2*rx, axis=vpython.vec(0,0,1)))
def trajectory_animation_3d(trajectory, patches, wait=False, repeat=True, colors=None, opacity=1, trail=None, axes=False, axes_length=1, grid=False):
p = Process(target=_trajectory_animation_3d, kwargs=dict(
trajectory=trajectory,
patches=patches,
wait=wait,
repeat=repeat,
colors=colors,
opacity=opacity,
trail=trail,
axes=axes,
axes_length=axes_length,
grid=grid
))
p.start()
def _trajectory_animation_3d(trajectory, patches, wait=False, repeat=True, colors=None, opacity=1, trail=None, axes=False, axes_length=1, grid=False):
"""Create a 3D trajectory animation with VPython
Arguments:
trajectory trajectory data for T steps, N particles
patches patches object to represent particle geometry
colors list of colors to cycle through
trail length of particle trail (default: no trail)
axes include x,y,z axes for each particle (default: False)
axes_length length of axes, if set (default: 1)
"""
import vpython
vec = vpython.vector
if not isinstance(trajectory, stoked.trajectory):
trajectory = stoked.trajectory(trajectory)
coordinates = trajectory.position
Nsteps = coordinates.shape[0]
Nparticles = coordinates.shape[1]
if trajectory.orientation is not None:
orientations = trajectory.orientation
else:
orientations = np.full((Nsteps,Nparticles), quaternion.one)
if not isinstance(patches.patches_type, Iterable):
patches_type = [patches.patches_type]*Nparticles
else:
patches_type = patches.patches_type
patches_args = [dict() for i in range(Nparticles)]
for key, value in patches.args.items():
if not isinstance(value, Iterable):
for i in range(Nparticles):
patches_args[i][key] = value
else:
for i in range(Nparticles):
patches_args[i][key] = value[i]
if colors is None:
colors = mcolors.TABLEAU_COLORS
elif isinstance(colors, str):
colors = [colors]
elif not isinstance(colors, Iterable):
colors = [colors]
color_cycle = cycle(colors)
make_trail = False if trail is None else True
scene = vpython.canvas(background=vec(1,1,1))
objs = []
arrows = []
trails = []
for i in range(Nparticles):
color = vec(*mpl.colors.to_rgb(next(color_cycle)))
pos = vec(*coordinates[0,i])
particle = patches_type[i](pos=pos, color=color,
opacity=opacity, **patches_args[i])
objs.append(particle)
if make_trail:
def my_center(num):
def center():
return objs[num].pos + objs[num].axis/2
return center
trails.append(vpython.attach_trail(my_center(i), color=color, retain=trail))
if axes:
arrow_x = vpython.arrow(pos=pos, axis=vec(1,0,0), scale=axes_length, color=vec(0,0,0),
shaftwidth=5)
arrow_y = vpython.arrow(pos=pos, axis=vec(0,1,0), scale=axes_length, color=vec(0,0,0),
shaftwidth=5)
arrow_z = vpython.arrow(pos=pos, axis=vec(0,0,1), scale=axes_length, color=vpython.color.red,
shaftwidth=5)
arrows.append([arrow_x, arrow_y, arrow_z])
for i,obj in enumerate(objs):
rot = quaternion.as_rotation_matrix(orientations[0,i])
a = obj.axis
print(a)
print(rot[:,2])
b = vec(*rot[:,2])
a /= vpython.mag(a)
b /= vpython.mag(b)
axis = vpython.cross(a,b)
angle = vpython.acos(vpython.dot(a,b))
obj.rotate(angle=angle, axis=axis, origin=obj.pos)
# obj.pos -= obj.axis/2
if axes:
for j,arrow in enumerate(arrows[i]):
arrow.pos = vec(*coordinates[0,i])
arrow.axis = vec(*rot[:,j])*arrow.scale
if grid:
vpython.arrow(pos=vpython.vector(0,0,0), axis=vpython.vector(300,0,0), shaftwidth=2, color=vpython.color.black)
vpython.arrow(pos=vpython.vector(0,0,0), axis=vpython.vector(0,300,0), shaftwidth=2, color=vpython.color.black)
vpython.arrow(pos=vpython.vector(0,0,0), axis=vpython.vector(0,0,300), shaftwidth=2, color=vpython.color.black)
trange = range(coordinates.shape[0])
if repeat:
trange = cycle(trange)
for t in trange:
if t == 0:
if wait:
scene.waitfor('click')
for trail in trails:
trail.clear()
for i,obj in enumerate(objs):
rot = quaternion.as_rotation_matrix(orientations[t,i])
a = obj.axis
b = vec(*rot[:,2])
a /= vpython.mag(a)
b /= vpython.mag(b)
axis = vpython.cross(a,b)
angle = vpython.acos(vpython.dot(a,b))
obj.rotate(angle=angle, axis=axis, origin=obj.pos)
if patches_type[i] in (vpython.cylinder, vpython.arrow, vpython.cone, vpython.pyramid):
obj.pos = vec(*coordinates[t,i]) - obj.axis/2
else:
obj.pos = vec(*coordinates[t,i])
if axes:
for j,arrow in enumerate(arrows[i]):
arrow.pos = vec(*coordinates[t,i])
arrow.axis = vec(*rot[:,j])*arrow.scale
vpython.rate(30)
if __name__ == '__main__':
coordinates = np.zeros([100, 2, 3])
coordinates[:,0,0] = np.linspace(10, 100, 100)
coordinates[:,1,0] = -np.linspace(10, 100, 100)
patches = sphere_patches([5,10])
trajectory_animation_3d(coordinates, patches, opacity=.5, colors='C0', trail=100, wait=True)
```
#### File: stoked/tests/harmonic_potential.py
```python
from stoked import brownian_dynamics, trajectory_animation, drag_sphere
from functools import partial
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
from scipy.constants import k as kb
no_force = None
def harmonic_force(t, rvec, orientation, k=1):
return -k*rvec
position = [0,0,0]
drag = drag_sphere(1/(6*np.pi), 1)
temperature = .05
dt = 10
Nsteps = 10000
fig, ax = plt.subplots()
ax.set_aspect('equal')
for Fext in [no_force, harmonic_force]:
history = np.zeros([Nsteps,3], dtype=float)
if Fext is no_force:
sim = brownian_dynamics(position=position, drag=drag, temperature=temperature, dt=dt, force=Fext)
else:
sim = brownian_dynamics(position=position, drag=drag, temperature=temperature, dt=dt, force=partial(Fext, k=0.01))
for i in tqdm(range(Nsteps)):
history[i] = sim.position.squeeze()
sim.step()
ax.plot(history[:,0], history[:,1], lw=.5)
ax.legend()
if Fext is harmonic_force:
fig, ax = plt.subplots()
ax.hexbin(history[:,0], history[:,1])
ax.set_aspect('equal')
fig, ax = plt.subplots()
rad = np.linalg.norm(history, axis=1)
hist, edges = np.histogram(rad, bins=100)
rad = edges[1:]
counts = hist/(4*np.pi*rad**2)
E = -kb*temperature*np.log(counts)
E -= E[6]
ax.plot(rad, E)
ax.plot(rad, 0.5*.01*rad**2, 'o', ms=3)
fig, ax = plt.subplots()
anim = trajectory_animation(history.reshape([Nsteps,1,3]), radii=1e-11, projection='z', interval=30)
plt.show()
```
#### File: stoked/tests/uniform_force.py
```python
import stoked
from stoked import brownian_dynamics, drag_sphere
from functools import partial
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
def uniform_force(t, rvec, orient, Fx):
F = np.zeros_like(rvec)
F[:,0] = Fx
return F
def test_uniform_motion():
position = [[0,0,0]]
drag = drag_sphere(100e-9, 1)
temperature = 300
dt = 1e-6
Fx = 10e-12
Nsteps = 50000
history = np.zeros([Nsteps,3], dtype=float)
velocity = np.zeros([Nsteps,3], dtype=float)
sim = brownian_dynamics(position=position,
drag=drag,
temperature=temperature,
dt=dt,
force=partial(uniform_force, Fx=Fx),
inertia=stoked.inertia_sphere(100e-9, 10490))
for i in tqdm(range(Nsteps)):
history[i] = sim.position.squeeze()
velocity[i] = sim.velocity.squeeze()
sim.step()
fig, ax = plt.subplots()
ax.set_aspect('equal')
ax.plot(history[:,0], history[:,1], lw=.5)
fig, ax = plt.subplots()
ax.hist(velocity[:,0], bins=100)
v_drift_theory = Fx/(drag.drag_T)
v_drift_sim = np.average(velocity[:,0])
print(v_drift_theory, v_drift_sim)
plt.show()
test_uniform_motion()
``` |
{
"source": "johnapo/Python-Scripts",
"score": 4
} |
#### File: johnapo/Python-Scripts/JApo_MemoryUtilization.py
```python
import random
import copy
import matplotlib.pyplot as plt
"""
printMemory prints the contents of the physical memory
"""
def printMemory():
ctr = 0
block = 1
# print(memory) # Debugging
while(ctr < len(memory)):
if(memory[ctr] > 0):
for i in range(memory[ctr]):
print(block)
block += 1
else:
for i in range(abs(memory[ctr])):
print("-")
ctr = ctr + abs(memory[ctr])
return
"""
firstFit runs the First-Fit memory allocation algorithm
"""
def firstFit(requestSize):
full = False
allocated = False
ctrSearches = 0
index = 0
# print("Start of First-Fit, size is ", requestSize) # Debugging
while(index < len(memory) and not allocated):
ctrSearches += 1
if(memory[index] < 0 and abs(memory[index]) >= requestSize): # Allocate memory
allocated = insSpecificLoc(index, requestSize)
else:
index += abs(memory[index]) # Keep iterating through array
if(not allocated):
full = True # stop inserting memory because its full
ffSearches[requestSize - minR] += ctrSearches
# print("End of First-Fit") # Debugging
# print(memory)
return full
"""
bestFit runs the Best-Fit memory allocation algorithm
"""
def bestFit(requestSize):
allocated = False
full = False
bestHole = len(memory)
ctrSearches = 0
insIndex = -1
index = 0
# print("Start of Best-Fit") # Debugging
while(index < len(memory)): # Search entire length of memory for best hole to allocate memory
ctrSearches += 1
if(memory[index] < 0): # Find empty holes
if(abs(memory[index]) >= requestSize): # Find empty holes with big enough size
if(abs(memory[index]) < bestHole): # Compare to current best hole size
bestHole = abs(memory[index])
insIndex = index
index += abs(memory[index]) # Keep iterating through array
if(insIndex > -1):
allocated = insSpecificLoc(insIndex, requestSize)
if(not allocated):
full = True # stop inserting memory because its full
bfSearches[requestSize - minR] += ctrSearches
# print("End of Best-Fit") # Debugging
return full
"""
worstFit runs the Worst-Fit memory allocation algorithm
"""
def worstFit(requestSize):
allocated = False
full = False
worstHole = 0
ctrSearches = 0
insIndex = -1
index = 0
# print("Start of Worst-Fit") # Debugging
while(index < len(memory)): # Search entire length of memory for best hole to allocate memory
ctrSearches += 1
if(memory[index] < 0): # Find empty holes
if(abs(memory[index]) >= requestSize): # Find empty holes with big enough size
if(abs(memory[index]) > worstHole): # Compare to current best hole size
worstHole = abs(memory[index])
insIndex = index
index += abs(memory[index]) # Keep iterating through array
if(insIndex > -1):
allocated = insSpecificLoc(insIndex, requestSize)
if(allocated == False):
full = True # stop inserting memory because its full
wfSearches[requestSize - minR] += ctrSearches
# print("End of Worst-Fit") # Debugging
return full
"""
checkAllocation returns the number of allocated spaces of memory
"""
def checkAllocation():
allocMem = 0
for i in range(len(memory)):
if(memory[i]>0):
allocMem = allocMem + memory[i]
return allocMem
"""
checkBlocks returns the number of blocks allocated
"""
def checkBlocks():
allocBlocks = 0
for i in range(len(memory)):
if(memory[i]>0):
allocBlocks += 1
return allocBlocks
"""
checkBlockInd takes a block number and finds its index in memory to be used in freeBlock()
"""
def checkBlockInd(blockNum):
blockCtr = 1
retInd = 0
for i in range(len(memory)):
if(memory[i] > 0 and blockCtr == blockNum):
retInd = i
else:
blockCtr += 1
return retInd
"""
insertRequest inserts initial requests into the block of memory
"""
def insertRequest(loc, size):
filled = False
index = 0
while(index < len(memory) and not filled):
if(memory[index] < 0 and (index <= loc) and (abs(memory[index]) + index >= loc + size)):
next = abs(memory[index]) + index
memory[index] = index - loc
memory[loc] = size
memory[loc + size] = (loc + size) - next
filled = True
else:
index += abs(memory[index])
return filled
"""
insSpecificLoc inserts requests from each memory allocation algorithm
"""
def insSpecificLoc(loc, size):
filled = False
holeSize = abs(memory[loc])
if(memory[loc] < 0 and abs(memory[loc]) > size): # Allocating memory into a space with extra memory
filled = True
memory[loc] = size # Allocate first part of space to hold data
memory[loc + size] = -(holeSize - size) # Reallocate rest of space to be empty space
elif(memory[loc] < 0 and abs(memory[loc]) == size): # Allocating memory into a space the exact same size
filled = True
memory[loc] = size
else:
# Do Nothing
filled = False
return filled
"""
freeBlock frees a block of allocated memory
"""
def freeBlock(blockIndex):
freed = False
prev = -1
cur = 0
index = 0
# Find the block
while(not freed and cur < len(memory)):
if(blockIndex == 1): # Special case
if(memory[cur] > 0): # No free memory before this allocated block
if(memory[memory[cur]] > 0): # No free memory after this block
memory[cur] = -memory[cur] # Set it to be free
else: # Free memory after block and coalesce
memory[cur] = -memory[cur] + memory[memory[cur]]
else: # Free memory prior to allocated block
prev = 0
cur = abs(memory[cur])
if((memory[cur] + cur < len(memory)) and memory[memory[cur]] > 0): # No free memory after this block
memory[prev] = memory[prev] - memory[cur] # Set it to be free
elif(memory[cur] + cur < len(memory)): # Free memory after block and coalesce
memory[prev] = memory[prev] - memory[cur] + memory[memory[cur]]
else:
memory[prev] = memory[prev] - memory[cur] # set it to be free
freed = True
else: # blockIndex != 1
while(index < blockIndex): # Search for the right set of blocks
prev = cur
if(memory[cur] > 0):
index += 1
else:
# Hole -- do nothing
if(index < blockIndex):
cur = cur + abs(memory[cur])
if(memory[prev] > 0): # No free memory before this allocated block
if((memory[cur] + cur < len(memory)) and memory[memory[cur]] > 0): # No free memory after this block
memory[cur] = -memory[cur] # Set it to be free
elif(memory[cur] + cur < len(memory)): # Free memory after block and coalesce
memory[cur] = -memory[cur] + memory[memory[cur]]
else:
memory[prev] = memory[prev] - memory[cur] # Set it to be free
else: # Free memory prior to allocated block
if((memory[cur] + cur < len(memory)) and memory[memory[cur]] > 0): # No free memory after this block
memory[prev] = memory[prev] - memory[cur] # Set it to be free
elif(memory[cur] + cur < len(memory)): # Free memory after block and coalesce
memory[prev] = memory[prev] - memory[cur] + memory[memory[cur]]
else:
memory[prev] = memory[prev] - memory[cur] # Set it to be free
freed = True
print("Block %s freed." % (blockIndex))
printMemory()
return freed
"""
plotPoints graphs the outcomes of 3 memory allocation algorithms: First-Fit, Best-Fit, and Worst-Fit
"""
def plotPoints(rSize):
# Plot results for Memory Utilization
fig, ax = plt.subplots()
ax.set(xlabel="d", ylabel="Memory Utilization",
title="Average Memory Utilization using Same Memory & Same Requests")
ax.plot(rSize, ffUtil, label="First-Fit")
ax.plot(rSize, bfUtil, label="Best-Fit")
ax.plot(rSize, wfUtil, label="Worst-Fit")
ax.legend()
fig.savefig("UtilizationGraph.png")
plt.show()
# Plot results for Search Times
fig, ax = plt.subplots()
ax.set(xlabel="d", ylabel="Search Times",
title="Average Search Times using Same Memory & Same Requests")
ax.plot(rSize, ffSearches, label="First-Fit")
ax.plot(rSize, bfSearches, label="Best-Fit")
ax.plot(rSize, wfSearches, label="Worst-Fit")
ax.legend()
fig.savefig("TimeGraph.png")
plt.show()
return
"""
Main is the primary method that does everything include starting the simulation
"""
def main():
global memory
# Keeps track of the memory utilization per request size of each memory allocation algorithm
global ffUtil
global bfUtil
global wfUtil
    # Instantiate vars
ffUtil = []
bfUtil = []
wfUtil = []
# Keeps track of the number of holes examined by each memory allocation algorithm
global ffSearches
global bfSearches
global wfSearches
    # Instantiate vars
ffSearches = []
bfSearches = []
wfSearches = []
# global firstFitBlocks = [] # Number of blocks allocated in first fit
# global bestFitBlocks = [] # Number of blocks allocated in best fit
# global worstFitBlocks = [] # Number of blocks allocated in worst fit
global minR
minR = 2 # Minimum d value, hard-coded for simulation
maxR = 10 # Maximum d value, hard-coded for simulation
n = 0 # input var
s = -1 # size var
while (n < 1):
n = int(input("Enter the physical memory size, n > 0: ")) # Manually entering physical memory size
memory = [0] * n
memory[0] = -n
printMemory()
d = int(input("Enter the average request size, d: ")) # Manually entering avg request siZe
v = int(input("Enter the request size standard deviation, v: ")) # Manually entering std dev
# Fill memory randomly to achieve ~50%
utilNum = float(0.0)
while(utilNum < 0.25):
while(s < 0 or s >= n):
s = int(random.gauss(d, v))
filled = False
while(not filled):
loc = random.randint(0 , n-1)
filled = insertRequest(loc, s)
utilNum += float(s) / len(memory)
printMemory()
origUtil = checkAllocation()
# Start of simulation
rSize = []
z = range(minR, maxR) # Testing different average request sizes; hard-coded for this simulation
origMem = copy.deepcopy(memory)
for i in z:
print("Size d is ", i)
rSize.append(i) # This is x-axis for graphing purposes
# Initialize this iteration's current utilization of memory
ffUtil.append(origUtil)
bfUtil.append(origUtil)
wfUtil.append(origUtil)
# Initialize this iteration's current search amount
ffSearches.append(0)
bfSearches.append(0)
wfSearches.append(0)
# Flag when request cannot be met
ffDone = False
bfDone = False
wfDone = False
memory = copy.deepcopy(origMem)
while (ffDone == False): # Loops through First-Fit simulation until insert request cannot be met
# randNum = random.gauss(i, v) # random sized request for more variability
# ffDone = firstFit(randNum)
ffDone = firstFit(i)
# Free random block
blocks = checkBlocks()
randFree = random.randint(1, blocks)
freeBlock(randFree)
ffUtil[i - minR] = checkAllocation()
memory = copy.deepcopy(origMem) # Reset memory to original values
while(bfDone == False): # Loops through Best-Fit simulation until insert request cannot be met
# randNum = random.gauss(i, v) # random sized request for more variability
# bfDone = bestFit(randNum)
bfDone = bestFit(i)
# Free random block
blocks = checkBlocks()
randFree = random.randint(1, blocks)
freeBlock(randFree)
bfUtil[i - minR] = checkAllocation()
memory = copy.deepcopy(origMem) # Reset memory to original values
while(wfDone == False): # Loops through Worst-Fit simulation until insert request cannot be met
# randNum = random.gauss(i, v) # random sized request for more variability
# wfDone = worstFit(randNum)
wfDone = worstFit(i)
# free random block
blocks = checkBlocks()
randFree = random.randint(1, blocks)
freeBlock(randFree)
wfUtil[i - minR] = checkAllocation()
# Change utilization number from spaces of allocated memory to ratio of allocated : total
ffUtil[i-minR] = ffUtil[i-minR] / len(memory)
bfUtil[i-minR] = bfUtil[i-minR] / len(memory)
wfUtil[i-minR] = wfUtil[i-minR] / len(memory)
plotPoints(rSize)
# Print out data for debugging and exporting purposes
print("First-Fit Utilization")
print(ffUtil)
print("Best-Fit Utilization")
print(bfUtil)
print("Worst-Fit Utilization")
print(wfUtil)
print("First-Fit Searches/Time")
print(ffSearches)
print("Best-Fit Searches/Time")
print(bfSearches)
print("Worst-Fit Searches/Time")
print(wfSearches)
return
"""
Required script to run program
"""
if __name__ == "__main__":
#execute only if run as a script
main()
``` |
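The allocators above all rely on one encoding: physical memory is a flat list in which the entry at the start of each region stores its size, positive for an allocated block and negative for a hole, so a scan can jump region to region in `abs(memory[i])` steps. A small standalone sketch of that convention (hypothetical values, independent of the simulation above):
```python
# 10-cell memory: block of 3 at index 0, hole of 4 at index 3, block of 3 at index 7
mem = [3, 0, 0, -4, 0, 0, 0, 3, 0, 0]

i = 0
while i < len(mem):
    kind = "block" if mem[i] > 0 else "hole"
    print(i, kind, abs(mem[i]))
    i += abs(mem[i])
# prints: 0 block 3, then 3 hole 4, then 7 block 3
```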
{
"source": "John-Appleseed/fix",
"score": 2
} |
#### File: tests/rules/test_brew_unknown_command.py
```python
import pytest
from thefuck.rules.brew_unknown_command import match, get_new_command
from thefuck.rules.brew_unknown_command import brew_commands
from tests.utils import Command
@pytest.fixture
def brew_unknown_cmd():
return '''Error: Unknown command: inst'''
@pytest.fixture
def brew_unknown_cmd2():
return '''Error: Unknown command: instaa'''
def test_match(brew_unknown_cmd):
assert match(Command('brew inst', stderr=brew_unknown_cmd), None)
for command in brew_commands:
assert not match(Command('brew ' + command), None)
def test_get_new_command(brew_unknown_cmd, brew_unknown_cmd2):
assert get_new_command(Command('brew inst', stderr=brew_unknown_cmd),
None) == 'brew list'
assert get_new_command(Command('brew instaa', stderr=brew_unknown_cmd2),
None) == 'brew install'
```
#### File: tests/rules/test_ssh_known_host.py
```python
import os
import pytest
from mock import Mock
from thefuck.rules.ssh_known_hosts import match, get_new_command,\
side_effect
from tests.utils import Command
@pytest.fixture
def ssh_error(tmpdir):
path = os.path.join(str(tmpdir), 'known_hosts')
def reset(path):
with open(path, 'w') as fh:
lines = [
'123.234.567.890 asdjkasjdakjsd\n'
'98.765.432.321 ejioweojwejrosj\n'
'111.222.333.444 qwepoiwqepoiss\n'
]
fh.writelines(lines)
def known_hosts(path):
with open(path, 'r') as fh:
return fh.readlines()
reset(path)
errormsg = u"""@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!
Someone could be eavesdropping on you right now (man-in-the-middle attack)!
It is also possible that a host key has just been changed.
The fingerprint for the RSA key sent by the remote host is
b6:cb:07:34:c0:a0:94:d3:0d:69:83:31:f4:c5:20:9b.
Please contact your system administrator.
Add correct host key in {0} to get rid of this message.
Offending RSA key in {0}:2
RSA host key for {1} has changed and you have requested strict checking.
Host key verification failed.""".format(path, '98.765.432.321')
return errormsg, path, reset, known_hosts
def test_match(ssh_error):
errormsg, _, _, _ = ssh_error
assert match(Command('ssh', stderr=errormsg), None)
assert match(Command('ssh', stderr=errormsg), None)
assert match(Command('scp something something', stderr=errormsg), None)
assert match(Command('scp something something', stderr=errormsg), None)
assert not match(Command(stderr=errormsg), None)
assert not match(Command('notssh', stderr=errormsg), None)
assert not match(Command('ssh'), None)
def test_side_effect(ssh_error):
errormsg, path, reset, known_hosts = ssh_error
command = Command('ssh user@host', stderr=errormsg)
side_effect(command, None)
expected = ['123.234.567.890 asdjkasjdakjsd\n', '111.222.333.444 qwepoiwqepoiss\n']
assert known_hosts(path) == expected
def test_get_new_command(ssh_error, monkeypatch):
errormsg, _, _, _ = ssh_error
assert get_new_command(Command('ssh user@host', stderr=errormsg), None) == 'ssh user@host'
``` |
{
"source": "johnarban/ads-tools",
"score": 3
} |
#### File: johnarban/ads-tools/ads_lib_pull.py
```python
import os
import ads
# if you don't want bibtexabs, pip install ads,
# otherwise pip install git+git://github.com/andycasey/ads@master
# this script currently has bibtexabs hardcoded in two places
# a simple find/replace should be safe to change it to bibtex
import requests # pip install requests
import math
import argparse
# code source:
# https://github.com/adsabs/ads-examples/blob/master/library_csv/lib_2_csv.py
def get_config(token=None):
"""
Load ADS developer key from file
:return: str
@andycasey
"""
# global token
if token is None:
try:
with open(os.path.expanduser("~/.ads/dev_key")) as f:
# ~/.ads/dev_key should contain your ADS API token
token = f.read().strip()
except IOError:
            print(
                "The script assumes you have your ADS developer token in "
                "the file: {}".format(os.path.expanduser("~/.ads/dev_key"))
            )
return {
"url": "https://api.adsabs.harvard.edu/v1/biblib",
"headers": {
"Authorization": "Bearer:{}".format(token),
"Content-Type": "application/json",
},
}
def get_libraries():
"""
Get a list of all my libraries and their meta-data
:return: list
@andycasey
"""
config = get_config()
r = requests.get("{}/libraries".format(config["url"]), headers=config["headers"])
# Collect a list of all of our libraries, this will include the number
# of documents, the name, description, id, and other meta data
try:
data = r.json()["libraries"]
return data
except ValueError:
raise ValueError(r.text)
def get_library(library_id, num_documents, start=0, rows=25):
"""
Get the content of a library when you know its id. As we paginate the
requests from the private library end point for document retrieval,
we have to repeat requests until we have all documents.
:param library_id: identifier of the library
:type library_id:
:param num_documents: number of documents in the library
:type num_documents: int
:param start: start with a given row
:type start: int
:param rows: number of rows to request
:type rows: int
:return: list
@andycasey
CHANGES: @astrojthe3 - Dec 4 2020
added start keyword,
to start at arbitrary row
"""
config = get_config()
# start = 0
num_documents -= start
# rows = 25
num_paginates = int(math.ceil(num_documents / (1.0 * rows)))
documents = []
for i in range(num_paginates):
print("Pagination {} out of {}: rows:".format(i + 1, num_paginates))
r = requests.get(
"{}/libraries/{id}?start={start}&rows={rows}".format(
config["url"], id=library_id, start=start, rows=rows
),
headers=config["headers"],
)
# Get all the documents that are inside the library
try:
data = r.json()["documents"]
except ValueError:
raise ValueError(r.text)
documents.extend(data)
start += rows
return documents
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Create/update a bibliography file\n for an ADS Library"
)
parser.add_argument(
"-l",
"--library_id",
help="""ID of ADS Library: https://ui.adsabs.harvard.edu/user/libraries/[library_id].
If not passed, it should either be hardcoded or in the file library.id.
library.id is created when the script is first run""",
)
parser.add_argument(
"-t",
"--token",
default=None,
help="ADS developer token otherwise defaults to ~/.ads/dev_key",
)
parser.add_argument(
"-r",
"--refresh",
action="store_true",
help="create a new bibtex file and bibcode list even if one exists. This will overwrite any changes you've made",
)
parser.add_argument(
"--list",
"--list-libaries",
action="store_true",
help="List your library names and IDs",
)
parser.add_argument(
"-b",
"--bibcodes",
help="name of file to store bibcodes",
default="bibcodes",
dest="bibcodes",
)
parser.add_argument(
"-f",
"--bibfile",
help="name of bibtex (.bib) file",
default="library.bib",
dest="bibfile",
)
parser.add_argument(
"--bib-format",
choices=["bibtex", "bibtexabs"],
help="""[[DISABLED]] Format for bibtex file.
        bibtexabs only works if using the git version of the ads module""",
default="bibtex",
)
parser.add_argument(
"--api-rows",
type=int,
help="number of rows retreived with each api call to download the library",
default=25,
)
parser.add_argument("--debug", action="store_true")
args = parser.parse_args()
library_id = args.library_id
bibcodefile = args.bibcodes
bibfile = args.bibfile
# get the saved library ID
# if one isn't passed
if os.path.isfile("library.id"):
with open("library.id", "r") as f:
lib_config = f.read()
if library_id is None:
library_id = lib_config
# if bibcodefile is None:
# bibcodefile = lib_config[1]
# if bibfile is None:
# bibfile = lib_config[2]
token = args.token
refresh = args.refresh
rows = args.api_rows
if args.debug:
print(bibcodefile)
print(bibfile)
print(token)
print(refresh)
print(rows)
if args.list:
libraries = get_libraries()
n = max([len(n["name"]) for n in libraries])
print(("{:>%s} {}" % n).format("NAME", "ADS ID"))
print("-" * 72)
for item in libraries:
print(("{:>%s} {}" % n).format(item["name"], item["id"]))
# id I have no library_id
# exit with a message
elif library_id is None:
print("Please provide library ID, or hardcode one")
print("run with the --list flag to list available libraries")
else:
config = get_config(token) # if there is no token it will fail thnx
# to @andycasey's much better coding which
# actually catches exceptions
# we need to get the number of entries in the library
# that is stored in metadata
r = requests.get(
"{}/libraries/{id}".format(config["url"], id=library_id),
headers=config["headers"],
)
metadata = r.json()["metadata"]
# if we are running for the first time
# then there is no file of bibcodes to compare to
# so we will just download the whole library
if (not os.path.isfile(bibcodefile)) or refresh:
print(
'Creating new bib file for ADS Library "{}", id: {}'.format(
metadata["name"], metadata["id"]
)
)
library = get_library(
library_id=metadata["id"],
num_documents=metadata["num_documents"],
rows=rows,
)
print("New bib file has {} items".format(len(library)))
bibtex = ads.ExportQuery(library, format=args.bib_format).execute()
with open("library.id", "w") as f:
f.write(library_id)
# f.write(bibcodefile)
# f.write(bibfile)
with open(bibcodefile, "w") as f:
f.writelines("{}\n".format(bc) for bc in library)
with open(bibfile, "w") as f:
f.write(bibtex)
# if there is a file of bibcodes we
# need to compare with it. Unfortunately,
# we have to download the entire library
# for this too. A savvy person could do some
# table manipulation server side via the API
# to offload some of the work, but that might
# be even slower, than downloading
elif os.path.isfile(bibcodefile):
print("bibcode file {} already exists".format(bibcodefile))
with open(bibcodefile, "r") as f:
current = [i.strip() for i in f.readlines()]
print("Contains {} items".format(len(current)))
library = get_library(
library_id=metadata["id"],
num_documents=metadata["num_documents"],
rows=rows,
)
# get the exclusive join of the sets
new = list(set(current) ^ set(library))
if len(new) > 0:
print("Adding {} new items".format(len(new)))
bibtex = ads.ExportQuery(new, format=args.bib_format).execute()
with open(bibcodefile, "a") as f:
f.writelines("{}\n".format(bc) for bc in new)
with open(bibfile, "a") as f:
f.write("\n\n\n\n\n")
f.write(bibtex)
``` |
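Besides the command-line interface, the helpers can be driven directly from Python. A hedged sketch (it assumes a valid ADS token in `~/.ads/dev_key` and that each library entry carries `name`, `id` and `num_documents`, as the docstrings above describe):
```python
libraries = get_libraries()
for lib in libraries:
    print(lib["name"], lib["id"], lib["num_documents"])

# export the first library to a BibTeX string
first = libraries[0]
docs = get_library(first["id"], first["num_documents"], rows=25)
bibtex = ads.ExportQuery(docs, format="bibtex").execute()
```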
{
"source": "johnarban/arban",
"score": 3
} |
#### File: johnarban/arban/error_prop.py
```python
import numpy as np
def err_div(x, y, ex, ey):
Q = x / y
dQ = np.abs(Q) * np.sqrt((ex / x) ** 2 + (ey / y) ** 2)
return Q, dQ
def err_multiply(x, y, ex, ey):
Q = x * y
dQ = np.abs(Q) * np.sqrt( (ex/x)**2 + (ey/y)**2 )
return Q, dQ
def err_add(x, y, ex, ey):
Q = x + y
dQ = np.sqrt(ex**2 + ey**2)
return Q, dQ
def err_power_1(x, p, ex):
"""error propogation for
x^p where p is a constant
"""
Q = x ** p
dQ = np.abs(p * x**(p-1) * ex)
return Q, dQ
def err_power_2(x, p, ex, ep):
"""error propogation for
x^p where x & p have errors
"""
Q = x ** p
dQ_sq = Q**2 * ( (p * ex / x)**2 + (np.log(x) * ep)**2 )
return Q, dQ_sq**0.5
def log_err(x, dx):
return dx / (np.log(10) * x)
def log_errorbar(x, dx):
logxerr = log_err(x,dx)
logxupp = np.log10(x) + logxerr
logxlow = np.log10(x) - logxerr
xupp = 10**logxupp - x
xlow = x - 10**logxlow
return xupp, xlow
# 1st moment error is v * sqrt(2) sigma_w / w
# 2nd moment error
```
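A short worked example of the helpers above (illustrative numbers only, with the functions assumed in scope):
```python
x, ex = 10.0, 0.5       # x = 10.0 +/- 0.5
y, ey = 2.0, 0.1        # y = 2.0 +/- 0.1

q, dq = err_div(x, y, ex, ey)     # 5.0 +/- 5*sqrt(0.05**2 + 0.05**2) ~ 0.35
p, dp = err_power_1(x, 2, ex)     # 100.0 +/- |2 * 10 * 0.5| = 10.0
upp, low = log_errorbar(x, ex)    # asymmetric linear-scale error bars from a log error
print(q, dq, p, dp, upp, low)
```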
#### File: arban/utils_module/stats.py
```python
import numpy as np
from scipy import stats
# note: rolling_window, bin_center and eigen_decomp are helpers defined elsewhere in this module
def minmax(arr, axis=None):
return np.nanmin(arr, axis=axis), np.nanmax(arr, axis=axis)
def weighted_generic_moment(x, k, w=None):
x = np.asarray(x, dtype=np.float64)
if w is not None:
w = np.asarray(w, dtype=np.float64)
else:
w = np.ones_like(x)
return np.sum(x ** k * w) / np.sum(w)
def weighted_mean(x, w):
return np.sum(x * w) / np.sum(w)
def weighted_std(x, w):
x = np.asarray(x, dtype=np.float64)
w = np.asarray(w, dtype=np.float64)
SS = np.sum(w * (x - weighted_mean(x, w)) ** 2) / np.sum(w)
# quantile(x, w, 0.5)
return np.sqrt(SS)
def weighted_percentile(x, w, percentile, p=0):
k = np.isfinite(x + w)
clean_x = np.asarray(x[k], dtype=np.float64)
clean_w = np.asarray(w[k], dtype=np.float64)
srt = np.argsort(clean_x)
sorted_w = clean_w[srt]
sorted_x = clean_x[srt]
Sn = np.cumsum(sorted_w)
Pn = (Sn - 0.5 * sorted_w) / Sn[-1]
return np.interp(np.asarray(percentile) / 100.0, Pn, sorted_x)
def weighted_median(x, w):
return weighted_percentile(x, w, 50)
def weighted_mad(x, w, stddev=True):
x = np.asarray(x, dtype=np.float64)
w = np.asarray(w, dtype=np.float64)
if stddev:
return 1.4826 * weighted_median(np.abs(x - weighted_median(x, w)), w)
else:
return weighted_median(np.abs(x - weighted_median(x, w)), w)
def mavg(arr, n=2, axis=-1):
"""
returns the moving average of an array.
returned array is shorter by (n-1)
applied along last axis by default
"""
return np.mean(rolling_window(arr, n), axis=axis)
def mgeo(arr, n=2, axis=-1):
"""rolling geometric mean
Arguments:
arr {no.array} -- array
Keyword Arguments:
n {int} -- window size (default: {2})
axis {int} -- axis to roll over (default: {-1})
Returns:
[type] -- [description]
"""
return stats.gmean(rolling_window(arr, n), axis=axis)
def pdf(values, bins=None, range=None):
"""
** Normalized differential area function. **
(statistical) probability denisty function
normalized so that the integral is 1
and. The integral over a range is the
probability of the value is within
that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
"""
if hasattr(bins, "__getitem__") and (range is None):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
h, x = np.histogram(values, bins=bins, range=range, density=False)
# From the definition of Pr(x) = dF(x)/dx this
# is the correct form. It returns the correct
# probabilities when tested
pdf = h / (np.sum(h, dtype=float) * np.diff(x))
return pdf, bin_center(x)
def pdf2(values, bins=None, range=None):
"""
N * PDF(x)
The ~ PDF normalized so that
the integral is equal to the
total amount of a quantity.
The integral over a range is the
total amount within that range.
Returns array of size len(bins)-1
Plot versus bins[:-1]
"""
if hasattr(bins, "__getitem__") and (range is None):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
pdf, x = np.histogram(values, bins=bins, range=range, density=False)
pdf = pdf.astype(float) / np.diff(x)
return pdf, bin_center(x)
def edf(data, pdf=False):
y = np.arange(len(data), dtype=float)
x = np.sort(data).astype(float)
return y, x
def cdf(values, bins):
"""
CDF(x)
(statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
Plot versus bins[:-1]
"""
if hasattr(bins, "__getitem__"):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False) # returns int
# cumulative fraction below bin_k
c = np.cumsum(h / np.sum(h, dtype=float))
# append 0 to beginning because P(X < min(x)) = 0
return np.append(0, c), bins
def cdf2(values, bins):
"""
# # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
    CDF is invariant to binning
Plot versus bins[:-1]
Not normalized to 1
"""
if hasattr(bins, "__getitem__"):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
h, bins = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return np.append(0.0, c), bins
def area_function(extmap, bins, scale=1):
"""
    Complementary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b.
"""
c, bins = cdf2(extmap, bins)
return scale * (c.max() - c), bins
def diff_area_function(extmap, bins, scale=1):
"""
See pdf2
"""
s, bins = area_function(extmap, bins)
dsdx = -np.diff(s) / np.diff(bins)
return dsdx * scale, bin_center(bins)
def log_diff_area_function(extmap, bins):
"""
See pdf2
"""
s, bins = diff_area_function(extmap, bins)
g = s > 0
dlnsdlnx = np.diff(np.log(s[g])) / np.diff(np.log(bins[g]))
return dlnsdlnx, bin_center(bins[g])
def mass_function(values, bins, scale=1, aktomassd=183):
"""
    M(>Ak), mass weighted complementary cdf
"""
if hasattr(bins, "__getitem__"):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
if scale != 1:
aktomassd = scale
h, bins = np.histogram(
values,
bins=bins,
range=range,
density=False,
weights=values * aktomassd * scale,
)
c = np.cumsum(h).astype(float)
return c.max() - c, bins
def ortho_dist(x, y, m, b):
"""
get the orthogonal distance
from a point to a line
"""
ortho_dist = (y - m * x - b) / np.sqrt(1 + m ** 2)
return ortho_dist
def mad(X, stddev=True, axis=None):
if stddev:
return 1.4826 * np.nanmedian(np.abs(X - np.nanmedian(X, axis=axis)), axis=axis)
else:
return np.nanmedian(np.abs(X - np.nanmedian(X, axis=axis)), axis=axis)
def mean_mad(X, stddev=True, axis=None):
if stddev:
        return 1.4826 * np.nanmedian(np.abs(X - np.nanmean(X, axis=axis)), axis=axis)
else:
return np.nanmedian(np.abs(X - np.nanmean(X, axis=axis)), axis=axis)
def rms(X, axis=None):
return np.sqrt(np.nanmean(X ** 2, axis=axis))
def standardize(X, remove_mean=True, remove_std=True):
if remove_mean:
mean = np.nanmean(X)
else:
mean = 0
if remove_std:
std = np.nanstd(X)
else:
std = 1
return (X - mean) / std
def pdf_pareto(t, a, k, xmax=None):
"""PDF of Pareto distribution
Parameters
----------
t : input
array
a : power-law power (a = alpha-1 from real Pareto)
array
k : minimum value for power law
array
xmax : max value for, optional, by default None
Returns
-------
PDF(t|a,k,xmax)
numpy array
"""
if xmax is None:
out = ((a - 1) / k) * (t / k) ** (-a)
out[(t < k)] = 0
return out
else:
out = ((a - 1) / (k ** (1 - a) - xmax ** (1 - a))) * t ** (-a)
out[(t <= k) | (t > xmax)] = 0
return out
def cdf_pareto(t, a, k, xmax=None):
"""CDF of Pareto distribution
Parameters
----------
t : input
array
a : power-law power (a = alpha-1 from real Pareto)
array
k : minimum value for power law
array
xmax : max value for, optional, by default None
Returns
-------
CDF(t|a,k,xmax)
numpy array
"""
if xmax is None:
out = 1 - (k / t) ** (a - 1)
out[t < k] = 0
return out
else:
out = (1 - (t / k) ** (1 - a)) / (1 - (xmax / k) ** (1 - a))
out[t <= k] = 0
out[t > xmax] = 1
return out
from scipy.spatial.distance import cdist
def mahalanobis(X,X2=None):
"""mahalanobis distance for data
X = np.array([x1,x2,x3,...])
Parameters
----------
X : np.array (M x N)
        M x N array, with M variables,
and N observations.
print(X) should look like
# [[x1, x2, x3, x4...xn],
# [y1, y2, y3, y4...yn].
# [z1, z2, z3, z4...zn],
# ..]
# as if X = np.array([x, y, z, ...])
Returns
-------
md: np.array
the square of maholanobis distance
it follows a chi2 distribution for normally
distributed data
"""
# let scipy do all the lifting
# but this is a nice way anyways
# C = np.cov(X.T)
# P, D, T = eigen_decomp(C)
# mu = np.mean(X, axis=1)
# X = (X - mu)
# wX = X @ np.linalg.inv(T.T) #whitened data
    # md = np.linalg.norm(wX, axis=1)**2  # norm spanning [xi,yi,zi]
# #wXT = np.linalg.inv(T) @ X.T
# #md = wX @ wX.T
# #md = np.sqrt(md.diagonal())
# #md is distributed as chi2 with d.o.f. = # independent axes
if X2 is None:
return cdist(X,np.atleast_2d(X.mean(axis=0)),metric='mahalanobis')[:,0]**2
else:
C = np.cov(X.T)
P, D, T = eigen_decomp(C)
mu = np.mean(X2, axis=1)
wX = (X2-mu) @ np.linalg.inv(T.T)
md = np.linalg.norm(wX, axis=1)** 2
return md
``` |
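A quick sanity check of the weighted statistics above: with uniform weights they reduce to the ordinary numpy estimators (functions assumed in scope):
```python
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 100.0])
w = np.ones_like(x)

print(weighted_mean(x, w), np.mean(x))          # both 22.0
print(weighted_median(x, w), np.median(x))      # both 3.0
print(weighted_percentile(x, w, [16, 50, 84]))  # interpolated weighted percentiles
```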
{
"source": "JohnAresHao/examination",
"score": 2
} |
#### File: examination/competition/rank_views.py
```python
from account.models import Profile
from competition.models import CompetitionKindInfo
from utils.response import json_response
from utils.decorators import check_login
from utils.redis.rrank import get_user_rank, get_rank
from utils.errors import UserError, CompetitionError
@check_login
def get_my_rank(request):
uid = request.GET.get('uid', '')
kind_id = request.GET.get('kind_id', '')
try:
profile = Profile.objects.get(uid=uid)
except Profile.DoesNotExist:
return json_response(*UserError.UserNotFound)
try:
kind_info = CompetitionKindInfo.objects.get(kind_id=kind_id)
except CompetitionKindInfo.DoesNotExist:
return json_response(*CompetitionError.CompetitionNotFound)
return json_response(200, 'OK', {
'time': get_user_rank(kind_id, uid).get('time', 0),
'score': get_user_rank(kind_id, uid).get('score', 0),
'rank': get_rank(kind_id, uid)
})
```
#### File: examination/utils/processors.py
```python
import re
from django.utils.deprecation import MiddlewareMixin
class ConstExtendIntField(int):
def __new__(cls, flag, version=''):
obj = int.__new__(cls, flag)
obj.version = version
return obj
class UserAgentDetectionMiddleware(MiddlewareMixin):
def process_request(self, request):
ua = request.META.get('HTTP_USER_AGENT', '').lower()
# ####### Device、OS #######
# Windows
request.Windows = 'windows nt' in ua
# Linux
request.Linux = 'linux x86_64' in ua
# iMac、iPhone、iPad、iPod
request.iMac, request.iPhone, request.iPad, request.iPod = 'macintosh' in ua, 'iphone' in ua, 'ipad' in ua, 'ipod' in ua
# PC
request.PC = request.Windows or request.Linux or request.iMac
# iOS
request.iOS = request.iPhone or request.iPad or request.iMac or request.iPod
# Android and Version
adr = re.findall(r'android ([\d.]+)', ua)
request.Android = ConstExtendIntField(True, adr[0]) if adr else ConstExtendIntField('android' in ua, '')
# ####### APP #######
# Weixin/Wechat and Version
wx = re.findall(r'micromessenger[\s/]([\d.]+)', ua)
request.weixin = request.wechat = ConstExtendIntField(True, wx[0]) if wx else ConstExtendIntField(False, '')
return None
```
#### File: utils/redis/rprofile.py
```python
import json
from utils.jsonencoder import JsonEncoder
from utils.redis.connect import r
from utils.redis.constants import (REDIS_USER_INFO, USER_LOGIN_VCODE, USER_PASSWORD_RESET,
USER_SIGN_VCODE, USERINFO_HAS_ENTERED, USER_HAS_SENT_EMAIL,
USER_HAS_SENT_REGEMAIL)
def set_profile(data={}):
if isinstance(data, dict):
uid = data.get('uid', '')
key = REDIS_USER_INFO % uid
data = json.dumps(data, cls=JsonEncoder)
r.set(key, data)
def delete_profile(uid):
r.delete(REDIS_USER_INFO % uid)
def get_profile(uid):
ret = r.get(REDIS_USER_INFO % uid).decode('utf-8') if r.get(REDIS_USER_INFO % uid) else '{}'
return json.loads(ret)
def enter_userinfo(kind_id, uid):
key = USERINFO_HAS_ENTERED % (kind_id, uid)
r.set(key, 1)
def get_enter_userinfo(kind_id, uid):
key = USERINFO_HAS_ENTERED % (kind_id, uid)
return r.get(key)
def set_vcode(sign, value):
key = USER_LOGIN_VCODE % sign
r.setex(key, 180, value)
def get_vcode(sign):
key = USER_LOGIN_VCODE % sign
return r.get(key)
def set_signcode(sign, value):
key = USER_SIGN_VCODE % sign
r.setex(key, 1800, value)
def get_signcode(sign):
key = USER_SIGN_VCODE % sign
return r.get(key)
def set_passwd(sign, passwd):
key = USER_PASSWORD_RESET % sign
r.setex(key, 1860, passwd)
def get_passwd(sign):
key = USER_PASSWORD_RESET % sign
return r.get(key)
def set_has_sentemail(email):
r.setex(USER_HAS_SENT_EMAIL % email, 1800, 1)
def get_has_sentemail(email):
return r.get(USER_HAS_SENT_EMAIL % email)
def set_has_sentregemail(email):
r.setex(USER_HAS_SENT_REGEMAIL % email, 60, 1)
def get_has_sentregemail(email):
return r.get(USER_HAS_SENT_REGEMAIL % email)
```
#### File: utils/redis/rrank.py
```python
import json
from utils.jsonencoder import JsonEncoder
from utils.redis.connect import r
from utils.redis.constants import REDIS_RANK, REDIS_RANK_USER_INFO
from utils.redis.rprofile import get_profile
def add_to_rank(uid, kind_id, score, time):
key = REDIS_RANK % kind_id
pre_score = int(r.zscore(key, uid)) if r.zscore(key, uid) else 0
    rank_score = score * 100000000 + (86400000 - time)
    if pre_score == 0 or (pre_score != 0 and rank_score > pre_score):
        r.zadd(key, rank_score, uid)
user_info = get_profile(uid)
ret = {
'nickname': user_info.get('nickname', ''),
'numid': user_info.get('numid', ''),
'avatar': user_info.get('avatar', ''),
'score': score,
'time': time
}
r.hset(REDIS_RANK_USER_INFO % kind_id, uid, json.dumps(ret, cls=JsonEncoder))
def get_rank(kind_id, uid):
key = REDIS_RANK % kind_id
rank = r.zrevrank(key, uid)
return (int(rank) + 1) if rank is not None else None
def get_user_rank(kind_id, uid):
key = REDIS_RANK_USER_INFO % kind_id
ret = r.hget(key, uid)
return json.loads(ret.decode('utf-8')) if ret else {}
def get_rank_data(kind_id, start=0, end=-1):
ranks = r.zrevrange(REDIS_RANK % kind_id, start, end)
if not ranks:
return [], []
ret = r.hmget(REDIS_RANK_USER_INFO % kind_id, ranks)
ret = [json.loads(i.decode('utf-8') if i else '{}') for i in ret]
return ranks, ret
``` |
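The sorted-set score in `add_to_rank` packs accuracy and speed into one integer: the quiz score fills the high digits (x 10^8) and the low digits hold `86400000 - time`, so a higher score always outranks a lower one and ties are broken by the shorter time. This assumes `time` is a duration in milliseconds under one day. A worked illustration:
```python
def rank_score(score, time_ms):
    # same encoding as add_to_rank above
    return score * 100000000 + (86400000 - time_ms)

a = rank_score(90, 120000)   # 90 points in 2.0 min -> 9086280000
b = rank_score(90,  90000)   # 90 points in 1.5 min -> 9086310000
c = rank_score(95, 300000)   # 95 points in 5.0 min -> 9586100000
assert c > b > a             # score first, then speed
```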
{
"source": "johnarevalo/gmu-mmimdb",
"score": 2
} |
#### File: gmu-mmimdb/generators/moe.py
```python
import os
import sys
import json
import numpy
conf_file = sys.argv[1] if len(sys.argv) > 1 else None
with open(conf_file) as f:
conf = json.load(f)
num_jobs = int(sys.argv[2]) if len(sys.argv) > 2 else 25
dir_json = sys.argv[3] if len(sys.argv) > 3 else 'json/'
rng = numpy.random.RandomState(conf['rng_seed'])
model_name = conf['model_name']
if not os.path.exists(dir_json):
os.makedirs(dir_json)
def random_init_string(sparse=False):
if rng.randint(2) and sparse:
sparse_init = rng.randint(10, 30)
return "sparse_init: " + str(sparse_init)
irange = 10. ** rng.uniform(-2.3, -1.)
return "irange: " + str(irange)
def opt_param(max_val=1.0, min_val=0.):
if rng.randint(2):
return 0
return rng.uniform(min_val, max_val)
for job_id in range(num_jobs):
conf['model_name'] = '%s_%02i' % (model_name, job_id)
if os.path.isfile(conf['model_name']):
print('%s already exists, skipping...' % (conf['model_name']))
continue
conf['init_ranges'] = [10. ** rng.uniform(-3, -1.) for _ in range(5)]
# Regularization params
# http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf
if rng.randint(2):
conf['max_norms'] = [rng.uniform(5., 20.) for _ in range(5)]
conf['hidden_size'] = [64, 128, 256, 512][rng.randint(4)]
conf['dropout'] = rng.uniform(0.3, 0.7)
conf['learning_rate'] = 10. ** rng.uniform(-3., -1.)
with open(os.path.join(dir_json, conf['model_name'] + '.json'), 'w') as f:
json.dump(conf, f, indent=4)
``` |
{
"source": "johnarleyburns/lambda-refarch-mapreduce",
"score": 2
} |
#### File: src/python/lambdautils.py
```python
import boto3
import botocore
import os
class LambdaManager(object):
def __init__ (self, l, s3, region, codepath, job_id, fname, handler, lmem=1536):
self.awslambda = l;
self.region = "us-east-1" if region is None else region
self.s3 = s3
self.codefile = codepath
self.job_id = job_id
self.function_name = fname
self.handler = handler
self.role = os.environ.get('serverless_mapreduce_role')
self.memory = lmem
self.timeout = 300
self.function_arn = None # set after creation
# TracingConfig parameter switches X-Ray tracing on/off.
# Change value to 'Mode':'PassThrough' to switch it off
def create_lambda_function(self):
runtime = 'python2.7';
response = self.awslambda.create_function(
FunctionName = self.function_name,
Code = {
"ZipFile": open(self.codefile, 'rb').read()
},
Handler = self.handler,
Role = self.role,
Runtime = runtime,
Description = self.function_name,
MemorySize = self.memory,
Timeout = self.timeout,
TracingConfig={'Mode':'PassThrough'}
)
self.function_arn = response['FunctionArn']
print(response)
def update_function(self):
'''
Update lambda function
'''
response = self.awslambda.update_function_code(
FunctionName = self.function_name,
ZipFile=open(self.codefile, 'rb').read(),
Publish=True
)
updated_arn = response['FunctionArn']
# parse arn and remove the release number (:n)
arn = ":".join(updated_arn.split(':')[:-1])
self.function_arn = arn
print(response)
def update_code_or_create_on_noexist(self):
'''
Update if the function exists, else create function
'''
try:
self.create_lambda_function()
except botocore.exceptions.ClientError as e:
# parse (Function already exist)
self.update_function()
def add_lambda_permission(self, sId, bucket):
resp = self.awslambda.add_permission(
Action = 'lambda:InvokeFunction',
FunctionName = self.function_name,
Principal = 's3.amazonaws.com',
StatementId = '%s' % sId,
SourceArn = 'arn:aws:s3:::' + bucket
)
print(resp)
def create_s3_eventsource_notification(self, bucket, prefix=None):
if not prefix:
prefix = self.job_id +"/task";
self.s3.put_bucket_notification_configuration(
Bucket = bucket,
NotificationConfiguration = {
'LambdaFunctionConfigurations': [
{
'Events': [ 's3:ObjectCreated:*'],
'LambdaFunctionArn': self.function_arn,
'Filter' : {
'Key': {
'FilterRules' : [
{
'Name' : 'prefix',
'Value' : prefix
},
]
}
}
}
],
#'TopicConfigurations' : [],
#'QueueConfigurations' : []
}
)
def delete_function(self):
self.awslambda.delete_function(FunctionName=self.function_name)
@classmethod
def cleanup_logs(cls, func_name):
'''
Delete all Lambda log group and log streams for a given function
'''
log_client = boto3.client('logs')
#response = log_client.describe_log_streams(logGroupName='/aws/lambda/' + func_name)
response = log_client.delete_log_group(logGroupName='/aws/lambda/' + func_name)
return response
def compute_batch_size(keys, lambda_memory, concurrent_lambdas):
max_mem_for_data = 0.6 * lambda_memory * 1000 * 1000;
size = 0.0
for key in keys:
if isinstance(key, dict):
size += key['Size']
else:
size += key.size
avg_object_size = size/len(keys)
print("Dataset size: %s, nKeys: %s, avg: %s" %(size, len(keys), avg_object_size))
if avg_object_size < max_mem_for_data and len(keys) < concurrent_lambdas:
b_size = 1
else:
b_size = int(round(max_mem_for_data/avg_object_size))
return b_size
def batch_creator(all_keys, batch_size):
'''
'''
# TODO: Create optimal batch sizes based on key size & number of keys
batches = []
batch = []
for i in range(len(all_keys)):
batch.append(all_keys[i]);
if (len(batch) >= batch_size):
batches.append(batch)
batch = []
if len(batch):
batches.append(batch)
return batches
```
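A hedged sketch of how the batching helpers above fit together, using hypothetical S3 listing entries (dicts carrying a byte `Size`, one of the two shapes `compute_batch_size` accepts):
```python
keys = [{'Key': 'input/part-%04d' % i, 'Size': 50 * 1000 * 1000}
        for i in range(200)]                 # 200 objects of ~50 MB each

# 1536 MB mappers, at most 100 running concurrently
bsize = compute_batch_size(keys, 1536, 100)  # ~18 keys fit in 60% of mapper memory
batches = batch_creator(keys, bsize)
print(bsize, len(batches))                   # 18 keys per batch -> 12 batches
```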
#### File: src/python/reducer.py
```python
import boto3
import json
import random
import resource
from io import StringIO
import urllib2
import time
# create an S3 & Dynamo session
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
# constants
TASK_MAPPER_PREFIX = "task/mapper/";
TASK_REDUCER_PREFIX = "task/reducer/";
def write_to_s3(bucket, key, data, metadata):
# Write to S3 Bucket
s3.Bucket(bucket).put_object(Key=key, Body=data, Metadata=metadata)
def lambda_handler(event, context):
start_time = time.time()
job_bucket = event['jobBucket']
bucket = event['bucket']
reducer_keys = event['keys']
job_id = event['jobId']
r_id = event['reducerId']
step_id = event['stepId']
n_reducers = event['nReducers']
# aggr
results = {}
line_count = 0
# INPUT JSON => OUTPUT JSON
# Download and process all keys
for key in reducer_keys:
response = s3_client.get_object(Bucket=job_bucket, Key=key)
contents = response['Body'].read()
try:
for srcIp, val in json.loads(contents).iteritems():
line_count +=1
if srcIp not in results:
results[srcIp] = 0
results[srcIp] += float(val)
except Exception, e:
print(e)
time_in_secs = (time.time() - start_time)
#timeTaken = time_in_secs * 1000000000 # in 10^9
#s3DownloadTime = 0
#totalProcessingTime = 0
pret = [len(reducer_keys), line_count, time_in_secs]
print("Reducer ouputput", pret)
if n_reducers == 1:
# Last reducer file, final result
fname = "%s/result" % job_id
else:
fname = "%s/%s%s/%s" % (job_id, TASK_REDUCER_PREFIX, step_id, r_id)
metadata = {
"linecount": '%s' % line_count,
"processingtime": '%s' % time_in_secs,
"memoryUsage": '%s' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
}
write_to_s3(job_bucket, fname, json.dumps(results), metadata)
return pret
'''
ev = {
"bucket": "-useast-1",
"jobBucket": "-useast-1",
"jobId": "py-biglambda-1node-3",
"nReducers": 1,
"keys": ["py-biglambda-1node-3/task/mapper/1"],
"reducerId": 1,
"stepId" : 1
}
lambda_handler(ev, {});
'''
``` |
{
"source": "johnarn/price_observer_bot",
"score": 2
} |
#### File: PyInstaller/building/imphook.py
```python
import glob, sys, weakref
import os.path
from .. import log as logging
from ..compat import (
expand_path, importlib_load_source, FileNotFoundError, UserDict,)
from .imphookapi import PostGraphAPI
from .utils import format_binaries_and_datas
logger = logging.getLogger(__name__)
# Note that the "UserDict" superclass is old-style under Python 2.7! Avoid
# calling the super() method for this subclass.
class ModuleHookCache(UserDict):
"""
Cache of lazily loadable hook script objects.
This cache is implemented as a `dict` subclass mapping from the
fully-qualified names of all modules with at least one hook script to lists
of `ModuleHook` instances encapsulating these scripts. As a `dict` subclass,
all cached module names and hook scripts are accessible via standard
dictionary operations.
Attributes
----------
module_graph : ModuleGraph
Current module graph.
_hook_module_name_prefix : str
String prefixing the names of all in-memory modules lazily loaded from
cached hook scripts. See also the `hook_module_name_prefix` parameter
passed to the `ModuleHook.__init__()` method.
"""
_cache_id_next = 0
"""
0-based identifier unique to the next `ModuleHookCache` to be instantiated.
This identifier is incremented on each instantiation of a new
`ModuleHookCache` to isolate in-memory modules of lazily loaded hook scripts
in that cache to the same cache-specific namespace, preventing edge-case
collisions with existing in-memory modules in other caches.
"""
def __init__(self, module_graph, hook_dirs):
"""
Cache all hook scripts in the passed directories.
**Order of caching is significant** with respect to hooks for the same
module, as the values of this dictionary are lists. Hooks for the same
module will be run in the order in which they are cached. Previously
cached hooks are always preserved rather than overidden.
By default, official hooks are cached _before_ user-defined hooks. For
modules with both official and user-defined hooks, this implies that the
former take priority over and hence will be loaded _before_ the latter.
Parameters
----------
module_graph : ModuleGraph
Current module graph.
hook_dirs : list
List of the absolute or relative paths of all directories containing
**hook scripts** (i.e., Python scripts with filenames matching
`hook-{module_name}.py`, where `{module_name}` is the module hooked
by that script) to be cached.
"""
UserDict.__init__(self)
# To avoid circular references and hence increased memory consumption,
# a weak rather than strong reference is stored to the passed graph.
# Since this graph is guaranteed to live longer than this cache, this is
# guaranteed to be safe.
self.module_graph = weakref.proxy(module_graph)
# String unique to this cache prefixing the names of all in-memory
# modules lazily loaded from cached hook scripts, privatized for safety.
self._hook_module_name_prefix = '__PyInstaller_hooks_{}_'.format(
ModuleHookCache._cache_id_next)
ModuleHookCache._cache_id_next += 1
# Cache all hook scripts in the passed directories.
self._cache_hook_dirs(hook_dirs)
def _cache_hook_dirs(self, hook_dirs):
"""
Cache all hook scripts in the passed directories.
Parameters
----------
hook_dirs : list
List of the absolute or relative paths of all directories containing
hook scripts to be cached.
"""
for hook_dir in hook_dirs:
# Canonicalize this directory's path and validate its existence.
hook_dir = os.path.abspath(expand_path(hook_dir))
if not os.path.isdir(hook_dir):
raise FileNotFoundError(
'Hook directory "{}" not found.'.format(hook_dir))
# For each hook script in this directory...
hook_filenames = glob.glob(os.path.join(hook_dir, 'hook-*.py'))
for hook_filename in hook_filenames:
# Fully-qualified name of this hook's corresponding module,
# constructed by removing the "hook-" prefix and ".py" suffix.
module_name = os.path.basename(hook_filename)[5:-3]
# Lazily loadable hook object.
module_hook = ModuleHook(
module_graph=self.module_graph,
module_name=module_name,
hook_filename=hook_filename,
hook_module_name_prefix=self._hook_module_name_prefix,
)
# Add this hook to this module's list of hooks.
module_hooks = self.setdefault(module_name, [])
module_hooks.append(module_hook)
def remove_modules(self, *module_names):
"""
Remove the passed modules and all hook scripts cached for these modules
from this cache.
Parameters
----------
module_names : list
List of all fully-qualified module names to be removed.
"""
for module_name in module_names:
# Unload this module's hook script modules from memory. Since these
# are top-level pure-Python modules cached only in the "sys.modules"
# dictionary, popping these modules from this dictionary suffices
# to garbage collect these modules.
module_hooks = self.get(module_name, [])
for module_hook in module_hooks:
sys.modules.pop(module_hook.hook_module_name, None)
# Remove this module and its hook script objects from this cache.
self.pop(module_name, None)
# Dictionary mapping the names of magic attributes required by the "ModuleHook"
# class to 2-tuples "(default_type, sanitizer_func)", where:
#
# * "default_type" is the type to which that attribute will be initialized when
# that hook is lazily loaded.
# * "sanitizer_func" is the callable sanitizing the original value of that
# attribute defined by that hook into a safer value consumable by "ModuleHook"
# callers if any or "None" if the original value requires no sanitization.
#
# To avoid subtleties in the ModuleHook.__getattr__() method, this dictionary is
# declared as a module rather than a class attribute. If declared as a class
# attribute and then undefined (...for whatever reason), attempting to access
# this attribute from that method would produce infinite recursion.
_MAGIC_MODULE_HOOK_ATTRS = {
# Collections in which order is insignificant. This includes:
#
# * "datas", sanitized from hook-style 2-tuple lists defined by hooks into
# TOC-style 2-tuple sets consumable by "ModuleHook" callers.
# * "binaries", sanitized in the same way.
'datas': (set, format_binaries_and_datas),
'binaries': (set, format_binaries_and_datas),
'excludedimports': (set, None),
# Collections in which order is significant. This includes:
#
# * "hiddenimports", as order of importation is significant. On module
# importation, hook scripts are loaded and hook functions declared by
# these scripts are called. As these scripts and functions can have side
# effects dependent on module importation order, module importation itself
# can have side effects dependent on this order!
'hiddenimports': (list, None),
}
class ModuleHook(object):
"""
Cached object encapsulating a lazy loadable hook script.
This object exposes public attributes (e.g., `datas`) of the underlying hook
script as attributes of the same name of this object. On the first access of
any such attribute, this hook script is lazily loaded into an in-memory
private module reused on subsequent accesses. These dynamic attributes are
referred to as "magic." All other static attributes of this object (e.g.,
`hook_module_name`) are referred to as "non-magic."
Attributes (Magic)
----------
datas : set
Set of `TOC`-style 2-tuples `(target_file, source_file)` for all
external non-executable files required by the module being hooked,
converted from the `datas` list of hook-style 2-tuples
`(source_dir_or_glob, target_dir)` defined by this hook script.
binaries : set
Set of `TOC`-style 2-tuples `(target_file, source_file)` for all
external executable files required by the module being hooked, converted
from the `binaries` list of hook-style 2-tuples
`(source_dir_or_glob, target_dir)` defined by this hook script.
excludedimports : set
Set of the fully-qualified names of all modules imported by the module
being hooked to be ignored rather than imported from that module,
converted from the `excludedimports` list defined by this hook script.
These modules will only be "locally" rather than "globally" ignored.
These modules will remain importable from all modules other than the
module being hooked.
hiddenimports : set
Set of the fully-qualified names of all modules imported by the module
being hooked that are _not_ automatically detectable by PyInstaller
(usually due to being dynamically imported in that module), converted
from the `hiddenimports` list defined by this hook script.
Attributes (Non-magic)
----------
module_graph : ModuleGraph
Current module graph.
module_name : str
Name of the module hooked by this hook script.
hook_filename : str
Absolute or relative path of this hook script.
hook_module_name : str
Name of the in-memory module of this hook script's interpreted contents.
_hook_module : module
In-memory module of this hook script's interpreted contents, lazily
loaded on the first call to the `_load_hook_module()` method _or_ `None`
if this method has yet to be accessed.
"""
## Magic
def __init__(self, module_graph, module_name, hook_filename,
hook_module_name_prefix):
"""
Initialize this metadata.
Parameters
----------
module_graph : ModuleGraph
Current module graph.
module_name : str
Name of the module hooked by this hook script.
hook_filename : str
Absolute or relative path of this hook script.
hook_module_name_prefix : str
String prefixing the name of the in-memory module for this hook
script. To avoid namespace clashes with similar modules created by
other `ModuleHook` objects in other `ModuleHookCache` containers,
this string _must_ be unique to the `ModuleHookCache` container
containing this `ModuleHook` object. If this string is non-unique,
an existing in-memory module will be erroneously reused when lazily
loading this hook script, thus erroneously resanitizing previously
sanitized hook script attributes (e.g., `datas`) with the
`format_binaries_and_datas()` helper.
"""
# Note that the passed module graph is already a weak reference,
# avoiding circular reference issues. See ModuleHookCache.__init__().
assert isinstance(module_graph, weakref.ProxyTypes)
self.module_graph = module_graph
self.module_name = module_name
self.hook_filename = hook_filename
# Name of the in-memory module fabricated to refer to this hook script.
self.hook_module_name = (
hook_module_name_prefix + self.module_name.replace('.', '_'))
# Attributes subsequently defined by the _load_hook_module() method.
self._hook_module = None
def __getattr__(self, attr_name):
'''
Get the magic attribute with the passed name (e.g., `datas`) from this
lazily loaded hook script if any _or_ raise `AttributeError` otherwise.
This special method is called only for attributes _not_ already defined
by this object. This includes undefined attributes and the first attempt
to access magic attributes.
This special method is _not_ called for subsequent attempts to access
magic attributes. The first attempt to access magic attributes defines
corresponding instance variables accessible via the `self.__dict__`
instance dictionary (e.g., as `self.datas`) without calling this method.
This approach also allows magic attributes to be deleted from this
object _without_ defining the `__delattr__()` special method.
See Also
----------
Class docstring for supported magic attributes.
'''
# If this is a magic attribute, initialize this attribute by lazy
# loading this hook script and then return this attribute. To avoid
# recursion, the superclass method rather than getattr() is called.
if attr_name in _MAGIC_MODULE_HOOK_ATTRS:
self._load_hook_module()
return super(ModuleHook, self).__getattribute__(attr_name)
# Else, this is an undefined attribute. Raise an exception.
else:
raise AttributeError(attr_name)
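# Illustrative access pattern (hypothetical names, not part of this class):
#     hook = ModuleHook(graph, 'somepkg', 'hook-somepkg.py', 'pyi_hooks_')
#     hook.hiddenimports  # first access: __getattr__ fires, the hook script is
#                         # lazily loaded, and the now-set instance attribute is
#                         # returned
#     hook.hiddenimports  # later accesses hit self.__dict__ directly and never
#                         # re-enter __getattr__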
def __setattr__(self, attr_name, attr_value):
'''
Set the attribute with the passed name to the passed value.
If this is a magic attribute, this hook script will be lazily loaded
before setting this attribute. Unlike `__getattr__()`, this special
method is called to set _any_ attribute -- including magic, non-magic,
and undefined attributes.
See Also
----------
Class docstring for supported magic attributes.
'''
# If this is a magic attribute, initialize this attribute by lazy
# loading this hook script before overwriting this attribute.
if attr_name in _MAGIC_MODULE_HOOK_ATTRS:
self._load_hook_module()
# Set this attribute to the passed value. To avoid recursion, the
# superclass method rather than setattr() is called.
return super(ModuleHook, self).__setattr__(attr_name, attr_value)
## Loading
def _load_hook_module(self):
"""
Lazily load this hook script into an in-memory private module.
This method (and, indeed, this class) preserves all attributes and
functions defined by this hook script as is, ensuring sane behaviour in
hook functions _not_ expecting unplanned external modification. Instead,
this method copies public attributes defined by this hook script
(e.g., `binaries`) into private attributes of this object, which the
special `__getattr__()` and `__setattr__()` methods safely expose to
external callers. For public attributes _not_ defined by this hook
script, the corresponding private attributes will be assigned sane
defaults. For some public attributes defined by this hook script, the
corresponding private attributes will be transformed into objects more
readily and safely consumed elsewhere by external callers.
See Also
----------
Class docstring for supported attributes.
"""
# If this hook script module has already been loaded, noop.
if self._hook_module is not None:
return
# Load and execute the hook script. Even if mechanisms from the import
# machinery are used, this does not import the hook as the module.
logger.info(
'Loading module hook "%s"...', os.path.basename(self.hook_filename))
self._hook_module = importlib_load_source(
self.hook_module_name, self.hook_filename)
# Copy hook script attributes into magic attributes exposed as instance
# variables of the current "ModuleHook" instance.
for attr_name, (default_type, sanitizer_func) in (
_MAGIC_MODULE_HOOK_ATTRS.items()):
# Unsanitized value of this attribute.
attr_value = getattr(self._hook_module, attr_name, None)
# If this attribute is undefined, expose a sane default instead.
if attr_value is None:
attr_value = default_type()
# Else if this attribute requires sanitization, do so.
elif sanitizer_func is not None:
attr_value = sanitizer_func(attr_value)
# Else, expose the unsanitized value of this attribute.
# Expose this attribute as an instance variable of the same name.
setattr(self, attr_name, attr_value)
## Hooks
def post_graph(self):
"""
Call the **post-graph hook** (i.e., `hook()` function) defined by this
hook script if any.
This method is intended to be called _after_ the module graph for this
application is constructed.
"""
# Lazily load this hook script into an in-memory module.
self._load_hook_module()
# Call this hook script's hook() function, which modifies attributes
# accessed by subsequent methods and hence must be called first.
self._process_hook_func()
# Order is insignificant here.
self._process_hidden_imports()
self._process_excluded_imports()
def _process_hook_func(self):
"""
Call this hook's `hook()` function if defined.
"""
# If this hook script defines no hook() function, noop.
if not hasattr(self._hook_module, 'hook'):
return
# Call this hook() function.
hook_api = PostGraphAPI(
module_name=self.module_name, module_graph=self.module_graph)
self._hook_module.hook(hook_api)
# Update all magic attributes modified by the prior call.
self.datas.update(set(hook_api._added_datas))
self.binaries.update(set(hook_api._added_binaries))
self.hiddenimports.extend(hook_api._added_imports)
#FIXME: Deleted imports should be appended to
#"self.excludedimports" rather than handled here. However, see the
#_process_excluded_imports() FIXME below for a sensible alternative.
for deleted_module_name in hook_api._deleted_imports:
# Remove the graph link between the hooked module and item.
# This removes the 'item' node from the graph if no other
# links go to it (no other modules import it)
self.module_graph.removeReference(
hook_api.node, deleted_module_name)
def _process_hidden_imports(self):
"""
Add all imports listed in this hook script's `hiddenimports` attribute
to the module graph as if directly imported by this hooked module.
These imports are typically _not_ implicitly detectable by PyInstaller
and hence must be explicitly defined by hook scripts.
"""
# For each hidden import required by the module being hooked...
for import_module_name in self.hiddenimports:
try:
# Graph node for this module. Do not implicitly create namespace
# packages for non-existent packages.
caller = self.module_graph.findNode(
self.module_name, create_nspkg=False)
# Manually import this hidden import from this module.
self.module_graph.import_hook(import_module_name, caller)
# If this hidden import is unimportable, print a non-fatal warning.
# Hidden imports often become desynchronized from upstream packages
# and hence are only "soft" recommendations.
except ImportError:
logger.warning('Hidden import "%s" not found!', import_module_name)
#FIXME: This is pretty... intense. Attempting to cleanly "undo" prior module
#graph operations is a recipe for subtle edge cases and difficult-to-debug
#issues. It would be both safer and simpler to prevent these imports from
#being added to the graph in the first place. To do so:
#
#* Remove the _process_excluded_imports() method below.
#* Remove the PostGraphAPI.del_imports() method, which cannot reasonably be
# supported by the following solution, appears to be currently broken, and
# (in any case) is not called anywhere in the PyInstaller codebase.
#* Override the ModuleGraph._safe_import_hook() superclass method with a new
# PyiModuleGraph._safe_import_hook() subclass method resembling:
#
# def _safe_import_hook(
# self, target_module_name, source_module, fromlist,
# level=DEFAULT_IMPORT_LEVEL, attr=None):
#
# if source_module.identifier in self._module_hook_cache:
# for module_hook in self._module_hook_cache[
# source_module.identifier]:
# if target_module_name in module_hook.excludedimports:
# return []
#
# return super(PyiModuleGraph, self)._safe_import_hook(
# target_module_name, source_module, fromlist,
# level=level, attr=attr)
def _process_excluded_imports(self):
"""
'excludedimports' is a list of Python module names that PyInstaller
should not detect as dependency of this module name.
So remove all import edges from the current module (and its
submodules) to the given `excludedimports` (and their submodules).
"""
def find_all_package_nodes(name):
mods = [name]
name += '.'
for subnode in self.module_graph.nodes():
if subnode.identifier.startswith(name):
mods.append(subnode.identifier)
return mods
# If this hook excludes no imports, noop.
if not self.excludedimports:
return
# Collect all submodules of this module.
hooked_mods = find_all_package_nodes(self.module_name)
# Collect all dependencies and their submodules
# TODO: Optimize this by using a pattern and walking the graph
# only once.
for item in set(self.excludedimports):
excluded_node = self.module_graph.findNode(item, create_nspkg=False)
if excluded_node is None:
logger.info("Import to be excluded not found: %r", item)
continue
logger.info("Excluding import %r", item)
imports_to_remove = set(find_all_package_nodes(item))
# Remove references between module nodes, as though they would
# not be imported from 'name'.
# Note: Doing this in a nested loop is less efficient than
# collecting all imports to remove first, but the log messages
# are easier to understand since they relate to the "Excluding ..."
# message above.
for src in hooked_mods:
# modules that this `src` imports
references = set(
node.identifier
for node in self.module_graph.getReferences(src))
# Remove all of these imports which are also in
# "imports_to_remove".
for dest in imports_to_remove & references:
self.module_graph.removeReference(src, dest)
logger.info(
" Removing import of %s from module %s", dest, src)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#FIXME: This class has been obsoleted by "ModuleHookCache" and will be removed.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
class HooksCache(dict):
"""
Dictionary mapping from the fully-qualified names of each module hooked by
at least one hook script to lists of the absolute paths of these scripts.
This `dict` subclass caches the list of all hooks applicable to each module,
permitting Pythonic mapping, iteration, addition, and removal of such hooks.
Each dictionary key is a fully-qualified module name. Each dictionary value
is a list of the absolute paths of all hook scripts specific to that module,
including both official PyInstaller hooks and unofficial user-defined hooks.
See Also
----------
`_load_file_list()`
For details on hook priority.
"""
def __init__(self, hooks_dir):
"""
Initialize this dictionary.
Parameters
----------
hooks_dir : str
Absolute or relative path of the directory containing hooks with
which to populate this cache. By default, this is the absolute path
of the `PyInstaller/hooks` directory containing official hooks.
"""
super(dict, self).__init__()
self._load_file_list(hooks_dir)
def _load_file_list(self, hooks_dir):
"""
Cache all hooks in the passed directory.
**Order of caching is significant** with respect to hooks for the same
module, as the values of this dictionary are ordered lists. Hooks for
the same module will be run in the order in which they are cached.
Previously cached hooks are always preserved (rather than overridden).
Specifically, any hook in the passed directory having the same module
name as that of a previously cached hook will be appended to the list of
hooks for that module name. By default, official hooks are cached
_before_ user-defined hooks. For modules with both official and
user-defined hooks, this implies that the former take priority over and
will be run _before_ the latter.
Parameters
----------
hooks_dir : str
Absolute or relative path of the directory containing additional
hooks to be cached. For convenience, tilde and variable expansion
will be applied to this path (e.g., a leading `~` will be replaced
by the absolute path of the corresponding home directory).
"""
# Perform tilde and variable expansion and validate the result.
hooks_dir = expand_path(hooks_dir)
if not os.path.isdir(hooks_dir):
logger.error('Hook directory %r not found',
os.path.abspath(hooks_dir))
return
# For each hook in the passed directory...
hook_files = glob.glob(os.path.join(hooks_dir, 'hook-*.py'))
for hook_file in hook_files:
# Absolute path of this hook's script.
hook_file = os.path.abspath(hook_file)
# Fully-qualified name of this hook's corresponding module,
# constructed by removing the "hook-" prefix and ".py" suffix.
module_name = os.path.basename(hook_file)[5:-3]
# If this module already has cached hooks, append this hook's path
# to the existing list of such paths.
if module_name in self:
self[module_name].append(hook_file)
# Else, default to a new list containing only this hook's path.
else:
self[module_name] = [hook_file]
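# Example of the filename-to-module mapping above (illustrative paths):
#     /path/to/hooks/hook-django.db.py  -> cached under module name 'django.db'
#     /path/to/hooks/hook-PyQt5.py      -> cached under module name 'PyQt5'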
def add_custom_paths(self, hooks_dirs):
"""
Cache all hooks in the list of passed directories.
Parameters
----------
hooks_dirs : list
List of the absolute or relative paths of all directories containing
additional hooks to be cached.
"""
for hooks_dir in hooks_dirs:
self._load_file_list(hooks_dir)
def remove(self, module_names):
"""
Remove all key-value pairs whose key is a fully-qualified module name in
the passed list from this dictionary.
Parameters
----------
module_names : list
List of all fully-qualified module names to be removed.
"""
for module_name in set(module_names): # Eliminate duplicate entries.
if module_name in self:
del self[module_name]
class AdditionalFilesCache(object):
"""
Cache for storing what binaries and datas were pushed by what modules
when import hooks were processed.
"""
def __init__(self):
self._binaries = {}
self._datas = {}
def add(self, modname, binaries, datas):
self._binaries[modname] = binaries or []
self._datas[modname] = datas or []
def __contains__(self, name):
return name in self._binaries or name in self._datas
def binaries(self, modname):
"""
Return list of binaries for given module name.
"""
return self._binaries[modname]
def datas(self, modname):
"""
Return list of datas for given module name.
"""
return self._datas[modname]
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#FIXME: This class has been obsoleted by "ModuleHook" and will be removed.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
class ImportHook(object):
"""
Class encapsulating processing of hook attributes like hiddenimports, etc.
"""
def __init__(self, modname, hook_filename):
"""
:param hook_filename: File name where to load hook from.
"""
logger.info('Processing hook %s' % os.path.basename(hook_filename))
self._name = modname
self._filename = hook_filename
# _module represents the code of 'hook-modname.py'
# Load hook from file, then parse and interpret its content.
hook_modname = 'PyInstaller_hooks_' + modname.replace('.', '_')
self._module = importlib_load_source(hook_modname, self._filename)
# Public import hook attributes for further processing.
self.binaries = set()
self.datas = set()
# Internal methods for processing.
def _process_hook_function(self, mod_graph):
"""
Call the hook function hook(mod).
Function hook(mod) has to be called first because this function
could update other attributes - datas, hiddenimports, etc.
"""
# Process a `hook(hook_api)` function.
hook_api = PostGraphAPI(self._name, mod_graph)
self._module.hook(hook_api)
self.datas.update(set(hook_api._added_datas))
self.binaries.update(set(hook_api._added_binaries))
for item in hook_api._added_imports:
self._process_one_hiddenimport(item, mod_graph)
for item in hook_api._deleted_imports:
# Remove the graph link between the hooked module and item.
# This removes the 'item' node from the graph if no other
# links go to it (no other modules import it)
mod_graph.removeReference(hook_api.node, item)
def _process_hiddenimports(self, mod_graph):
"""
'hiddenimports' is a list of Python module names that PyInstaller
is not able to detect.
"""
# push hidden imports into the graph, as if imported from self._name
for item in self._module.hiddenimports:
self._process_one_hiddenimport(item, mod_graph)
def _process_one_hiddenimport(self, item, mod_graph):
try:
# Do not try to first find out if a module by that name already exists.
# Rely on modulegraph to handle that properly.
# Do not automatically create namespace packages if they do not exist.
caller = mod_graph.findNode(self._name, create_nspkg=False)
mod_graph.import_hook(item, caller=caller)
except ImportError:
# Print warning if a module from hiddenimport could not be found.
# modulegraph raises ImportError when a module is not found.
# An import hook with a non-existent hiddenimport is probably a stale hook
# that was not updated for a long time.
logger.warning("Hidden import '%s' not found (probably old hook)",
item)
def _process_excludedimports(self, mod_graph):
"""
'excludedimports' is a list of Python module names that PyInstaller
should not detect as dependency of this module name.
So remove all import edges from the current module (and its
submodules) to the given `excludedimports` (and their submodules).
"""
def find_all_package_nodes(name):
mods = [name]
name += '.'
for subnode in mod_graph.nodes():
if subnode.identifier.startswith(name):
mods.append(subnode.identifier)
return mods
# Collect all submodules of this module.
hooked_mods = find_all_package_nodes(self._name)
# Collect all dependencies and their submodules
# TODO: Optimize this by using a pattern and walking the graph
# only once.
for item in set(self._module.excludedimports):
excluded_node = mod_graph.findNode(item, create_nspkg=False)
if excluded_node is None:
logger.info("Import to be excluded not found: %r", item)
continue
logger.info("Excluding import %r", item)
imports_to_remove = set(find_all_package_nodes(item))
# Remove references between module nodes, as though they would
# not be imported from 'name'.
# Note: Doing this in a nested loop is less efficient than
# collecting all imports to remove first, but the log messages
# are easier to understand since they relate to the "Excluding ..."
# message above.
for src in hooked_mods:
# modules that this `src` imports
references = set(n.identifier for n in mod_graph.getReferences(src))
# Remove all of these imports which are also in `imports_to_remove`
for dest in imports_to_remove & references:
mod_graph.removeReference(src, dest)
logger.warning(" From %s removing import %s", src, dest)
def _process_datas(self, mod_graph):
"""
'datas' is a list of globs of files or
directories to bundle as datafiles. For each
glob, a destination directory is specified.
"""
# Find all files and interpret glob statements.
self.datas.update(set(format_binaries_and_datas(self._module.datas)))
def _process_binaries(self, mod_graph):
"""
'binaries' is a list of files to bundle as binaries.
Binaries are special in that PyInstaller will check whether they
depend on other DLLs (dynamic libraries).
"""
self.binaries.update(set(format_binaries_and_datas(self._module.binaries)))
# Public methods
def update_dependencies(self, mod_graph):
"""
Update module dependency graph with import hook attributes (hiddenimports, etc.)
:param mod_graph: PyiModuleGraph object to be updated.
"""
if hasattr(self._module, 'hook'):
self._process_hook_function(mod_graph)
if hasattr(self._module, 'hiddenimports'):
self._process_hiddenimports(mod_graph)
if hasattr(self._module, 'excludedimports'):
self._process_excludedimports(mod_graph)
if hasattr(self._module, 'datas'):
self._process_datas(mod_graph)
if hasattr(self._module, 'binaries'):
self._process_binaries(mod_graph)
``` |
{
"source": "johnashu/dataplicity-lomond",
"score": 3
} |
#### File: lomond/examples/chat.py
```python
from __future__ import print_function
from __future__ import unicode_literals
import logging
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
from threading import Thread
from six.moves import input
from lomond import WebSocket
def get_input(text=''):
i = input(text)
if isinstance(i, bytes):
return i.decode('utf-8', errors='replace')
return i
name = get_input("your name: ")
ws = WebSocket('wss://ws.willmcgugan.com/chat/')
def run():
for event in ws:
print(event)
if event.name == 'connected':
ws.send_text("<{} connected>".format(name))
elif event.name == 'text':
print(event.text)
Thread(target=run).start()
while True:
try:
ws.send_text("[{}] {}".format(name, get_input()))
except KeyboardInterrupt:
ws.close()
break
```
#### File: dataplicity-lomond/lomond/extension.py
```python
from __future__ import unicode_literals
def parse_extension(extension):
"""Parse a single extension in to an extension token and a dict
of options.
"""
# Naive http header parser, works for current extensions
tokens = [token.strip() for token in extension.split(';')]
extension_token = tokens[0]
options = {}
for token in tokens[1:]:
key, sep, value = token.partition('=')
value = value.strip().strip('"')
options[key.strip()] = value
return extension_token, options
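# Example (illustrative): parsing a permessage-deflate offer
#     parse_extension('permessage-deflate; client_max_window_bits=15')
#     -> ('permessage-deflate', {'client_max_window_bits': '15'})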
```
#### File: dataplicity-lomond/lomond/response.py
```python
from __future__ import unicode_literals
from collections import defaultdict
import six
LWS = (' ', '\t', '\n', '\r')
class Response(object):
"""A HTTP response.
:param bytes header_data: Raw response.
"""
def __init__(self, header_data):
self.raw = header_data
lines = iter(header_data.split(b'\r\n'))
status_line = next(lines, b'')
tokens = iter(status_line.split(None, 2))
self.http_ver = next(tokens, b'').decode('ascii', 'replace')
try:
self.status_code = int(next(tokens, b''))
except ValueError:
self.status_code = None
self.status = next(tokens, b'').decode('ascii', 'replace')
headers = defaultdict(list)
header = None
for _line in lines:
line = _line.decode('ascii', 'replace')
if not line.strip():
continue
if line.startswith(LWS):
if header:
headers[header].append(' ')
headers[header].append(line.lstrip())
else:
header, _colon, value = line.partition(':')
header = header.lower().strip()
if header in headers:
headers[header].append(',')
headers[header].append(value)
self.headers = {
header: ''.join(value).strip()
for header, value in headers.items()
}
def __repr__(self):
return "<response {} {} {}>".format(
self.http_ver,
self.status_code,
self.status
)
def get(self, name, default=None):
"""Get a header.
:param str name: Name of the header to retrieve.
:param default: Default value if header is not present.
:rtype: str
"""
assert isinstance(name, six.text_type), "must be unicode"
return self.headers.get(name.lower(), default)
def get_list(self, name):
"""Extract a list from a header.
:param str name: Name of the header to retrieve.
:rtype: str
:returns: A list of strings in the header.
"""
value = self.get(name, '')
if not value.strip():
return []
parts = [part.strip() for part in value.split(',')]
return parts
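# Illustrative use with a minimal handshake response (hypothetical data):
#     response = Response(
#         b'HTTP/1.1 101 Switching Protocols\r\n'
#         b'Upgrade: websocket\r\n'
#         b'Sec-WebSocket-Extensions: permessage-deflate\r\n\r\n'
#     )
#     response.status_code                           # 101
#     response.get('Upgrade')                        # 'websocket'
#     response.get_list('Sec-WebSocket-Extensions')  # ['permessage-deflate']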
```
#### File: dataplicity-lomond/lomond/selectors.py
```python
from __future__ import unicode_literals
import select
class SelectorBase(object):
"""Abstraction for a kernel object that waits for socket data."""
def __init__(self, socket):
"""Construct with an open socket."""
self._socket = socket
def wait(self, max_bytes, timeout=0.0):
"""Block until socket is readable or a timeout occurs. Return
a tuple of <readable>, <max bytes>.
"""
if hasattr(self._socket, 'pending') and self._socket.pending():
return True, self._socket.pending()
readable = self.wait_readable(timeout=timeout)
return readable, max_bytes
def wait_readable(self, timeout=0.0):
"""Block until socket is readable or a timeout occurs, return
`True` if the socket is readable, or `False` if the timeout
occurred.
"""
def close(self):
"""Close the selector (not the socket)."""
class SelectSelector(SelectorBase): # pragma: no cover
"""Select Selector for use on Windows."""
def __repr__(self):
return '<SelectSelector>'
def wait_readable(self, timeout=0.0):
rlist, _wlist, _xlist = (
select.select([self._socket.fileno()], [], [], timeout)
)
return bool(rlist)
class KQueueSelector(SelectorBase): # pragma: no cover
"""KQueue selector for MacOS & BSD"""
def __init__(self, socket):
super(KQueueSelector, self).__init__(socket)
self._queue = select.kqueue()
self._events = [
select.kevent(
self._socket.fileno(),
filter=select.KQ_FILTER_READ
)
]
def __repr__(self):
return '<KQueueSelector>'
def wait_readable(self, timeout=0.0):
events = self._queue.control(
self._events, 1, timeout
)
return bool(events)
def close(self):
self._queue.close()
class PollSelector(SelectorBase):
"""Poll selector for *nix"""
def __init__(self, socket):
super(PollSelector, self).__init__(socket)
self._poll = select.poll()
events = (
select.POLLIN |
select.POLLPRI |
select.POLLERR |
select.POLLHUP
)
self._poll.register(socket.fileno(), events)
def __repr__(self):
return '<PollSelector>'
def wait_readable(self, timeout=0.0):
events = self._poll.poll(timeout * 1000.0)
return bool(events)
# Pick the appropriate selector for the given platform
if hasattr(select, 'kqueue'):
PlatformSelector = KQueueSelector
elif hasattr(select, 'poll'):
PlatformSelector = PollSelector
else:
PlatformSelector = SelectSelector
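# Illustrative use (assumes `sock` is an open, connected socket object):
#     selector = PlatformSelector(sock)
#     readable, max_bytes = selector.wait(4096, timeout=1.0)
#     if readable:
#         data = sock.recv(max_bytes)
#     selector.close()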
```
#### File: dataplicity-lomond/tests/test_compression.py
```python
from __future__ import unicode_literals
import pytest
from lomond.compression import CompressionParameterError, Deflate
from lomond.frame import Frame
def test_deflate_repr():
deflate = Deflate(15, 8, False, False)
assert repr(deflate) == 'Deflate(decompress_wbits=15, compress_wbits=8, reset_decompress=False, reset_compress=False)'
def test_get_wbits():
with pytest.raises(CompressionParameterError):
Deflate.get_wbits({'foo': 'nan'}, 'foo')
with pytest.raises(CompressionParameterError):
Deflate.get_wbits({'foo': '100'}, 'foo')
with pytest.raises(CompressionParameterError):
Deflate.get_wbits({'foo': '7'}, 'foo')
with pytest.raises(CompressionParameterError):
Deflate.get_wbits({'foo': '16'}, 'foo')
assert Deflate.get_wbits({'foo': '8'}, 'foo') == 8
def test_compression():
data = [
b'Hello ',
b'World!',
b'foo',
b'bar',
b'baz'
]
deflate = Deflate(15, 8, True, True)
for raw in data:
compressed_data = deflate.compress(raw)
frames = [Frame(1, compressed_data, fin=1)]
assert deflate.decompress(frames) == raw
```
#### File: dataplicity-lomond/tests/test_parser.py
```python
import types
import pytest
from lomond.parser import ParseError, Parser
def test_parser_reset_is_a_generator():
parser = Parser()
assert isinstance(parser.parse(), types.GeneratorType)
def test_max_bytes():
class TestParser(Parser):
def parse(self):
data = yield self.read_until(b'\r\n\r\n', max_bytes=100)
yield data
test_data = [b'foo'] * 100
test_parser = TestParser()
with pytest.raises(ParseError):
for data in test_data:
for _data in test_parser.feed(data):
print(_data)
def test_eof():
class TestParser(Parser):
def parse(self):
data = yield self.read_until(b'\r\n\r\n', max_bytes=100)
yield data
test_parser = TestParser()
test_data = [b'foo', b'']
assert not test_parser.is_eof
with pytest.raises(ParseError):
for data in test_data:
for _token in test_parser.feed(data):
print(_token)
assert test_parser.is_eof
test_parser.feed(b'more')
with pytest.raises(ParseError):
for data in test_parser.feed('foo'):
print(data)
```
#### File: dataplicity-lomond/tests/test_persist.py
```python
from lomond.persist import persist
from lomond import events
class FakeEvent(object):
def wait(self, wait_for=None):
return True
class FakeWebSocket(object):
def connect(self, poll=None, ping_rate=None, ping_timeout=None):
yield events.Connecting('ws://localhost:1234/')
yield events.ConnectFail('test')
def persist_testing_helper(mocker, validate_function, websocket_connect=None):
# ok. First, we start off by calling .spy on our fake function, so that
# we could verify that it was called
mocker.spy(FakeEvent, 'wait')
# now, we patch the import path to threading.Event and replace it with
# our own FakeEvent. Therefore, whenever persist.py will try to import
# threading.Event, it will actually import FakeEvent
mocker.patch('lomond.persist.threading.Event', FakeEvent)
# great, now a simple websocket imposter
websocket = FakeWebSocket()
if websocket_connect:
websocket.connect = websocket_connect
yielded_events = list(persist(websocket))
# the sole fact that we ended up in this line means that the event
# method was called, but we can nevertheless check it
assert FakeEvent.wait.call_count == 1
# and now we can validate the events.
validate_function(yielded_events)
def test_persist_with_nonexisting_server(mocker):
def validate_events(_events):
# the server doesn't exist, so we expect 3 entries:
assert len(_events) == 3
# 0/ Connecting
assert isinstance(_events[0], events.Connecting)
# 1/ a ConnectFail - because the server doesn't exist ..
assert isinstance(_events[1], events.ConnectFail)
# 2/ and a BackOff which means that we are ready to start a new
# iteration.
assert isinstance(_events[2], events.BackOff)
persist_testing_helper(mocker, validate_events)
def test_emulate_ready_event(mocker):
def successful_connect(poll=None, ping_rate=None, ping_timeout=None):
yield events.Connecting('ws://localhost:1234')
yield events.Ready(None, None, None)
def validate_events(_events):
assert len(_events) == 3
assert isinstance(_events[0], events.Connecting)
assert isinstance(_events[1], events.Ready)
assert isinstance(_events[2], events.BackOff)
persist_testing_helper(mocker, validate_events, successful_connect)
```
#### File: dataplicity-lomond/tests/test_session.py
```python
import socket
import pytest
from freezegun import freeze_time
from lomond import errors, events
from lomond import constants
from lomond.session import WebsocketSession, _ForceDisconnect, _SocketFail
from lomond.websocket import WebSocket
@pytest.fixture()
def session(monkeypatch):
monkeypatch.setattr(
'os.urandom', b'\xaa'.__mul__
)
# ^^ the above line will be significant in the test where we want
# to validate the headers being sent to the socket. Namely, the
# websocket key which is based on os.urandom. Obviously, we can't
# have an actual random call here because the test wouldn't be
# deterministic, hence this sequence of bytes.
return WebsocketSession(WebSocket('wss://example.com/'))
class FakeSocket(object):
def __init__(self, *args, **kwargs):
self.buffer = b''
self._sendall = kwargs.get('sendall', None)
def setsockopt(self, *args):
pass
def settimeout(self, *args):
pass
def connect(self, *args):
raise socket.error('fail')
def fileno(self):
return 999
def recv(self, *args, **kwargs):
raise socket.error('fail')
def recv_into(self, *args, **kwargs):
raise socket.error('fail')
def shutdown(self, *args, **kwargs):
pass
def close(self):
return
def sendall(self, data):
self.buffer += data
if callable(self._sendall):
self._sendall(data)
def pending(self):
return 0
class FakeWebSocket(object):
sent_close_time = -100
def send_pong(self, data):
raise errors.WebSocketClosed('sorry')
class FakeSelector(object):
def __init__(self, socket):
pass
def wait(self, max_bytes, timeout):
return True, max_bytes
def wait_readable(self, timeout):
return True
def close(self):
pass
class FakeBrokenSelector(object):
def __init__(self, socket):
pass
def wait_readable(self, timeout):
raise IOError('broke')
def close(self):
pass
def test_write_without_sock_fails(session):
with pytest.raises(errors.WebSocketUnavailable) as e:
session.write(b'\x01')
assert str(e.value) == 'not connected'
def test_write_with_closed_websocket_fails(session):
session.websocket.state.closed = True
session._sock = FakeSocket()
with pytest.raises(errors.WebSocketClosed) as e:
session.write(b'\x01')
assert str(e.value) == 'data not sent'
def test_write_with_closing_websocket_fails(session):
session.websocket.state.closing = True
session._sock = FakeSocket()
with pytest.raises(errors.WebSocketClosing) as e:
session.write(b'\x01')
assert str(e.value) == 'data not sent'
def test_socket_error_propagates(session):
def sendall(data):
raise socket.error('just testing errors')
session._sock = FakeSocket()
session._sock.sendall = sendall
with pytest.raises(errors.TransportFail) as e:
session.write(b'\x01')
assert str(e.value) == 'socket fail; just testing errors'
def test_non_network_error_propagates(session):
def sendall(data):
raise ValueError('some random exception')
session._sock = FakeSocket()
session._sock.sendall = sendall
with pytest.raises(errors.TransportFail) as e:
session.write(b'\x01')
assert str(e.value) == 'socket error; some random exception'
def test_repr(session):
assert repr(session) == "<ws-session 'wss://example.com/'>"
def test_close_socket(session, mocker):
session._sock = FakeSocket()
mocker.spy(FakeSocket, 'shutdown')
mocker.spy(FakeSocket, 'close')
session._close_socket()
assert FakeSocket.shutdown.call_count == 1
assert FakeSocket.close.call_count == 1
def test_send_request(session):
session._sock = FakeSocket()
session._send_request()
assert session._sock.buffer == (
b'GET / HTTP/1.1\r\n'
b'Host: example.com:443\r\n'
b'Upgrade: websocket\r\n'
b'Connection: Upgrade\r\n'
b'Sec-WebSocket-Key: qqqqqqqqqqqqqqqqqqqqqg==\r\n'
b'Sec-WebSocket-Version: 13\r\n'
b'User-Agent: ' + constants.USER_AGENT.encode('utf-8') + b'\r\n'
b'\r\n'
)
def test_run_with_socket_open_error(session):
def connect_which_raises_error():
raise ValueError('fail')
session._connect = connect_which_raises_error
_events = list(session.run())
assert len(_events) == 2
assert isinstance(_events[0], events.Connecting)
assert _events[0].url == 'wss://example.com/'
assert isinstance(_events[1], events.ConnectFail)
assert str(_events[1]) == "ConnectFail(reason='fail')"
def test_run_with_send_request_raising_transport_error(session):
# _send_request can raise TransportFail inside write() call
# in order to do that, the socket has to be opened and raise
# either socket.error or Exception during sendall() call.
# let's do just that. First of all, the method in question:
def sendall_which_raises_error(data):
raise socket.error('error during sendall')
# here's where the plot thickens. socket connection is established
# during self._connect, so we have to substitute this method so that
# it returns our FakeSocket object.
def return_fake_socket():
return FakeSocket(sendall=sendall_which_raises_error), None
session._connect = return_fake_socket
_events = list(session.run())
assert isinstance(_events[-1], events.ConnectFail)
assert str(_events[-1]) == (
"ConnectFail(reason='request failed; socket fail; error during sendall')"
)
def test_that_on_ping_responds_with_pong(session, mocker):
# we don't actually care that much for the whole stack underneath,
# we only want to check whether a certain method was called..
send_pong = mocker.patch(
'lomond.websocket.WebSocket.send_pong'
)
session._send_pong(events.Ping(b'\x00'))
assert send_pong.called_with(b'\x00')
def test_error_on_close_socket(caplog, session):
def close_which_raises_error():
raise ValueError('a problem occurred')
session._sock = FakeSocket()
session._sock.close = close_which_raises_error
session._close_socket()
import logging
assert caplog.record_tuples[-1] == (
'lomond',
logging.WARNING,
'error closing socket; a problem occurred'
)
def test_check_poll(session):
session._on_ready()
assert session._check_poll(60, 61)
assert not session._check_poll(60, 59)
def test_check_auto_ping(session, mocker):
session._on_ready()
mocker.patch.object(session.websocket, 'send_ping')
assert session.websocket.send_ping.call_count == 0
session._check_auto_ping(10, 12)
assert session.websocket.send_ping.call_count == 1
session._check_auto_ping(10, 15)
assert session.websocket.send_ping.call_count == 1
def test_check_ping_timeout(session, mocker):
session._on_ready()
assert not session._check_ping_timeout(10, 5)
assert session._check_ping_timeout(10, 11)
def test_recv_no_sock(session):
session._sock = None
assert session._recv(1) == b''
def test_on_pong(session):
session._on_ready()
session._on_pong(events.Pong(b'foo'))
assert session.session_time - session._last_pong < 0.01
def test_context_manager():
ws = WebSocket('ws://example.com/')
session = WebsocketSession(ws)
session._selector_cls = FakeSelector
session._on_ready()
with ws:
for event in ws:
pass
def test_connect_sock_fail_socket(monkeypatch, session):
def fail_socket(*args):
raise socket.error('foo')
monkeypatch.setattr('socket.socket', fail_socket)
with pytest.raises(_SocketFail):
session._connect_sock('google.com', 80)
def test_connect_sock_fail_connect(monkeypatch, session):
monkeypatch.setattr('socket.socket', lambda *args: FakeSocket())
with pytest.raises(_SocketFail):
session._connect_sock('google.com', 80)
def test_sock_recv(session):
session._sock = FakeSocket()
with pytest.raises(_SocketFail):
session._recv(128)
def test_send_pong(session):
session.websocket = FakeWebSocket()
session._send_pong(events.Ping(b'foo'))
def test_check_close_timeout(session):
session._on_ready()
session.websocket = FakeWebSocket()
session.websocket.sent_close_time = 10
session._check_close_timeout(10, 19)
with pytest.raises(_ForceDisconnect):
session._check_close_timeout(10, 21)
``` |
{
"source": "johnashu/docker_command",
"score": 2
} |
#### File: johnashu/docker_command/containers.py
```python
from client_config import client
cons = client.containers
def container_manage(container, stop=False):
# GET
container = cons.get(container)
# ATTRS
attrs = container.attrs["Config"]["Image"]
# Logs
logs = container.logs()
# stop
if stop:
container.stop()
def start_log_stream(container):
for line in container.logs(stream=True):
print(line)
def stop_all_containers(con_lst):
for con in con_lst:
con.stop()
def remove_containers(obj, lst, force=False):
for i in lst:
r = obj.remove(i.id, force=force)
print(r)
if __name__ == "__main__":
# cons.run("ubuntu", "echo hello world")
# run containers in the background:
# cons.run("bfirsh/reticulate-splines", detach=True)
# LIst
con_lst = cons.list()
print(con_lst)
remove_containers(cons, con_lst, force=True)
# start_log_stream(con_lst[0])
# stop_all_containers(con_lst)
```
#### File: johnashu/docker_command/networks.py
```python
from client_config import client
net = client.networks
def display_networks(obj, lst):
for i in lst:
r = obj.get(i.id)
name = r.name
print(name)
if __name__ == "__main__":
# volumes
lst = net.list()
print(lst)
net.prune()
display_networks(net, lst)
```
#### File: johnashu/docker_command/nodes.py
```python
from client_config import client
node = client.nodes
def display_nodes(obj, lst):
for i in lst:
r = obj.get(i.id)
name = r.name
print(name)
if __name__ == "__main__":
# volumes
lst = node.list()
print(lst)
node.prune()
display_nodes(node, lst)
```
#### File: johnashu/docker_command/swarm.py
```python
from client_config import client
ser = client.services
def display_services(obj, lst):
for i in lst:
r = obj.get(i.id)
name = r.name
print(name)
if __name__ == "__main__":
# volumes
lst = ser.list()
print(lst)
ser.prune()
display_services(ser, lst)
```
#### File: johnashu/docker_command/volumes.py
```python
from client_config import client
vol = client.volumes
def remove_volumes(obj, lst, force=False):
for i in lst:
r = obj.get(i.id)
s = r.remove(force=force)
print(s)
if __name__ == "__main__":
# volumes
lst = vol.list()
print(lst)
vol.prune()
remove_volumes(vol, lst, force=True)
``` |
{
"source": "johnashu/Hex-Dec-Binary-Ascii-Convertor",
"score": 3
} |
#### File: johnashu/Hex-Dec-Binary-Ascii-Convertor/binary_to_ascii.py
```python
import re
bin_display = """
# 0 : 0 : 0 : 0 : 0 : 0 : 0 : 0 = 0 (OFF)
# 128 64 32 16 8 4 2 1 = 1 (ON)
"""
input_data = '0100100001100101011011 00011011000110111100100000010011010111100100100000010011100110000101101101011001010010000001001001011100110010000001001010 01101111011010000110111000101110001000000100100100100000010011000110111101110110011001010010000001010000011100100110111101100111011100100110000101101101 011011010110100101101110011001110010000001101001011011100010000001010000010110010101010001001000010011110100111000100001' # "010001000100000101000100" # Reads DAD
binary_string = input_data.replace(' ', '')
h = {'0': '0', '1': '1', '2': '2', '3': '3', '4': '4', '5': '5', '6': '6', '7': '7',
'8': '8', '9': '9', 'A': '10', 'B': '11', 'C': '12', 'D': '13', 'E': '14', 'F': '15'}
b = {'0000': '0', '0001': '1', '0010': '2', '0011': '3', '0100': '4', '0101': '5', '0110': '6', '0111': '7',
'1000': '8', '1001': '9', '1010': 'A', '1011': 'B', '1100': 'C', '1101': 'D', '1110': 'E', '1111': 'F'}
match = re.match('^[_0-1]$', binary_string)
binary_lst = [128, 64, 32, 16, 8, 4, 2, 1]
def n():
print("\n")
def nl():
banner = '#' * 50
print(banner)
nl()
def bin2hex(binary):
""" Converts Binary to HExaDEcimal"""
binary_split1 = (binary[0:4])
binary_split2 = (binary[4:8])
hex1 = b[binary_split1]
hex2 = b[binary_split2]
return str(hex1) + str(hex2)
def split_string(string, length):
""" function to split a string into differnt lengths or chunks"""
return [string[max(i - length, 0):i] for i in range(len(string), 0, -length)][::-1]
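# Quick sanity checks (illustrative):
#     split_string('0100100001100101', 8)  -> ['01001000', '01100101']
#     bin2hex('01001000')                  -> '48'  (ASCII 'H')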
def hex2integer(hex_string_input):
""" convert from hex string to integers"""
hex1 = h[hex_string_input[0]]
hex2 = h[hex_string_input[1]]
return [str(hex1), str(hex2)]
def hex_int_to_dec(hex1, hex2):
""" calculates a 2 digit hexadecimal to normal decimal """
current_digit = int(hex1)
current_digit1 = int(hex2)
power = 1
power1 = 0
hex_iter = []
if hex1:
mul_dig = current_digit * (16 ** power)
hex_iter.append(mul_dig)
if hex2:
mul_dig = current_digit1 * (16 ** power1)
hex_iter.append(mul_dig)
return sum(hex_iter)
def decimal2ascii(decimal_num):
""" converts a decimal number to an ASCII character """
return chr(decimal_num)
def run_the_show():
rtn_bin_split = split_string(binary_string, 8)
hex_string = [(bin2hex(item)) for item in rtn_bin_split]
hex_integers = [hex2integer(item) for item in hex_string]
hex_full_dec = [hex_int_to_dec(item[0], item[1]) for item in hex_integers]
ascii_msg = [decimal2ascii(item) for item in hex_full_dec]
print(''.join(map(str, ascii_msg)))
run_the_show()
nl()
``` |
{
"source": "johnashu/Object-Oriented-Programming",
"score": 3
} |
#### File: johnashu/Object-Oriented-Programming/server.py
```python
import socket
import select
class Server:
def __init__(self):
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.bind(('localhost', 2000))
self.socket_list = [self.server]
self.addresses = ['localhost']
self.commands = [""]
self.running = True
self.server.listen(10)
def listen(self):
while self.running:
read, write, error = select.select(self.socket_list, [], self.socket_list, 0)
for sock in read:
if sock == self.server and self.running:
try:
conn, address = self.server.accept()
conn.settimeout(10)
self.socket_list.append(conn)
self.addresses.append(address[0])
self.commands.append("")
except:
self.shutdown()
break
elif self.running:
try:
packet = sock.recv(60)
print('packet received')
if not packet:
self.close_conn(sock)
index = self.socket_list.index(sock)
# recv() returns bytes; decode before appending to the str command buffer
self.commands[index] += packet.decode('utf-8', errors='replace')
if '\n' in self.commands[index]:
print('handle')
except:
self.close_conn(sock)
def close_conn(self, conn):
print('close client conn')
def shutdown(self):
print('shutdown server')
if __name__ == "__main__":
Server().listen()
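# Quick manual test (illustrative, run from another shell):
#     python -c "import socket; s = socket.create_connection(('localhost', 2000)); s.sendall(b'hi\n')"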
``` |
{
"source": "johnashu/password_generator",
"score": 4
} |
#### File: johnashu/password_generator/password_generator.py
```python
import random
from includes.config import *
def remove_duplicates(elem: str, allowed: str) -> str:
result = ""
for x in allowed:
if x != elem:
result += x
return result
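# Note (illustrative): despite its name, this strips every occurrence of
# `elem` from `allowed`, e.g. remove_duplicates('a', 'banana') -> 'bnn'.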
def gen_unique(length: int) -> str:
allowed = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ012345678900123456789001234567890!@#$%^&*()?|}{_+/\.=-!@#$%^&*()?|}{_+/\.=-!@#$%^&*()?|}{_+/\.=-"
pw = ""
for _ in range(length):
item = random.choice(allowed)
pw += item
allowed = remove_duplicates(item, allowed)
return pw
def passwd(length: int, user: str) -> str:
"""
Generates a random password of the requested length for the given user.
It creates several candidate passwords of that length from the allowed
character set and then chooses one of them at random.
"""
unique = gen_unique(length)
unique_list = [gen_unique(length) for i in unique]
password = random.choice(unique_list)
assert length == len(password)
return f"\nUsername: {user}\nPassword: " + "".join(password) + "\n"
if __name__ == "__main__":
users = ("<EMAIL>",)
for u in users:
log.info(passwd(25, u))
``` |
{
"source": "johnashu/pyhmy",
"score": 3
} |
#### File: pyhmy/pyhmy/blockchain.py
```python
from .rpc.request import (
rpc_request
)
from .exceptions import (
InvalidRPCReplyError
)
_default_endpoint = 'http://localhost:9500'
_default_timeout = 30
#############################
# Node / network level RPCs #
#############################
def get_bad_blocks(endpoint=_default_endpoint, timeout=_default_timeout) -> list:
"""
[WIP] Get list of bad blocks in memory of specific node
Known issues with RPC not returning correctly
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
list of bad blocks in node memory
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#0ba3c7b6-6aa9-46b8-9c84-f8782e935951
"""
method = 'hmyv2_getCurrentBadBlocks'
try:
return rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def chain_id(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Chain id of the chain
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int that represents the chain id
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://github.com/harmony-one/harmony/blob/343dbe89b3c105f8104ab877769070ba6fdd0133/rpc/blockchain.go#L44
"""
method = 'hmyv2_chainId'
try:
data = rpc_request(method, endpoint=endpoint, timeout=timeout)
return data['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_node_metadata(endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
"""
Get config for the node
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
dict with the following keys:
blskey: :obj:`list` of BLS keys on the node
version: :obj:`str` representing the Harmony binary version
network: :obj:`str` the Network name that the node is on (Mainnet or Testnet)
chain-config: :obj:`dict` with the following keys (more are added over time):
chain-id: :obj:`int` Chain ID of the network
cross-tx-epoch: :obj:`int` Epoch at which cross shard transactions were enabled
cross-link-epoch: :obj:`int` Epoch at which cross links were enabled
staking-epoch: :obj:`int` Epoch at which staking was enabled
prestaking-epoch: :obj:`int` Epoch at which staking features without election were allowed
quick-unlock-epoch: :obj:`int` Epoch at which undelegations unlocked in one epoch
eip155-epoch: :obj:`int` Epoch at with EIP155 was enabled
s3-epoch: :obj:`int` Epoch at which Mainnet V0 was launched
receipt-log-epoch: :obj:`int` Epoch at which receipt logs were enabled
eth-compatible-chain-id: :obj:`int` EVM network compatible chain ID
eth-compatible-epoch: :obj:`int` Epoch at which EVM compatibility was launched
eth-compatible-shard-0-chain-id: :obj:`int` EVM network compatible chain ID on shard 0
five-seconds-epoch: :obj:`int` Epoch at which five second finality was enabled and block rewards adjusted to 17.5 ONE/block
istanbul-epoch: :obj:`int` Epoch at which Ethereum's Istanbul upgrade was added to Harmony
no-early-unlock-epoch: :obj:`int` Epoch at which early unlock of tokens was disabled (https://github.com/harmony-one/harmony/pull/3605)
redelegation-epoch: :obj:`int` Epoch at which redelegation was enabled (staking)
sixty-percent-epoch: :obj:`int` Epoch when internal voting power reduced from 68% to 60%
two-seconds-epoch: :obj:`int` Epoch at which two second finality was enabled and block rewards adjusted to 7 ONE/block
is-leader: :obj:`bool` Whether the node is currently leader or not
shard-id: :obj:`int` Shard that the node is on
current-epoch: :obj:`int` Current epoch
blocks-per-epoch: :obj:`int` Number of blocks per epoch (only available on Shard 0)
role: :obj:`str` Node type(Validator or ExplorerNode)
dns-zone: :obj:`str`: Name of the DNS zone
is-archival: :obj:`bool` Whether the node is currently in state pruning mode or not
node-unix-start-time: :obj:`int` Start time of node un Unix time
p2p-connectivity: :obj:`dict` with the following keys:
connected: :obj:`int` Number of connected peers
not-connected: :obj:`int` Number of peers which are known but not connected
total-known-peers: :obj:`int` Number of peers which are known
peerid: :obj:`str` PeerID, the pubkey for communication
consensus: :obj:`dict` with following keys:
blocknum: :obj:`int` Current block number of the consensus
finality: :obj:`int` The finality time in milliseconds of previous consensus
mode: :obj:`str` Current consensus mode
phase: :obj:`str` Current consensus phase
viewChangeId: :obj:`int` Current view changing ID
viewId: :obj:`int` Current view ID
sync-peers: dictionary of connected sync peers for each shard
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#03c39b56-8dfc-48ce-bdad-f85776dd8aec
https://github.com/harmony-one/harmony/blob/v1.10.2/internal/params/config.go#L233 for chain-config dict
https://github.com/harmony-one/harmony/blob/9f320436ff30d9babd957bc5f2e15a1818c86584/node/api.go#L110 for consensus dict
"""
method = 'hmyv2_getNodeMetadata'
try:
metadata = rpc_request(method, endpoint=endpoint, timeout=timeout)
return metadata['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
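# Illustrative call (requires network access; the endpoint below is an
# assumption, not a guaranteed live URL):
#     get_node_metadata(endpoint='https://api.s0.b.hmny.io')['shard-id']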
def get_peer_info(endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
"""
Get peer info for the node
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
if has peers, dict with the following keys:
blocked-peers: :obj:`list` list of blocked peers by peer ID
connected-peers: :obj:`list` list of connected peers by topic
peers: :obj:`list` list of connected peer IDs
topic: :obj:`list` topic of the connection, for example:
'harmony/0.0.1/client/beacon'
'harmony/0.0.1/node/beacon'
peerid: :obj:`str` Peer ID of the node
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
See also
--------
get_node_metadata
"""
method = 'hmyv2_getPeerInfo'
try:
return rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def protocol_version(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get the current Harmony protocol version this node supports
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
The current Harmony protocol version this node supports
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#cab9fcc2-e3cd-4bc9-b62a-13e4e046e2fd
"""
method = 'hmyv2_protocolVersion'
try:
value = rpc_request(method, endpoint=endpoint, timeout=timeout)
return value['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_num_peers(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get number of peers connected to the node
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
Number of connected peers
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#09287e0b-5b61-4d18-a0f1-3afcfc3369c1
"""
method = 'net_peerCount'
try: # Number of peers represented as a hex string
return int(rpc_request(method, endpoint=endpoint, timeout=timeout)['result'], 16)
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_version(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get version of the EVM network (https://chainid.network/)
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
Version if the network
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#09287e0b-5b61-4d18-a0f1-3afcfc3369c1
"""
method = 'net_version'
try:
return int(rpc_request(method, endpoint=endpoint, timeout=timeout)['result'], 16) # this is hexadecimal
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def in_sync(endpoint=_default_endpoint, timeout=_default_timeout) -> bool:
"""
Whether the shard chain is in sync or syncing (not out of sync)
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
bool, True if in sync
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://github.com/harmony-one/harmony/blob/1a8494c069dc3f708fdf690456713a2411465199/rpc/blockchain.go#L690
"""
method = 'hmyv2_inSync'
try:
return bool(rpc_request(method, endpoint=endpoint, timeout=timeout)['result'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def beacon_in_sync(endpoint=_default_endpoint, timeout=_default_timeout) -> bool:
"""
Whether the beacon chain is in sync or syncing (not out of sync)
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
bool, True if in sync
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://github.com/harmony-one/harmony/blob/1a8494c069dc3f708fdf690456713a2411465199/rpc/blockchain.go#L695
"""
method = 'hmyv2_beaconInSync'
try:
return bool(rpc_request(method, endpoint=endpoint, timeout=timeout)['result'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_staking_epoch(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get epoch number when blockchain switches to EPoS election
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
Epoch at which blockchain switches to EPoS election
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
---------
https://github.com/harmony-one/harmony/blob/v1.10.2/internal/params/config.go#L233
See also
------
get_node_metadata
"""
method = 'hmyv2_getNodeMetadata'
try:
data = rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
return int(data['chain-config']['staking-epoch'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_prestaking_epoch(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get epoch number when blockchain switches to allow staking features without election
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
Epoch at which blockchain switches to allow staking features without election
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://github.com/harmony-one/harmony/blob/v1.10.2/internal/params/config.go#L233
See also
------
get_node_metadata
"""
method = 'hmyv2_getNodeMetadata'
try:
data = rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
return int(data['chain-config']['prestaking-epoch'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
########################
# Sharding information #
########################
def get_shard(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get shard ID of the node
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
Shard ID of node
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
See also
--------
get_node_metadata
"""
method = 'hmyv2_getNodeMetadata'
try:
return rpc_request(method, endpoint=endpoint, timeout=timeout)['result']['shard-id']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_sharding_structure(endpoint=_default_endpoint, timeout=_default_timeout) -> list:
"""
Get network sharding structure
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
list of dictionaries of shards; each shard has the following keys
shardID: :obj:`int` ID of the shard
current: :obj:`bool` True if the endpoint passed is the same shard as this one
http: :obj:`str` Link to the HTTP(s) API endpoint
wss: :obj:`str` Link to the Web socket endpoint
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#9669d49e-43c1-47d9-a3fd-e7786e5879df
"""
method = 'hmyv2_getShardingStructure'
try:
return rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
#############################
# Current status of network #
#############################
def get_leader_address(endpoint=_default_endpoint, timeout=_default_timeout) -> str:
"""
    Get the current leader's ONE address
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
str
One address of current leader
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#8b08d18c-017b-4b44-a3c3-356f9c12dacd
"""
method = 'hmyv2_getLeader'
try:
return rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def is_last_block(block_num, endpoint=_default_endpoint, timeout=_default_timeout) -> bool:
"""
    Check whether the block at block_num is the last block in its epoch
Parameters
----------
block_num: :obj:`int`
Block number to fetch
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
    bool: True if the block is the last block in its epoch, False otherwise
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://github.com/harmony-one/harmony/blob/1a8494c069dc3f708fdf690456713a2411465199/rpc/blockchain.go#L286
"""
params = [
block_num,
]
method = 'hmyv2_isLastBlock'
try:
return bool(rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def epoch_last_block(epoch, endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Returns the number of the last block in the epoch
Parameters
----------
epoch: :obj:`int`
Epoch for which the last block is to be fetched
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int: Number of the last block in the epoch
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://github.com/harmony-one/harmony/blob/1a8494c069dc3f708fdf690456713a2411465199/rpc/blockchain.go#L294
"""
params = [
epoch,
]
method = 'hmyv2_epochLastBlock'
try:
return int(rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_circulating_supply(endpoint=_default_endpoint, timeout=_default_timeout) -> str:
    """
    Get current circulating supply of tokens in ONE
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
str
        Current circulating supply (with decimal point)
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#8398e818-ac2d-4ad8-a3b4-a00927395044
"""
method = 'hmyv2_getCirculatingSupply'
try:
return rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_total_supply(endpoint=_default_endpoint, timeout=_default_timeout) -> str:
"""
Get total number of pre-mined tokens
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
str
Total number of pre-mined tokens, or None if no such tokens
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#3dcea518-9e9a-4a20-84f4-c7a0817b2196
"""
method = 'hmyv2_getTotalSupply'
try:
        return rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_block_number(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get current block number
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
Current block number
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#2602b6c4-a579-4b7c-bce8-85331e0db1a7
"""
method = 'hmyv2_blockNumber'
try:
return int(rpc_request(method, endpoint=endpoint, timeout=timeout)['result'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_current_epoch(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get current epoch number
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
Current epoch number
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#9b8e98b0-46d1-4fa0-aaa6-317ff1ddba59
"""
method = 'hmyv2_getEpoch'
try:
return int(rpc_request(method, endpoint=endpoint, timeout=timeout)['result'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_last_cross_links(endpoint=_default_endpoint, timeout=_default_timeout) -> list:
"""
Get last cross shard links
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
list of dictionaries, one for each shard except the one at the endpoint; each representing
the last block on the beacon-chain
hash: :obj:`str` Parent block hash
block-number: :obj:`int` Block number
view-id: :obj:`int` View ID
signature: :obj:`str` Hex representation of aggregated signature
signature-bitmap: :obj:`str` Hex representation of aggregated signature bitmap
shard-id: :obj:`str` (other) shard ID
epoch-number: :obj:`int` Block epoch
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#4994cdf9-38c4-4b1d-90a8-290ddaa3040e
"""
method = 'hmyv2_getLastCrossLinks'
try:
return rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_gas_price(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get network gas price
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
Network gas price
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#1d53fd59-a89f-436c-a171-aec9d9623f48
"""
method = 'hmyv2_gasPrice'
try:
return int(rpc_request(method, endpoint=endpoint, timeout=timeout)['result'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
##############
# Block RPCs #
##############
def get_latest_header(endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
"""
Get block header of latest block
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
dict with the following keys:
blockHash: :obj:`str` Block hash
blockNumber: :obj:`int` Block number
shardID: :obj:`int` Shard ID
leader: :obj:`str` Wallet address of leader that proposed this block if prestaking, otherwise sha256 hash of leader's public bls key
viewID: :obj:`int` View ID of the block
epoch: :obj:`int` Epoch of block
timestamp: :obj:`str` Timestamp that the block was finalized in human readable format
unixtime: :obj:`int` Timestamp that the block was finalized in Unix time
lastCommitSig: :obj:`str` Hex representation of aggregated signatures of the previous block
lastCommitBitmap: :obj:`str` Hex representation of aggregated signature bitmap of the previous block
        crossLinks: list of dicts describing the cross shard links, each dict with the following keys:
block-number: :obj:`int` Number of the cross link block
epoch-number: :obj:`int` Epoch of the cross link block
hash: :obj:`str` Hash of the cross link block
shard-id: :obj:`int` Shard ID for the cross link (besides the shard at endpoint)
signature: :obj:`str` Aggregated signature of the cross link block
            signature-bitmap: :obj:`str` Aggregated signature bitmap of the cross link block
view-id: :obj:`int` View ID of the cross link block
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#73fc9b97-b048-4b85-8a93-4d2bf1da54a6
"""
method = 'hmyv2_latestHeader'
try:
return rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_header_by_number(block_num, endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
"""
Get block header of block at block_num
Parameters
----------
block_num: :obj:`int`
Number of the block whose header is requested
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
See get_latest_header for header structure
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#01148e4f-72bb-426d-a123-718a161eaec0
"""
method = 'hmyv2_getHeaderByNumber'
params = [
block_num
]
try:
return rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_latest_chain_headers(endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
"""
Get block header of latest block for beacon chain & shard chain
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
dict with two keys:
beacon-chain-header: :obj:`dict` with the following keys, applicable to the beacon chain (cross shard links)
shard-chain-header: :obj:`dict` with the following keys, applicable to the shard chain
difficulty: legacy
epoch: :obj:`int` Epoch of the block
extraData: legacy
gasLimit: legacy
gasUsed: legacy
            hash: :obj:`str` Hash of the block
logsBloom: legacy
miner: legacy
mixHash: legacy
nonce: legacy
number: :obj:`int` Block number
parentHash: legacy
receiptsRoot: legacy
sha3Uncles: legacy
shardID :obj:`int` Shard ID
stateRoot: legacy
timestamp: legacy
transactionsRoot: legacy
viewID: View ID
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#7625493d-16bf-4611-8009-9635d063b4c0
"""
method = 'hmyv2_getLatestChainHeaders'
try:
return rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_block_by_number(block_num, full_tx=False, include_tx=False, include_staking_tx=False,
include_signers=False, endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
"""
Get block by number
Parameters
----------
block_num: :obj:`int`
Block number to fetch
full_tx: :obj:`bool`, optional
Include full transactions data for the block
include_tx: :obj:`bool`, optional
Include regular transactions for the block
include_staking_tx: :obj:`bool`, optional
Include staking transactions for the block
include_signers: :obj:`bool`, optional
Include list of signers for the block
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
dict with the following keys
difficulty: legacy
epoch: :obj:`int` Epoch number of block
extraData: :obj:`str` Hex representation of extra data in the block
gasLimit: :obj:`int` Maximum gas that can be used for transactions in the block
gasUsed: :obj:`int` Gas that was actually used for transactions in the block
hash: :obj:`str` Block hash
logsBloom: :obj:`str` Bloom logs
miner: :obj:`str` Wallet address of the leader that proposed this block
mixHash: legacy
nonce: legacy
number: :obj:`int` Block number
parentHash: :obj:`str` Hash of parent block
receiptsRoot: :obj:`str` Hash of transaction receipt root
signers: :obj:`list` List of signers (only if include_signers is set to True)
size: :obj:`int` Block size in bytes
stakingTransactions: :obj:`list`
if full_tx is True: List of dictionaries, each containing a staking transaction (see account.get_staking_transaction_history)
if full_tx is False: List of staking transaction hashes
stateRoot: :obj:`str` Hash of state root
timestamp: :obj:`int` Unix timestamp of the block
transactions: :obj:`list`
if full_tx is True: List of dictionaries, each containing a transaction (see account.get_transaction_history)
if full_tx is False: List of transaction hashes
transactionsRoot: :obj:`str` Hash of transactions root
uncles: :obj:`str` legacy
viewID: :obj:`int` View ID
transactionsInEthHash: :obj:`str` Transactions in ethereum hash
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#52f8a4ce-d357-46f1-83fd-d100989a8243
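    Example
    -------
    Illustrative only; assumes a reachable Harmony RPC endpoint (e.g. a local node at the default endpoint):
    >>> block = get_block_by_number(1, include_tx=True, full_tx=True)  # doctest: +SKIP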
"""
params = [
block_num,
{
'inclTx': include_tx,
'fullTx': full_tx,
'inclStaking': include_staking_tx,
'withSigners': include_signers,
},
]
method = 'hmyv2_getBlockByNumber'
try:
return rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_block_by_hash(block_hash, full_tx=False, include_tx=False, include_staking_tx=False,
include_signers=False, endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
"""
Get block by hash
Parameters
----------
block_hash: :obj:`str`
Block hash to fetch
full_tx: :obj:`bool`, optional
Include full transactions data for the block
include_tx: :obj:`bool`, optional
Include regular transactions for the block
    include_staking_tx: :obj:`bool`, optional
        Include staking transactions for the block
    include_signers: :obj:`bool`, optional
        Include list of signers for the block
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
See get_block_by_number for block structure
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#6a49ec47-1f74-4732-9f04-e5d76160bd5c
"""
params = [
block_hash,
{
'inclTx': include_tx,
'fullTx': full_tx,
'inclStaking': include_staking_tx,
'withSigners': include_signers,
},
]
method = 'hmyv2_getBlockByHash'
try:
return rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_block_transaction_count_by_number(block_num, endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get transaction count for specific block number
Parameters
----------
block_num: :obj:`int`
Block number to get transaction count for
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
Number of transactions in the block
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#26c5adfb-d757-4595-9eb7-c6efef63df32
"""
params = [
block_num
]
method = 'hmyv2_getBlockTransactionCountByNumber'
try:
return int(rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_block_transaction_count_by_hash(block_hash, endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get transaction count for specific block hash
Parameters
----------
block_hash: :obj:`str`
Block hash to get transaction count
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
Number of transactions in the block
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#66c68844-0208-49bb-a83b-08722bc113eb
"""
params = [
block_hash
]
method = 'hmyv2_getBlockTransactionCountByHash'
try:
return int(rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_block_staking_transaction_count_by_number(block_num, endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get staking transaction count for specific block number
Parameters
----------
block_num: :obj:`int`
Block number to get transaction count for
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
Number of staking transactions in the block
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://github.com/harmony-one/harmony/blob/1a8494c069dc3f708fdf690456713a2411465199/rpc/transaction.go#L494
"""
params = [
block_num
]
method = 'hmyv2_getBlockStakingTransactionCountByNumber'
try:
return int(rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_block_staking_transaction_count_by_hash(block_hash, endpoint=_default_endpoint, timeout=_default_timeout) -> int:
"""
Get staking transaction count for specific block hash
Parameters
----------
block_hash: :obj:`str`
Block hash to get transaction count
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int
        Number of staking transactions in the block
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://github.com/harmony-one/harmony/blob/1a8494c069dc3f708fdf690456713a2411465199/rpc/transaction.go#L523
"""
params = [
block_hash
]
method = 'hmyv2_getBlockStakingTransactionCountByHash'
try:
return int(rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_blocks(start_block, end_block, full_tx=False, include_tx=False, include_staking_tx=False,
include_signers=False, endpoint=_default_endpoint, timeout=_default_timeout
) -> list:
"""
Get list of blocks from a range
Parameters
----------
start_block: :obj:`int`
First block to fetch (inclusive)
end_block: :obj:`int`
Last block to fetch (inclusive)
full_tx: :obj:`bool`, optional
Include full transactions data for the block
include_tx: :obj:`bool`, optional
Include regular transactions for the block
include_staking_tx: :obj:`bool`, optional
Include staking transactions for the block
include_signers: :obj:`bool`, optional
Include list of signers for the block
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
list of blocks, see get_block_by_number for block structure
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#ab9bdc59-e482-436c-ab2f-10df215cd0bd
"""
params = [
start_block,
end_block,
{
'withSigners': include_signers,
'fullTx': full_tx,
'inclStaking': include_staking_tx,
'inclTx': include_tx
},
]
method = 'hmyv2_getBlocks'
try:
return rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_block_signers(block_num, endpoint=_default_endpoint, timeout=_default_timeout) -> list:
"""
Get list of block signers for specific block number
Parameters
----------
block_num: :obj:`int`
Block number to get signers for
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
list
List of one addresses that signed the block
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#1e4b5f41-9db6-4dea-92fb-4408db78e622
"""
params = [
block_num
]
method = 'hmyv2_getBlockSigners'
try:
return rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_block_signers_keys(block_num, endpoint=_default_endpoint, timeout=_default_timeout) -> list:
"""
Get list of block signer public bls keys for specific block number
Parameters
----------
block_num: :obj:`int`
Block number to get signer keys for
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
list
List of bls public keys that signed the block
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#9f9c8298-1a4e-4901-beac-f34b59ed02f1
"""
params = [
block_num
]
method = 'hmyv2_getBlockSignerKeys'
try:
return rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def is_block_signer(block_num, address, endpoint=_default_endpoint, timeout=_default_timeout) -> bool:
"""
Determine if the account at address is a signer for the block at block_num
Parameters
----------
block_num: :obj:`int`
Block number to check
address: :obj:`str`
Address to check
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
bool: True if the address was a signer for block_num, False otherwise
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://github.com/harmony-one/harmony/blob/1a8494c069dc3f708fdf690456713a2411465199/rpc/blockchain.go#L368
"""
params = [
block_num,
address
]
method = 'hmyv2_isBlockSigner'
try:
return rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_signed_blocks(address, endpoint=_default_endpoint, timeout=_default_timeout) -> int:
    """
    Get the number of blocks a particular validator signed in the last blocksPeriod (1 epoch)
Parameters
----------
address: :obj:`str`
Address to check
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
int: Number of blocks signed by account at address for last blocksPeriod
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://github.com/harmony-one/harmony/blob/1a8494c069dc3f708fdf690456713a2411465199/rpc/blockchain.go#L406
"""
params = [
address
]
method = 'hmyv2_getSignedBlocks'
try:
return int(rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result'])
except (KeyError, TypeError) as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_validators(epoch, endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
"""
Get list of validators for specific epoch number
Parameters
----------
epoch: :obj:`int`
Epoch to get list of validators for
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
dict with the following keys
shardID: :obj:`int` ID of the shard
validators: :obj:`list` of dictionaries, each with the following keys
address: :obj:`str` address of the validator
balance: :obj:`int` balance of the validator in ATTO
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#4dfe91ad-71fa-4c7d-83f3-d1c86a804da5
"""
params = [
epoch
]
method = 'hmyv2_getValidators'
try:
return rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
def get_validator_keys(epoch, endpoint=_default_endpoint, timeout=_default_timeout) -> list:
"""
Get list of validator public bls keys for specific epoch number
Parameters
----------
epoch: :obj:`int`
Epoch to get list of validator keys for
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
list
List of bls public keys in the validator committee
Raises
------
InvalidRPCReplyError
If received unknown result from endpoint
API Reference
-------------
https://api.hmny.io/#1439b580-fa3c-4d44-a79d-303390997a8c
"""
params = [
epoch
]
method = 'hmyv2_getValidatorKeys'
try:
return rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']
except KeyError as e:
raise InvalidRPCReplyError(method, endpoint) from e
```
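The functions in `blockchain.py` are thin wrappers around the Harmony `hmyv2_*` / `net_*` JSON-RPC methods, each raising `InvalidRPCReplyError` when the reply does not have the expected shape. The sketch below shows how a few of them might be combined to inspect a node; it is illustrative only and assumes a Harmony RPC endpoint is reachable at the module's default `http://localhost:9500`, which is not guaranteed by the code above.
```python
# Illustrative sketch (not part of pyhmy): querying node and chain status.
# Assumes a Harmony RPC endpoint is reachable at the default http://localhost:9500.
from pyhmy import blockchain
from pyhmy.rpc import exceptions

try:
    shard = blockchain.get_shard()                   # shard ID served by this endpoint
    height = blockchain.get_block_number()           # current block height
    epoch = blockchain.get_current_epoch()           # current epoch number
    header = blockchain.get_latest_header()          # header of the most recent block
    structure = blockchain.get_sharding_structure()  # every shard and its endpoints
    print(f'shard {shard}, block {height}, epoch {epoch}, leader {header["leader"]}')
    print(f'{len(structure)} shard(s) reported by the network')
except (exceptions.RPCError, exceptions.RequestsError, exceptions.RequestsTimeoutError) as err:
    print(f'node unreachable or returned an error: {err}')
```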
#### File: pyhmy/pyhmy/validator.py
```python
import json
from eth_account.datastructures import (
SignedTransaction
)
from decimal import (
Decimal,
InvalidOperation
)
from .account import (
get_balance,
is_valid_address
)
from .numbers import (
convert_one_to_atto
)
from .exceptions import (
InvalidValidatorError,
RPCError,
RequestsError,
RequestsTimeoutError
)
from .staking import (
get_all_validator_addresses,
get_validator_information
)
from .staking_structures import (
Directive
)
from .staking_signing import (
sign_staking_transaction
)
_default_endpoint = 'http://localhost:9500'
_default_timeout = 30
# TODO: Add unit testing
class Validator:
name_char_limit = 140
identity_char_limit = 140
website_char_limit = 140
security_contact_char_limit = 140
details_char_limit = 280
min_required_delegation = convert_one_to_atto(10000) # in ATTO
def __init__(self, address):
if not isinstance(address, str):
raise InvalidValidatorError(1, 'given ONE address was not a string')
if not is_valid_address(address):
            raise InvalidValidatorError(1, f'{address} is not a valid ONE address')
self._address = address
self._bls_keys = []
self._name = None
self._identity = None
self._website = None
self._details = None
self._security_contact = None
self._min_self_delegation = None
self._max_total_delegation = None
        self._initial_delegation = None
self._rate = None
self._max_change_rate = None
self._max_rate = None
def _sanitize_input(self, data, check_str=False) -> str:
"""
        If data is falsy, return '', otherwise return str(data)
Raises
------
InvalidValidatorError if check_str is True and str is not passed
"""
if check_str:
if not isinstance(data, str):
raise InvalidValidatorError(3, f'Expected data to be string to avoid floating point precision issues but got {data}')
return '' if not data else str(data)
def __str__(self) -> str:
"""
Returns JSON string representation of Validator fields
"""
info = self.export()
for key, value in info.items():
if isinstance(value, Decimal):
info[key] = str(value)
return json.dumps(info)
def __repr__(self) -> str:
return f'<Validator: {hex(id(self))}>'
def get_address(self) -> str:
"""
Get validator address
Returns
-------
str
Validator address
"""
return self._address
def add_bls_key(self, key) -> bool:
"""
Add BLS public key to validator BLS keys if not already in list
Returns
-------
bool
If adding BLS key succeeded
"""
key = self._sanitize_input(key)
if key not in self._bls_keys:
self._bls_keys.append(key)
return True
return False
def remove_bls_key(self, key) -> bool:
"""
Remove BLS public key from validator BLS keys if exists
Returns
-------
bool
If removing BLS key succeeded
"""
key = self._sanitize_input(key)
if key in self._bls_keys:
self._bls_keys.remove(key)
return True
return False
def get_bls_keys(self) -> list:
"""
Get list of validator BLS keys
Returns
-------
list
List of validator BLS keys (strings)
"""
return self._bls_keys
def set_name(self, name):
"""
Set validator name
Parameters
----------
name: str
Name of validator
Raises
------
InvalidValidatorError
If input is invalid
"""
name = self._sanitize_input(name)
if len(name) > self.name_char_limit:
raise InvalidValidatorError(3, f'Name must be less than {self.name_char_limit} characters')
self._name = name
def get_name(self) -> str:
"""
Get validator name
Returns
-------
str
Validator name
"""
return self._name
def set_identity(self, identity):
"""
Set validator identity
Parameters
----------
identity: str
Identity of validator
Raises
------
InvalidValidatorError
If input is invalid
"""
identity = self._sanitize_input(identity)
if len(identity) > self.identity_char_limit:
raise InvalidValidatorError(3, f'Identity must be less than {self.identity_char_limit} characters')
self._identity = identity
def get_identity(self) -> str:
"""
Get validator identity
Returns
-------
str
Validator identity
"""
return self._identity
def set_website(self, website):
"""
Set validator website
Parameters
----------
website: str
Website of validator
Raises
------
InvalidValidatorError
If input is invalid
"""
website = self._sanitize_input(website)
if len(website) > self.website_char_limit:
raise InvalidValidatorError(3, f'Website must be less than {self.website_char_limit} characters')
self._website = website
def get_website(self) -> str:
"""
Get validator website
Returns
-------
str
Validator website
"""
return self._website
def set_security_contact(self, contact):
"""
Set validator security contact
Parameters
----------
contact: str
Security contact of validator
Raises
------
InvalidValidatorError
If input is invalid
"""
contact = self._sanitize_input(contact)
if len(contact) > self.security_contact_char_limit:
raise InvalidValidatorError(3, f'Security contact must be less than {self.security_contact_char_limit} characters')
self._security_contact = contact
def get_security_contact(self) -> str:
"""
Get validator security contact
Returns
-------
str
Validator security contact
"""
return self._security_contact
def set_details(self, details):
"""
Set validator details
Parameters
----------
details: str
Details of validator
Raises
------
InvalidValidatorError
If input is invalid
"""
details = self._sanitize_input(details)
if len(details) > self.details_char_limit:
raise InvalidValidatorError(3, f'Details must be less than {self.details_char_limit} characters')
self._details = details
def get_details(self) -> str:
"""
Get validator details
Returns
-------
str
Validator details
"""
return self._details
def set_min_self_delegation(self, delegation):
"""
Set validator min self delegation
Parameters
----------
delegation: int
Minimum self delegation of validator in ATTO
Raises
------
InvalidValidatorError
If input is invalid
"""
delegation = self._sanitize_input(delegation)
try:
delegation = Decimal(delegation)
except (TypeError, InvalidOperation) as e:
raise InvalidValidatorError(3, 'Min self delegation must be a number') from e
if delegation < self.min_required_delegation:
            raise InvalidValidatorError(3, f'Min self delegation must be at least {self.min_required_delegation} ATTO')
self._min_self_delegation = delegation
def get_min_self_delegation(self) -> Decimal:
"""
Get validator min self delegation
Returns
-------
Decimal
Validator min self delegation in ATTO
"""
return self._min_self_delegation
def set_max_total_delegation(self, max_delegation):
"""
Set validator max total delegation
Parameters
----------
max_delegation: int
Maximum total delegation of validator in ATTO
Raises
------
InvalidValidatorError
If input is invalid
"""
max_delegation = self._sanitize_input(max_delegation)
try:
max_delegation = Decimal(max_delegation)
except (TypeError, InvalidOperation) as e:
raise InvalidValidatorError(3, 'Max total delegation must be a number') from e
if self._min_self_delegation:
if max_delegation < self._min_self_delegation:
                raise InvalidValidatorError(3, 'Max total delegation must be greater than min self delegation: '
                                                f'{self._min_self_delegation}')
else:
raise InvalidValidatorError(4, 'Min self delegation must be set before max total delegation')
self._max_total_delegation = max_delegation
def get_max_total_delegation(self) -> Decimal:
"""
Get validator max total delegation
Returns
-------
Decimal
Validator max total delegation in ATTO
"""
return self._max_total_delegation
def set_amount(self, amount):
"""
Set validator initial delegation amount
Parameters
----------
amount: str
Initial delegation amount of validator in ATTO
Raises
------
InvalidValidatorError
If input is invalid
"""
amount = self._sanitize_input(amount)
try:
amount = Decimal(amount)
except (TypeError, InvalidOperation) as e:
raise InvalidValidatorError(3, 'Amount must be a number') from e
if self._min_self_delegation:
if amount < self._min_self_delegation:
raise InvalidValidatorError(3, 'Amount must be greater than min self delegation: '
f'{self._min_self_delegation}')
else:
raise InvalidValidatorError(4, 'Min self delegation must be set before amount')
if self._max_total_delegation:
if amount > self._max_total_delegation:
raise InvalidValidatorError(3, 'Amount must be less than max total delegation: '
f'{self._max_total_delegation}')
else:
raise InvalidValidatorError(4, 'Max total delegation must be set before amount')
        self._initial_delegation = amount
def get_amount(self) -> Decimal:
"""
Get validator initial delegation amount
Returns
-------
Decimal
Intended initial delegation amount in ATTO
"""
        return self._initial_delegation
def set_max_rate(self, rate):
"""
Set validator max commission rate
Parameters
----------
rate: str (to avoid precision troubles)
Max commission rate of validator
Raises
------
InvalidValidatorError
If input is invalid
"""
rate = self._sanitize_input(rate, True)
try:
rate = Decimal(rate)
except (TypeError, InvalidOperation) as e:
raise InvalidValidatorError(3, 'Max rate must be a number') from e
if rate < 0 or rate > 1:
raise InvalidValidatorError(3, 'Max rate must be between 0 and 1')
self._max_rate = rate
def get_max_rate(self) -> Decimal:
"""
Get validator max commission rate
Returns
-------
Decimal
Validator max rate
"""
return self._max_rate
def set_max_change_rate(self, rate):
"""
Set validator max commission change rate
Parameters
----------
rate: str (to avoid precision troubles)
Max commission change rate of validator
Raises
------
InvalidValidatorError
If input is invalid
"""
rate = self._sanitize_input(rate, True)
try:
rate = Decimal(rate)
except (TypeError, InvalidOperation) as e:
raise InvalidValidatorError(3, 'Max change rate must be a number') from e
if rate < 0:
raise InvalidValidatorError(3, 'Max change rate must be greater than or equal to 0')
if self._max_rate:
if rate > self._max_rate:
raise InvalidValidatorError(3, f'Max change rate must be less than or equal to max rate: {self._max_rate}')
else:
raise InvalidValidatorError(4, 'Max rate must be set before max change rate')
self._max_change_rate = rate
def get_max_change_rate(self) -> Decimal:
"""
Get validator max commission change rate
Returns
-------
Decimal (to avoid precision troubles)
Validator max change rate
"""
return self._max_change_rate
def set_rate(self, rate):
"""
Set validator commission rate
Parameters
----------
rate: str (to avoid precision troubles)
Commission rate of validator
Raises
------
InvalidValidatorError
If input is invalid
"""
rate = self._sanitize_input(rate, True)
try:
rate = Decimal(rate)
except (TypeError, InvalidOperation) as e:
raise InvalidValidatorError(3, 'Rate must be a number') from e
if rate < 0:
raise InvalidValidatorError(3, 'Rate must be greater than or equal to 0')
if self._max_rate:
if rate > self._max_rate:
raise InvalidValidatorError(3, f'Rate must be less than or equal to max rate: {self._max_rate}')
else:
raise InvalidValidatorError(4, 'Max rate must be set before rate')
self._rate = rate
def get_rate(self) -> Decimal:
"""
Get validator commission rate
Returns
-------
Decimal
Validator rate
"""
return self._rate
def does_validator_exist(self, endpoint=_default_endpoint, timeout=_default_timeout) -> bool:
"""
Check if validator exists on blockchain
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
bool
Does validator exist on chain
Raises
------
RPCError, RequestsError, RequestsTimeoutError
If unable to get list of validators on chain
"""
all_validators = get_all_validator_addresses(endpoint, timeout)
if self._address in all_validators:
return True
return False
def load(self, info):
"""
Import validator information
Parameters
----------
info: dict
Validator information with dictionary
Will ignore any extra fields in the input dictionary
Example input:
{
"name": "",
"website": "",
"security-contact": "",
"identity": "",
"details": "",
"amount": 0,
"min-self-delegation": 0,
"max-total-delegation": 0,
"rate": '0',
"max-rate": '0',
"max-change-rate": '0',
"bls-public-keys": [ "" ]
}
Raises
------
InvalidValidatorError
If input value is invalid
"""
try:
self.set_name(info['name'])
self.set_identity(info['identity'])
self.set_website(info['website'])
self.set_details(info['details'])
self.set_security_contact(info['security-contact'])
self.set_min_self_delegation(info['min-self-delegation'])
self.set_max_total_delegation(info['max-total-delegation'])
self.set_amount(info['amount'])
self.set_max_rate(info['max-rate'])
self.set_max_change_rate(info['max-change-rate'])
self.set_rate(info['rate'])
self._bls_keys = []
for key in info['bls-public-keys']:
self.add_bls_key(key)
except KeyError as e:
raise InvalidValidatorError(3, 'Info has missing key') from e
def load_from_blockchain(self, endpoint=_default_endpoint, timeout=_default_timeout):
"""
Import validator information from blockchain with given address
Parameters
----------
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Raises
------
InvalidValidatorError
If any error occur getting & importing validator information from the blockchain
"""
try:
if not self.does_validator_exist(endpoint, timeout):
raise InvalidValidatorError(5, f'Validator does not exist on chain according to {endpoint}')
except (RPCError, RequestsError, RequestsTimeoutError) as e:
raise InvalidValidatorError(5, 'Error requesting validator information') from e
try:
validator_info = get_validator_information(self._address, endpoint, timeout)
except (RPCError, RequestsError, RequestsTimeoutError) as e:
raise InvalidValidatorError(5, 'Error requesting validator information') from e
# Skip additional sanity checks when importing from chain
try:
info = validator_info['validator']
self._name = info['name']
self._identity = info['identity']
self._website = info['website']
self._details = info['details']
self._security_contact = info['security-contact']
self._min_self_delegation = info['min-self-delegation']
self._max_total_delegation = info['max-total-delegation']
            self._initial_delegation = self._min_self_delegation  # validator already exists on chain, so default the initial delegation to its min self delegation
self._max_rate = Decimal(info['max-rate'])
self._max_change_rate = Decimal(info['max-change-rate'])
self._rate = Decimal(info['rate'])
            self._bls_keys = info['bls-public-keys']
except KeyError as e:
raise InvalidValidatorError(5, 'Error importing validator information from RPC result') from e
def export(self) -> dict:
"""
Export validator information as dict
Returns
-------
dict
Dictionary representation of validator
"""
info = {
"validator-addr": self._address,
"name": self._name,
"website": self._website,
"security-contact": self._security_contact,
"identity": self._identity,
"details": self._details,
"amount": self._inital_delegation,
"min-self-delegation": self._min_self_delegation,
"max-total-delegation": self._max_total_delegation,
"rate": self._rate,
"max-rate": self._max_rate,
"max-change-rate": self._max_change_rate,
"bls-public-keys": self._bls_keys
}
return info
def sign_create_validator_transaction(self, nonce, gas_price, gas_limit, private_key, chain_id=None) -> SignedTransaction:
"""
        Create, but do not post, a transaction to create the validator using private_key
Returns
-------
        SignedTransaction object, whose rawTransaction field can be used to send the
        transaction via transaction.send_raw_transaction
Raises
------
rlp.exceptions.ObjectSerializationError for malformed inputs
API Reference
-------------
https://github.com/harmony-one/sdk/blob/99a827782fabcd5f91f025af0d8de228956d42b4/packages/harmony-staking/src/stakingTransaction.ts#L413
"""
info = self.export().copy()
info['directive'] = Directive.CreateValidator
info['validatorAddress'] = info.pop('validator-addr') # change the key
info['nonce'] = nonce
info['gasPrice'] = gas_price
info['gasLimit'] = gas_limit
if chain_id:
info['chainId'] = chain_id
return sign_staking_transaction(info, private_key)
def sign_edit_validator_transaction(self, nonce, gas_price, gas_limit, rate, bls_key_to_add, bls_key_to_remove, private_key, chain_id=None) -> SignedTransaction:
"""
        Create, but do not post, a transaction to edit the validator using private_key
Returns
-------
        SignedTransaction object, whose rawTransaction field can be used to send the
        transaction via transaction.send_raw_transaction
Raises
------
rlp.exceptions.ObjectSerializationError for malformed inputs
API Reference
-------------
https://github.com/harmony-one/sdk/blob/99a827782fabcd5f91f025af0d8de228956d42b4/packages/harmony-staking/src/stakingTransaction.ts#L460
"""
self.set_rate(rate)
self.add_bls_key(bls_key_to_add)
self.remove_bls_key(bls_key_to_remove)
info = self.export().copy()
info['directive'] = Directive.EditValidator
info['validatorAddress'] = info.pop('validator-addr') # change the key
info['nonce'] = nonce
info['gasPrice'] = gas_price
info['gasLimit'] = gas_limit
_ = info.pop('max-rate') # not needed
_ = info.pop('max-change-rate') # not needed
_ = info.pop('bls-public-keys') # remove this list
_ = info.pop('amount') # also unused
info['bls-key-to-remove'] = bls_key_to_remove
info['bls-key-to-add'] = bls_key_to_add
if chain_id:
info['chainId'] = chain_id
return sign_staking_transaction(info, private_key)
```
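A short usage sketch for the `Validator` helper above, mirroring the field names documented in `load()`. Everything below is illustrative: the ONE address, BLS key, nonce, gas values, chain id and private key are placeholders, and the signed transaction is only built, never broadcast.
```python
# Illustrative sketch (placeholders throughout); field names follow the load() example above.
from pyhmy.numbers import convert_one_to_atto
from pyhmy.validator import Validator

validator = Validator('one1a0x3d6xpmr6f8wsyaxd9v36pytvp48zckswvv9')  # placeholder ONE address
validator.load({
    'name': 'example validator',
    'identity': 'example',
    'website': 'https://example.com',
    'security-contact': 'security@example.com',
    'details': 'illustration only',
    'min-self-delegation': convert_one_to_atto(10000),   # minimum allowed by the class
    'max-total-delegation': convert_one_to_atto(100000),
    'amount': convert_one_to_atto(10000),
    'rate': '0.05',
    'max-rate': '0.9',
    'max-change-rate': '0.05',
    'bls-public-keys': ['0xb0' + 'ab' * 47],             # placeholder 48-byte BLS public key
})

# Build (but do not broadcast) a create-validator staking transaction.
# nonce, gas values, chain id and the private key are placeholders, not real settings.
signed = validator.sign_create_validator_transaction(
    nonce=2,
    gas_price=1,
    gas_limit=100,
    private_key='0x' + '11' * 32,  # throwaway key for illustration; never hard-code real keys
    chain_id=2,
)
print(signed.rawTransaction.hex())  # RLP payload for transaction.send_raw_transaction
```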
#### File: tests/sdk-pyhmy/test_blockchain.py
```python
import pytest
import requests
from pyhmy import (
blockchain
)
from pyhmy.rpc import (
exceptions
)
test_epoch_number = 0
genesis_block_number = 0
test_block_number = 1
test_block_hash = None
fake_shard = 'http://example.com'
def _test_blockchain_rpc(fn, *args, **kwargs):
if not callable(fn):
pytest.fail(f'Invalid function: {fn}')
try:
response = fn(*args, **kwargs)
except Exception as e:
if isinstance(e, exceptions.RPCError) and 'does not exist/is not available' in str(e):
pytest.skip(f'{str(e)}')
pytest.fail(f'Unexpected error: {e.__class__} {e}')
return response
@pytest.mark.run(order=1)
def test_get_node_metadata(setup_blockchain):
metadata = _test_blockchain_rpc(blockchain.get_node_metadata)
assert isinstance(metadata, dict)
@pytest.mark.run(order=2)
def test_get_sharding_structure(setup_blockchain):
sharding_structure = _test_blockchain_rpc(blockchain.get_sharding_structure)
assert isinstance(sharding_structure, list)
assert len(sharding_structure) > 0
@pytest.mark.run(order=3)
def test_get_leader_address(setup_blockchain):
leader = _test_blockchain_rpc(blockchain.get_leader_address)
assert isinstance(leader, str)
assert 'one1' in leader
@pytest.mark.run(order=4)
def test_get_block_number(setup_blockchain):
current_block_number = _test_blockchain_rpc(blockchain.get_block_number)
assert isinstance(current_block_number, int)
@pytest.mark.run(order=5)
def test_get_current_epoch(setup_blockchain):
current_epoch = _test_blockchain_rpc(blockchain.get_current_epoch)
assert isinstance(current_epoch, int)
@pytest.mark.run(order=6)
def tset_get_gas_price(setup_blockchain):
gas = _test_blockchain_rpc(blockchain.get_gas_price)
assert isinstance(gas, int)
@pytest.mark.run(order=7)
def test_get_num_peers(setup_blockchain):
peers = _test_blockchain_rpc(blockchain.get_num_peers)
assert isinstance(peers, int)
@pytest.mark.run(order=8)
def test_get_latest_header(setup_blockchain):
header = _test_blockchain_rpc(blockchain.get_latest_header)
assert isinstance(header, dict)
@pytest.mark.run(order=9)
def test_get_latest_chain_headers(setup_blockchain):
header_pair = _test_blockchain_rpc(blockchain.get_latest_chain_headers)
assert isinstance(header_pair, dict)
@pytest.mark.run(order=10)
def test_get_block_by_number(setup_blockchain):
global test_block_hash
block = _test_blockchain_rpc(blockchain.get_block_by_number, test_block_number)
assert isinstance(block, dict)
assert 'hash' in block.keys()
test_block_hash = block['hash']
@pytest.mark.run(order=11)
def test_get_block_by_hash(setup_blockchain):
if not test_block_hash:
pytest.skip('Failed to get reference block hash')
block = _test_blockchain_rpc(blockchain.get_block_by_hash, test_block_hash)
assert isinstance(block, dict)
@pytest.mark.run(order=12)
def test_get_block_transaction_count_by_number(setup_blockchain):
tx_count = _test_blockchain_rpc(blockchain.get_block_transaction_count_by_number, test_block_number)
assert isinstance(tx_count, int)
@pytest.mark.run(order=13)
def test_get_block_transaction_count_by_hash(setup_blockchain):
if not test_block_hash:
pytest.skip('Failed to get reference block hash')
tx_count = _test_blockchain_rpc(blockchain.get_block_transaction_count_by_hash, test_block_hash)
assert isinstance(tx_count, int)
@pytest.mark.run(order=14)
def test_get_blocks(setup_blockchain):
blocks = _test_blockchain_rpc(blockchain.get_blocks, genesis_block_number, test_block_number)
assert isinstance(blocks, list)
assert len(blocks) == (test_block_number - genesis_block_number + 1)
@pytest.mark.run(order=15)
def test_get_block_signers(setup_blockchain):
block_signers = _test_blockchain_rpc(blockchain.get_block_signers, test_block_number)
assert isinstance(block_signers, list)
assert len(block_signers) > 0
@pytest.mark.run(order=16)
def test_get_validators(setup_blockchain):
validators = _test_blockchain_rpc(blockchain.get_validators, test_epoch_number)
assert isinstance(validators, dict)
assert 'validators' in validators.keys()
assert len(validators['validators']) > 0
@pytest.mark.run(order=17)
def test_get_shard(setup_blockchain):
shard = _test_blockchain_rpc(blockchain.get_shard)
assert isinstance(shard, int)
assert shard == 0
@pytest.mark.run(order=18)
def test_get_staking_epoch(setup_blockchain):
staking_epoch = _test_blockchain_rpc(blockchain.get_staking_epoch)
assert isinstance(staking_epoch, int)
@pytest.mark.run(order=19)
def test_get_prestaking_epoch(setup_blockchain):
prestaking_epoch = _test_blockchain_rpc(blockchain.get_prestaking_epoch)
assert isinstance(prestaking_epoch, int)
@pytest.mark.run(order=20)
def test_get_bad_blocks(setup_blockchain):
# TODO: Remove skip when RPC is fixed
pytest.skip("Known error with hmyv2_getCurrentBadBlocks")
bad_blocks = _test_blockchain_rpc(blockchain.get_bad_blocks)
assert isinstance(bad_blocks, list)
@pytest.mark.run(order=21)
def test_get_validator_keys(setup_blockchain):
keys = _test_blockchain_rpc(blockchain.get_validator_keys, test_epoch_number)
assert isinstance(keys, list)
assert len(keys) > 0
@pytest.mark.run(order=22)
def test_get_block_signers_keys(setup_blockchain):
keys = _test_blockchain_rpc(blockchain.get_block_signers_keys, test_block_number)
assert isinstance(keys, list)
assert len(keys) > 0
@pytest.mark.run(order=23)
def test_chain_id(setup_blockchain):
chain_id = _test_blockchain_rpc(blockchain.chain_id)
assert isinstance(chain_id, int)
@pytest.mark.run(order=24)
def test_get_peer_info(setup_blockchain):
peer_info = _test_blockchain_rpc(blockchain.get_peer_info)
assert isinstance(peer_info, dict)
@pytest.mark.run(order=25)
def test_protocol_version(setup_blockchain):
protocol_version = _test_blockchain_rpc(blockchain.protocol_version)
assert isinstance(protocol_version, int)
@pytest.mark.run(order=26)
def test_is_last_block(setup_blockchain):
is_last_block = _test_blockchain_rpc(blockchain.is_last_block, 0)
assert isinstance(is_last_block, bool)
assert not is_last_block
@pytest.mark.run(order=27)
def test_epoch_last_block(setup_blockchain):
epoch_last_block = _test_blockchain_rpc(blockchain.epoch_last_block, 0)
assert isinstance(epoch_last_block, int)
@pytest.mark.run(order=28)
def test_get_circulating_supply(setup_blockchain):
circulating_supply = _test_blockchain_rpc(blockchain.get_circulating_supply)
assert isinstance(circulating_supply, str)
@pytest.mark.run(order=29)
def test_get_total_supply(setup_blockchain):
total_supply = _test_blockchain_rpc(blockchain.get_total_supply)
    assert isinstance(total_supply, str) or total_supply is None
@pytest.mark.run(order=30)
def test_get_last_cross_links(setup_blockchain):
last_cross_links = _test_blockchain_rpc(blockchain.get_last_cross_links)
assert isinstance(last_cross_links, list)
@pytest.mark.run(order=31)
def test_get_gas_price(setup_blockchain):
gas_price = _test_blockchain_rpc(blockchain.get_gas_price)
assert isinstance(gas_price, int)
@pytest.mark.run(order=32)
def test_get_version(setup_blockchain):
version = _test_blockchain_rpc(blockchain.get_version)
assert isinstance(version, int)
@pytest.mark.run(order=33)
def test_get_header_by_number(setup_blockchain):
header_pair = _test_blockchain_rpc(blockchain.get_header_by_number, 0)
assert isinstance(header_pair, dict)
@pytest.mark.run(order=34)
def test_get_block_staking_transaction_count_by_number(setup_blockchain):
tx_count = _test_blockchain_rpc(blockchain.get_block_staking_transaction_count_by_number, test_block_number)
assert isinstance(tx_count, int)
@pytest.mark.run(order=35)
def test_get_block_staking_transaction_count_by_hash(setup_blockchain):
if not test_block_hash:
pytest.skip('Failed to get reference block hash')
tx_count = _test_blockchain_rpc(blockchain.get_block_staking_transaction_count_by_hash, test_block_hash)
assert isinstance(tx_count, int)
@pytest.mark.run(order=36)
def test_is_block_signer(setup_blockchain):
is_signer = _test_blockchain_rpc(blockchain.is_block_signer, test_block_number, '0x0')
assert isinstance(is_signer, bool)
@pytest.mark.run(order=37)
def test_get_signed_blocks(setup_blockchain):
signed_blocks = _test_blockchain_rpc(blockchain.get_signed_blocks, '0x0')
assert isinstance(signed_blocks, int)
@pytest.mark.run(order=38)
def test_in_sync(setup_blockchain):
in_sync = _test_blockchain_rpc(blockchain.in_sync)
assert isinstance(in_sync, bool)
@pytest.mark.run(order=38)
def test_beacon_in_sync(setup_blockchain):
beacon_in_sync = _test_blockchain_rpc(blockchain.beacon_in_sync)
assert isinstance(beacon_in_sync, bool)
def test_errors():
with pytest.raises(exceptions.RPCError):
blockchain.chain_id(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_node_metadata(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_peer_info(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.protocol_version(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_shard(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_staking_epoch(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_prestaking_epoch(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_sharding_structure(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_leader_address(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.is_last_block(0, fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.epoch_last_block(0, fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_circulating_supply(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_total_supply(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_block_number(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_current_epoch(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_last_cross_links(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_gas_price(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_num_peers(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_version(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_latest_header(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_header_by_number(0, fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_latest_chain_headers(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_block_by_number(0, endpoint=fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_block_by_hash('', endpoint=fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_block_transaction_count_by_number(0, fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_block_transaction_count_by_hash('', fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_block_staking_transaction_count_by_number(0, fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_block_staking_transaction_count_by_hash('', fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_blocks(0, 1, endpoint=fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_block_signers(0, fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_block_signers_keys(0, fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.is_block_signer(0, '', fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_signed_blocks('', fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_validators(1, fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.get_validator_keys(0, fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.in_sync(fake_shard)
with pytest.raises(exceptions.RPCError):
blockchain.beacon_in_sync(fake_shard)
```
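The `@pytest.mark.run(order=...)` markers above rely on an ordering plugin such as pytest-ordering, and `_test_blockchain_rpc` turns "method not available" RPC errors into skips rather than failures. Outside of pytest, the same defensive pattern can look like the sketch below; it is illustrative only, and `http://example.com` is simply an address with no Harmony RPC behind it, as with `fake_shard` in the tests.
```python
# Illustrative only: tolerate RPC/transport errors the same way _test_blockchain_rpc does,
# here against an endpoint that does not speak the Harmony RPC protocol.
from pyhmy import blockchain
from pyhmy.rpc import exceptions

def try_rpc(fn, *args, **kwargs):
    """Return the RPC result, or None if the endpoint rejects the call or the request fails."""
    try:
        return fn(*args, **kwargs)
    except (exceptions.RPCError, exceptions.RequestsError, exceptions.RequestsTimeoutError) as err:
        print(f'{fn.__name__} failed: {err}')
        return None

# With no Harmony node behind the endpoint this prints the error and returns None.
try_rpc(blockchain.get_block_number, endpoint='http://example.com')
```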
#### File: tests/sdk-pyhmy/test_staking.py
```python
import pytest
import requests
from pyhmy import (
staking
)
from pyhmy.rpc import (
exceptions
)
explorer_endpoint = 'http://localhost:9599'
test_validator_address = 'one18tvf56zqjkjnak686lwutcp5mqfnvee35xjnhc'
fake_shard = 'http://example.com'
def _test_staking_rpc(fn, *args, **kwargs):
if not callable(fn):
pytest.fail(f'Invalid function: {fn}')
try:
response = fn(*args, **kwargs)
except Exception as e:
if isinstance(e, exceptions.RPCError) and 'does not exist/is not available' in str(e):
pytest.skip(f'{str(e)}')
pytest.fail(f'Unexpected error: {e.__class__} {e}')
return response
@pytest.mark.run(order=1)
def test_get_all_validator_addresses(setup_blockchain):
validator_addresses = _test_staking_rpc(staking.get_all_validator_addresses)
assert isinstance(validator_addresses, list)
assert len(validator_addresses) > 0
assert test_validator_address in validator_addresses
@pytest.mark.run(order=2)
def test_get_validator_information(setup_blockchain):
info = _test_staking_rpc(staking.get_validator_information, test_validator_address)
assert isinstance(info, dict)
@pytest.mark.run(order=3)
def test_get_all_validator_information(setup_blockchain):
all_validator_information = _test_staking_rpc(staking.get_all_validator_information)
assert isinstance(all_validator_information, list)
assert len(all_validator_information) > 0
@pytest.mark.run(order=4)
def test_get_delegations_by_delegator(setup_blockchain):
delegations = _test_staking_rpc(staking.get_delegations_by_delegator, test_validator_address)
assert isinstance(delegations, list)
assert len(delegations) > 0
@pytest.mark.run(order=5)
def test_get_delegations_by_validator(setup_blockchain):
delegations = _test_staking_rpc(staking.get_delegations_by_validator, test_validator_address)
assert isinstance(delegations, list)
assert len(delegations) > 0
@pytest.mark.run(order=6)
def test_get_current_utility_metrics(setup_blockchain):
metrics = _test_staking_rpc(staking.get_current_utility_metrics)
assert isinstance(metrics, dict)
@pytest.mark.run(order=7)
def test_get_staking_network_info(setup_blockchain):
info = _test_staking_rpc(staking.get_staking_network_info)
assert isinstance(info, dict)
@pytest.mark.run(order=8)
def test_get_super_committees(setup_blockchain):
committee = _test_staking_rpc(staking.get_super_committees)
assert isinstance(committee, dict)
@pytest.mark.run(order=9)
def test_get_raw_median_stake_snapshot(setup_blockchain):
median_stake = _test_staking_rpc(staking.get_raw_median_stake_snapshot)
assert isinstance(median_stake, dict)
@pytest.mark.run(order=10)
def test_get_validator_information_by_block(setup_blockchain):
# Apparently validator information not created until block after create-validator transaction is accepted, so +1 block
info = _test_staking_rpc(staking.get_validator_information_by_block_number, test_validator_address, setup_blockchain + 1, endpoint=explorer_endpoint)
assert isinstance(info, dict)
@pytest.mark.run(order=11)
def test_get_all_validator_information_by_block(setup_blockchain):
# Apparently validator information not created until block after create-validator transaction is accepted, so +1 block
info = _test_staking_rpc(staking.get_all_validator_information_by_block_number, setup_blockchain + 1, endpoint=explorer_endpoint)
assert isinstance(info, list)
@pytest.mark.run(order=12)
def test_get_delegations_by_delegator_by_block(setup_blockchain):
delegations = _test_staking_rpc(staking.get_delegations_by_delegator_by_block_number, test_validator_address, setup_blockchain + 1, endpoint=explorer_endpoint)
assert isinstance(delegations, list)
@pytest.mark.run(order=13)
def test_get_elected_validator_addresses(setup_blockchain):
validator_addresses = _test_staking_rpc(staking.get_elected_validator_addresses)
assert isinstance(validator_addresses, list)
assert len(validator_addresses) > 0
@pytest.mark.run(order=14)
def test_get_validators(setup_blockchain):
validators = _test_staking_rpc(staking.get_validators, 2)
assert isinstance(validators, dict)
assert len(validators['validators']) > 0
@pytest.mark.run(order=15)
def test_get_validator_keys(setup_blockchain):
validators = _test_staking_rpc(staking.get_validator_keys, 2)
assert isinstance(validators, list)
@pytest.mark.run(order=16)
def test_get_validator_self_delegation(setup_blockchain):
self_delegation = _test_staking_rpc(staking.get_validator_self_delegation, test_validator_address)
assert isinstance(self_delegation, int)
assert self_delegation > 0
@pytest.mark.run(order=17)
def test_get_validator_total_delegation(setup_blockchain):
total_delegation = _test_staking_rpc(staking.get_validator_total_delegation, test_validator_address)
assert isinstance(total_delegation, int)
assert total_delegation > 0
@pytest.mark.run(order=18)
def test_get_all_delegation_information(setup_blockchain):
delegation_information = _test_staking_rpc(staking.get_all_delegation_information, 0)
assert isinstance(delegation_information, list)
assert len(delegation_information) > 0
@pytest.mark.run(order=19)
def test_get_delegation_by_delegator_and_validator(setup_blockchain):
delegation_information = _test_staking_rpc(staking.get_delegation_by_delegator_and_validator, test_validator_address, test_validator_address)
assert isinstance(delegation_information, dict)
@pytest.mark.run(order=20)
def test_get_available_redelegation_balance(setup_blockchain):
    redelegation_balance = _test_staking_rpc(staking.get_available_redelegation_balance, test_validator_address)
    assert isinstance(redelegation_balance, int)
    assert redelegation_balance == 0
@pytest.mark.run(order=21)
def test_get_total_staking(setup_blockchain):
total_staking = _test_staking_rpc(staking.get_total_staking)
assert isinstance(total_staking, int)
assert total_staking > 0
@pytest.mark.run(order=22)
def test_errors():
with pytest.raises(exceptions.RPCError):
staking.get_all_validator_addresses(fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_validator_information('', fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_elected_validator_addresses(fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_validators(1, fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_validator_keys(1, fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_validator_information_by_block_number('', 1, fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_all_validator_information(-1, fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_validator_self_delegation('', fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_validator_total_delegation('', fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_all_validator_information_by_block_number(1, 1, fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_all_delegation_information(1, fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_delegations_by_delegator('', fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_delegations_by_delegator_by_block_number('', 1, fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_delegation_by_delegator_and_validator('', '', fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_available_redelegation_balance('', fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_delegations_by_validator('', fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_current_utility_metrics(fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_staking_network_info(fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_super_committees(fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_total_staking(fake_shard)
with pytest.raises(exceptions.RPCError):
staking.get_raw_median_stake_snapshot(fake_shard)
```
#### File: tests/sdk-pyhmy/test_validator.py
```python
import pytest
import requests
from decimal import (
Decimal
)
from pyhmy import (
validator
)
from pyhmy.rpc import (
exceptions
)
from pyhmy.numbers import (
convert_one_to_atto
)
from pyhmy.exceptions import (
InvalidValidatorError
)
import sys
test_epoch_number = 0
genesis_block_number = 0
test_block_number = 1
test_validator_object = None
test_validator_loaded = False
@pytest.mark.run(order=0)
def test_instantiate_validator(setup_blockchain):
global test_validator_object
test_validator_object = validator.Validator('one1a0x3d6xpmr6f8wsyaxd9v36pytvp48zckswvv9')
assert isinstance(test_validator_object, validator.Validator)
@pytest.mark.run(order=1)
def test_load_validator(setup_blockchain):
if not test_validator_object:
pytest.skip('Validator not instantiated yet')
info = {
'name': 'Alice',
'identity': 'alice',
'website': 'alice.harmony.one',
'details': "Don't mess with me!!!",
'security-contact': 'Bob',
'min-self-delegation': convert_one_to_atto(10000),
'amount': convert_one_to_atto(10001),
'max-rate': '0.9',
'max-change-rate': '0.05',
'rate': '0.01',
'bls-public-keys': ['0xb9486167ab9087ab818dc4ce026edb5bf216863364c32e42df2af03c5ced1ad181e7d12f0e6dd5307a73b62247608611'],
'max-total-delegation': convert_one_to_atto(40000)
}
test_validator_object.load(info)
global test_validator_loaded
test_validator_loaded = True
"""
TypeScript signature source
const description: Description = new Description('Alice', 'alice', 'alice.harmony.one', 'Bob', "Don't mess with me!!!")
const commissionRates: CommissionRate = new CommissionRate(new Decimal('0.01'), new Decimal('0.9'), new Decimal('0.05'))
const stakeMsg: CreateValidator = new CreateValidator(
'one1a0x3d6xpmr6f8wsyaxd9v36pytvp48zckswvv9',
description,
commissionRates,
numberToHex(new Unit('10000').asOne().toWei()), // minSelfDelegation
numberToHex(new Unit('40000').asOne().toWei()), // maxTotalDelegation
[ '0xb9486167ab9087ab818dc4ce026edb5bf216863364c32e42df2af03c5ced1ad181e7d12f0e6dd5307a73b62247608611' ],
numberToHex(new Unit('10001').asOne().toWei()) // amount
)
const stakingTx: StakingTransaction = new StakingTransaction(
Directive.DirectiveCreateValidator,
stakeMsg,
2, // nonce
numberToHex(new Unit('1').asOne().toWei()), // gasPrice
100, // gasLimit
null, // chainId
);
const signed = stakingTx.rlpSign('4edef2c24995d15b0e25cbd152fb0e2c05d3b79b9c2afd134e6f59f91bf99e48')
console.log( 'Signed transaction' )
console.log(signed)
"""
@pytest.mark.run(order=2)
def test_create_validator_sign(setup_blockchain):
if not (test_validator_object or test_validator_loaded):
pytest.skip('Validator not ready yet')
signed_hash = test_validator_object.sign_create_validator_transaction(
2,
int(convert_one_to_atto(1)),
100,
'4edef2c24995d15b0e25cbd152fb0e2c05d3b79b9c2afd134e6f59f91bf99e48',
None).rawTransaction.hex()
assert signed_hash == '0xf9010580f8bf94ebcd16e8c1d8f493ba04e99a56474122d81a9c58f83885416c69636585616c69636591616c6963652e6861726d6f6e792e6f6e6583426f6295446f6e2774206d6573732077697468206d65212121dcc8872386f26fc10000c9880c7d713b49da0000c887b1a2bc2ec500008a021e19e0c9bab24000008a0878678326eac9000000f1b0b9486167ab9087ab818dc4ce026edb5bf216863364c32e42df2af03c5ced1ad181e7d12f0e6dd5307a73b622476086118a021e27c1806e59a4000024a047c6d444971d4d3c48e8b255aa0e543ebb47b60f761582694e5af5330445aba5a04db1ffea9cca9f9e56e8f782c689db680992903acfd9c06f4593f7fd9a781bd7'
"""
Signature matched from TypeScript
import {
CreateValidator,
EditValidator,
Delegate,
Undelegate,
CollectRewards,
Directive,
Description,
CommissionRate,
Decimal,
StakingTransaction,
} from '@harmony-js/staking'
const { numberToHex, Unit } = require('@harmony-js/utils');
const description: Description = new Description('Alice', 'alice', 'alice.harmony.one', 'Bob', "Don't mess with me!!!")
const commissionRates: CommissionRate = new CommissionRate(new Decimal('0.01'), new Decimal('0.9'), new Decimal('0.05'))
const stakeMsg: EditValidator = new EditValidator(
'one1a0x3d6xpmr6f8wsyaxd9v36pytvp48zckswvv9',
description,
new Decimal('0.06'),
numberToHex(new Unit('10000').asOne().toWei()), // minSelfDelegation
numberToHex(new Unit('40000').asOne().toWei()), // maxTotalDelegation
'0xb9486167ab9087ab818dc4ce026edb5bf216863364c32e42df2af03c5ced1ad181e7d12f0e6dd5307a73b62247608611', // remove key
'0xb9486167ab9087ab818dc4ce026edb5bf216863364c32e42df2af03c5ced1ad181e7d12f0e6dd5307a73b62247608612' // add key
)
const stakingTx: StakingTransaction = new StakingTransaction(
Directive.DirectiveEditValidator,
stakeMsg,
2, // nonce
numberToHex(new Unit('1').asOne().toWei()), // gasPrice
100, // gasLimit
2, // chainId
);
const signed = stakingTx.rlpSign('<KEY>')
console.log( 'Signed transaction' )
console.log(signed)
"""
@pytest.mark.run(order=3)
def test_edit_validator_sign(setup_blockchain):
if not (test_validator_object or test_validator_loaded):
pytest.skip('Validator not ready yet')
signed_hash = test_validator_object.sign_edit_validator_transaction(
2,
int(convert_one_to_atto(1)),
100,
'0.06',
'0xb9486167ab9087ab818dc4ce026edb5bf216863364c32e42df2af03c5ced1ad181e7d12f0e6dd5307a73b62247608612', # add key
"0xb9486167ab9087ab818dc4ce026edb5bf216863364c32e42df2af03c5ced1ad181e7d12f0e6dd5307a73b62247608611", # remove key
'4edef2c24995d15b0e25cbd152fb0e2c05d3b79b9c2afd134e6f59f91bf99e48',
2).rawTransaction.hex()
assert signed_hash == '0xf9012101f8d094ebcd16e8c1d8f493ba04e99a56474122d81a9c58f83885416c69636585616c69636591616c6963652e6861726d6f6e792e6f6e6583426f6295446f6e2774206d6573732077697468206d65212121c887d529ae9e8600008a021e19e0c9bab24000008a0878678326eac9000000b0b9486167ab9087ab818dc4ce026edb5bf216863364c32e42df2af03c5ced1ad181e7d12f0e6dd5307a73b62247608611b0b9486167ab9087ab818dc4ce026edb5bf216863364c32e42df2af03c5ced1ad181e7d12f0e6dd5307a73b6224760861202880de0b6b3a76400006428a0656d6741687ec1e42d1699274584a1777964e939b0ef11f3ff0e161859da21a2a03fc51e067f9fb6c96bee5ceccad4104f5b4b334a86a36a2f53d10b9a8e4a268a'
@pytest.mark.run(order=4)
def test_invalid_validator(setup_blockchain):
if not (test_validator_object or test_validator_loaded):
pytest.skip('Validator not ready yet')
with pytest.raises(InvalidValidatorError):
info = {
'name': 'Alice',
}
test_validator_object.load(info)
with pytest.raises(InvalidValidatorError):
test_validator_object.set_name('a'*141)
with pytest.raises(InvalidValidatorError):
test_validator_object.set_identity('a'*141)
with pytest.raises(InvalidValidatorError):
test_validator_object.set_website('a'*141)
with pytest.raises(InvalidValidatorError):
test_validator_object.set_security_contact('a'*141)
with pytest.raises(InvalidValidatorError):
test_validator_object.set_details('a'*281)
with pytest.raises(InvalidValidatorError):
test_validator_object.set_min_self_delegation(1)
with pytest.raises(InvalidValidatorError):
test_validator_object.set_max_total_delegation(1)
with pytest.raises(InvalidValidatorError):
test_validator_object.set_amount(1)
with pytest.raises(InvalidValidatorError):
test_validator_object.set_max_rate('2.0')
with pytest.raises(InvalidValidatorError):
test_validator_object.set_max_change_rate('-2.0')
with pytest.raises(InvalidValidatorError):
test_validator_object.set_rate('-2.0')
@pytest.mark.run(order=5)
def test_validator_getters(setup_blockchain):
if not (test_validator_object or test_validator_loaded):
pytest.skip('Validator not ready yet')
assert test_validator_object.get_address() == 'one1a0x3d6xpmr6f8wsyaxd9v36pytvp48zckswvv9'
assert test_validator_object.add_bls_key('5')
assert test_validator_object.remove_bls_key('5')
assert test_validator_object.get_name() == 'Alice'
assert test_validator_object.get_identity() == 'alice'
assert test_validator_object.get_website() == 'alice.harmony.one'
assert test_validator_object.get_security_contact() == 'Bob'
assert test_validator_object.get_details() == "Don't mess with me!!!"
assert isinstance(test_validator_object.get_min_self_delegation(), Decimal)
assert isinstance(test_validator_object.get_max_total_delegation(), Decimal)
assert isinstance(test_validator_object.get_amount(), Decimal)
assert isinstance(test_validator_object.get_max_rate(), Decimal)
assert isinstance(test_validator_object.get_max_change_rate(), Decimal)
assert isinstance(test_validator_object.get_rate(), Decimal)
assert len(test_validator_object.get_bls_keys()) > 0
@pytest.mark.run(order=6)
def test_validator_load_from_blockchain(setup_blockchain):
test_validator_object2 = validator.Validator('one109r0tns7av5sjew7a7fkekg4fs3pw0h76pp45e')
test_validator_object2.load_from_blockchain()
```
#### File: tests/util-pyhmy/test_util.py
```python
import shutil
import os
import decimal
import json
import subprocess
from pathlib import Path
import pytest
from pyhmy import util
TEMP_DIR = "/tmp/pyhmy-testing/test-util"
@pytest.fixture(scope="session", autouse=True)
def setup():
shutil.rmtree(TEMP_DIR, ignore_errors=True)
os.makedirs(TEMP_DIR, exist_ok=True)
def test_json_load():
dec = util.json_load('1.1', parse_float=decimal.Decimal)
assert isinstance(dec, decimal.Decimal)
assert float(dec) == 1.1
ref_dict = {
'test': 'val',
'arr': [
1,
2,
3
]
}
loaded_dict = util.json_load(json.dumps(ref_dict))
assert str(ref_dict) == str(loaded_dict)
def test_chain_id_to_int():
assert util.chain_id_to_int(2) == 2
assert util.chain_id_to_int('HmyMainnet') == 1
def test_get_gopath():
assert isinstance(util.get_gopath(), str)
def test_get_goversion():
assert isinstance(util.get_goversion(), str)
def test_convert_one_to_hex():
assert util.convert_one_to_hex('0xebcd16e8c1d8f493ba04e99a56474122d81a9c58') == '0xeBCD16e8c1D8f493bA04E99a56474122D81A9c58'
assert util.convert_one_to_hex('one1a0x3d6xpmr6f8wsyaxd9v36pytvp48zckswvv9') == '0xeBCD16e8c1D8f493bA04E99a56474122D81A9c58'
def test_get_bls_build_variables():
assert isinstance(util.get_bls_build_variables(), dict)
def test_is_active_shard():
assert isinstance(util.is_active_shard(''), bool)
``` |
{
"source": "johnashu/Various-Sorting-and-SEarching-Algorithms",
"score": 4
} |
#### File: Various-Sorting-and-SEarching-Algorithms/search/linear.py
```python
a = [1, 5, 6, 7, 8, 9, 0, 'john', 56, 74, 456, 3, 6, 42, 53]
def maf_linear(a, x):
"""
Step 1: Set i to 1
Step 2: if i > n then go to step 7
Step 3: if A[i] = x then go to step 6
Step 4: Set i to i + 1
Step 5: Go to Step 2
Step 6: Print Element x Found at index i and go to step 8
Step 7: Print element not found
Step 8: Exit
"""
e = "Item Not Found"
    for i in a:
        if i == x:
            print('Item found at index:', a.index(i))
            return a.index(i)
    print(e)
v = 'john'
normal = maf_linear(a, v)
print("Results from for Loop", normal)
def maf_lin_com(a, x):
""" comprehensive linear search"""
e = "Item Not Found"
return [a.index(i) for i in a if i == x]
print("Results from List Comprehension:", maf_lin_com(a, v) )
```
#### File: Various-Sorting-and-SEarching-Algorithms/search/strassen_matrix_multiplication.py
```python
a = [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]
b = [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]
def create_matrix(p, q): # create a matrix filled with 0s
matrix = [[0 for row in range(p)] for col in range(q)]
return matrix
def mul_matrix(a, b): # multiply matrix a and b
if len(a[0]) != len(b):
return
else:
p_matrix = create_matrix(len(a), len(b[0]))
for i in range(len(a)):
for j in range(len(b[0])):
for k in range(len(b)):
p_matrix[i][j] += a[i][k] * b[k][j]
return p_matrix
def quart_split(matrix): # split down into 1/4
a = matrix
b = matrix
c = matrix
d = matrix
while(len(a) > len(matrix) / 2):
a = a[:len(a) // 2]
b = b[:len(b) // 2]
c = c[len(c) // 2:]
d = d[len(d) // 2:]
while(len(a[0]) > len(matrix[0]) // 2):
for i in range(len(a[0]) // 2):
a[i] = a[i][:len(a[i]) // 2]
b[i] = b[i][len(b[i]) // 2:]
c[i] = c[i][:len(c[i]) // 2]
d[i] = d[i][len(d[i]) // 2:]
return a, b, c, d
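# Added note (illustrative): for the 4x4 matrices above, quart_split returns the four
# 2x2 quarters in the order top-left, top-right, bottom-left, bottom-right, i.e. the
# a11, a12, a21, a22 blocks consumed by strassen() below.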
def add_matrix(a, b):
if type(a) == int:
d = a + b
else:
d = []
for i in range(len(a)):
c = []
for j in range(len(a[0])):
c.append(a[i][j] + b[i][j])
d.append(c)
return d
def sub_matrix(a, b):
if type(a) == int:
d = a - b
else:
d = []
for i in range(len(a)):
c = []
for j in range(len(a[0])):
c.append(a[i][j] - b[i][j])
d.append(c)
return d
def strassen(a, b, q):
# base case: 1x1 matrix
if q == 1:
d = [[0]]
d[0][0] = a[0][0] * b[0][0]
return d
else:
# split matrices into quarters
a11, a12, a21, a22 = quart_split(a)
b11, b12, b21, b22 = quart_split(b)
# p1 = (a11+a22) * (b11+b22)
p1 = strassen(add_matrix(a11, a22), add_matrix(b11, b22), q / 2)
# p2 = (a21+a22) * b11
p2 = strassen(add_matrix(a21, a22), b11, q / 2)
# p3 = a11 * (b12-b22)
p3 = strassen(a11, sub_matrix(b12, b22), q / 2)
        # p4 = a22 * (b21-b11)
p4 = strassen(a22, sub_matrix(b21, b11), q / 2)
# p5 = (a11+a12) * b22
p5 = strassen(add_matrix(a11, a12), b22, q / 2)
# p6 = (a21-a11) * (b11+b12)
p6 = strassen(sub_matrix(a21, a11), add_matrix(b11, b12), q / 2)
# p7 = (a12-a22) * (b21+b22)
p7 = strassen(sub_matrix(a12, a22), add_matrix(b21, b22), q / 2)
# c11 = p1 + p4 - p5 + p7
c11 = add_matrix(sub_matrix(add_matrix(p1, p4), p5), p7)
# c12 = p3 + p5
c12 = add_matrix(p3, p5)
# c21 = p2 + p4
c21 = add_matrix(p2, p4)
# c22 = p1 + p3 - p2 + p6
c22 = add_matrix(sub_matrix(add_matrix(p1, p3), p2), p6)
c = create_matrix(len(c11) * 2, len(c11) * 2)
for i in range(len(c11)):
for j in range(len(c11)):
c[i][j] = c11[i][j]
c[i][j + len(c11)] = c12[i][j]
c[i + len(c11)][j] = c21[i][j]
c[i + len(c11)][j + len(c11)] = c22[i][j]
return c
print("Strassen Outputs:", strassen(a, b, 4))
print("Should be:", mul_matrix(a, b))
``` |
{
"source": "JohnAspro/TelecomsExercise",
"score": 3
} |
#### File: TelecomsExercise/IntroToTele/testEx.py
```python
import os
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from scipy import signal
from matplotlib.ticker import StrMethodFormatter
# School ID = 03118942
# F=9+4+2=15=>1+5=6
myFreq=6000
A=4
# Helper functions
def PlotYLim(Max, Min):
plt.ylim([Min,Max])
def plotSignals(time1, signal1, color1, legend1, PlotTitle, numberOfSignals=1, time2=None, signal2=None, color2=None, legend2=None):
if numberOfSignals==1:
plt.plot(time1, signal1, color1)
        plt.legend([legend1])
elif numberOfSignals==2:
plt.plot(time1, signal1, color1, time2, signal2, '.', color2)
plt.legend([legend1, legend2])
else:
return None
plt.xlabel('Seconds')
plt.ylabel('Volts')
plt.title(PlotTitle)
plt.grid()
plt.show()
#---------------| Exercise 2 |-------------------
#(A)
fs1=30*myFreq #180kHz
fs2=50*myFreq #300kHz
def mid_riser(signal):
for i in range(len(signal)):
        if signal[i] > 0b0111:
            signal[i] = 7
        elif signal[i] < -0b1000:
            signal[i] = -8
else:
if (signal[i] - round(signal[i]) > 0) and (signal[i] > 0):
signal[i] = round(signal[i]) + 1
elif (signal[i] - round(signal[i]) < 0) and (signal[i] < 0):
signal[i] = round(signal[i]) - 1
else:
signal[i] = round(signal[i])
return signal
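# Illustrative examples (added comment): mid_riser clamps samples to the 4-bit range
# [-8, 7] and pushes fractional values away from zero, e.g. 3.4 -> 4, -2.3 -> -3,
# 7.9 -> 7 (clamped), -8.2 -> -8 (clamped).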
# grayCodeBinary = [0000, 0001, 0011, 0010, 0110, 0111, 0101, 0100, 1100, 1101, 1111, 1110, 1010, 1011, 1001, 1000]
def grayCodeMap(signal):
grayCode4bit = [0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8]
for i in range(len(signal)):
signal[i] = grayCode4bit[int(signal[i])+8]
return signal
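# Illustrative example (added comment): a quantized level of -8 maps to list index 0
# (Gray code 0b0000), while +7 maps to index 15 (Gray code 0b1000).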
def calcError(QuantifiedSamples, accualSignalSamples, numOfSamples):
i=0
s=0
while i < numOfSamples:
s+=accualSignalSamples[i]-QuantifiedSamples[i]
i+=1
return s/numOfSamples
def calcAverageSigPower(signal, numOfSamples):
i=0
s=0
    while i < numOfSamples:
        s += signal[i]**2
        i += 1
return s/numOfSamples
def calcSNR(StartingSignal, numOfSamples):
numOfBitsPerSample = 4
maxSigVoltage = 7
return ((2**(2*numOfBitsPerSample))*(3*calcAverageSigPower(StartingSignal, numOfSamples)/maxSigVoltage**2))
#(a)
# t1 = np.linspace(0, 4/myFreq, 4*int(fs1/myFreq))
t1 = np.arange(0, 4/myFreq, 1/fs1)
triangle1 = signal.sawtooth(2 * np.pi * myFreq * t1, 0.5)*4
trigCopy = signal.sawtooth(2 * np.pi * myFreq * t1, 0.5)
x = mid_riser(triangle1)
# y = grayCodeMap(x)
fig, ax = plt.subplots()
ax.yaxis.set_major_formatter(StrMethodFormatter("{x:04b}"))
ax.yaxis.set_ticks(np.arange(-4, 15, 1))
plotSignals(t1, 4*trigCopy, 'o', 'Fs1', 'Quantified Triangle sampled Fs1')
plotSignals(t1, x, 'o', 'Fs1', 'Quantified Triangle sampled Fs1')
plt.show()
print(calcError(mid_riser(triangle1), trigCopy, 10))
print(calcError(mid_riser(triangle1), trigCopy, 20))
# print(calcSNR(4*triangle1, 10))
# print(calcSNR(4*triangle1, 20))
``` |
{
"source": "JohnataDavi/2d-game-python",
"score": 3
} |
#### File: JohnataDavi/2d-game-python/killer.py
```python
import pygame
class Killer(pygame.sprite.Sprite):
def __init__(self, group, window_size):
super().__init__(group)
self.WINDOW_SIZE = window_size
self.image = pygame.image.load("data/images/piru.png").convert_alpha()
self.image = pygame.transform.scale(self.image, [100, 100])
self.rect = self.image.get_rect()
self.rect[0] = 30
self.speed = 0
self.acceleration = .2
def update(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_w]:
self.speed -= self.acceleration
elif keys[pygame.K_s]:
self.speed += self.acceleration
else:
self.speed *= .95
self.rect[1] += self.speed
if self.rect[1] < -16:
self.rect[1] = -16
self.speed = 0
elif self.rect[1] > self.WINDOW_SIZE[1] - 95:
self.rect[1] = self.WINDOW_SIZE[1] - 95
self.speed = 0
``` |
{
"source": "johnatanDM/silenceUs",
"score": 3
} |
#### File: johnatanDM/silenceUs/main.py
```python
import discord
import os
token = os.getenv("tokenSilenceUs")
class MyClient(discord.Client):
async def on_ready(self):
print('Logged on as', self.user)
servers = len(self.guilds)
activity=discord.Activity(type=discord.ActivityType.watching, name=f"{servers} servidores lotados de impostores")
await self.change_presence(activity=activity)
async def on_message(self, message):
# don't respond to ourselves
if message.author == self.user:
return
        if message.content in ('!help', '!ajuda'):
            msg = '''
            Olá estou aqui para lhe ajudar em suas jogatinas de Among Us!
            Tenha certeza
            '''
            await message.channel.send(msg)
            return
if message.content == '!playing' or message.content == '!p':
#Improvement: Verify if the bot has mute permission!
if message.author.voice and message.author.voice.channel:
channel = message.author.voice.channel
for member in channel.members:
await member.edit(mute = True)
await message.channel.send('Shhhhhhhhh!')
else:
await message.channel.send("You are not connected to a voice channel")
return
if message.content == '!meeting' or message.content == '!m':
if message.author.voice and message.author.voice.channel:
channel = message.author.voice.channel
for member in channel.members:
await member.edit(mute = False)
await message.channel.send('You can speak now!')
else:
await message.channel.send("You are not connected to a voice channel")
return
client = MyClient()
client.run(token)
``` |
{
"source": "JohnatanLicar/GameCalcPy",
"score": 4
} |
#### File: JohnatanLicar/GameCalcPy/game.py
```python
from calcular import Calcular
def start(pontos):
pontos: int = 0
jogar(pontos)
def jogar(pontos: int):
dificuldade: int = int(input('Qual o nivel de dificuldade [1, 2, 3 e 4]: '))
calc: Calcular = Calcular(dificuldade)
calc.mostra_calc
resultado: int = int(input('Resultado: '))
if calc.resposta(resultado):
print('Resposta correta!')
pontos += 1
else:
print('Resposta incorreta!')
pontos -= 1
print(f'Você tem {pontos} Ponto(s)')
continuar: int = int(input('Deseja continuar? [1 - Sim / 0 - Não]: '))
if continuar:
jogar(pontos)
else:
print('obrigado por participar, Volte sempre!')
if __name__ == "__main__":
start(0)
``` |
{
"source": "johnataylor/simplegraph",
"score": 3
} |
#### File: simplegraph/simplegraph/graph.py
```python
class GraphValue:
def __init__(self, value, is_ref=False):
self.value = value
self.is_ref = is_ref
def __hash__(self):
return hash((self.value, self.is_ref))
def __eq__(self, other):
return (self.value, self.is_ref) == (other.value, other.is_ref)
def __str__(self):
return str(self.value) if not self.is_ref else '<' + str(self.value) + '>'
class Triple:
def __init__(self, s, p, o):
self.s = s
self.p = p
self.o = o
class Graph:
def __init__(self):
self._spo = {}
self._pos = {}
self._osp = {}
self.count = 0
def __len__(self):
return self.count
def merge(self, graph):
for t in graph.triples():
self.add(t.s, t.p, t.o)
def add(self, s, p, o):
Graph.__add_to_index(self._spo, s, p, o)
Graph.__add_to_index(self._pos, p, o, s)
if Graph.__add_to_index(self._osp, o, s, p):
self.count += 1
def remove(self, s, p, o):
pass
@staticmethod
def __add_to_index(index, x, y, z):
if x not in index:
index[x] = {y:{z}}
return True
if y not in index[x]:
index[x][y] = {z}
return True
s = index[x][y]
before = len(s)
s.add(z)
after = len(s)
return after > before
def triples(self):
for s in self._spo:
for p in self._spo[s]:
for o in self._spo[s][p]:
yield Triple(s, p, o)
def get_by_subject(self, s):
if s in self._spo:
for p in self._spo[s]:
for o in self._spo[s][p]:
yield Triple(s, p, o)
def get_by_subject_predicate(self, s, p):
if s in self._spo and p in self._spo[s]:
for o in self._spo[s][p]:
yield Triple(s, p, o)
def get_by_predicate(self, p):
if p in self._pos:
for o in self._pos[p]:
for s in self._pos[p][o]:
yield Triple(s, p, o)
def get_by_predicate_object(self, p, o):
if p in self._pos and o in self._pos[p]:
for s in self._pos[p][o]:
yield Triple(s, p, o)
def get_by_object(self, o):
if o in self._osp:
for s in self._osp[o]:
for p in self._osp[o][s]:
yield Triple(s, p, o)
def get_by_object_subject(self, o, s):
if o in self._osp and s in self._osp[o]:
for p in self._osp[o][s]:
yield Triple(s, p, o)
def contains(self, t):
if t.s in self._spo:
tpo = self._spo[t.s]
if t.p in tpo:
to = tpo[t.p]
return t.o in to
return False
def contains_graph(self, g):
for t in g.triples():
if not self.contains(t):
return False
return True
def equals(self, g):
if len(self) != len(g):
return False
return self.contains_graph(g)
def print_triple(t):
print('<' + str(t.s) + '> <' + str(t.p) + '> ' + str(t.o))
def print_graph(graph):
for t in graph.triples():
print_triple(t)
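# Minimal usage sketch (added for illustration; not part of the original module):
# build a small graph, print it, then query by subject and predicate.
if __name__ == '__main__':
    g = Graph()
    g.add('alice', 'knows', GraphValue('bob'))
    g.add('alice', 'age', GraphValue(30))
    g.add('bob', 'knows', GraphValue('alice'))
    print_graph(g)                                       # all three triples
    for t in g.get_by_subject_predicate('alice', 'knows'):
        print_triple(t)                                  # only <alice> <knows> bob
    print(len(g))                                        # 3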
```
#### File: simplegraph/tests/data.py
```python
from simplegraph import Graph, GraphValue
def graph_a0():
g = Graph()
g.add('a0', 'p', GraphValue('o0'))
g.add('a0', 'p', GraphValue('o1'))
g.add('a0', 'p', GraphValue('o2'))
g.add('a0', 'q', GraphValue('o0'))
g.add('a0', 'q', GraphValue('o1'))
g.add('a0', 'q', GraphValue('o2'))
return g
def graph_a1():
g = Graph()
g.add('a1', 'p', GraphValue('o0'))
g.add('a1', 'p', GraphValue('o1'))
g.add('a1', 'p', GraphValue('o2'))
g.add('a1', 'q', GraphValue('o0'))
g.add('a1', 'q', GraphValue('o1'))
g.add('a1', 'q', GraphValue('o2'))
return g
def graph_a2():
g = Graph()
g.add('a2', 'p', GraphValue('o0'))
g.add('a2', 'p', GraphValue('o1'))
g.add('a2', 'p', GraphValue('o2'))
g.add('a2', 'q', GraphValue('o0'))
g.add('a2', 'q', GraphValue('o1'))
g.add('a2', 'q', GraphValue('o2'))
return g
``` |
{
"source": "johnathandavis/dotnet-codebuild-corebuilder",
"score": 2
} |
#### File: dotnet-codebuild-corebuilder/corebuild/build-all-dotnet.py
```python
import os
import glob
import subprocess
CB_BASE_DIR = os.environ['CODEBUILD_SRC_DIR']
BUILD_SCRIPT_LOCATION = '/corebuild/build_project.sh'
def find_build_projects():
csprojs = []
    for filename in glob.iglob('**/*.csproj', recursive=True):
csprojs.append(filename)
print('Found csproj "' + filename + "'")
build_dirs = []
for csproj in csprojs:
builddir = os.path.dirname(os.path.abspath(csproj))
if 'Test' in builddir:
print('Skipping "' + builddir + '" because it appears to be a test project.')
else:
build_dirs.append(builddir)
print('Adding build directory to list: "' + builddir + '"')
return build_dirs
def build_project(project_dir):
build_process = subprocess.Popen(BUILD_SCRIPT_LOCATION, cwd=project_dir)
build_process.communicate()
code = build_process.returncode
if code != 0:
raise Exception("Non-zero response code from dotnet build.")
build_projects = find_build_projects()
print('Starting build process for ' + str(len(build_projects)) + ' project(s).')
for bp in build_projects:
project_name = os.path.basename(bp)
print('Building project "' + project_name + '"...')
build_project(bp)
``` |
{
"source": "johnathan-dev/discord-bot-template",
"score": 3
} |
#### File: discord-bot-template/cogs/Misc.py
```python
# Discord libraries
import discord
from discord.ext import commands
from discord.ext.commands import cooldown, command, BucketType
from datetime import datetime
# Allows asynchronous I/O
import asyncio
# Allows access to .json files
import json
class Misc(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print("Misc commands are ready")
# Responds with the bot's latency
@command()
@cooldown(1, 5, BucketType.guild)
async def ping(self, ctx):
print(f"{ctx.author} in {ctx.message.guild.name} ({ctx.guild.id}) pinged the bot | {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
await ctx.send(f"Pong: {round(self.client.latency * 1000)}ms")
# Responds with a help embed
@command()
@cooldown(1, 10, BucketType.guild)
async def help(self, ctx):
embed = discord.Embed(title="Help", colour=discord.Colour(0xd99e1f), description="----------")
embed.set_author(name="Bot")
embed.add_field(name="**Fun Stuff**", value="------------", inline=False)
embed.add_field(name="*8ball*", value="Takes in a question a returns an answer.")
embed.add_field(name="*dice*", value="Returns a number 1-6. If no range is specified or the range is too big, it will default to 1-6.")
embed.add_field(name="*fact*", value="Returns a cool fact.")
embed.add_field(name="*getpfp*", value="Returns the avatar of a specified user. If no user is specified, It will return the avatar of the user who called the command.")
embed.add_field(name="", value="", inline=False)
embed.add_field(name="**Moderation**", value="------------", inline=False)
embed.add_field(name="*clear*", value="Clears a specified amount of messages. If no amount is specified, 5 messages will be cleared.")
embed.add_field(name="*kick*", value="Kicks a specified user.")
embed.add_field(name="*ban*", value="Bans a specified user.")
embed.add_field(name="", value="", inline=False)
embed.add_field(name="**Games**", value="------------", inline=False)
embed.add_field(name="*rps*", value="Plays rock paper scissors with the bot.")
embed.add_field(name="", value="", inline=False)
embed.add_field(name="**Misc**", value="------------", inline=False)
embed.add_field(name="*help*", value="I think you can figure this one out")
embed.add_field(name="*info*", value="Returns information about the bot, the guild, and the user that called the command.")
embed.add_field(name="*changeprefix*", value="Changes prefix. You need the **changeprefix** role to do this.")
await ctx.message.author.send(embed=embed)
await ctx.send(f"Check your dms, {ctx.message.author} :wink:")
# Responds with an info embed
@command()
@cooldown(1, 5, BucketType.guild)
async def info(self, ctx):
print(f"{ctx.author} in {ctx.message.guild.name} ({ctx.guild.id}) requested info | {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
try:
embed = discord.Embed(title="Info", colour=discord.Colour(0xffaaaa), description="Information about the bot, the guild, and the user that called the command")
embed.set_author(name="Bot")
embed.add_field(name="__***Bot info***__", value="------------", inline=False)
embed.add_field(name="Name:", value=f"```{self.client.user}```")
embed.add_field(name="ID:", value=f"```{self.client.user.id}```")
embed.add_field(name="Servers:", value=f"```{len(self.client.guilds)}```")
embed.add_field(name="Latency:", value=f"```{round(self.client.latency*1000)}```")
embed.add_field(name="*Guild info*", value="------------", inline=False)
embed.add_field(name="Name:", value=f"```{ctx.guild.name}```")
embed.add_field(name="ID:", value=f"```{ctx.guild.id}```")
embed.add_field(name="Member count:", value=f"```{ctx.guild.member_count}```")
embed.add_field(name="*Member info*", value="------------", inline=False)
embed.add_field(name="Name:", value=f"```{ctx.message.author}```")
embed.add_field(name="ID:", value=f"```{ctx.message.author.id}```")
embed.add_field(name="Nick:", value=f"```{ctx.message.author.nick}```")
embed.add_field(name="Account created:", value=f"```{ctx.message.author.created_at}```")
except:
await ctx.send("There was an error. Make sure you're not in a dm")
return
await ctx.send(embed=embed)
#------------------------------------------------ Errors Section ------------------------------------------------
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
try:
print(f"There was an error in {ctx.message.guild} caused by {ctx.message.author} | {error}")
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(f"This command is on a cooldown. Please wait {round(error.retry_after)} seconds")
elif isinstance(error, commands.BadArgument):
await ctx.send(f"Error: Improper arguments")
elif isinstance(error, commands.MissingPermissions):
await ctx.send(f"Error: You or the bot don't have the proper permissions to do carry out that command")
elif isinstance(error, commands.CommandNotFound):
pass
elif isinstance(error, commands.DiscordException):
await ctx.send(f"Error: There was an error")
else:
await ctx.send(f"There was an unknown error.")
except:
print(f"There was an error in {ctx.message.guild} caused by {ctx.message.author} | {error}")
def setup(client):
client.add_cog(Misc(client))
``` |
{
"source": "johnathaningle/CityYouthMatrix",
"score": 2
} |
#### File: apps/accounts/admin.py
```python
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .forms import UserCreationForm
from .models import (
Address,
Driver,
Family,
FamilyAddress,
FamilyMember,
User,
)
class UserAdmin(UserAdmin):
list_display = ('email', 'first_name', 'last_name', 'contact_number', 'is_superuser')
fieldsets = (
(('Personal info'), {'fields': ('first_name', 'last_name', 'email', 'contact_number')}),
(None, {'fields': ('password',)}),
(('Permissions'), {
'fields': ('is_active', 'is_staff', 'is_superuser'),
}),
(('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', '<PASSWORD>', '<PASSWORD>'),
}),
)
add_form = UserCreationForm
class DriverAdmin(admin.ModelAdmin):
list_filter = ('is_verified',)
list_select_related = True
def get_queryset(self, request):
return super(DriverAdmin, self).get_queryset(request).select_related('user')
class FamilyMemberInline(admin.TabularInline):
model = FamilyMember
class FamilyAddressInline(admin.TabularInline):
model = FamilyAddress
class FamilyAdmin(admin.ModelAdmin):
list_display = ('user', 'family_name', 'adults', 'children')
list_select_related = True
inlines = [
FamilyMemberInline,
FamilyAddressInline,
]
def family_name(self, obj):
return obj.user.last_name
def adults(self, obj):
return obj.familymember_set.filter(member_type='A').count()
def children(self, obj):
return obj.familymember_set.filter(member_type='C').count()
class FamilyAddressAdmin(admin.ModelAdmin):
list_display = ('user', 'family', '__str__')
list_select_related = True
def user(self, obj):
return obj.family.user.email
class FamilyMemberAdmin(admin.ModelAdmin):
list_display = ('user', 'family', 'full_name', 'member_type')
list_filter = ('family', 'member_type',)
list_select_related = True
def user(self, obj):
return obj.family.user.email
admin.site.register(User, UserAdmin)
admin.site.register(Driver, DriverAdmin)
admin.site.register(Family, FamilyAdmin)
admin.site.register(FamilyAddress, FamilyAddressAdmin)
admin.site.register(FamilyMember, FamilyMemberAdmin)
```
#### File: apps/trips/models.py
```python
from django.db import models
from django.db.models import F
from django.utils import timezone
from smart_selects.db_fields import ChainedForeignKey, ChainedManyToManyField
from cityyouthmatrix.apps.accounts.models import Address
class ActivityPartner(models.Model):
"""
An organization hosting the extra-curricular event and learning experience.
The location/destination information for a CYM-sponsored activity
The location stores the address for activity-partner or CYM location.
"""
name = models.CharField(max_length=100, unique=True)
is_active = models.BooleanField(default=True, help_text='Is the activity partner still active')
def __str__(self):
return self.name
class EventAddress(Address):
pass
class Event(models.Model):
"""Every event takes place at an activity partner's location
"""
class EventSeasons(models.TextChoices):
SPRING = "Spring", ("Spring")
SUMMER = "Summer", ("Summer")
FALL = "Fall", ("Fall")
WINTER = "Winter", ("Winter")
name = models.CharField(max_length=200)
activity_partner = models.ForeignKey(
ActivityPartner,
null=True,
blank=True,
on_delete=models.DO_NOTHING,
limit_choices_to={'is_active': True}
)
event_datetime = models.DateTimeField(help_text='The start date and time of the event')
address = models.ForeignKey(EventAddress, on_delete=models.CASCADE)
season = models.CharField(max_length=6, choices=EventSeasons.choices)
def __str__(self):
return f"{str(self.activity_partner) + ' - ' if self.activity_partner else ''}{self.name}"
class Trip(models.Model):
event = models.ForeignKey(Event, on_delete=models.CASCADE)
family = models.ForeignKey('accounts.Family', on_delete=models.CASCADE)
passengers = ChainedManyToManyField(
'accounts.FamilyMember',
chained_field='family',
chained_model_field='family',
horizontal=False
)
pickup_address = ChainedForeignKey(
'accounts.FamilyAddress',
chained_field='family',
chained_model_field='family',
related_name='+',
on_delete=models.DO_NOTHING,
)
pickup_location = models.CharField(max_length=100, blank=True, help_text='Specific pickup location at the address (front, side, etc)')
pickup_datetime = models.DateTimeField(null=True, db_index=True, help_text='Date and time to pickup')
pickup_completed = models.BooleanField(default=False, help_text='Was the family picked up and dropped off at the event')
pickup_completed_datetime = models.DateTimeField(null=True, help_text='Date and time the family was dropped off at the event')
pickup_driver = models.ForeignKey(
'accounts.Driver',
related_name='pickup_driver',
on_delete=models.DO_NOTHING,
limit_choices_to={'is_verified': True},
null=True,
blank=True,
)
pickup_driver_notes = models.CharField(max_length=500, blank=True, help_text='Notes about the pickup from the driver')
pickup_family_notes = models.CharField(max_length=500, blank=True, help_text='Notes about the pickup from the family')
return_address = ChainedForeignKey(
'accounts.FamilyAddress',
chained_field='family',
chained_model_field='family',
related_name='+',
on_delete=models.DO_NOTHING,
)
return_datetime = models.DateTimeField(null=True, db_index=True, help_text='Date and time to return')
return_completed = models.BooleanField(default=False, help_text='Was the family picked up and dropped off at the return address')
return_completed_datetime = models.DateTimeField(null=True, help_text='Date and time the family was dropped off at the return address')
return_driver = models.ForeignKey(
'accounts.Driver',
related_name='return_driver',
on_delete=models.DO_NOTHING,
limit_choices_to={'is_verified': True},
null=True,
blank=True,
)
return_driver_notes = models.CharField(max_length=500, blank=True, help_text='Notes about the return from the driver')
return_family_notes = models.CharField(max_length=500, blank=True, help_text='Notes about the return from the family')
is_cancelled = models.BooleanField(default=False, help_text='Was the trip cancelled')
cancelled_datetime = models.DateTimeField(null=True, help_text='Date and time the trip was cancelled')
car_seat_required = models.BooleanField(default=False)
booster_seat_required = models.BooleanField(default=False)
special_needs = models.CharField(max_length=300, blank=True, help_text='Any special needs of the family')
next_required_datetime = models.DateTimeField(null=True, db_index=True, help_text='Internal field for sorting')
class Meta(object):
unique_together = ('event', 'family')
ordering = [F('next_required_datetime').asc(nulls_last=True)]
def __str__(self):
return f'{self.family} {self.event.name} Trip'
def save(self, *args, **kwargs):
if self.is_cancelled and self.cancelled_datetime is None:
self.cancelled_datetime = timezone.now()
if not self.is_cancelled and self.cancelled_datetime is not None:
self.cancelled_datetime = None
if self.pickup_completed and self.pickup_completed_datetime is None:
self.pickup_completed_datetime = timezone.now()
if self.return_completed and self.return_completed_datetime is None:
self.return_completed_datetime = timezone.now()
if self.is_cancelled:
self.next_required_datetime = None
elif self.pickup_completed:
self.next_required_datetime = self.return_datetime
elif self.return_completed:
self.next_required_datetime = None
else:
self.next_required_datetime = self.pickup_datetime
super(Trip, self).save(*args, **kwargs)
@property
def is_available(self):
return (
not self.is_cancelled and
(self.pickup_driver is None or self.return_driver is None) and
not self.return_completed
)
@property
def pickup_duration(self):
if self.pickup_datetime is None or self.pickup_completed_datetime is None:
return None
        return int((self.pickup_completed_datetime - self.pickup_datetime).total_seconds() // 60)
@property
def return_duration(self):
if self.return_datetime is None or self.return_completed_datetime is None:
return None
        return int((self.return_completed_datetime - self.return_datetime).total_seconds() // 60)
``` |
{
"source": "johnathanlouie/PyAeonDB",
"score": 3
} |
#### File: PyAeonDB/PyAeonDB/PyAeonDB.py
```python
from typing import List, Set, Dict, Tuple
import csv
import os
import json
import time
import datetime
Table = List[str]
Index = Dict[str, List[int]]
Fuzzy = Dict[str, List[str]]
ROOT_PATH = "C:/Arcology/AeonDB"
TABLE_DIR = "C:/Arcology/AeonDB/%s"
TABLE_PATH = "C:/Arcology/AeonDB/%s/table.txt"
INDEX_PATH = "C:/Arcology/AeonDB/%s/index.txt"
FUZZY_PATH = "C:/Arcology/AeonDB/%s/fuzzy.txt"
FUZZY2_PATH = "C:/Arcology/AeonDB/%s/fuzzy2.txt"
g_tables: Dict[str, Table] = dict()
g_indices: Dict[str, Index] = dict()
g_fuzzyDict: Dict[str, Fuzzy] = dict()
g_fuzzyDict2: Dict[str, Fuzzy] = dict()
def readTable(tableName: str) -> Table:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
return json.load(open(TABLE_PATH % tableName))
def writeTable(tableName: str, table: Table) -> None:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
json.dump(table, open(TABLE_PATH % tableName, 'w+'))
return None
def readIndex(tableName: str) -> Index:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
return json.load(open(INDEX_PATH % tableName))
def writeIndex(tableName: str, index: Index) -> None:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
json.dump(index, open(INDEX_PATH % tableName, 'w+'))
return None
def readFuzzy(tableName: str) -> Fuzzy:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
return json.load(open(FUZZY_PATH % tableName))
def writeFuzzy(tableName: str, fuzzy: Fuzzy) -> None:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
json.dump(fuzzy, open(FUZZY_PATH % tableName, 'w+'))
return None
def readFuzzy2(tableName: str) -> Fuzzy:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
return json.load(open(FUZZY2_PATH % tableName))
def writeFuzzy2(tableName: str, fuzzy: Fuzzy) -> None:
os.makedirs(TABLE_DIR % tableName, exist_ok=True)
json.dump(fuzzy, open(FUZZY2_PATH % tableName, 'w+'))
return None
def listTables() -> List[str]:
os.makedirs(ROOT_PATH, exist_ok=True)
return os.listdir(ROOT_PATH)
def timestamp() -> str:
return datetime.datetime.fromtimestamp(time.time()).strftime("%m/%d/%Y %H:%M:%S")
g_cmdHelpMap = {
"createtable" : "createTable {tableDesc}",
"getrows" : "getRows {tableName} {key} {count}",
"importtable" : "importTable {tableName} {CSV filespec}",
"listtables" : "listTables",
"indextable" : "indexTable {tableName}",
"find" : "find {tableName} {term1 term2 term3...}",
"fuzzysearch" : "fuzzySearch {tableName} {term1 term2 term3...}",
"quit" : "quit"
}
def printHelp() -> None:
for help in g_cmdHelpMap.values():
print(help)
return
def toBigrams(s: str) -> Set[str]:
ngrams = set()
if len(s) < 2:
ngrams.add(s)
return ngrams
for i in range(len(s) - 1):
ngrams.add(s[i:i+2])
return ngrams
def dicesCoefficient(a: Set[str], b: Set[str]) -> float:
return float(2 * len(a.intersection(b))) / float(len(a) + len(b))
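# Illustrative example (added comment):
#   toBigrams("hello") -> {"he", "el", "ll", "lo"}
#   dicesCoefficient(toBigrams("hello"), toBigrams("hallo")) = 2*2 / (4+4) = 0.5,
#   which is below the 0.6 similarity threshold used by createIndex/expandQuery below.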
def preprocess(s: str) -> str:
    # Map punctuation and digits to spaces so that rows split cleanly into terms.
    unwanted = "~`!@#$%^&*()-_+={}[]|\\;:\"'<>,./?1234567890"
    return s.translate(str.maketrans(unwanted, " " * len(unwanted)))
def createIndex(table: Table) -> Tuple[Index, Fuzzy, Fuzzy]:
startTime = time.time()
index: Index = dict()
fuzzy1: Fuzzy = dict()
fuzzy2: Fuzzy = dict()
fuzzy3: Dict[str, Set[str]] = dict()
for rowId in range(len(table)):
row = table[rowId]
row = preprocess(row).lower()
terms = set(row.split())
if "" in terms:
terms.remove("")
for term in terms:
if term not in index:
index.update({term: list()})
rowIds = index.get(term)
if rowId not in rowIds:
rowIds.append(rowId)
if term not in fuzzy3:
atLeastOneBigram = set()
bigrams = toBigrams(term)
fuzzy3.update({term: bigrams})
for bigram in bigrams:
if bigram not in fuzzy2:
fuzzy2.update({bigram: list()})
bigramList = fuzzy2.get(bigram)
bigramList.append(term)
atLeastOneBigram.update(bigramList)
related = list()
for term2 in atLeastOneBigram:
if term == term2:
related.append(term2)
elif dicesCoefficient(fuzzy3.get(term), fuzzy3.get(term2)) > 0.6:
related.append(term2)
fuzzy1.get(term2).append(term)
fuzzy1.update({term: related})
print("Indexed row %d of %d." % (rowId, len(table)))
print("Indexing Time: " + str(time.time() - startTime))
return index, fuzzy1, fuzzy2
def importCsv(filename: str) -> Table:
table = [" ".join(row) for row in csv.reader(open(filename))]
table.pop(0)
return table
def expandQuery(term: str, index: Index, fuzzy: Fuzzy, fuzzy2: Fuzzy) -> Set[int]:
rowIds = set()
relateds = set()
if term not in fuzzy:
possiblyRelateds = set()
bigrams = toBigrams(term)
for bigram in bigrams:
if bigram in fuzzy2:
possiblyRelateds.update(fuzzy2.get(bigram))
for pRelated in possiblyRelateds:
if dicesCoefficient(toBigrams(pRelated), bigrams) > 0.6:
relateds.add(pRelated)
else:
relateds = fuzzy.get(term)
for related in relateds:
rowIds.update(index.get(related))
return rowIds
def find(keyTerms: Set[str], table: Table, index: Index, fuzzy: Fuzzy, fuzzy2: Fuzzy, isFuzzy: bool) -> Table:
lowKeyTerms = {term.lower() for term in keyTerms}
rowIds = set()
results = list()
first = lowKeyTerms.pop()
if isFuzzy:
rowIds.update(expandQuery(first, index, fuzzy, fuzzy2))
elif first in index:
rowIds.update(index.get(first))
else:
return results
for word in lowKeyTerms:
if isFuzzy:
rowIds.intersection_update(expandQuery(word, index, fuzzy, fuzzy2))
elif word in index:
rowIds.intersection_update(index.get(word))
else:
return results
for i in rowIds:
results.append(table[i])
return results
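# Added note: key terms are ANDed together - a row is returned only if it matches every
# term; with isFuzzy=True each term is first expanded to all indexed terms whose Dice
# bigram similarity exceeds 0.6 (e.g. "color" would also match "colour").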
def loadAllTables() -> None:
tableNames = listTables()
for tableName in tableNames:
print("%s Log.info: Table %s: Backup volume offline. Waiting for new volume." % (timestamp(), tableName))
try:
table = readTable(tableName)
g_tables.update({tableName: table})
print("%s Log.info: Table %s: Recovered %d rows." % (timestamp(), tableName, len(table)))
except OSError:
print("%s Log.info: Table %s: Could not read file." % (timestamp(), tableName))
except json.JSONDecodeError:
print("%s Log.info: Table %s: File is corrupted." % (timestamp(), tableName))
try:
index = readIndex(tableName)
g_indices.update({tableName: index})
print("%s Log.info: Index %s: Recovered %d terms." % (timestamp(), tableName, len(index)))
except OSError:
print("%s Log.info: Index %s: Could not read file." % (timestamp(), tableName))
except json.JSONDecodeError:
print("%s Log.info: Index %s: File is corrupted." % (timestamp(), tableName))
try:
fuzzy = readFuzzy(tableName)
g_fuzzyDict.update({tableName: fuzzy})
print("%s Log.info: Fuzzy %s: Recovered %d terms." % (timestamp(), tableName, len(fuzzy)))
except OSError:
print("%s Log.info: Fuzzy %s: Could not read file." % (timestamp(), tableName))
except json.JSONDecodeError:
print("%s Log.info: Fuzzy %s: File is corrupted." % (timestamp(), tableName))
try:
fuzzy2 = readFuzzy2(tableName)
g_fuzzyDict2.update({tableName: fuzzy2})
print("%s Log.info: Fuzzy2 %s: Recovered %d terms." % (timestamp(), tableName, len(fuzzy2)))
except OSError:
print("%s Log.info: Fuzzy2 %s: Could not read file." % (timestamp(), tableName))
except json.JSONDecodeError:
print("%s Log.info: Fuzzy2 %s: File is corrupted." % (timestamp(), tableName))
print("AeonDB ready. %d tables available." % len(tableNames))
return None
def prompt() -> List[str]:
args = input(" : ").split()
args[0] = args[0].lower()
return args
def main() -> None:
print("%s AeonDB 1.0 beta 65" % timestamp())
print(u"%s Copyright © 2011-2018 by Kronosaur Productions LLC. All Rights Reserved." % timestamp())
loadAllTables()
args = prompt()
while args[0] != "quit":
# createtable
if args[0] == "createtable":
if len(args) < 2:
print(g_cmdHelpMap.get(args[0]))
else:
print("Not implemented for demo.")
# getrows
elif args[0] == "getrows":
if len(args) < 4:
print(g_cmdHelpMap.get(args[0]))
else:
print("Not implemented for demo.")
# importtable
elif args[0] == "importtable":
if len(args) < 3:
print(g_cmdHelpMap.get(args[0]))
else:
csvName = args[2]
csvName = csvName.replace('"', "")
csvName = csvName.replace("'", "")
csvName = csvName.replace("/", "\\")
try:
tableObj = importCsv(csvName)
print("Imported %d rows to table %s." % (len(tableObj), args[1]))
g_tables.update({args[1] : tableObj})
print("Saving table %s to file." % args[1])
writeTable(args[1], tableObj)
except:
print("Failed to import table. Check URI.")
# listtables
elif args[0] == "listtables":
if len(args) < 1:
print(g_cmdHelpMap.get(args[0]))
else:
for x in listTables():
print(x)
# indextable
elif args[0] == "indextable":
if len(args) < 2:
print(g_cmdHelpMap.get(args[0]))
else:
if args[1] in g_tables:
tableIndex, tableFuzzy1, tableFuzzy2 = createIndex(g_tables.get(args[1]))
g_indices.update({args[1] : tableIndex})
g_fuzzyDict.update({args[1] : tableFuzzy1})
g_fuzzyDict2.update({args[1] : tableFuzzy2})
try:
print("Saving index %s." % args[1])
writeIndex(args[1], tableIndex)
print("Saving fuzzy %s." % args[1])
writeFuzzy(args[1], tableFuzzy1)
print("Saving fuzzy2 %s." % args[1])
writeFuzzy2(args[1], tableFuzzy2)
except:
print("Failed to write index to file.")
else:
print("Table %s does not exist." % args[1])
# find
elif args[0] == "find":
if len(args) < 3:
print(g_cmdHelpMap.get(args[0]))
else:
if args[1] not in g_tables:
print("Table %s does not exist." % args[1])
elif args[1] not in g_indices:
print("Index %s does not exist." % args[1])
elif args[1] not in g_fuzzyDict:
print("Fuzzy1 %s does not exist." % args[1])
elif args[1] not in g_fuzzyDict2:
print("Fuzzy2 %s does not exist." % args[1])
else:
results = find(set(args[2:]), g_tables.get(args[1]), g_indices.get(args[1]), g_fuzzyDict.get(args[1]), g_fuzzyDict2.get(args[1]), False)
for row in results:
print(row)
print("Found %d rows." % len(results))
# fuzzysearch
elif args[0] == "fuzzysearch":
if len(args) < 3:
print(g_cmdHelpMap.get(args[0]))
else:
if args[1] not in g_tables:
print("Table %s does not exist." % args[1])
elif args[1] not in g_indices:
print("Index %s does not exist." % args[1])
elif args[1] not in g_fuzzyDict:
print("Fuzzy1 %s does not exist." % args[1])
elif args[1] not in g_fuzzyDict2:
print("Fuzzy2 %s does not exist." % args[1])
else:
results = find(set(args[2:]), g_tables.get(args[1]), g_indices.get(args[1]), g_fuzzyDict.get(args[1]), g_fuzzyDict2.get(args[1]), True)
for row in results:
print(row)
print("Found %d rows." % len(results))
# Bad commands
else:
printHelp()
# Next loop
args = prompt()
return None
main()
``` |
{
"source": "johnathanMorales/facturas29",
"score": 3
} |
#### File: facturas29/facturas/models.py
```python
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class factura (models.Model):
"""Create anulation factura request"""
billNumber = models.CharField(max_length=10, unique=True)
reason = models.CharField(max_length=50, blank=False)
comment = models.CharField(max_length=250, blank=False)
created = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
profile = models.ForeignKey('users.Profile', on_delete=models.CASCADE)
def __str__(self):
"""Return billnumber."""
return 'Factura: {} ID: {}'.format(self.billNumber, self.id)
``` |
{
"source": "johnathanvidu/ass",
"score": 2
} |
#### File: johnathanvidu/ass/run.py
```python
import os
import sys
import glob
import json
import shutil
import logging
import subprocess
# logging
logger = logging.getLogger()
level = logging.DEBUG
logger.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('- %(message)s | %(asctime)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
class Track:
def __init__(self, uid, track_name, lang, default, matching):
self.uid = uid
self.track_name = track_name
self.lang = lang
self.default = default
self.matching = matching
def __str__(self):
return f'track {self.track_name} id {self.uid} lang {self.lang} matching correct lang requested {self.matching}'
def scan_tracks(jmkv, audio_lang, subs_lang):
result = {}
tracks = jmkv['tracks']
for track in tracks:
type = track['type']
properties = track['properties']
matching = False
if type == 'audio' and properties['language'] == audio_lang:
matching = True
if type == 'subtitles' and properties['language'] == subs_lang:
matching = True
if type not in result:
result[type] = []
uid = properties['number']
track_name = properties['track_name'] if 'track_name' in properties else ''
lang = properties['language']
default = properties['default_track']
result[type].append(Track(uid, track_name, lang, default, matching))
return result
def set_default_audio(mkv, audio):
logger.info('guessing correct audio track')
correct_found = False
edits = []
for atrack in audio:
logger.debug(atrack)
if not atrack.matching:
edits = edits + ['--edit', f'track:{int(atrack.uid)}', '--set', 'flag-default=0']
continue
track_name = atrack.track_name.lower()
if 'commentary' in track_name:
continue
if 'song' in track_name or 'sing' in track_name:
continue
correct_found = True
edits = edits + ['--edit', f'track:{int(atrack.uid)}', '--set', 'flag-default=1']
logger.info(f'setting default audio track on {track_name} uid {atrack.uid}')
if not correct_found:
logger.warn('did not find suitable audio track, doing nothing...')
return
logger.info('mkvpropedit out')
proc = subprocess.run(['mkvpropedit', mkv] + edits)
logger.debug(' '.join(proc.args))
def set_default_subtitles(mkv, subtitles):
logger.info('guessing correct subtitles track')
correct_found = False
edits = []
for strack in subtitles:
logger.debug(strack)
if not strack.matching:
edits = edits + ['--edit', f'track:{int(strack.uid)}', '--set', 'flag-default=0']
continue
track_name = strack.track_name.lower()
if 'commentary' in track_name:
edits = edits + ['--edit', f'track:{int(strack.uid)}', '--set', 'flag-default=0']
continue
if 'song' in track_name:
edits = edits + ['--edit', f'track:{int(strack.uid)}', '--set', 'flag-default=0']
continue
if 'sing' in track_name:
edits = edits + ['--edit', f'track:{int(strack.uid)}', '--set', 'flag-default=0']
continue
correct_found = True
edits = edits + ['--edit', f'track:{int(strack.uid)}', '--set', 'flag-default=1']
logger.info(f'setting default subtitles track on {track_name} uid {strack.uid}')
if not correct_found:
logger.warn('did not find suitable subtitles track, doing nothing...')
return
logger.info('mkvpropedit out')
#proc = subprocess.run(['mkvpropedit', mkv, '--edit', f'track:{int(strack.uid)}', '--set', 'flag-default=1'])
proc = subprocess.run(['mkvpropedit', mkv] + edits)
logger.debug(' '.join(proc.args))
# main
logger.info('looking for mkvtoolnix...')
if shutil.which('mkvmerge') and shutil.which('mkvpropedit'):
logger.info('mkvtoolnix found')
else:
logger.error('mkvtoolnix must be installed')
sys.exit(1)
logger.info(f'scanning current folder: {os.getcwd()}')
mkvs = glob.glob(f'{os.getcwd()}/*.mkv')
for mkv in mkvs:
mkvmerge_proc = subprocess.run(['mkvmerge', '-J', mkv], stdout=subprocess.PIPE)
jmkv = json.loads(mkvmerge_proc.stdout)
tracks = scan_tracks(jmkv, 'jpn', 'eng')
logger.info(f'editing {mkv}...')
audio = tracks['audio']
set_default_audio(mkv, audio)
subs = tracks['subtitles']
set_default_subtitles(mkv, subs)
logger.info('ass is done')
sys.exit(0)
``` |
{
"source": "johnathanvidu/tlushim",
"score": 3
} |
#### File: tlushim/tlushim/__main__.py
```python
import sys
import datetime
import argparse
import tlushim_log
from argparse import RawTextHelpFormatter
from application import Application
def main(args=None):
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)
user_input_grp = parser.add_argument_group('user input')
user_input_grp.add_argument('-m', '--month', help='Provide specific month in format mm',
default=datetime.datetime.now().strftime('%m'))
user_input_grp.add_argument('-y', '--year', help='Provide specific year in format yyyy',
default=datetime.datetime.now().strftime('%Y'))
config_grp = parser.add_argument_group('configuration')
config_grp.add_argument('-c', '--configure', help='Start with configuration wizard', action='store_true')
logging_grp = parser.add_argument_group('logging')
logging_grp.add_argument('-v', '--verbose', help='Verbose logging', action='store_true')
logging_grp.add_argument('-d', '--debug', help='Debug log level', action='store_true')
namespace = parser.parse_args(args)
root_logger = tlushim_log.configure_logging(namespace)
sys.exit(calculate_exit_code(lambda: Application(namespace).run(), root_logger))
def calculate_exit_code(app_lambda, logger):
try:
app_lambda()
exit_code = 0
except BaseException as e:
exit_code = 1
logger.error(e.message)
return exit_code
if __name__ == "__main__":
main()
``` |
{
"source": "johnatspreadstreet/Morph-RESTful-HTTP-Utility-API",
"score": 3
} |
#### File: apis/db/models.py
```python
from sqlalchemy import Table, Column, Integer, Text
from sqlalchemy.orm import mapper
from apis.db.database import metadata, db_session
class BlogPost(object):
query = db_session.query_property()
def __init__(self, id=None, title=None, post=None):
self.id = id
self.title = title
self.post = post
blog_posts = Table('blog_posts', metadata,
Column('id', Integer, primary_key=True),
Column('title', Text),
Column('post', Text)
)
mapper(BlogPost, blog_posts)
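# Hedged usage sketch (comments only, since this module is imported at app
# start-up); assumes the blog_posts table already exists in the database that
# apis.db.database points at:
# post = BlogPost(title='Hello', post='First post body')
# db_session.add(post)
# db_session.commit()
# print(BlogPost.query.filter_by(title='Hello').first().post)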
```
#### File: apis/strings/scratch.py
```python
def splitter(string):
split = string.split(' ')
return split
print(splitter('this is a very messy string'))
``` |
{
"source": "johnattan/BrainyAnt2.0",
"score": 3
} |
#### File: johnattan/BrainyAnt2.0/userControl.py
```python
import RPi.GPIO as gpio
def move_forward(speed):
if speed > 0:
init()
print('Moving FORWARD with speed = {}'.format(speed))
gpio.output(17, True)
gpio.output(22, False)
gpio.output(23, True)
gpio.output(24, False)
else:
gpio.cleanup()
def move_left(speed):
if speed > 0:
init()
print('Moving LEFT with speed = {}'.format(speed))
gpio.output(17, False)
gpio.output(22, True)
gpio.output(23, True)
gpio.output(24, False)
else:
gpio.cleanup()
def move_right(speed):
if speed > 0:
init()
print('Moving RIGHT with speed = {}'.format(speed))
gpio.output(17, True)
gpio.output(22, False)
gpio.output(23, False)
gpio.output(24, True)
else:
gpio.cleanup()
def move_back(speed):
if speed > 0:
init()
print('Moving BACK with speed = {}'.format(speed))
gpio.output(17, False)
gpio.output(22, True)
gpio.output(23, False)
gpio.output(24, True)
else:
gpio.cleanup()
def init():
gpio.setmode(gpio.BCM)
gpio.setup(17, gpio.OUT)
gpio.setup(22, gpio.OUT)
gpio.setup(23, gpio.OUT)
gpio.setup(24, gpio.OUT)
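# Hedged usage sketch: the 50% speed and 2-second run time are illustrative;
# calling any move_* function with speed <= 0 releases the pins via cleanup().
if __name__ == '__main__':
    import time
    move_forward(50)
    time.sleep(2)
    move_forward(0)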
``` |
{
"source": "john-aws/elastic-load-balancing-tools",
"score": 3
} |
#### File: application-load-balancer-serverless-app/whatismyip/whatismyip.py
```python
import time
import json
import os
TEMPLATE = './whatismyip_template.html'
def lambda_handler(event, context):
print '==event=='
print event
    with open(TEMPLATE, 'r') as template:
template_html = template.read()
response = {
"statusCode": 200,
"headers": {
"Content-Type": "text/html;"
},
"isBase64Encoded": False
}
if event['headers']['user-agent'] == 'ELB-HealthChecker/2.0':
print('HealthCheck Request')
data = 'Response to HealthCheck'
response['body'] = data
return response
sourceip_list = event['headers']['x-forwarded-for'].split(',')
if sourceip_list:
sourceip = str(sourceip_list[0])
data = "<h3>Your IP is {}</h3>".format(sourceip)
if event['queryStringParameters'] == {"output":"text"}:
response['body']=sourceip
return response
if event['queryStringParameters'] == {"output":"json"}:
response['body'] = json.dumps({"Source IP":sourceip})
return response
    else:
        data = '<h3>No source IP found</h3>'
print type(template_html)
response_html = template_html.replace("<!--whatismyip-->", data)
response['body'] = response_html
print response
return response
``` |
{
"source": "John-AZ1/app4flask",
"score": 3
} |
#### File: John-AZ1/app4flask/Timetable.py
```python
import mechanicalsoup
import re
from Day import Day
from Session import Session
def get_timetable(school, user, password):
# App4 Timetable Url with school identifier
url = "http://{}.app4.ws/portal/timetable.php".format(school)
# Creates a browser. Then opens the Timetable Url which leads to a login page
br = mechanicalsoup.StatefulBrowser()
br.open(url)
# Log in using user and password
br.select_form(nr=0)
br["txtLoginUserID"] = user
br["txtLoginPassword"] = password
br.submit_selected()
# With Permission reopens the Timetable page
br.open(url)
# Makes a soup of the pages html
page = br.get_current_page()
    # Gets every day's html
daysRaw = page.find_all('td', {"width": '18%'})
# Empty Container to be populated with day objects
timetable = []
for day in daysRaw:
# Variables with Day args
day_name = day.find_all("td", {"class": re.compile("^ttdark")})[0].string.extract()
day_id = daysRaw.index(day)+1
        # Appends a Day Object and passes an empty list to be populated by Session Objects, day_name and day_id
timetable.append(Day([], day_name, day_id))
# List with all day's sessions html
sessions = day.find_all("td", {"class": re.compile("^tt")})
# Iterates through all sessions appending a Session object to each Day object
for session in sessions:
            # Only process table cells that actually contain a session
if len(session.find_all("td")) > 0:
# Variables with Session args
session_number = int(session.find_all("td")[0].string.extract())
session_code = session.find_all("td")[1].find_all("span")[0].string.extract()
# List of teacher, room and time
session_periph = session.find_all("td")[1].find_all("span")[1].get_text("|").split("|")
                # Appends a Session Object to current day (i.e., days[-1])
                # Checks if it is not an empty session (e.g., Lunch, Community Service)
if len(session_periph) > 2:
timetable[-1].append(
Session(
session_number,
session_code,
session_periph[0],
str(session_periph[1]).strip(),
session_periph[2]
)
)
# If it is an empty session do not pass a teacher or room
else:
timetable[-1].append(
Session(
session_number,
session_code,
session_periph[0],
None,
None
)
)
return timetable
def write_db(timetable, db):
# Clears database so it can be written over
db.purge()
    # Insert each Day's day_name, day_id and sessions into the database
for day in timetable:
sessions = []
for session in day:
sessions.append(
{
'number': session.number,
'code': session.code,
'teacher': session.teacher,
'room': session.room,
'time': session.time
}
)
db.insert({
'day_name': day.day_name,
'day_id': day.day_id,
'sessions': sessions
})
def read_db(db):
# Clears timetable so it can be overwritten
timetable = []
    # Loops over every day stored in the database
    for day in db.all():
        # Creates a local empty list to be populated with database sessions
sessions = []
# Appends a Session to current day passing session number, code, teacher, room, time
for session in day['sessions']:
sessions.append(Session(
session['number'],
session['code'],
session['teacher'],
session['room'],
session['time']
))
# Appends a Day object passing session, day_name, day_id
timetable.append(
Day(
sessions,
day['day_name'],
day['day_id']
)
)
return timetable
def get_day(tt, day_name, week):
days = list(filter(lambda day: day.day_name == day_name, tt))
try:
return days[0] if days[0].week_id == week else days[1]
except IndexError:
return Day([], day_name, 0)
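# Hedged usage sketch: the school identifier, credentials, TinyDB path and
# 'Monday' day name are placeholders; purge()/insert() above suggest a
# TinyDB-style store, which is assumed here.
if __name__ == '__main__':
    from tinydb import TinyDB
    db = TinyDB('timetable.json')
    write_db(get_timetable('exampleschool', 'user123', 'secret'), db)
    monday = get_day(read_db(db), 'Monday', week=1)
    for session in monday:
        print('{} {} {}'.format(session.code, session.room, session.time))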
``` |
{
"source": "JohnAZoidberg/misc",
"score": 3
} |
#### File: misc/k_means/k_means.py
```python
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
import random
# import seaborn as sns
# sns.set()
def get_points():
data = np.array([[0, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 1, 1],
[0, 0, 1, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 0, 1]])
# get the indices where data is 1
return np.argwhere(data == 1)
def draw_clusters(centroids, clusters):
# plt.figure(figsize=(10, 10))
colors = ["blue", "green", "red", "cyan", "magenta",
"yellow"]
if len(clusters) > len(colors):
raise Exception("Too many clusters - not enough colors")
for i, points in enumerate(clusters):
color = colors.pop()
x, y = points.T
plt.scatter(x, y, color=color)
plt.scatter(*centroids[i], color="black")
# for centroid in centroids:
# plt.scatter(*centroid, color="black")
plt.show()
def distance(point, other):
return sqrt(pow(point[0] - other[0], 2) +
pow(point[1] - other[1], 2))
def assign_to_cluster(k, points, centroids):
clusters = [[] for _ in xrange(k)]
for x, y in points:
closest = (0, centroids[0])
for i, c in enumerate(centroids[1:], start=1):
if distance((x, y), c) < distance((x, y), closest[1]):
closest = (i, c)
clusters[closest[0]].append([x, y])
return np.array([np.array(c) for c in clusters])
def move_centroids(clusters):
centroids = []
for cluster in clusters:
x = np.mean(cluster.T[0])
y = np.mean(cluster.T[1])
centroids.append((x, y))
return centroids
def kmeans(k, points):
x, y = points.T
limits = (np.max(x), np.max(y))
centroids = [(random.uniform(0, limits[0]),
random.uniform(0, limits[1])) for _ in xrange(k)]
while True:
print centroids
clusters = assign_to_cluster(k, points, centroids)
new_centroids = move_centroids(clusters)
if new_centroids == centroids:
centroids = new_centroids
break
centroids = new_centroids
return centroids, clusters
if __name__ == "__main__":
points = get_points()
centroids, clusters = kmeans(2, points)
draw_clusters(centroids, clusters)
``` |
{
"source": "JohnAZoidberg/python-redfish-utility",
"score": 2
} |
#### File: extensions/COMMANDS/CommitCommand.py
```python
""" Commit Command for RDMC """
from argparse import SUPPRESS
from redfish.ris.rmc_helper import NothingSelectedError
from rdmc_helper import ReturnCodes, InvalidCommandLineErrorOPTS, FailureDuringCommitError,\
NoChangesFoundOrMadeError, NoCurrentSessionEstablished
class CommitCommand():
""" Constructor """
def __init__(self):
self.ident = {
'name':'commit',
'usage': None,
'description':'commit [OPTIONS]\n\n\tRun to apply all changes made during'
' the current session\n\texample: commit',
'summary':'Applies all the changes made during the current session.',
'aliases': [],
'auxcommands': ["LogoutCommand", "RebootCommand"]
}
self.cmdbase = None
self.rdmc = None
self.auxcommands = dict()
def commitfunction(self, options=None):
""" Main commit worker function
:param options: command line options
:type options: list.
"""
self.commitvalidation()
self.rdmc.ui.printer("Committing changes...\n")
if options:
if options.biospassword:
self.rdmc.app.current_client.bios_password = options.biospassword
try:
failure = False
commit_opp = self.rdmc.app.commit()
for path in commit_opp:
if self.rdmc.opts.verbose:
self.rdmc.ui.printer('Changes are being made to path: %s\n' % path)
if next(commit_opp):
failure = True
except NothingSelectedError:
raise NoChangesFoundOrMadeError("No changes found or made during commit operation.")
else:
if failure:
raise FailureDuringCommitError('One or more types failed to commit. Run the '
'status command to see uncommitted data. '
                                               'If you wish to discard failed changes, refresh the '
'type using select with the --refresh flag.')
if options.reboot:
self.auxcommands['reboot'].run(options.reboot)
self.auxcommands['logout'].run("")
def run(self, line, help_disp=False):
""" Wrapper function for commit main function
:param line: command line input
:type line: string.
"""
if help_disp:
self.parser.print_help()
return ReturnCodes.SUCCESS
try:
(options, _) = self.rdmc.rdmc_parse_arglist(self, line)
except (InvalidCommandLineErrorOPTS, SystemExit):
if ("-h" in line) or ("--help" in line):
# self.rdmc.ui.printer(self.ident['usage'])
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS("")
self.commitfunction(options)
#Return code
return ReturnCodes.SUCCESS
def commitvalidation(self):
""" Commit method validation function """
try:
_ = self.rdmc.app.current_client
except:
raise NoCurrentSessionEstablished("Please login and make setting"
" changes before using commit command.")
def definearguments(self, customparser):
""" Wrapper function for new command main function
:param customparser: command line input
:type customparser: parser.
"""
if not customparser:
return
self.cmdbase.add_login_arguments_group(customparser)
customparser.add_argument(
'--reboot',
dest='reboot',
help="Use this flag to perform a reboot command function after"\
" completion of operations. For help with parameters and"\
" descriptions regarding the reboot flag, run help reboot.",
default=None
)
```
#### File: COMMANDS/REQUIREDCOMMANDS/ExitCommand.py
```python
""" Exit Command for rdmc """
import sys
from rdmc_helper import ReturnCodes, InvalidCommandLineErrorOPTS
class ExitCommand():
""" Exit class to handle exiting from interactive mode """
def __init__(self):
self.ident = {
'name':'exit',
'usage': None,
'description':'Run to exit from the interactive shell\n\texample: exit',
'summary':'Exits from the interactive shell.',
'aliases': ['quit'],
'auxcommands': ["LogoutCommand"]
}
#self.rdmc = rdmcObj
#self.logoutobj = rdmcObj.commands_dict["LogoutCommand"](rdmcObj)
self.cmdbase = None
self.rdmc = None
self.auxcommands = dict()
def run(self, line, help_disp=False):
"""If an argument is present, print help else exit
:param line: command line input
:type line: string.
"""
if help_disp:
self.parser.print_help()
return ReturnCodes.SUCCESS
try:
(_, args) = self.rdmc.rdmc_parse_arglist(self, line)
except (InvalidCommandLineErrorOPTS, SystemExit):
if ("-h" in line) or ("--help" in line):
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS("")
if not args or not line:
self.auxcommands['logout'].run("")
#System exit
sys.exit(ReturnCodes.SUCCESS)
else:
self.rdmc.ui.error("Exit command does not take any parameters.\n")
raise InvalidCommandLineErrorOPTS("Invalid command line arguments.")
def definearguments(self, customparser):
""" Wrapper function for new command main function
:param customparser: command line input
:type customparser: parser.
"""
if not customparser:
return
```
#### File: extensions/PERSISTENT MEMORY COMMANDS/ClearPendingConfigCommand.py
```python
from __future__ import absolute_import
from rdmc_helper import ReturnCodes, InvalidCommandLineError, InvalidCommandLineErrorOPTS,\
NoContentsFoundForOperationError, \
NoChangesFoundOrMadeError, LOGGER
from .lib.RestHelpers import RestHelpers
class ClearPendingConfigCommand():
""" Command to clear pending config tasks"""
def __init__(self):
self.ident = {
'name':'clearpmmpendingconfig',
'usage': None,
'description':"Clear pmm pending config tasks\n"
"\texample: clearpmmpendingconfig",
'summary':"Clear pending config tasks",
'aliases': [],
'auxcommands': []
}
self.cmdbase = None
self.rdmc = None
self.auxcommands = dict()
def get_memory_chunk_tasks(self):
"""
Function to retrieve Memory Chunk Tasks
:returns: Retrieved Memory Chunk Tasks
"""
# Retrieving tasks members
tasks = RestHelpers(rdmcObject=self.rdmc).retrieve_task_members()
if tasks:
# Filtering out Memory Chunk Tasks
memory_chunk_tasks = RestHelpers(rdmcObject=self.rdmc).filter_task_members(tasks)
if memory_chunk_tasks:
return memory_chunk_tasks
self.rdmc.ui.printer("No pending configuration tasks found.\n\n")
return []
def delete_tasks(self, memory_chunk_tasks, verbose=False):
"""
Function to delete pending configuration tasks
        :param memory_chunk_tasks: Pending configuration tasks.
        :type memory_chunk_tasks: list
        :param verbose: Toggles verbose mode, which prints task IDs as
individual tasks are deleted.
:type verbose: Boolean
:returns: None
"""
for task in memory_chunk_tasks:
data_id = task.get("@odata.id")
task_id = task.get("Id")
resp = RestHelpers(rdmcObject=self.rdmc).delete_resource(data_id)
if resp:
if verbose:
self.rdmc.ui.printer("Deleted Task #{}".format(task_id)+"\n")
else:
raise NoChangesFoundOrMadeError("Error occured while deleting "
"task #{}".format(task_id))
return None
def run(self, line, help_disp=False):
"""
Wrapper function for new command main function
:param line: command line input
:type line: string.
"""
if help_disp:
self.parser.print_help()
return ReturnCodes.SUCCESS
LOGGER.info("Clear Pending Configuration: %s", self.ident['name'])
# pylint: disable=unused-variable
try:
(options, args) = self.rdmc.rdmc_parse_arglist(self, line)
except (InvalidCommandLineErrorOPTS, SystemExit):
if ("-h" in line) or ("--help" in line):
# self.rdmc.ui.printer(self.ident['usage'])
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineError("Failed to parse options")
if args:
raise InvalidCommandLineError("Chosen flag doesn't expect additional arguments")
# Raise exception if server is in POST
if RestHelpers(rdmcObject=self.rdmc).in_post():
raise NoContentsFoundForOperationError("Unable to retrieve resources - "
"server might be in POST or powered off")
memory_chunk_tasks = self.get_memory_chunk_tasks()
self.delete_tasks(memory_chunk_tasks, verbose=True)
return ReturnCodes.SUCCESS
def definearguments(self, customparser):
"""
Wrapper function for new command main function
:param customparser: command line input
:type customparser: parser.
"""
if not customparser:
return
self.cmdbase.add_login_arguments_group(customparser)
``` |
{
"source": "JohnAZoidberg/YoutubeRSS",
"score": 3
} |
#### File: YoutubeRSS/youtuberss/fetcher.py
```python
import json
import sqlite3
import requests
import converter
BASEURL = 'https://www.googleapis.com/youtube/v3'
# TODO: prevent this from running too long and fetch only 50 more than are in the DB
class Fetcher:
def __init__(self, config_file):
with open(config_file) as f:
config = json.load(f)
api_key = config["api_key"]
basefolder = config["flask_root"]
self.database_path = config["db_path"]
self.api_suffix = '&key=' + api_key
self.converturl = basefolder + 'converter/file/'
pass
def _build_url(self, request):
return BASEURL + request + self.api_suffix
def _extract_video_info(self, vid, conn):
video = {}
snippet = vid['snippet']
video["published_date"] = snippet['publishedAt']
video["title"] = snippet['title']
video["id"] = vid['snippet']['resourceId']['videoId']
video["file_url"] = self.converturl + video["id"]
video["description"] = snippet['description']
video["url"] = 'https://www.youtube.com/watch?v=' + video["id"]
# get size and duration
info = self._get_cached_video_info(video["id"], conn)
video["length"] = info["size"]
video["duration"] = info["duration"]
return video
def get_data(self, url):
itemJson = requests.get(url).json()
channel = itemJson['items'][0]
podcast = {}
podcast["url"] = 'https://www.youtube.com/channel/' + channel['id']
podcast["thumbnail"] = channel['snippet']['thumbnails']['high']['url']
podcast["title"] = channel['snippet']['title']
podcast["description"] = channel['snippet']['description']
upload_playlist = \
channel['contentDetails']['relatedPlaylists']['uploads']
return podcast, upload_playlist
def get_channel_data(self, channelId):
url = self._build_url('/channels?' +
'part=snippet%2CcontentDetails&id=' + channelId)
return self.get_data(url)
def get_user_data(self, name):
url = self._build_url('/channels?' +
'part=snippet%2CcontentDetails&forUsername=' + name)
return self.get_data(url)
def get_playlist_data(self, uploadPlaylist):
url = self._build_url('/playlists?part=snippet&id=' + uploadPlaylist)
itemJson = requests.get(url).json()
playlist = itemJson['items'][0]['snippet']
podcast = {}
podcast["url"] = 'https://www.youtube.com/playlist?list=' + \
uploadPlaylist
podcast["thumbnail"] = playlist['thumbnails']['high']['url']
podcast["title"] = playlist['title']
podcast["description"] = playlist['description']
return podcast, uploadPlaylist
def get_videos(self, playlist_id, limit=None):
conn = sqlite3.connect(self.database_path)
conn.execute('''
CREATE TABLE IF NOT EXISTS videos
(id VARCHAR PRIMARY KEY NOT NULL,
size VARCHAR NOT NULL,
duration INT NOT NULL
);''')
conn.commit()
url = self._build_url('/playlistItems' +
'?part=snippet%2CcontentDetails' +
'&maxResults=50&playlistId=' + playlist_id)
vids = []
newest_date = None
next_page = ''
counter = 0
limit = None if limit is None else int(limit)
while (limit is None or counter < limit):
# We are limited to 50 results.
            # If the playlist contains more than 50 videos
# we have to make multiple requests here
# which can only be fetched one after another.
data = requests.get(url + next_page).json()
vidsBatch = data['items']
for vid in vidsBatch:
try:
print "VideoId: ", vid['snippet']['resourceId']['videoId']
video = self._extract_video_info(vid, conn)
except IOError:
continue
except:
print "VideoId: ", vid['snippet']['resourceId']['videoId']
conn.commit()
continue
# raise
print "VideoId: ", vid['snippet']['resourceId']['videoId']
vids.append(video)
if newest_date is None:
newest_date = video['published_date']
elif video['published_date'] > newest_date:
newest_date = video['published_date']
counter += 1
if limit is not None and counter >= limit:
break
try: # loop until there are no more pages
next_page = '&pageToken=' + data['nextPageToken']
except KeyError:
break
conn.commit()
conn.close()
return vids, newest_date
def _get_cached_video_info(self, video_id, conn):
cur = conn.execute('''SELECT id, size, duration FROM videos
WHERE id = ?''', (video_id,))
video = cur.fetchone()
if video is None:
info = converter.get_video_info(video_id, action="size")
conn.execute(
'''INSERT INTO videos (id, size, duration)
VALUES (?, ?, ?)''',
(video_id, info['size'], info["duration"])
)
return info
else:
return {"id": video_id, "size": video[1],
"duration": video[2]}
``` |
{
"source": "johnb30/atlas",
"score": 4
} |
#### File: atlas/page/connectors.py
```python
import datetime
def add_entry(collection, text, title, url, date, website, lang):
"""
Function that creates the dictionary of content to add to a MongoDB
instance and inserts the information into an external data store.
Parameters
----------
collection : pymongo Collection.
        Collection within MongoDB in which results are stored.
text : String.
Text from a given webpage.
title : String.
Title of the news story.
url : String.
URL of the webpage from which the content was pulled.
date : String.
Date pulled from the RSS feed.
website : String.
Nickname of the site from which the content was pulled.
Returns
-------
object_id : String
"""
to_insert = make_entry(collection, text, title, url, date,
website, lang)
object_id = collection.insert(to_insert)
return object_id
def make_entry(collection, text, title, url, date, website, lang):
"""
Function that creates the dictionary of content to add to an external data
store.
Parameters
----------
text : String.
Text from a given webpage.
title : String.
Title of the news story.
url : String.
URL of the webpage from which the content was pulled.
date : String.
Date pulled from the RSS feed.
website : String.
Nickname of the site from which the content was pulled.
Returns
-------
    toInsert : Dictionary.
Dictionary of text and other content.
"""
if lang == 'arabic':
toInsert = {"url": url,
"title": title,
"source": website,
"date": date,
"date_added": datetime.datetime.utcnow(),
"content_ar": text,
"content_en": '',
"stanford": 0,
"geo": 0,
"language": lang}
elif lang == 'english':
trees = []
stanford_coded = 0
geo_info = {}
topic_info = {}
full_stanford = {}
toInsert = {"url": url,
"title": title,
"source": website,
"date": date,
"date_added": datetime.datetime.utcnow(),
"content_en": text,
"content_ar": '',
"stanford": stanford_coded,
"geo_info": geo_info,
"topic_info": topic_info,
"full_stanford": full_stanford,
"parsed_sents": trees,
"language": lang}
return toInsert
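# Hedged usage sketch: the MongoDB host, database and collection names are
# placeholders, and the story fields are illustrative only.
if __name__ == '__main__':
    from pymongo import MongoClient
    stories = MongoClient('localhost', 27017)['atlas']['stories']
    add_entry(stories, 'Example story text.', 'Example title',
              'http://example.com/story', '2015-01-01', 'example_site',
              'english')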
```
#### File: atlas/rss/utilities.py
```python
import glob
import os
import pika
import redis
from ConfigParser import ConfigParser
def make_redis(host='localhost'):
r = redis.StrictRedis(host=host, port=6379, db=0)
return r
def make_queue(host='localhost'):
connection = pika.BlockingConnection(pika.ConnectionParameters(
host=host))
channel = connection.channel()
channel.queue_declare(queue='scraper_queue', durable=True)
return channel
def parse_config():
"""Function to parse the config file."""
config_file = glob.glob('config.ini')
if config_file:
print 'Found a config file in working directory'
else:
cwd = os.path.abspath(os.path.dirname(__file__))
config_file = os.path.join(cwd, 'default_config.ini')
print 'No config found. Using default.'
config_dict = dict()
parser = ConfigParser(allow_no_value=True)
parser.read(config_file)
for section in parser.sections():
for option in parser.options(section):
config_dict[option] = parser.get(section, option)
# handle special case of URL 'proxies' comma delimited list
plist = config_dict.get('proxy_list')
config_dict['proxy_list'] = plist.split(',') if type(plist) is str else []
return config_dict
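# Hedged usage sketch: assumes Redis and RabbitMQ are reachable on localhost
# and that a config.ini (or the bundled default) is available as above.
if __name__ == '__main__':
    config = parse_config()
    print config.get('proxy_list')
    redis_conn = make_redis()
    scraper_channel = make_queue()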
``` |
{
"source": "johnb30/mordecai",
"score": 2
} |
#### File: mordecai/resources/easter_egg.py
```python
from flask.ext.restful import Resource
output = """
_ _ _ _ _ _ _ _ _
/\_\/\_\ _ /\ \ /\ \ /\ \ /\ \ /\ \ / /\ /\ \
/ / / / //\_\ / \ \ / \ \ / \ \____ / \ \ / \ \ / / \ \ \ \
/\ \/ \ \/ / // /\ \ \ / /\ \ \ / /\ \_____\ / /\ \ \ / /\ \ \ / / /\ \ /\ \_\
/ \____\__/ // / /\ \ \ / / /\ \_\ / / /\/___ // / /\ \_\ / / /\ \ \ / / /\ \ \ / /\/_/
/ /\/________// / / \ \_\ / / /_/ / // / / / / // /_/_ \/_/ / / / \ \_\ / / / \ \ \ / / /
/ / /\/_// / // / / / / // / /__\/ // / / / / // /____/\ / / / \/_/ / / /___/ /\ \ / / /
/ / / / / // / / / / // / /_____// / / / / // /\____\/ / / / / / /_____/ /\ \ / / /
/ / / / / // / /___/ / // / /\ \ \ \ \ \__/ / // / /______ / / /________ / /_________/\ \ \ ___/ / /__
\/_/ / / // / /____\/ // / / \ \ \ \ \___\/ // / /_______\/ / /_________\/ / /_ __\ \_\/\__\/_/___\
\/_/ \/_________/ \/_/ \_\/ \/_____/ \/__________/\/____________/\_\___\ /____/_/\/_________/
"She may not look like much, but she's got it where it counts, kid. I've made a lot of special modifications myself."
"""
class EasterEgg(Resource):
def get(self):
return output
```
#### File: mordecai/resources/utilities.py
```python
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import glob
from mitie import *
from elasticsearch_dsl import Search
from ConfigParser import ConfigParser
from elasticsearch import Elasticsearch
from elasticsearch_dsl.query import MultiMatch
# read in config file
__location__ = os.path.realpath(os.path.join(os.getcwd(),
os.path.dirname(__file__)))
config_file = glob.glob(os.path.join(__location__, '../config.ini'))
parser = ConfigParser()
parser.read(config_file)
mitie_directory = parser.get('Locations', 'mitie_directory')
mitie_ner_model = parser.get('Locations', 'mitie_ner_model')
def setup_mitie():
sys.path.append(mitie_directory)
ner_model = named_entity_extractor(mitie_ner_model)
return ner_model
def setup_es():
try:
if 'Server' in parser.sections():
es_ip = parser.get('Server', 'geonames')
else:
es_ip = os.environ['ELASTIC_PORT_9200_TCP_ADDR']
es_url = 'http://{}:{}/'.format(es_ip, '9200')
CLIENT = Elasticsearch(es_url)
S = Search(CLIENT)
return S
except Exception as e:
print('Problem parsing config file. {}'.format(e))
def talk_to_mitie(text, ner_model):
    # Function that sends text to MITIE and gets entities and HTML in response
text = text.encode("utf-8")
tokens = tokenize(text)
tokens.append(' x ')
# eventually, handle different NER models.
entities = ner_model.extract_entities(tokens)
out = []
for e in entities:
range = e[0]
tag = e[1]
score = e[2]
entity_text = str(" ").join(tokens[i] for i in range)
out.append({u'tag': unicode(tag), u'text': entity_text,
u'score': score})
for e in reversed(entities):
range = e[0]
tag = e[1]
newt = tokens[range[0]]
if len(range) > 1:
for i in range:
if i != range[0]:
newt += str(' ') + tokens[i]
newt = (str('<span class="mitie-') + tag + str('">') + newt +
str('</span>'))
tokens = tokens[:range[0]] + [newt] + tokens[(range[-1] + 1):]
del tokens[-1]
html = str(' ').join(tokens)
htmlu = unicode(html.decode("utf-8"))
return {"entities": out, "html": htmlu}
def mitie_context(text, ner_model):
    # Function that sends text to MITIE and returns entities
# (and +/- 3 words of context)
text = text.encode("utf-8")
tokens = tokenize(text)
# eventually, handle different NER models.
entities = ner_model.extract_entities(tokens)
out = []
for e in entities:
range = e[0]
tag = e[1]
score = e[2]
entity_text = str(" ").join(tokens[i] for i in range)
beg_token = min(range)
end_token = max(range)
context = []
for i in [3, 2, 1]:
try:
context.append(tokens[beg_token - i])
except:
pass
try:
context.append(tokens[end_token + i])
except:
pass
out.append({u'tag': unicode(tag), u'text': entity_text, u'score': score,
u'context': context})
return {"entities": out}
def query_geonames(conn, placename, country_filter):
country_lower = [x.lower() for x in country_filter]
q = MultiMatch(query=placename, fields=['asciiname', 'alternativenames'])
res = conn.filter('term', country_code3=country_lower).query(q).execute()
out = {'hits': {'hits': []}}
keys = [u'admin1_code', u'admin2_code', u'admin3_code', u'admin4_code',
u'alternativenames', u'asciiname', u'cc2', u'coordinates',
u'country_code2', u'country_code3', u'dem', u'elevation',
u'feature_class', u'feature_code', u'geonameid',
u'modification_date', u'name', u'population', u'timzeone']
for i in res:
i_out = {}
for k in keys:
i_out[k] = i[k]
out['hits']['hits'].append(i_out)
return out
# e.g.: query_geonames("Aleppo", ["IRQ", "SYR"])
def query_geonames_featureclass(conn, placename, country_filter, feature_class):
country_lower = [x.lower() for x in [country_filter]]
feature_lower = [x.lower() for x in feature_class]
q = MultiMatch(query=placename, fields=['asciiname', 'alternativenames'])
res = conn.filter('term', country_code3=country_lower).filter('term', feature_class=feature_lower).query(q).execute()
out = {'hits': {'hits': []}}
keys = [u'admin1_code', u'admin2_code', u'admin3_code', u'admin4_code',
u'alternativenames', u'asciiname', u'cc2', u'coordinates',
u'country_code2', u'country_code3', u'dem', u'elevation',
u'feature_class', u'feature_code', u'geonameid',
u'modification_date', u'name', u'population', u'timzeone']
for i in res:
i_out = {}
for k in keys:
i_out[k] = i[k]
out['hits']['hits'].append(i_out)
return out
# e.g.: query_geonames_featureclass("Aleppo", ["IRQ", "SYR"], ["P"])
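# Hedged usage sketch: requires the MITIE model and geonames Elasticsearch
# locations configured in config.ini; the example sentence is illustrative.
if __name__ == '__main__':
    ner = setup_mitie()
    parsed = talk_to_mitie('Rebels attacked a checkpoint near Aleppo.', ner)
    for entity in parsed['entities']:
        print(entity['tag'], entity['text'])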
``` |
{
"source": "johnb30/petrarch",
"score": 3
} |
#### File: petrarch/petrarch/utilities.py
```python
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
import corenlp
import dateutil.parser
import PETRglobals
from collections import defaultdict, Counter
def stanford_parse(event_dict):
logger = logging.getLogger('petr_log')
# What is dead can never die...
print("\nSetting up StanfordNLP. The program isn't dead. Promise.")
logger.info('Setting up StanfordNLP')
core = corenlp.StanfordCoreNLP(PETRglobals.stanfordnlp,
properties=_get_data('data/config/',
'petrarch.properties'),
memory='2g')
total = len(list(event_dict.keys()))
print(
"Stanford setup complete. Starting parse of {} stories...".format(total))
logger.info(
'Stanford setup complete. Starting parse of {} stories.'.format(total))
for i, key in enumerate(event_dict.keys()):
if (i / float(total)) * 100 in [10.0, 25.0, 50, 75.0]:
print('Parse is {}% complete...'.format((i / float(total)) * 100))
for sent in event_dict[key]['sents']:
logger.info('StanfordNLP parsing {}_{}...'.format(key, sent))
sent_dict = event_dict[key]['sents'][sent]
if len(sent_dict['content']) > 512 or len(
sent_dict['content']) < 64:
logger.warning(
'\tText length wrong. Either too long or too short.')
pass
else:
try:
stanford_result = core.raw_parse(sent_dict['content'])
s_parsetree = stanford_result['sentences'][0]['parsetree']
if 'coref' in stanford_result:
sent_dict['coref'] = stanford_result['coref']
# TODO: To go backwards you'd do str.replace(' ) ', ')')
sent_dict['parsed'] = _format_parsed_str(s_parsetree)
except Exception as e:
print('Something went wrong. ¯\_(ツ)_/¯. See log file.')
logger.warning(
'Error on {}_{}. ¯\_(ツ)_/¯. {}'.format(key, sent, e))
print('Done with StanfordNLP parse...\n\n')
logger.info('Done with StanfordNLP parse.')
return event_dict
def story_filter(story_dict, story_id):
"""
    One-a-story filter for the events. There can be only one unique
(DATE, SRC, TGT, EVENT) tuple per story.
Parameters
----------
story_dict: Dictionary.
Story-level dictionary as stored in the main event-holding
dictionary within PETRARCH.
story_id: String.
Unique StoryID in standard PETRARCH format.
Returns
-------
filtered: Dictionary.
Holder for filtered events with the format
{(EVENT TUPLE): {'issues': [], 'ids': []}} where the 'issues'
list is optional.
"""
filtered = defaultdict(dict)
story_date = story_dict['meta']['date']
for sent in story_dict['sents']:
sent_dict = story_dict['sents'][sent]
sent_id = '{}_{}'.format(story_id, sent)
if 'events' in sent_dict:
events = story_dict['sents'][sent]['events']
for event in events:
# do not print unresolved agents
try:
if event[0][0] != '-' and event[1][0] != '-':
alist = [story_date]
alist.extend(event)
event_tuple = tuple(alist)
filtered[event_tuple]
if 'issues' in sent_dict:
filtered[event_tuple]['issues'] = Counter()
issues = sent_dict['issues']
for issue in issues:
filtered[event_tuple]['issues'][
issue[0]] += issue[1]
# Will keep track of this info, but not necessarily write it
# out
filtered[event_tuple]['ids'] = []
filtered[event_tuple]['ids'].append(sent_id)
except IndexError:
pass
else:
pass
return filtered
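# Illustrative sketch of the filtering behaviour (the date, actor codes and
# story ID below are made up):
# story = {'meta': {'date': '20150101'},
#          'sents': {'0': {'events': [['USA', 'SYR', '190'],
#                                     ['USA', 'SYR', '190']]}}}
# story_filter(story, 'DEMO')
# => {('20150101', 'USA', 'SYR', '190'): {'ids': ['DEMO_0']}}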
def _format_parsed_str(parsed_str):
parsed = parsed_str.split('\n')
parsed = [line.strip() + ' ' for line in [line1.strip() for line1 in
parsed if line1] if line]
parsed = [line.replace(')', ' ) ').upper() for line in parsed]
treestr = ''.join(parsed)
return treestr
def _format_datestr(date):
datetime = dateutil.parser.parse(date)
date = '{}{:02}{:02}'.format(datetime.year, datetime.month, datetime.day)
return date
def _get_data(dir_path, path):
"""Private function to get the absolute path to the installed files."""
cwd = os.path.abspath(os.path.dirname(__file__))
joined = os.path.join(dir_path, path)
out_dir = os.path.join(cwd, joined)
return out_dir
def _get_config(config_name):
cwd = os.path.abspath(os.path.dirname(__file__))
out_dir = os.path.join(cwd, config_name)
return out_dir
def init_logger(logger_filename):
logger = logging.getLogger('petr_log')
logger.setLevel(logging.INFO)
cwd = os.getcwd()
logger_filepath = os.path.join(cwd, logger_filename)
fh = logging.FileHandler(logger_filepath, 'w')
formatter = logging.Formatter('%(levelname)s %(asctime)s: %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info('Running')
``` |
{
"source": "johnbachman/bax_insertion_paper",
"score": 2
} |
#### File: bid_bim_nbd_release/mcmc/conv_list.py
```python
import os
import numpy as np
import cPickle
import sys
from bax_insertion.data.parse_bid_bim_nbd_release import nbd_residues
import subprocess
def save_last_position(mcmc_filename, pos_filename):
# Get the sampler
with open(mcmc_filename) as f:
print("Loading %s" % mcmc_filename)
(gf, sampler) = cPickle.load(f)
# Get last position
last_pos = sampler.chain[:,:,-1,:]
with open(pos_filename, 'w') as f:
        # Get a default random seed
np.random.seed(1)
rs = np.random.get_state()
print("Writing position to %s" % pos_filename)
cPickle.dump((last_pos, rs), f)
# Iterate over the activators
for activator in ['Bid', 'Bim']:
# Iterate over the NBD residues
for nbd_residue in nbd_residues:
# Iterate over the replicates
for rep_ix, rep_num in enumerate(range(1, 4)):
# Iterate over confs
for num_confs in range(2, 6):
filename_base = 'pt_data1_%s_NBD_%s_r%s_%sconfs' % \
(activator, nbd_residue, rep_num, num_confs)
fit_filename = filename_base + '.fit'
mcmc_filename = filename_base + '.mcmc'
out_filename = filename_base + '.fit.out'
err_filename = filename_base + '.fit.err'
pos_filename = filename_base + '.fit.pos'
posx_filename = pos_filename + 'x'
if not os.path.isfile(mcmc_filename):
continue
# Read the outputfile
print("-------")
converged = False
try:
#print("Opening %s" % out_filename)
with open(out_filename) as outfile:
lines = outfile.readlines()
# Search the file backwards
for i in xrange(1, len(lines) + 1):
if lines[-i].startswith('-- Converged!') or \
lines[-i].startswith('Passes: True'):
converged = True
break
print("%s %s" %
(out_filename,
"Converged" if converged else "Not converged"))
except:
print("Could not open %s" % out_filename)
# Check for a position file
if os.path.isfile(pos_filename):
print("Found %s" % pos_filename)
position_filename = pos_filename
elif os.path.isfile(posx_filename):
print("Found %s" % posx_filename)
position_filename = posx_filename
else:
print("Missing %s and %s" % (pos_filename, posx_filename))
save_last_position(mcmc_filename, posx_filename)
# If not converged, submit
if not converged:
if sys.argv[1] == 'qsub':
cmd_list = ['qsub', '-b', 'y', '-cwd', '-V', '-o',
out_filename, '-e', err_filename, '-pe',
'orte', '32', 'mpirun', 'python',
'../../../pt/run_pt.py', fit_filename,
'1', position_filename]
print ' '.join(cmd_list)
subprocess.call(cmd_list)
elif sys.argv[1] == 'bsub':
cmd_list = ['bsub', '-q', 'parallel', '-n', '10',
'-W', '100:00', '-N', '-o',
out_filename, '-a', 'openmpi', 'mpirun.lsf',
'python', '-m', 'bax_insertion.pt.run_pt',
fit_filename, '1', position_filename]
print ' '.join(cmd_list)
subprocess.call(cmd_list)
else:
raise Exception("Invalid scheduler command")
```
#### File: bax_insertion/plots/nbd_bax_analysis.py
```python
import csv
import itertools
import collections
from matplotlib import pyplot as plt
import numpy as np
import scipy.stats
import scipy.signal # For low-pass filtering in derivatives function
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
from bax_insertion.util import set_fig_params_for_publication, fontsize, \
format_axis, moving_average
from bax_insertion.util.error_propagation import calc_ratio_mean_sd
import bax_insertion.util.calculate_error_variance as cev
#from bax_insertion.util import fitting
#import bax_insertion.plots.titration_fits as tf
#from bax_insertion.models.nbd.multiconf import Builder
line_colors = {'Bid': 'r', 'Bim': 'b'}
dtype_line_colors = {'Release':'r', 'NBD':'g', 'FRET':'b'}
line_styles = {1:':', 2:'-', 3:'--'}
rep_colors = {1:'r', 2:'g', 3:'b'}
# Create lookup dict for sites
site_region = dict(
map(lambda s: (s, 'nterm'), ['3', '5', '15', '36', '40', '47']) +
map(lambda s: (s, 'bh3'), ['54', '62', '68', '79']) +
map(lambda s: (s, 'h56'), ['120', '122', '126', '138', '151']) +
map(lambda s: (s, 'h9'), ['175', '179', '184', '188']))
# Color code the points based on the region of the protein
color_dict = collections.OrderedDict([
('nterm', 'purple'), ('bh3', 'red'),
('h56', 'green'), ('h9', 'blue')])
def _mean_sd(p_name, builder, pysb_fit):
"""Given the named parameter and the fit result, get mean and SE."""
p = builder.model.parameters[p_name]
p_index = builder.model.parameters.index(p)
p_est_index = builder.estimate_params.index(p)
p_mean = pysb_fit.params[p_index]
cov_x = pysb_fit.result[1]
if cov_x is not None:
p_sd = np.sqrt(cov_x[p_est_index, p_est_index] *
np.var(pysb_fit.residuals))
else:
p_sd = np.nan
return (p_mean, p_sd)
class FitResult(object):
"""A helper class for returning and displaying fit results.
param_dict is a dict mapping parameter name to a tuple of the
mean and standard error for the parameter from the fit.
"""
def __init__(self, builder, activator, nbd_site, rep_index,
measurement, param_dict, t, y):
self.builder = builder
self.activator = activator
self.nbd_site = nbd_site
self.rep_index = rep_index
self.measurement = measurement
self.param_dict = param_dict
self.t = t
self.y = y
def param_result_as_string_list(self, param_name):
"""Useful for making CSV files."""
p_mean = self.param_dict[param_name][0]
p_se = self.param_dict[param_name][1]
return [self.activator, self.nbd_site, str(self.rep_index),
self.measurement, param_name, str(p_mean),
str(p_se)]
def timecourse_filename(self):
return '%s_%s_r%s_%s.csv' % (self.activator, self.nbd_site, \
str(self.rep_index), self.measurement)
def plot_all(df, nbd_residues, datatypes, activators=None, replicates=None,
file_basename=None, normalize_nbd=False):
"""Release should be first in the list of datatypes."""
if datatypes[0] != 'Release':
raise ValueError("Release should be first in the datatypes list.")
# Some definitions
# Define some default values
if activators is None:
activators = ['Bid', 'Bim']
if replicates is None:
replicates = (1, 2, 3)
num_subplots = len(datatypes)
# Labels and titles for each datatype
ylabels = {'Release': '% Dye Release',
'NBD': 'F/$F_0$',
'FRET': '% FRET'}
# Every mutant gets its own plot
for nbd_index, nbd_site in enumerate(nbd_residues):
if len(datatypes) == 2:
fig_width = 11
elif len(datatypes) == 3:
fig_width = 14
fig = plt.figure(figsize=(fig_width, 5))
fig.set_tight_layout(True)
# Define the subplot titles
titles = {'Release': r'Dye release for NBD-%s-Bax' % nbd_site,
'NBD': 'NBD F/$F_0$ for NBD-%s-Bax' % nbd_site,
'FRET': 'FRET, NBD-%s-Bax' % nbd_site}
# Every datatype gets its own subplot
for dtype_ix, dtype in enumerate(datatypes):
# There is no NBD/FRET curve for WT Bax, so skip
if (dtype == 'NBD' and nbd_site == 'WT') or \
(dtype == 'FRET' and nbd_site == 'WT'):
continue
plt.subplot(1, num_subplots, dtype_ix + 1)
# Activators and replicates are separate lines on the same plot
for activator in activators:
# Iterate over replicates...
for i in replicates:
# Get the data
t = df[(activator, dtype, nbd_site, i, 'TIME')]
v = df[(activator, dtype, nbd_site, i, 'VALUE')]
                    # TODO: do the normalization here if desired
if normalize_nbd:
v = v / v[0]
plt.plot(t, v, label='%s Rep %d' % (activator, i),
color=line_colors[activator],
linestyle=line_styles[i])
plt.xlabel('Time (sec)')
plt.ylabel(ylabels[dtype])
plt.title(titles[dtype])
plt.legend(loc='lower right')
# Datatype-specific formatting
if dtype == 'Release':
plt.ylim([0, 100])
if file_basename:
plt.savefig('%s_%s.pdf' % (file_basename, nbd_site))
plt.savefig('%s_%s.png' % (file_basename, nbd_site))
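# Hedged usage sketch (comments only): df is expected to carry a column
# MultiIndex keyed by (activator, datatype, residue, replicate, field) tuples,
# e.g. df[('Bid', 'Release', '3', 1, 'TIME')]; the residue list and basename
# below are illustrative.
# plot_all(df, ['3', '15', 'WT'], ['Release', 'NBD'],
#          file_basename='all_timecourses')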
def plot_all_by_replicate(df, nbd_residues, datatypes, activators=None,
replicates=None, file_basename=None,
normalize_nbd=False):
"""Release should be first in the datatypes list."""
if datatypes[0] != 'Release':
raise ValueError("Release should be first in the datatypes list.")
# Define some default values
if activators is None:
activators = ['Bid', 'Bim']
if replicates is None:
replicates = (1, 2, 3)
# Every mutant gets its own plot
for nbd_index, nbd_site in enumerate(nbd_residues):
for activator in activators:
plt.figure(figsize=(14, 5))
# Each replicate gets its own subplot
ax2_list = []
nbd_max = -np.inf
nbd_min = np.inf
for rep in replicates:
# Make the release plot
plt.subplot(1, 3, rep)
ax1 = plt.gca()
ax2 = ax1.twinx()
ax2_list.append(ax2)
for dtype in datatypes:
# Skip WT for NBD and FRET
if (dtype == 'NBD' and nbd_site == 'WT') or \
(dtype == 'FRET' and nbd_site == 'WT'):
continue
# Get the data
t = df[(activator, dtype, nbd_site, rep, 'TIME')]
v = df[(activator, dtype, nbd_site, rep, 'VALUE')]
# Set the axis
if dtype == 'NBD':
if normalize_nbd:
v = v / v[0]
ax = ax2
if np.max(v) > nbd_max:
nbd_max = np.max(v)
if np.min(v) < nbd_min:
nbd_min = np.min(v)
elif dtype == 'Release' or dtype == 'FRET':
ax = ax1
else:
raise ValueError("Unknown datatype: %s" % dtype)
# Plot the data
if dtype == 'FRET':
ax.plot(t, v, label='%s, %s' % (activator, dtype),
color=dtype_line_colors[dtype],
linestyle='', marker='.')
else:
ax.plot(t, v, label='%s, %s' % (activator, dtype),
color=dtype_line_colors[dtype])
# Adjust and label the figure
ax1.set_ylim([0, 100])
if 'Release' in datatypes and 'FRET' in datatypes:
ax1.set_ylabel('% FRET, % Release')
else:
ax1.set_ylabel('% Release')
ax2.set_xlabel('Time (sec)')
ax2.set_ylabel('NBD $F/F_0$')
ax2.set_title('NBD-%s-Bax, %s, Rep %d' %
(nbd_site, activator, rep))
ax1_lines, ax1_labels = ax1.get_legend_handles_labels()
ax2_lines, ax2_labels = ax2.get_legend_handles_labels()
ax2.legend(ax1_lines + ax2_lines, ax1_labels + ax2_labels,
loc='lower right', prop={'size':8})
plt.xticks([0, 1000, 2000, 3000, 4000])
# Give all plots same range for NBD
for ax2 in ax2_list:
if not ((dtype == 'NBD' and nbd_site == 'WT') or \
(dtype == 'FRET' and nbd_site == 'WT')):
ax2.set_ylim([nbd_min * 0.95, nbd_max * 1.05])
plt.subplots_adjust(wspace=0.4, left=0.06, right=0.95)
# Output file, if desired
if file_basename:
plt.savefig('%s_%s_%s.pdf' %
(file_basename, nbd_site, activator))
plt.savefig('%s_%s_%s.png' %
(file_basename, nbd_site, activator))
def calc_barplot_width(num_sites, rmarg=0.13, lmarg=0.11):
# I found that these numbers worked for a 4 inch wide figure with the
# given relative left and right margins and 19 sites. This allows the
# same proportions for endpoint plots with different numbers of sites.
abs_left = 4 * lmarg
abs_right = 4 * rmarg
abs_middle = 4 * (1 - lmarg - rmarg)
abs_per_site = abs_middle / 19.
fig_width = abs_per_site * num_sites + abs_left + abs_right
rel_left = abs_left / float(fig_width)
rel_right = 1 - (abs_right / float(fig_width))
return (fig_width, rel_left, rel_right)
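# Worked example (illustrative): calc_barplot_width(10) gives a figure about
# 0.16 * 10 + 0.44 + 0.52 = 2.56 inches wide, with rel_left ~= 0.17 and
# rel_right ~= 0.80, so each site keeps the same absolute width as in the
# 19-site reference layout.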
def plot_nbd_endpoints(df, nbd_sites, datatype='NBD', last_n_pts=3,
file_basename=None, normalize_nbd=False,
activators=None):
replicates = range(1, 4)
if activators is None:
activators = ['Bid', 'Bim']
# Filter out the WT residue from the list, if its there
nbd_sites_no_wt = [s for s in nbd_sites if s != 'WT']
# Matrix for storing the endpoints for all mutants, replicates
n_endpts = np.zeros((len(nbd_sites_no_wt), len(replicates)))
# Figure setup
set_fig_params_for_publication()
if datatype == 'NBD':
if normalize_nbd:
yaxis_label = 'NBD F/$F_0$'
(fig_width, rel_left, rel_right) = \
calc_barplot_width(len(nbd_sites_no_wt))
else:
yaxis_label = 'NBD fluorescence (RFU)'
# If we're not normalizing the NBD values, then the additional 0s
# will push the axis label off the left-hand side. Therefore we
# adjust the left margin accordingly.
(fig_width, rel_left, rel_right) = \
calc_barplot_width(len(nbd_sites_no_wt),
lmarg=0.13)
elif datatype == 'FRET':
yaxis_label = '\% FRET'
(fig_width, rel_left, rel_right) = \
calc_barplot_width(len(nbd_sites_no_wt))
else:
raise Exception('Invalid datatype: %s' % datatype)
plt.figure(file_basename, figsize=(fig_width, 1.5), dpi=300)
plt.ylabel(yaxis_label, fontsize=fontsize)
bar_colors = {'Bid': 'gray', 'Bim': 'black'}
# Bar X position offset to center bars when only one activator
if len(activators) == 1:
offset = 0.5
else:
offset = 0
# For both activators...
for act_ix, activator in enumerate(activators):
# Now iterate over all of the mutants
for nbd_index, nbd_site in enumerate(nbd_sites_no_wt):
# Iterate over the replicates for this mutant
# Note that rep_num is the 1-indexed number of the replicate
# (1, 2, 3) whereas the index is the 0-based index into the array
# for the replicates (0, 1, 2)
for rep_index, rep_num in enumerate(replicates):
# Get the release data
ny = df[(activator, datatype, nbd_site, rep_num, 'VALUE')].values
# Filter out the NaNs, if any
# Fill in entry for this replicate with mean over last n pts
if normalize_nbd:
endpt_vals = np.nanmean(ny[-last_n_pts:] / ny[0])
else:
endpt_vals = np.nanmean(ny[-last_n_pts:])
n_endpts[nbd_index, rep_index] = endpt_vals
# Bar plot of NBD endpoint
plt.bar(np.arange(nbd_index*3 + act_ix, (nbd_index*3) + 1 + act_ix)
+ offset,
np.mean(n_endpts[nbd_index, :]),
width=1, color=bar_colors[activator], linewidth=0,
ecolor='k', capsize=1.5,
yerr=np.std(n_endpts[nbd_index, :], ddof=1))
x_lbound = -1
x_ubound = len(nbd_sites_no_wt) * 3
# Add horizontal gridlines
if datatype == 'NBD':
plt.hlines(range(0, 6), x_lbound, x_ubound, color='0.85', zorder=1,
linewidth=0.5)
elif datatype == 'FRET':
plt.hlines(0, x_lbound, x_ubound, color='0.85', zorder=1,
linewidth=0.5)
# Format the plot
plt.subplots_adjust(left=rel_left, bottom=0.10, right=rel_right, top=0.94)
ax = plt.gca()
ax.set_xlim([x_lbound, x_ubound])
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_tick_params(which='both', labelsize=fontsize, pad=2,
length=2, width=0.5)
ax.yaxis.set_tick_params(which='both', direction='out',
labelsize=fontsize, pad=0, length=2, width=0.5)
ax.xaxis.labelpad = 2
ax.yaxis.labelpad = 2
ax.set_xticks(np.arange(1, 1 + len(nbd_sites_no_wt) * 3, 3))
ax.set_xticklabels(nbd_sites_no_wt)
# Create legend with rectangles to match colors of bars
if len(activators) > 1:
bid_patch = mpatches.Patch(color=bar_colors['Bid'], label='cBid')
bim_patch = mpatches.Patch(color=bar_colors['Bim'], label='Bim')
leg = plt.legend(handles=[bid_patch, bim_patch],
bbox_to_anchor=(1.02, 0.5), loc='center left', borderaxespad=0.,
prop={'size': fontsize}, handlelength=1)
leg.draw_frame(False)
# Output the file, if desired
if file_basename:
plt.savefig('%s.pdf' % file_basename)
plt.savefig('%s.png' % file_basename)
def plot_release_endpoints(df, nbd_sites, normalized_to_wt=False,
last_n_pts=3, file_basename=None, activators=None):
# Check for nonsense
if normalized_to_wt and 'WT' not in nbd_sites:
raise ValueError("Cannot normalized to WT release if WT is not in the "
"dataset!")
replicates = range(1, 4)
if activators is None:
activators = ['Bid', 'Bim']
# If we're normalizing to WT, filter the WT entry from the site list
if normalized_to_wt:
nbd_sites_filt = [s for s in nbd_sites if s != 'WT']
else:
nbd_sites_filt = nbd_sites
# Matrix for storing the endpoints for all mutants, replicates
r_endpts = np.zeros((len(nbd_sites_filt), len(replicates)))
# Figure setup
set_fig_params_for_publication()
(fig_width, rel_left, rel_right) = \
calc_barplot_width(len(nbd_sites_filt))
plt.figure(file_basename, figsize=(fig_width, 1.5), dpi=300)
# Set the yaxis label according to whether we're normalizing
if normalized_to_wt:
ylabel_str = r'\% Dye Release' + '\n(normalized to WT)'
else:
ylabel_str = r'\% Dye Release'
plt.ylabel(ylabel_str, fontsize=fontsize, multialignment='center')
# Bar colors for the different activators
bar_colors = {'Bid': 'gray', 'Bim': 'black'}
# Bar X position offset to center bars when only one activator
if len(activators) == 1:
offset = 0.5
else:
offset = 0
# For both activators...
for act_ix, activator in enumerate(activators):
# Get the wild type release as a baseline, averaging over last n pts
if normalized_to_wt:
wt_endpts = []
for rep_num in replicates:
ry = df[(activator, 'Release', 'WT', rep_num, 'VALUE')].values
wt_endpts.append(np.mean(ry[-last_n_pts:]))
wt_mean = np.mean(wt_endpts)
wt_sd = np.std(wt_endpts, ddof=1)
# Now iterate over all of the mutants
for nbd_index, nbd_site in enumerate(nbd_sites_filt):
# Iterate over the replicates for this mutant
# Note that rep_num is the 1-indexed number of the replicate
# (1, 2, 3) whereas the index is the 0-based index into the array
# for the replicates (0, 1, 2)
for rep_index, rep_num in enumerate(replicates):
# Get the release data
ry = df[(activator, 'Release', nbd_site,
rep_num, 'VALUE')].values
# Fill in entry for this replicate with mean over last n pts
r_endpts[nbd_index, rep_index] = np.mean(ry[-last_n_pts:])
# Bar plot of release endpoint
# Calculate percent release relative to wild type
rel_mean = np.mean(r_endpts[nbd_index, :])
rel_sd = np.std(r_endpts[nbd_index, :], ddof=1)
if normalized_to_wt:
(rel_mean, rel_sd) = \
calc_ratio_mean_sd(rel_mean, rel_sd, wt_mean, wt_sd)
rel_mean *= 100
rel_sd *= 100
plt.bar(np.arange(nbd_index*3 + act_ix, (nbd_index*3) + 1 + act_ix)
+ offset,
rel_mean, width=1, color=bar_colors[activator],
linewidth=0, ecolor='k', capsize=1.5, yerr=rel_sd)
# Add horizontal gridlines
x_lbound = -1
x_ubound = len(nbd_sites_filt) * 3
# Format the plot
plt.subplots_adjust(left=rel_left, bottom=0.10, right=rel_right, top=0.94)
ax = plt.gca()
ax.set_xlim([x_lbound, x_ubound])
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_tick_params(which='both', labelsize=fontsize, pad=2,
length=2, width=0.5)
ax.yaxis.set_tick_params(which='both', direction='out',
labelsize=fontsize, pad=0, length=2, width=0.5)
ax.xaxis.labelpad = 2
ax.yaxis.labelpad = 2
ax.set_xticks(np.arange(1, 1 + len(nbd_sites_filt) * 3, 3))
ax.set_xticklabels(nbd_sites_filt)
if len(activators) > 1:
# Create legend with rectangles to match colors of bars
bid_patch = mpatches.Patch(color=bar_colors['Bid'], label='cBid')
bim_patch = mpatches.Patch(color=bar_colors['Bim'], label='Bim')
leg = plt.legend(handles=[bid_patch, bim_patch],
bbox_to_anchor=(1.02, 0.5), loc='center left', borderaxespad=0.,
prop={'size': fontsize}, handlelength=1)
leg.draw_frame(False)
# Add hlines every 20 percent until the top of the plot is reached
(ymin, ymax) = ax.get_ylim()
plt.hlines(range(20, int(ymax), 20), x_lbound, x_ubound, color='0.85',
zorder=1, linewidth=0.5)
# Output the file, if desired
if file_basename:
plt.savefig('%s.pdf' % file_basename)
plt.savefig('%s.png' % file_basename)
def plot_initial_rate_fits(df, nbd_sites, activator, num_pts=4, plot=False):
replicates = range(1, 4)
fit_results =[]
# Lists for storing all of the various fits, slopes
r_slopes_all = np.zeros((len(nbd_sites), len(replicates)))
n_slopes_all = np.zeros((len(nbd_sites), len(replicates)))
r_errs_all = np.zeros((len(nbd_sites), len(replicates)))
n_errs_all = np.zeros((len(nbd_sites), len(replicates)))
n_max_all = np.zeros((len(nbd_sites), len(replicates)))
# Iterate over all of the mutants
for nbd_index, nbd_site in enumerate(nbd_sites):
# Iterate over the replicates for this mutant
# Note that rep_num is the 1-indexed number of the replicate
# (1, 2, 3) whereas the index is the 0-based index into the array for
# the replicates (0, 1, 2)
for rep_index, rep_num in enumerate(replicates):
# Get the release data
rt = df[(activator, 'Release', nbd_site, rep_num, 'TIME')].values
ry = df[(activator, 'Release', nbd_site, rep_num, 'VALUE')].values
# Fit line to first n pts
r_lin = scipy.stats.linregress(rt[0:num_pts], ry[0:num_pts])
# Store the slope of the line
r_slope = r_lin[0]
r_slopes_all[nbd_index, rep_index] = r_slope
r_int = r_lin[1] # Intercept
# Uncertainty associated with the slope
r_slope_err = r_lin[4]
r_errs_all[nbd_index, rep_index] = r_slope_err
# Store fit result
tb_param_dict = {'Initial rate (first %d pts)' % num_pts :
(r_slope, r_slope_err)}
fit_results.append(FitResult(None, activator, nbd_site,
rep_num, 'Tb release', tb_param_dict,
None, None))
# Now do the NBD slope calculation, but ignore the WT (since there
# is no NBD label)
if nbd_site == 'WT':
n_max_all[nbd_index, rep_index] = 0
n_errs_all[nbd_index, rep_index] = 0
n_slopes_all[nbd_index, rep_index] = 0
else:
# Get the NBD data
nt = df[(activator, 'NBD', nbd_site, rep_num, 'TIME')].values
ny = df[(activator, 'NBD', nbd_site, rep_num, 'VALUE')].values
# Fit line to first n pts
n_lin = scipy.stats.linregress(nt[0:num_pts], ny[0:num_pts])
# Maximum NBD F/F0
n_max_all[nbd_index, rep_index] = np.max(ny)
# Slope and intercept
n_slope = n_lin[0]
n_slopes_all[nbd_index, rep_index] = n_slope
n_int = n_lin[1] # Intercept
# Uncertainty associated with slope
n_slope_err = n_lin[4]
n_errs_all[nbd_index, rep_index] = n_slope_err
# Store fit result
nbd_param_dict = {'Initial rate (first %d pts)' % num_pts :
(n_slope, n_slope_err)}
fit_results.append(FitResult(None, activator, nbd_site,
rep_num, 'NBD', nbd_param_dict,
None, None))
if plot:
# Make subpanel showing linear fit for release slope
plt.figure('%s, NBD-%s-Bax initial rates' %
(activator, nbd_site), figsize=(12, 5))
# Dye release slope subplot
plt.subplot(1, 2, 1)
# Plot the data
plt.plot(rt[0:num_pts], ry[0:num_pts], linestyle='',
marker='o', color=rep_colors[rep_num])
# Plot the fit
plt.plot(rt[0:num_pts], r_int + r_slope * rt[0:num_pts],
color=rep_colors[rep_num],
label='%s Rep %d' % (activator, rep_num))
plt.title('NBD-%s-Bax, Tb initial rate' % (nbd_site))
plt.legend(loc='lower right')
# Make subpanel showing linear fit for NBD slope
if plot and nbd_site != 'WT':
plt.subplot(1, 2, 2)
# Plot the data
plt.plot(nt[0:num_pts], ny[0:num_pts], linestyle='',
marker='o', color=rep_colors[rep_num])
# Plot the fit
plt.plot(nt[0:num_pts], n_int + n_slope * nt[0:num_pts],
color=rep_colors[rep_num],
label='%s Rep %d' % (activator, rep_num))
plt.xlabel('Time (sec)')
plt.ylabel('$F/F_0$')
plt.title('NBD-%s-Bax, NBD initial rate' % (nbd_site))
plt.legend(loc='lower right')
return fit_results
class InitialRateSamples(object):
def __init__(self, r_slopes, n_slopes, n_maxes, wt_r_slopes, time):
self.r_slopes = r_slopes
self.n_slopes = n_slopes
self.n_maxes = n_maxes
self.time = time
self.wt_r_slopes = wt_r_slopes
def release_avg_std(self):
r_slope_avgs = np.mean(self.r_slopes, axis=2)
r_slope_stds = np.std(self.r_slopes, axis=2, ddof=1)
return (r_slope_avgs, r_slope_stds)
def release_avg_std_wt_normalized(self):
if np.any(np.isnan(self.wt_r_slopes)):
raise ValueError("Can't normalize to WT because there are "
"missing entries for initial WT release "
"rates (found NaNs in the matrix).")
(wt_r_avgs, wt_r_stds) = self.wt_release_avg_std()
(r_slope_avgs, r_slope_stds) = self.release_avg_std()
r_norm_avgs = np.zeros(r_slope_avgs.shape)
r_norm_stds = np.zeros(r_slope_stds.shape)
for act_ix in range(r_slope_avgs.shape[0]):
for nbd_index in range(r_slope_avgs.shape[1]):
(r_norm_avgs[act_ix, nbd_index],
r_norm_stds[act_ix, nbd_index]) = calc_ratio_mean_sd(
r_slope_avgs[act_ix, nbd_index],
r_slope_stds[act_ix, nbd_index],
wt_r_avgs[act_ix],
wt_r_stds[act_ix])
return (r_norm_avgs, r_norm_stds)
def release_slopes_wt_normalized(self):
"""Get the release for each individual replicate normalized
to the WT release at the timepoint."""
if np.any(np.isnan(self.wt_r_slopes)):
raise ValueError("Can't normalize to WT because there are "
"missing entries for initial WT release "
"rates (found NaNs in the matrix).")
(wt_r_avgs, wt_r_stds) = self.wt_release_avg_std()
r_slopes_norm = np.zeros(self.r_slopes.shape)
for act_ix in range(r_slopes_norm.shape[0]):
r_slopes_norm[act_ix] = self.r_slopes[act_ix] / wt_r_avgs[act_ix]
return r_slopes_norm
def wt_release_avg_std(self):
wt_r_avgs = np.mean(self.wt_r_slopes, axis=1)
wt_r_stds = np.std(self.wt_r_slopes, axis=1, ddof=1)
return (wt_r_avgs, wt_r_stds)
def nbd_avg_std(self):
n_slope_avgs = np.mean(self.n_slopes, axis=2)
n_slope_stds = np.std(self.n_slopes, axis=2, ddof=1)
return (n_slope_avgs, n_slope_stds)
def nbd_norm_slopes(self):
return (self.n_slopes / self.n_maxes) * 100
def nbd_norm_avg_std(self):
n_norm_slopes = self.nbd_norm_slopes()
n_norm_slope_avgs = np.mean(n_norm_slopes, axis=2)
n_norm_slope_stds = np.std(n_norm_slopes, axis=2, ddof=1)
return (n_norm_slope_avgs, n_norm_slope_stds)
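# Note on layout: as constructed by calc_initial_rate_samples below, the
# arrays held by InitialRateSamples are indexed as follows: r_slopes,
# n_slopes, and n_maxes as [activator, nbd_site, replicate]; wt_r_slopes as
# [activator, replicate]; and time is the single timepoint (in seconds) at
# which the values were sampled.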
def calc_initial_rate_samples(df, nbd_sites, timepoint_ix=4,
normalize_nbd=False):
"""Get initial NBD/Tb release values from the data at the given timepoint.
Returns an instance of InitialRateSamples containing the data at the
given timepoint for all of the mutants/replicates.
If normalize_nbd is set, then the raw NBD fluorescence data is converted to
F/F0 values.
"""
replicates = range(1, 4)
activators = ['Bid', 'Bim']
nbd_sites_filt = [s for s in nbd_sites if s != 'WT']
# Lists for storing all of the various fits, slopes
r_slopes = np.zeros((len(activators), len(nbd_sites_filt), len(replicates)))
n_slopes = np.zeros((len(activators), len(nbd_sites_filt), len(replicates)))
n_maxes = np.zeros((len(activators), len(nbd_sites_filt), len(replicates)))
wt_r_slopes = np.zeros((len(activators), len(replicates)))
# For both Bid and Bim...
for act_ix, activator in enumerate(activators):
# Get release data for WT as a reference
if 'WT' in nbd_sites:
for rep_index, rep_num in enumerate(replicates):
ry = df[(activator, 'Release', 'WT', rep_num, 'VALUE')].values
wt_r_slopes[act_ix, rep_index] = ry[timepoint_ix]
# If we don't have an entry for WT, put a NaN into the matrix
else:
for rep_index, rep_num in enumerate(replicates):
                wt_r_slopes[act_ix, rep_index] = np.nan
# Iterate over all of the mutants
for nbd_index, nbd_site in enumerate(nbd_sites_filt):
# Iterate over the replicates for this mutant. Note that rep_num is
# the 1-indexed number of the replicate (1, 2, 3) whereas the index
# is the 0-based index into the array for the replicates (0, 1, 2)
for rep_index, rep_num in enumerate(replicates):
# Get the release data
rt = df[(activator, 'Release',
nbd_site, rep_num, 'TIME')].values
ry = df[(activator, 'Release',
nbd_site, rep_num, 'VALUE')].values
time = rt[timepoint_ix]
# Store the y value at the given timepoint
r_slopes[act_ix, nbd_index, rep_index] = ry[timepoint_ix]
# Get the NBD data
nt = df[(activator, 'NBD', nbd_site, rep_num, 'TIME')].values
ny = df[(activator, 'NBD', nbd_site, rep_num, 'VALUE')].values
# Normalize the NBD data
if normalize_nbd:
ny = ny / ny[0]
f0 = 1.0
else:
f0 = ny[0]
# Maximum NBD F/F0
# Because NBD-47-Bax actually decreases in fluorescence, we
# use the change relative to the minimum.
if nbd_site == '47':
n_slopes[act_ix, nbd_index, rep_index] = \
f0 - ny[timepoint_ix]
n_maxes[act_ix, nbd_index, rep_index] = f0 - np.min(ny)
else:
n_slopes[act_ix, nbd_index, rep_index] = \
ny[timepoint_ix] - f0
n_maxes[act_ix, nbd_index, rep_index] = np.max(ny) - f0
# Save the results in the initial rate structure
irs = InitialRateSamples(r_slopes, n_slopes, n_maxes, wt_r_slopes, time)
return irs
def plot_initial_rate_samples(df, nbd_sites, timepoint_ix=4,
file_basename=None, normalized_to_wt=True):
"""Plot characteristics of initial rates.
"""
set_fig_params_for_publication()
# Important lists
replicates = range(1, 4)
activators = ['Bid', 'Bim']
nbd_sites_filt = [s for s in nbd_sites if s != 'WT']
# Get the initial rate data (copy over to local variables for legacy
# reasons)
irs = calc_initial_rate_samples(df, nbd_sites, timepoint_ix,
normalize_nbd=True)
if normalized_to_wt:
(r_slope_avgs, r_slope_stds) = irs.release_avg_std_wt_normalized()
r_slopes = irs.release_slopes_wt_normalized()
else:
(r_slope_avgs, r_slope_stds) = irs.release_avg_std()
r_slopes = irs.r_slopes
(n_slope_avgs, n_slope_stds) = irs.nbd_avg_std()
n_norm_slopes = irs.nbd_norm_slopes()
(n_norm_slope_avgs, n_norm_slope_stds) = irs.nbd_norm_avg_std()
time = irs.time
# Bar plots of initial points ------------------------------------
fig_names = {'Release': '%s_init_release_bar' % file_basename,
'NBD': '%s_init_nbd_bar' % file_basename}
if normalized_to_wt:
ylabels = {'Release': 'Dye release at %d sec\n(fold-change over WT)' %
time,
'NBD': '\\%% of Max NBD F/$F_0$ at %d sec' % time}
else:
ylabels = {'Release': '\\%% Dye release at %d sec' % time,
'NBD': '\\%% of Max NBD F/$F_0$ at %d sec' % time}
# Make both Release and NBD bar plots
for dtype in ['Release', 'NBD']:
fig_name = fig_names[dtype]
# Get the width of the figure based on the number of sites
(fig_width, rel_left, rel_right) = \
calc_barplot_width(len(nbd_sites_filt))
plt.figure(fig_name, figsize=(fig_width, 1.5), dpi=300)
plt.ylabel(ylabels[dtype], fontsize=fontsize, multialignment='center')
bar_colors = {'Bid': 'gray', 'Bim': 'black'}
# Iterate over the activators
for act_ix, activator in enumerate(activators):
for nbd_index, nbd_site in enumerate(nbd_sites_filt):
# Get the appropriate data
if dtype == 'Release':
yval = r_slope_avgs[act_ix, nbd_index]
yerr = r_slope_stds[act_ix, nbd_index]
elif dtype == 'NBD':
yval = n_norm_slope_avgs[act_ix, nbd_index]
yerr = n_norm_slope_stds[act_ix, nbd_index]
else:
raise ValueError('Unknown datatype: %s' % dtype)
# Plot the data
plt.bar(range(nbd_index*3 + act_ix, (nbd_index*3) + 1 + act_ix),
yval, yerr=yerr, width=1, color=bar_colors[activator],
linewidth=0, ecolor='k', capsize=1.5)
x_lbound = -1
x_ubound = len(nbd_sites_filt) * 3
# Add line showing wild type
plt.hlines(0, x_lbound, x_ubound, linewidth=0.5)
plt.subplots_adjust(left=rel_left, bottom=0.10, right=rel_right,
top=0.94)
# Set bounds
ax = plt.gca()
ax.set_xlim([x_lbound, x_ubound])
# Format the plot
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_tick_params(which='both', labelsize=fontsize, pad=2,
length=2, width=0.5)
ax.yaxis.set_tick_params(which='both', direction='out',
labelsize=fontsize, pad=0, length=2, width=0.5)
ax.xaxis.labelpad = 2
ax.yaxis.labelpad = 2
ax.set_xticks(np.arange(1, 1 + len(nbd_sites_filt) * 3, 3))
ax.set_xticklabels(nbd_sites_filt)
# Create legend with rectangles to match colors of bars
bid_patch = mpatches.Patch(color=bar_colors['Bid'], label='cBid')
bim_patch = mpatches.Patch(color=bar_colors['Bim'], label='Bim')
leg = plt.legend(handles=[bid_patch, bim_patch],
bbox_to_anchor=(1.02, 0.5), loc='center left', borderaxespad=0.,
prop={'size': fontsize}, handlelength=1)
leg.draw_frame(False)
# Output the file, if desired
if file_basename:
plt.savefig('%s.pdf' % fig_name)
plt.savefig('%s.png' % fig_name)
# 2D scatter plot of NBD/Tb rates, no normalization ----------------------
for act_ix, activator in enumerate(activators):
fig_name = "%s_init_scatter_no_norm_%s" % (file_basename, activator)
plt.figure(fig_name, figsize=(2, 2), dpi=300)
plt.errorbar(r_slope_avgs[act_ix], n_slope_avgs[act_ix],
xerr=r_slope_stds[act_ix], yerr=n_slope_stds[act_ix],
marker='o', color='k', markersize=3, linestyle='',
capsize=1.5)
plt.xlabel('Dye release at %d sec' % time)
plt.ylabel('delta F/$F_0$ at %d sec' % time)
x_offset = 0.4
y_offset = 0.04
for nbd_index, nbd_site in enumerate(nbd_sites_filt):
plt.text(r_slope_avgs[act_ix, nbd_index] + x_offset,
n_slope_avgs[act_ix, nbd_index] + y_offset, nbd_site,
fontsize=fontsize)
ax = plt.gca()
ybounds = ax.get_ylim()
#plt.vlines(wt_avg, ybounds[0], ybounds[1], linestyle='--')
format_axis(ax)
plt.subplots_adjust(left=0.17, bottom=0.15)
#if file_basename:
# plt.savefig('%s.pdf' % fig_name)
# 2D scatter plot of NBD/Tb rates, normalized ----------------------------
# Make two separate plots, one for each activator
for act_ix, activator in enumerate(activators):
fig_name = "%s_init_scatter_norm_%s" % (file_basename, activator)
plt.figure(fig_name, figsize=(2, 2), dpi=300)
plt.xlabel('Dye release at %d sec\n(fold-change over WT)' % time)
plt.ylabel('\\%% of Max NBD F/$F_0$ at %d sec' % time)
# Iterate over the sites, plotting one point at a time
for nbd_index, nbd_site in enumerate(nbd_sites_filt):
# Get color for site based on region
pt_color = color_dict[site_region[nbd_site]]
# Plot a point on the scatterplot
plt.errorbar(r_slope_avgs[act_ix, nbd_index],
n_norm_slope_avgs[act_ix, nbd_index],
xerr=r_slope_stds[act_ix, nbd_index],
yerr=n_norm_slope_stds[act_ix, nbd_index],
marker='', color=pt_color, linestyle='',
markersize=2, capsize=1)
#plt.vlines(wt_avg, np.min(n_slope_avgs), np.max(n_slope_avgs),
# linestyle='--')
# Filter out the BH3 region data for linear fit
x_pts = []
y_pts = []
for nbd_index, nbd_site in enumerate(nbd_sites_filt):
# Skip residues in the BH3 region
if site_region[nbd_site] == 'bh3':
continue
# If not in BH3 region, add to list of points
for rep_ix, rep_num in enumerate(replicates):
x_pts.append(r_slopes[act_ix, nbd_index, rep_ix])
y_pts.append(n_norm_slopes[act_ix, nbd_index, rep_ix])
# Turn the lists into numpy arrays
x_pts = np.array(x_pts)
y_pts = np.array(y_pts)
# Fit the filtered data to a line, allowing free intercept
linfit = scipy.stats.linregress(x_pts, y_pts)
plt.plot(x_pts, x_pts * linfit[0] + linfit[1], color='k', zorder=1)
print linfit
# Format the plot
ax = plt.gca()
format_axis(ax)
plt.subplots_adjust(left=0.22, bottom=0.19, right=0.95, top=0.95)
ybounds = ax.get_ylim()
ax.set_ylim(min(0, ybounds[0]), ybounds[1])
# Create legend with lines to match colors of points
n_patch = mlines.Line2D([], [], color=color_dict['nterm'],
label='N-term')
bh3_patch = mlines.Line2D([], [], color=color_dict['bh3'],
label='BH3')
a56_patch = mlines.Line2D([], [], color=color_dict['h56'],
label='$\\alpha$5-6')
a9_patch = mlines.Line2D([], [], color=color_dict['h9'],
label='$\\alpha 9$')
leg = plt.legend(handles=[n_patch, bh3_patch, a56_patch, a9_patch],
loc='lower right', borderaxespad=0.,
prop={'size': fontsize}, handlelength=2,
handletextpad=0.2, labelspacing=0.5)
leg.draw_frame(False)
# Draw residue number text for BH3 residues only
for nbd_index, nbd_site in enumerate(nbd_sites_filt):
(x_lbound, x_ubound) = plt.xlim()
x_range = x_ubound - x_lbound
x_offset = x_range * -0.07
y_offset = 1.15
# Select residues
if site_region[nbd_site] == 'bh3':
# Get color for site text based on region
pt_color = color_dict[site_region[nbd_site]]
# Draw text
plt.text(r_slope_avgs[act_ix, nbd_index] + x_offset,
n_norm_slope_avgs[act_ix, nbd_index] + y_offset,
nbd_site, color=pt_color, fontsize=fontsize)
# Save the fig, if desired
if file_basename:
plt.savefig('%s.pdf' % fig_name)
plt.savefig('%s.png' % fig_name)
# Plot of all replicates
for act_ix, activator in enumerate(activators):
plt.figure(activator)
for nbd_index, nbd_site in enumerate(nbd_sites_filt):
for rep_ix, rep_num in enumerate(replicates):
pt_color = color_dict[site_region[nbd_site]]
# Plot point
plt.plot(r_slopes[act_ix, nbd_index, rep_ix],
n_norm_slopes[act_ix, nbd_index, rep_ix],
marker='o', linestyle='', color=pt_color)
return
def plot_rank_changes(means1, sds1, means2, sds2, nbd_sites):
ubounds1 = means1 + sds1
lbounds1 = means1 - sds1
ubounds2 = means2 + sds2
lbounds2 = means2 - sds2
ubounds = np.vstack([ubounds1, ubounds2]).T
lbounds = np.vstack([lbounds1, lbounds2]).T
means = np.vstack([means1, means2]).T
plt.figure()
num_colors = ubounds.shape[0]
    colors = plt.cm.Set3(np.linspace(0, 1, num_colors))
for i in range(ubounds.shape[0]):
if nbd_sites[i] in ['175', '179', '184', '188']:
#if nbd_sites[i] in ['3', '5', '15', '36', '40', '47']:
color='k'
else:
color=colors[i]
plt.fill_between([1, 2], ubounds[i], lbounds[i], color=color,
alpha=0.2)
plt.plot([1, 2], means[i], color=color)
plt.text(0.95, means[i, 0], nbd_sites[i], color='k',
fontsize=8)
plt.text(2.05, means[i, 1], nbd_sites[i], color='k',
fontsize=8)
plt.xlim(0.5, 2.5)
def plot_bid_vs_bim_release(df, nbd_sites, dtype='Release',
file_basename=None):
replicates = range(1, 4)
activators = ['Bid', 'Bim']
    # Iterate over the NBD sites
for nbd_index, nbd_site in enumerate(nbd_sites):
color_ix = 0
for act_ix, activator in enumerate(activators):
# Initialization for WT
wt_slice = df[activator][dtype]['WT']
wt_numpts = wt_slice.shape[0]
wt_y = np.zeros((wt_numpts, len(replicates)))
# Initialization for mutant
mut_slice = df[activator][dtype][nbd_site]
mut_numpts = mut_slice.shape[0]
mut_y = np.zeros((mut_numpts, len(replicates)))
# Iterate over reps and get data
for rep_ix, rep_num in enumerate(replicates):
# Only get the time coordinates for the first rep
if rep_ix == 0:
wt_time = wt_slice[rep_num]['TIME'].values
mut_time = mut_slice[rep_num]['TIME'].values
wt_y[:, rep_ix] = wt_slice[rep_num]['VALUE'].values
mut_y[:, rep_ix] = mut_slice[rep_num]['VALUE'].values
# Now get the averages and SDs
wt_avg = np.mean(wt_y, axis=1)
wt_sd = np.std(wt_y, axis=1, ddof=1)
wt_ubound = wt_avg + wt_sd
wt_lbound = wt_avg - wt_sd
mut_avg = np.mean(mut_y, axis=1)
mut_sd = np.std(mut_y, axis=1, ddof=1)
mut_ubound = mut_avg + mut_sd
mut_lbound = mut_avg - mut_sd
#num_colors = 4
#colors = plt.cm.Set3(np.linspace(0, 1, num_colors))
colors = ['r', 'm', 'b', 'g']
fig_name = 'bid_bim_tc_comp_%s' % nbd_site
# Plot the ratio
plt.figure(fig_name, figsize=(10, 10))
plt.subplot(1, 2, 1)
(ratio_avg, ratio_sd) = \
calc_ratio_mean_sd(mut_avg, mut_sd, wt_avg, wt_sd)
plt.plot(wt_time, ratio_avg, color=colors[color_ix],
label=activator)
plt.fill_between(wt_time, ratio_avg + ratio_sd,
ratio_avg - ratio_sd, alpha=0.5,
color=colors[color_ix])
plt.legend(loc='upper right', fontsize=10)
plt.ylim(0, 5)
# Plot the raw timecourses for WT and mutant
# Plot the mutant
plt.subplot(1, 2, 2)
plt.plot(wt_time, mut_avg, color=colors[color_ix],
label='%s, NBD-%sC-Bax' % (activator, nbd_site))
plt.fill_between(wt_time, mut_ubound, mut_lbound,
color=colors[color_ix], alpha=0.2)
color_ix += 1
# Plot the WT
plt.plot(wt_time, wt_avg, color=colors[color_ix],
label='%s, WT Bax' % activator)
plt.fill_between(wt_time, wt_ubound, wt_lbound,
color=colors[color_ix], alpha=0.3)
plt.legend(loc='lower right', fontsize=10)
color_ix += 1
if file_basename:
plt.savefig('%s_%s.pdf' % (file_basename, fig_name))
plt.savefig('%s_%s.png' % (file_basename, fig_name))
def calc_release_peaks(df, nbd_sites, activators=None, replicates=None,
window=1, csv_filename=None):
"""Measure the lag phase of the release data.
Takes the derivative of the release data for the given set of
activators, NBD sites, and replicates, and gets the time associated
with the peak of the derivative.
Returns
-------
Dictionary containing keys of the form (activator, nbd_site, replicate)
mapped to the times of the maximum rate of release.
"""
# Set some defaults
if activators is None:
activators = ['Bid', 'Bim']
if replicates is None:
replicates = range(1, 4)
peak_dict = collections.OrderedDict()
# Initialize the filter
b, a = scipy.signal.butter(1, 0.2)
for activator, nbd_site, rep_index in \
itertools.product(activators, nbd_sites, replicates):
key = (activator, nbd_site, rep_index)
# Get the data
rt = df[(activator, 'Release', nbd_site,
rep_index, 'TIME')].values
ry = df[(activator, 'Release', nbd_site,
rep_index, 'VALUE')].values
        # Filter the timecourse
r_filt = scipy.signal.filtfilt(b, a, ry)
r_avg = moving_average(r_filt, n=window)
# Take the derivative
r_diff = np.diff(r_avg)
# When we take the derivative, the size of the array shrinks by
# one, because we are calculating the differences between neighboring
# elements. So if the data is [0, 1, 3, 4, 5], the derivatives will
# be [1, 2, 1, 1], and the maximum of the derivative array will be at
# index 1, which corresponds to the difference of two and the entry of
# three in the original array. If we adopt the convention that the
# index to use for the maximum slope is the latter of the two values
# used in calculating the difference, this means we need to add one to
# the index associated with the maximum value of the diff array.
r_max_tpt = np.argmax(r_diff) + 1
peak_dict[key] = rt[r_max_tpt]
if csv_filename:
with open(csv_filename, 'w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
for key, value in peak_dict.iteritems():
line = list(key)
line.append(value)
csv_writer.writerow(line)
return peak_dict
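# Example usage (a sketch; the site names and output filename are
# illustrative and assume df has been loaded as in the __main__ block at the
# bottom of this module):
#
#     peak_times = calc_release_peaks(df, ['3', '15'], activators=['Bid'],
#                                     csv_filename='release_peaks.csv')
#     # peak_times[('Bid', '3', 1)] gives the time (sec) of maximum release
#     # rate for NBD-3-Bax, Bid, replicate 1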
def plot_example_derivatives(df, activator, nbd_site, rep_index, window=1,
normalize_nbd=False, plot_filename=None,
plot_tb_peak=False):
set_fig_params_for_publication()
    # Create a first-order lowpass Butterworth filter.
b, a = scipy.signal.butter(1, 0.2)
# RELEASE DERIVATIVE
rt = df[(activator, 'Release', nbd_site, rep_index, 'TIME')].values
ry = df[(activator, 'Release', nbd_site, rep_index, 'VALUE')].values
# Filter the timecourse
r_filt = scipy.signal.filtfilt(b, a, ry)
r_avg = moving_average(r_filt, n=window)
# Take the derivative
r_diff = np.diff(r_avg)
# Peak release derivative
r_max_tpt = np.argmax(r_diff) + 1
peak_pt = rt[r_max_tpt]
# NBD DERIVATIVE
nt = df[(activator, 'NBD', nbd_site, rep_index, 'TIME')].values
ny = df[(activator, 'NBD', nbd_site, rep_index, 'VALUE')].values
# Normalize NBD to F/F0
if normalize_nbd:
ny = ny / float(ny[0])
# Filter
n_filt = scipy.signal.filtfilt(b, a, ny)
n_avg = moving_average(n_filt, n=window)
# Take derivative
n_diff = np.diff(n_avg)
# PLOT
fig = plt.figure(figsize=(1.5, 1.5), dpi=300)
ax = fig.gca()
n_diff_norm = n_diff / np.max(np.abs(n_diff))
r_diff_norm = r_diff / np.max(np.abs(r_diff))
ax.plot(nt[1+window-1:], n_diff_norm, label=r'$\frac{d}{dt}$ NBD')
ax.plot(rt[1+window-1:], r_diff_norm, color='r',
label=r'$\frac{d}{dt}$ Tb')
ax.set_xlabel('Time (sec)')
ax.set_ylabel(r'\% Max Rate')
#ax.set_title('%s, NBD-%s-Bax normalized derivative' % (activator, nbd_site))
ax.set_ylim(-0.25, 1.1)
(xmin, xmax) = (0, 2000)
ax.set_xlim(xmin, xmax)
plt.hlines(0, xmin, xmax, linestyle='-')
plt.subplots_adjust(left=0.27, bottom=0.19)
plt.legend(loc='upper right', fontsize=fontsize, frameon=False)
ymin, ymax = plt.ylim()
format_axis(ax)
if plot_tb_peak:
plt.vlines(peak_pt, ymin, ymax, color='gray', alpha=0.5)
if plot_filename:
plt.savefig('%s.pdf' % plot_filename)
plt.savefig('%s.png' % plot_filename, dpi=300)
def plot_derivatives(df, nbd_sites, normalize_nbd=False):
replicates = range(1, 4)
num_pts = 4
window = 1 # For moving average
activators = ['Bid', 'Bim']
    # Create a first-order lowpass Butterworth filter.
b, a = scipy.signal.butter(1, 0.2)
for nbd_index, nbd_site in enumerate(nbd_sites):
for activator in activators:
# We store the list of timepoints where the release derivative
# reaches its peak so that we can plot lines for all three with
# the same upper and lower y-coordinates.
peak_pts = []
for rep_index in replicates:
rt = df[(activator, 'Release', nbd_site,
rep_index, 'TIME')].values
ry = df[(activator, 'Release', nbd_site,
rep_index, 'VALUE')].values
# Filter the timecourse
r_filt = scipy.signal.filtfilt(b, a, ry)
r_avg = moving_average(r_filt, n=window)
# Take the derivative
r_diff = np.diff(r_avg)
# See comment in calc_release_peaks, above
r_max_tpt = np.argmax(r_diff) + 1
peak_pts.append(rt[r_max_tpt])
# Calculate max NBD slope, but not for WT
if nbd_site != 'WT':
nt = df[(activator, 'NBD', nbd_site,
rep_index, 'TIME')].values
ny = df[(activator, 'NBD', nbd_site,
rep_index, 'VALUE')].values
# Normalize NBD to F/F0
if normalize_nbd:
ny = ny / float(ny[0])
# Filter
n_filt = scipy.signal.filtfilt(b, a, ny)
n_avg = moving_average(n_filt, n=window)
# Take derivative
n_diff = np.diff(n_avg)
# Terbium subplot
plt.figure('%s, NBD-%s-Bax derivative' % (activator, nbd_site),
figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(rt[1+window-1:], r_diff, color=rep_colors[rep_index],
label='%s Rep %d' % (activator, rep_index))
plt.ylabel('dRel/dt (% rel $sec^{-1}$)')
plt.title('%s, NBD-%s-Bax, Tb derivative' %
(activator, nbd_site))
plt.legend(loc='upper right')
if nbd_site != 'WT':
# NBD subplot
plt.subplot(1, 2, 2)
plt.plot(nt[1+window-1:], n_diff,
color=rep_colors[rep_index],
label='%s Rep %d' % (activator, rep_index))
plt.xlabel('Time (sec)')
plt.ylabel('dNBD/dt ($F/F_0\ sec^{-1}$)')
plt.title('%s, NBD-%s-Bax, NBD derivative' %
(activator, nbd_site))
plt.legend(loc='upper right')
# Plot normalized derivatives
plt.figure('%s, NBD-%s-Bax normalized derivative' %
(activator, nbd_site))
n_diff_norm = n_diff / np.max(np.abs(n_diff))
r_diff_norm = r_diff / np.max(np.abs(r_diff))
plt.plot(rt[1+window-1:], r_diff_norm,
color=rep_colors[rep_index],
linestyle=line_styles[2],
label='%s Rep %d' % (activator, rep_index))
plt.plot(nt[1+window-1:], n_diff_norm,
color=rep_colors[rep_index],
linestyle=line_styles[3])
plt.xlabel('Time (sec)')
plt.ylabel('% max rate')
plt.title('%s, NBD-%s-Bax normalized derivative' %
(activator, nbd_site))
plt.legend(loc='upper right')
# Add vertical lines to the normalized derivative plot
plt.figure('%s, NBD-%s-Bax normalized derivative' %
(activator, nbd_site))
ymin, ymax = plt.ylim()
plt.vlines(peak_pts, ymin, ymax)
# Call tight_layout for the Tb/NBD 2-panel figure
plt.figure('%s, NBD-%s-Bax derivative' % (activator, nbd_site))
plt.tight_layout()
def welch_t_test(means1, sds1, means2, sds2):
n1 = 3
n2 = 3
t_numer = means1 - means2
sq_sum = ((sds1**2)/n1) + ((sds2**2)/n2)
t_denom = np.sqrt(sq_sum)
t = t_numer / t_denom
print t
v_numer = sq_sum ** 2
v1 = n1 - 1.0
v2 = n2 - 1.0
    # Welch-Satterthwaite approximation for the degrees of freedom: the two
    # variance terms in the denominator are summed, each being
    # (s_i^2 / n_i)^2 / (n_i - 1)
    v_denom = ((sds1 ** 4) / ((n1**2) * v1)) + ((sds2 ** 4) / ((n2**2) * v2))
v = v_numer / v_denom
print v
p_val = scipy.stats.t.sf(t, v)
print p_val
def student_t_test(means1, sds1, means2, sds2, n):
n = float(n)
t_numer = np.abs(means1 - means2)
sx1x2 = np.sqrt(0.5*(sds1**2 + sds2**2))
t_denom = sx1x2 * np.sqrt(2/n)
t = t_numer / t_denom
print t
p_val = scipy.stats.t.sf(t, n - 1)
print p_val * 2
return p_val * 2
# -- Model fits --
params_dict = {'c1_to_c2_k': 1e-4, 'c1_scaling': 2,
'c0_to_c1_k': 2e-3}
def plot_2conf_fits(df, nbd_sites, activator, dtype='NBD', replicates=None,
normalize_nbd=False):
if replicates is None:
replicates = range(1, 4)
fit_results = []
# Filter out the WT residue, if present in the list
nbd_sites_filt = [s for s in nbd_sites if s != 'WT']
for nbd_index, nbd_site in enumerate(nbd_sites_filt):
k1_means = []
k1_sds = []
for rep_index in replicates:
nt = df[(activator, dtype, nbd_site, rep_index, 'TIME')].values
ny = df[(activator, dtype, nbd_site, rep_index, 'VALUE')].values
# Normalize NBD to F/F0
if dtype == 'NBD' and normalize_nbd:
ny = ny / ny[0]
plt.figure('%s, NBD-%s-Bax Fits' % (activator, nbd_site),
figsize=(6, 5))
builder = Builder(params_dict=params_dict)
builder.build_model_multiconf(2, ny[0], normalized_data=True)
# Rough guesses for parameters
# Guess that the final scaling is close to the final data value
builder.model.parameters['c1_scaling'].value = ny[-1]
# Rough guess for the timescale
builder.model.parameters['c0_to_c1_k'].value = 1e-3
pysb_fit = fitting.fit_pysb_builder(builder, 'NBD', nt, ny)
plt.plot(nt, ny, linestyle='', marker='.',
color=rep_colors[rep_index])
plt.plot(nt, pysb_fit.ypred,
label='%s Rep %d' % (activator, rep_index),
color=rep_colors[rep_index])
plt.xlabel('Time (sec)')
plt.ylabel('$F/F_0$')
plt.title('NBD $F/F_0$ fits, NBD-%s-Bax' % nbd_site)
plt.legend(loc='lower right')
# Calculate stderr of parameters (doesn't account for covariance)
(k1_mean, k1_sd) = _mean_sd('c0_to_c1_k', builder, pysb_fit)
k1_means.append(k1_mean)
k1_sds.append(k1_sd)
(c1_mean, c1_sd) = _mean_sd('c1_scaling', builder, pysb_fit)
param_dict = {'c0_to_c1_k': (k1_mean, k1_sd),
'c1_scaling': (c1_mean, c1_sd)}
fit_results.append(FitResult(builder, activator, nbd_site,
rep_index, dtype, param_dict,
nt, pysb_fit.ypred))
plt.figure('%s, NBD-%s-Bax Fits' % (activator, nbd_site))
plt.tight_layout()
plt.figure("Fitted k1")
nreps = len(replicates)
plt.bar(range(nbd_index*(nreps+1), (nbd_index*(nreps+1)) + nreps),
k1_means, yerr=k1_sds, width=1, color='r', ecolor='k')
num_sites = len(nbd_sites_filt)
plt.figure("Fitted k1")
plt.ylabel('Fitted k1 ($sec^{-1}$)')
ax = plt.gca()
ax.set_xticks(np.arange(1.5, 1.5 + num_sites * 4, 4))
ax.set_xticklabels(nbd_sites_filt)
return fit_results
def plot_3conf_fits(df, nbd_sites, activator, dtype='NBD', replicates=None,
normalize_nbd=False, do_plot=True):
# Filter out the WT residue, if present in the list
nbd_sites_filt = [s for s in nbd_sites if s != 'WT']
if replicates is None:
replicates = range(1, 4)
fit_results = []
for nbd_index, nbd_site in enumerate(nbd_sites_filt):
k1_means = []
k2_means = []
k1_sds = []
k2_sds = []
for rep_index in replicates:
print("Fitting %s, %s, %s" % (activator, nbd_site, rep_index))
rt = df[(activator, 'Release', nbd_site, rep_index, 'TIME')].values
ry = df[(activator, 'Release', nbd_site, rep_index, 'VALUE')].values
nt = df[(activator, dtype, nbd_site, rep_index, 'TIME')].values
ny = df[(activator, dtype, nbd_site, rep_index, 'VALUE')].values
# Filter out any Nans
nt_nonan = nt[~np.isnan(ny)]
ny_nonan = ny[~np.isnan(ny)]
# Normalize NBD to F/F0
if dtype == 'NBD' and normalize_nbd:
ny_nonan = ny_nonan / ny_nonan[0]
"""
plt.figure()
plt.plot(rt, ry)
plt.figure()
plt.plot(nt, ny)
plt.figure()
plt.plot(nt, ry / ny)
ry_norm = (ry - np.min(ry)) / (np.max(ry) - np.min(ry))
ny_norm = (ny - np.min(ny)) / (np.max(ny) - np.min(ny))
plt.figure()
plt.plot(rt, ry_norm, color='g')
plt.plot(rt, ny_norm, color='b')
"""
# Fit the release timecourse
twoexp = tf.TwoExpLinear()
#twoexp = tf.TwoExp()
params = twoexp.fit_timecourse(rt, ry)
# Set up the NBD/FRET model
builder = Builder(params_dict=params_dict)
builder.build_model_multiconf(3, ny_nonan[0], normalized_data=True)
# Add some initial guesses
# Guess that the scaling value for the final conformation is close
# to the final data value
builder.model.parameters['c2_scaling'].value = ny_nonan[-1]
# Guess that the scaling value for the intermediate conformation
# is close to the value at ~300 sec
c1_timescale_seconds = 300
            c1_timescale_index = np.where(nt > c1_timescale_seconds)[0].min()
builder.model.parameters['c1_scaling'].value = \
ny_nonan[c1_timescale_index]
# Rough guesses for the timescales of the first and second
# transitions
builder.model.parameters['c0_to_c1_k'].value = 0.025
builder.model.parameters['c1_to_c2_k'].value = 5e-4
            # The FRET data contains many negative values, so parameters are
            # not log-transformed here
            log_transform = False
# Do the fit
pysb_fit = fitting.fit_pysb_builder(builder, 'NBD', nt_nonan,
ny_nonan, log_transform=log_transform)
if do_plot:
plt.figure('%s, %s, NBD-%s-Bax Fits' %
(activator, dtype, nbd_site), figsize=(12, 5))
# Plot the release with fits
plt.subplot(1, 2, 1)
plt.plot(rt, ry,
linestyle='', marker='.',
color=rep_colors[rep_index])
plt.plot(rt, twoexp.fit_func(rt, params),
label='%s Rep %d' % (activator, rep_index),
color=rep_colors[rep_index])
plt.xlabel('Time (sec)')
plt.ylabel('% Tb release')
plt.title('%% Tb release fits, NBD-%s-Bax' % nbd_site)
plt.legend(loc='lower right')
# Plot the NBD with fits
plt.subplot(1, 2, 2)
plt.plot(nt_nonan, ny_nonan, linestyle='', marker='.',
color=rep_colors[rep_index])
plt.plot(nt_nonan, pysb_fit.ypred,
label='%s Rep %d' % (activator, rep_index),
color=rep_colors[rep_index])
plt.xlabel('Time (sec)')
plt.ylabel('$F/F_0$')
plt.title('NBD $F/F_0$ fits, NBD-%s-Bax' % nbd_site)
plt.legend(loc='lower right')
plt.figure('%s, %s, NBD-%s-Bax Fits' %
(activator, dtype, nbd_site))
plt.tight_layout()
# Calculate stderr of parameters (doesn't account for covariance)
(k1_mean, k1_sd) = _mean_sd('c0_to_c1_k', builder, pysb_fit)
(k2_mean, k2_sd) = _mean_sd('c1_to_c2_k', builder, pysb_fit)
(c1_mean, c1_sd) = _mean_sd('c1_scaling', builder, pysb_fit)
(c2_mean, c2_sd) = _mean_sd('c2_scaling', builder, pysb_fit)
param_dict = {'c0_to_c1_k': (k1_mean, k1_sd),
'c1_scaling': (c1_mean, c1_sd),
'c1_to_c2_k': (k2_mean, k2_sd),
'c2_scaling': (c2_mean, c2_sd)}
fit_results.append(FitResult(builder, activator, nbd_site,
rep_index, dtype, param_dict,
nt, pysb_fit.ypred))
k1_means.append(k1_mean)
k2_means.append(k2_mean)
k1_sds.append(k1_sd)
k2_sds.append(k2_sd)
if do_plot:
plt.figure("Fitted k1/k2")
nreps = len(replicates)
K = (2 * nreps) + 1 # A shorthand constant
plt.bar(range(nbd_index*K, (nbd_index*K) + nreps), k1_means,
yerr=k1_sds, width=1, color='r', ecolor='k')
plt.bar(range(nbd_index*K+nreps, (nbd_index*K) + (2*nreps)), k2_means,
yerr=k2_sds, width=1, color='g', ecolor='k')
if do_plot:
num_sites = len(nbd_sites_filt)
plt.figure("Fitted k1/k2")
plt.ylabel('Fitted k1/k2 ($sec^{-1}$)')
ax = plt.gca()
ax.set_xticks(np.arange(3, 3 + num_sites * 7, 7))
ax.set_xticklabels(nbd_sites_filt)
return fit_results
def plot_nbd_error_estimates(df, nbd_sites, dtype='NBD', activators=None,
replicates=None, last_n_pts=50, fit_type='cubic',
plot=True, normalize_nbd=False):
# Filter out the WT residue, if present in the list
nbd_sites_filt = [s for s in nbd_sites if s != 'WT']
# Set some defaults
if activators is None:
activators = ['Bid', 'Bim']
if replicates is None:
replicates = (1, 2, 3)
for nbd_index, nbd_site in enumerate(nbd_sites_filt):
for act_ix, activator in enumerate(activators):
residuals_reps = []
for rep_index, rep_num in enumerate(replicates):
ny = df[activator, dtype, nbd_site, rep_num, 'VALUE'].values
# Normalize NBD to F/F0
if dtype == 'NBD' and normalize_nbd:
ny = ny / ny[0]
                # Get the error value and the fig (if no figure is desired,
                # fig will be None)
(residuals, fig) = cev.calc_err_var(ny, last_n_pts=last_n_pts,
fit_type=fit_type, plot=plot)
residuals_reps.append(residuals)
                # If we're plotting, we should have a figure object;
                # if not, fig should be None
                assert (bool(plot) == (fig is not None))
# Add title info to plot
nbd_err = np.std(residuals, ddof=1)
if plot:
fig.subplots_adjust(top=0.86)
fig.text(0.5, 0.95,
'Est. Error for %s, %s, %sC-Bax, rep %d: %f' %
(activator, dtype, nbd_site, rep_num, nbd_err),
verticalalignment='top',
horizontalalignment='center')
            # If there is more than one replicate, combine the residuals from
            # all replicates into one pooled set of residuals
if len(replicates) == 1:
continue
pooled_residuals = np.array(residuals_reps).flatten()
pooled_err = np.std(pooled_residuals, ddof=1)
if plot:
fig = plt.figure(figsize=(8, 5))
# Plot of fit and distribution of residuals
plt.subplot(1, 2, 1)
plt.hist(pooled_residuals)
plt.title('Histogram of pooled residuals')
plt.subplot(1, 2, 2)
scipy.stats.probplot(pooled_residuals, dist='norm', plot=plt)
plt.title('Quantile-quantile plot vs. normal')
plt.tight_layout()
plt.subplots_adjust(top=0.86)
fig.text(0.5, 0.95,
'Est. Error of %s for %s, %sC-Bax: %f' %
(activator, dtype, nbd_site, pooled_err),
verticalalignment='top',
horizontalalignment='center')
if __name__ == '__main__':
from bax_insertion.plots.bid_bim_fret_nbd_release.preprocess_data \
import df_pre, nbd_residues
#from bax_insertion.data.parse_bid_bim_fret_nbd_release import df, nbd_residues
plt.ion()
plot_nbd_endpoints(df_pre, nbd_residues, datatype='FRET', last_n_pts=10,
file_basename=None, normalize_nbd=False)
#import pickle
#with open('data2.pck', 'w') as f:
#pickle.dump((df, nbd_residues), f)
#with open('data2.pck') as f:
# (df, nbd_residues) = pickle.load(f)
#plot_release_endpoints(df, nbd_residues, normalized_to_wt=True,
# last_n_pts=3, file_basename=None)
#plot_initial_rate_samples(df, nbd_residues, timepoint_ix=20,
# file_basename=None, normalized_to_wt=True)
#plot_example_derivatives(df, 'Bid', '15', 1)
#plot_derivatives(df, ['WT'])
#plot_nbd_error_estimates(df, ['68'], last_n_pts=80, fit_type='cubic')
#plot_release_endpoints(df, nbd_residues, normalized_to_wt=True)
#plot_bid_vs_bim_release(df, nbd_residues)
#plot_nbd_error_estimates(df, ['54', '62', '68'])
#irs = calc_initial_rate_samples(df, nbd_residues, timepoint_ix=15)
#(r_slope_avgs, r_slope_stds) = irs.release_avg_std()
#(n_slope_avgs, n_slope_stds) = irs.nbd_avg_std()
#nbd_sites_filt = [s for s in nbd_residues if s != 'WT']
#plot_rank_changes(r_slope_avgs[0], r_slope_stds[0],
# r_slope_avgs[1], r_slope_stds[1], nbd_sites_filt)
#plot_rank_changes(n_slope_avgs[0], n_slope_stds[0],
# n_slope_avgs[1], n_slope_stds[1], nbd_sites_filt)
#(ra, rs) = irs.release_avg_std_wt_normalized()
#p_vals = student_t_test(ra[0], rs[0], ra[1], rs[1], 3)
#print p_vals < 0.1
```
#### File: bax_insertion/pt/generate_model_ensemble_fit_files.py
```python
import os
import sys
from copy import copy
from itertools import product, izip
import yaml
from bax_insertion.models import multiconf
def model_product(model_dict):
"""A function to take the cross product of all feature implementations and
return the set of dicts specifying one implementation for each feature."""
return [dict(zip(model_dict, x))
for x in product(*model_dict.values())]
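# For example (a sketch with illustrative feature names), a dict with two
# implementations of one feature and one of another expands to one dict per
# combination:
#
#     model_product({'activation': [1, 2], 'nbd': [2]})
#     # -> [{'activation': 1, 'nbd': 2}, {'activation': 2, 'nbd': 2}]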
def generate_files(args, basename):
# Get the dict specifying the model ensemble feature set
m = args['model']
# Get the set of dicts specifying each individual implementation
m_ensemble = model_product(m)
dependencies_list = []
for m in m_ensemble:
        # Multiconf model, supersedes any other model features
if 'multiconf' in m or 'multiconf_nbd_fret' in m:
multiconf_type = 'multiconf' if 'multiconf' in m \
else 'multiconf_nbd_fret'
# Is the NBD data normalized?
norm_data = m['normalized_nbd_data']
# Number of confs and whether the model is reversible
try:
num_confs = int(m[multiconf_type])
reversible = False
            # If it's not an int, assume it's a two-element list with a flag
            # specifying a reversible model, indicated by 'rev'
except TypeError:
num_confs = int(m[multiconf_type][0])
rev = m[multiconf_type][1]
if rev == 'rev':
reversible = True
else:
raise Exception('Unknown multiconf model flag %s' % rev)
# We don't need to build the model just to get the model name
if reversible:
model_name = '%dconfsrev' % num_confs
else:
model_name = '%dconfs' % num_confs
# Mechanistic model
else:
# If Bid doesn't get to the membrane, activation by bound Bid will
# never happen
if 'bidtranslocation' in m and 'activation' in m and \
m['bidtranslocation'] == 0 and m['activation'] == 2:
continue
# If activation is pseudo-first order (not Bid-dependent) don't use
# models where we waste steps on translocating Bid
if 'bidtranslocation' in m and 'activation' in m and \
m['activation'] == 1 and m['bidtranslocation'] != 0:
continue
# If we're monitoring NBD as resulting from a dimer, but
# dimerization doesn't happen, then we'll get nothing
if 'nbd' in m and 'dimerization' in m and \
(m['nbd'] == 2 or m['nbd'] == 3 or m['nbd'] == 4) and \
m['dimerization'] == 0:
continue
# If we're monitoring NBD as resulting from a tBid/Bax complex,
# then make sure that tBid/Bax complex can form
if 'nbd' in m and 'activation' in m and \
m['nbd'] == 4 and m['activation'] == 1:
continue
# If Bid FRET involves a complex of tBid with both mBax and iBax
# (implementation 2) then make sure we have three-step activation,
# which allows tBid to bind to iBax
if 'bidfret' in m and 'activation' in m and \
m['bidfret'] == 2 and not m['activation'] == 3:
continue
# It doesn't matter which builder is instantiated here because
# the build_model_from_dict function is in the superclass, core.
from bax_insertion.models import one_cpt
bd = one_cpt.Builder()
bd.build_model_from_dict(m)
model_name = bd.model.name
# Build up the new yaml dict by copying all fitting parameters over...
yaml_dict = copy(args)
# ...and then filling in the parameters specifying this particular model
yaml_dict['model'] = m
model_filename = '%s_%s' % (basename, model_name)
with open('%s.fit' % model_filename, 'w') as output_file:
output_file.write(yaml.dump(yaml_dict, default_flow_style=False))
dependencies_list.append(model_filename)
# Now write the file with the dependencies of the overall target on the
# list of .mcmc files
deps_filename = '%s.deps.txt' % basename
with open(deps_filename, 'w') as deps_file:
mcmc_file_list = ['%s.mcmc' % fname for fname in dependencies_list]
base_target = os.path.basename(basename) # Strip off the directory info
        deps_file.write('%s: ' % base_target)  # Write the target name
deps_file.write(' '.join(mcmc_file_list))
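# A minimal ensemble spec for this script might look like the following
# (a sketch; only the 'model' section is interpreted here, and every feature
# maps to a list of candidate implementations; any other top-level entries in
# the .yaml are copied verbatim into each generated .fit file):
#
#     model:
#       multiconf: [2, 3]
#       normalized_nbd_data: [True]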
if __name__ == '__main__':
if len(sys.argv) == 1:
print("Please specify the name of the .yaml file with the model "
"features to fit.")
sys.exit(1)
# Open the master .yaml file specifying the model ensemble
ens_filename = sys.argv[1]
with open(ens_filename) as ens_file:
args = yaml.load(ens_file)
# Get the basename for the created files
basedir = os.path.dirname(ens_filename)
filename = os.path.basename(ens_filename)
basename = filename.split('.')[0]
basename = os.path.join(basedir, basename)
generate_files(args, basename)
```
#### File: bax_insertion/util/emcee_fit.py
```python
import sys
import cPickle
import warnings
import numpy as np
from scipy.stats import pearsonr
from matplotlib import pyplot as plt
import emcee
from emcee.utils import MPIPool
from pysb.integrate import Solver
from bax_insertion.models import multiconf
def posterior(position, gf):
"""A generic log posterior function."""
post_val = prior(position, gf) + likelihood(position, gf)
return post_val
def negative_posterior(position, gf):
"""A generic negative log posterior function.
Use the negative log posterior when using a minimization (rather than
probability maximization) algorithm.
"""
post_val = prior(position, gf) + likelihood(position, gf)
print("posterior: %s" % post_val)
return -post_val
def prior(position, gf):
"""A generic prior function."""
prior_prob = 0
for i, prior in enumerate(gf.priors):
prior_prob += prior.pdf(position[i])
return -prior_prob
def likelihood(position, gf):
# A generic objective function
# The cumulative error over all timecourses
err = 0
obs_func = getattr(gf.builder, 'obs_func', None)
# Iterate over each condition (1st dimension of the data matrix)
for cond_ix in range(gf.data.shape[0]):
# Create a placeholder for a time offset, if there is one
timeoffset = None
# Set the parameters appropriately for the simulation:
# Iterate over the globally fit parameters
for g_ix, p in enumerate(gf.builder.global_params):
p.value = 10 ** position[g_ix]
if p.name == 'timeoffset':
timeoffset = 10 ** position[g_ix]
        # Iterate over the locally fit parameters
for l_ix, p in enumerate(gf.builder.local_params):
ix_offset = len(gf.builder.global_params) + \
cond_ix * len(gf.builder.local_params)
p.value = 10 ** position[l_ix + ix_offset]
# Now fill in the initial condition parameters
if gf.params is not None:
for p_name, values in gf.params.iteritems():
p = gf.builder.model.parameters[p_name]
p.value = values[cond_ix]
# Reset the timespan by adding one additional pt at the beginning
if timeoffset:
tspan = np.insert(gf.time, 0, -timeoffset)
else:
tspan = gf.time
# Now run the simulation
if callable(obs_func):
ydict = obs_func(tspan)
else:
gf.solver.tspan = tspan
gf.solver.run()
if gf.use_expr:
ydict = gf.solver.yexpr
else:
ydict = gf.solver.yobs
# Calculate the squared error over all the observables
for obs_ix, obs_name in enumerate(gf.obs_name):
# Get the observable timecourse from the dict/recarray
ysim = ydict[obs_name]
# If we're using a time offset, skip the first point (the offset)
# for the purposes of comparing to data
if timeoffset:
ysim = ysim[1:]
# If integrator fails to converge, the results will contain NaN
if np.any(np.isnan(ysim)):
err = -np.inf
continue
# Get the data slice we want
data = gf.data[cond_ix, obs_ix, :]
# Get the appropriate SD for this data slice
sigma = gf.data_sigma[cond_ix, obs_ix]
# Calculate the log-likelihood
loglkl = ((data - ysim) ** 2) / (2. * sigma ** 2)
# Filter out the NaNs...
filt_loglkl = loglkl[~np.isnan(loglkl)]
# Take the sum
err += -np.sum(filt_loglkl)
return err
class GlobalFit(object):
"""Fit of PySB model to a set of multiple timecourses, with a
mix of globally and locally fit parameters.
Parameters
----------
builder : pysb.builder.Builder
Builder containing the model to fit. Should contain an attribute
builder.global_params for the parameters that are to be fit globally.
time : np.array
The time vector.
data : Three-dimensional np.array
The experimental timecourses to fit. The first dimension corresponds
to the number of experimental conditions; the second dimension
corresponds to the number of observables (in a given timecourse set);
the third dimension corresponds to the timepoints.
data_sigma : np.array
Array of values with dimension corresponding to data indicating the
standard deviation of the data.
params : dict of lists, or None
The keys to the dict should be names of parameters in the PySB model
(e.g., initial conditions); each value should be a list containing
values for the parameter for each of the entries in the data list. The
length of each value in the dict should match the length of the data
list. If None, indicates that there are no local initial conditions.
    obs_name : list of strings
        The names of the model observables to compare against the data.
obs_type : string, "Expression" or "Observable"
Indicates whether the named expression/observable specified by
obs_name is to be found in the model's set of Expression objects
or Observable objects.
Attributes
----------
result : None or scipy.optimize.minimize fit result object
The result field is initialized to None and is assigned the results
of fitting after the :py:meth:`fit` method completes successfully.
use_expr : boolean
Based on the obs_type argument. True if the named observable is an
Expression, False if it is an Observable.
    priors : list of priors
        Prior objects for the estimated parameters, in the order used by the
        objective function: global parameters first, then the local
        parameters for each entry in the data array.
solver : pysb.integrate.Solver
A solver object used to run the model.
"""
def __init__(self, builder, time, data, data_sigma, params, obs_name,
obs_type='Expression'):
        # Check that the dimensions of everything that has been provided match
# Check that the time vector matches the 3rd dimension of the data
# vector
if len(time) != data.shape[2]:
raise ValueError("Length of time vector must match the length "
"of each data vector.")
        # Check that we don't have more than one condition but only one set
        # of initial conditions
if params is None and data.shape[0] != 1:
raise ValueError("There are no initial condition parameters but "
"there is more than one condition in the data "
"matrix.")
# Check that the number of initial conditions specified in the params
# dict matches the first dimension of the data matrix
if params is not None:
for p, vals in params.iteritems():
if not len(vals) == data.shape[0]:
raise ValueError("Each parameter in the params dict must "
"have an entry for each entry in the "
"data list.")
# Check that the number of observables matches the 2nd dimension of
# the data matrix
if len(obs_name) != data.shape[1]:
raise ValueError("The number of observables (%s) must match the "
"second dimension of the data matrix (%s)" %
(len(obs_name), data.shape[1]))
# Check that there is a sigma in the data_sigma matrix for every
# timecourse in data
        if data_sigma.shape[0] != data.shape[0] or \
           data_sigma.shape[1] != data.shape[1]:
raise ValueError("data_sigma must specify an error SD for every "
"timecourse in the data matrix.")
self.builder = builder
self.time = time
self.data = data
self.data_sigma = data_sigma
self.params = params
self.obs_name = obs_name
self.result = None
if obs_type == 'Expression':
self.use_expr = True
elif obs_type == 'Observable':
self.use_expr = False
else:
raise ValueError('obs_type must be Expression or Observable.')
if self.builder.model.parameters.get('timeoffset'):
use_time_offset = True
else:
use_time_offset = False
self.init_solver(use_time_offset=use_time_offset)
# Used to keep track of the number of steps run
self.nstep = 0
# Build up a list of priors corresponding to the global and local
# parameters
self.priors = []
# Iterate over the globally fit parameters
for g_ix, p in enumerate(self.builder.global_params):
try:
prior_index = self.builder.estimate_params.index(p)
self.priors.append(self.builder.priors[prior_index])
except ValueError:
raise ValueError(
'The parameter %s, in global_params, must also be '
'present in estimate_params.' % p.name)
# Iterate over the locally fit parameters
for data_ix, data in enumerate(self.data):
for l_ix, p in enumerate(self.builder.local_params):
try:
prior_index = self.builder.estimate_params.index(p)
self.priors.append(self.builder.priors[prior_index])
except ValueError:
raise ValueError(
'The parameter %s, in local_params, must also be '
                            'present in estimate_params.' % p.name)
def __getstate__(self):
# Clear solver since it causes problems with pickling
state = self.__dict__.copy()
if 'solver' in state:
del state['solver']
return state
def __setstate__(self, state):
# Re-init the solver which we didn't pickle
self.__dict__.update(state)
if self.builder.model.parameters.get('timeoffset'):
use_time_offset = True
else:
use_time_offset = False
self.init_solver(use_time_offset=use_time_offset)
def init_solver(self, use_time_offset=False):
"""Initialize solver from model and tspan."""
Solver._use_inline = True
# If we're using a time offset, note that it doesn't matter what value
# goes in here, since it will be filled in by the fitting.
if use_time_offset:
tspan = np.insert(self.time, 0, 0)
else:
tspan = self.time
self.solver = Solver(self.builder.model, tspan)
def plot_func_single(self, x, data_ix, ax=None, alpha=1.0):
x = 10 ** x
s = Solver(self.builder.model, self.time)
# Set the parameters appropriately for the simulation:
# Iterate over the globally fit parameters
for g_ix, p in enumerate(self.builder.global_params):
p.value = x[g_ix]
# Iterate over the locally fit parameters
for l_ix, p in enumerate(self.builder.local_params):
ix_offset = len(self.builder.global_params) + \
data_ix * len(self.builder.local_params)
p.value = x[l_ix + ix_offset]
# Now fill in the initial condition parameters
for p_name, values in self.params.iteritems():
p = self.builder.model.parameters[p_name]
p.value = values[data_ix]
# Now run the simulation
s.run()
# Plot the observable
if ax is None:
ax = plt.gca()
if self.use_expr:
ax.plot(self.time, s.yexpr[self.obs_name], color='r',
alpha=alpha)
else:
ax.plot(self.time, s.yobs[self.obs_name], color='r',
alpha=alpha)
def plot_func(self, x, ax=None, obs_ix=0, plot_args=None,
normalize_to_f0=False):
"""Plots the timecourses with the parameter values given by x.
Parameters
----------
x : np.array or list
            The parameters to use for the plot, in log10 space. These should be
in the same order used by the objective function: globally fit
parameters first, then a set of local parameters for each of the
timecourses being fit.
normalize_to_f0: boolean
If True, divides the observable trajectory by the initial value
before plotting. Defaults to False.
"""
if plot_args is None:
plot_args = {}
if ax is None:
ax = plt.gca()
# Iterate over each entry in the data array
for cond_ix in range(self.data.shape[0]):
self.set_parameters(x, obs_ix=obs_ix, cond_ix=cond_ix)
# Now run the simulation
self.solver.run()
# Plot the observable
obs_colors = ['r', 'g', 'b', 'k']
obs_name = self.obs_name[obs_ix]
# Get from either yexpr or yobs
if self.use_expr:
yplot = self.solver.yexpr[obs_name]
else:
yplot = self.solver.yobs[obs_name]
# Normalize the curve if necessary
if normalize_to_f0:
yplot = yplot / float(yplot[0])
# Plot
ax.plot(self.solver.tspan, yplot, **plot_args)
def set_parameters(self, x, obs_ix=0, cond_ix=0, plot_args=None):
"""Sets the parameter values in the model for simulation.
Parameters
----------
x : np.array or list
            The parameters to use, in log10 space. These should be
in the same order used by the objective function: globally fit
parameters first, then a set of local parameters for each of the
timecourses being fit.
"""
x = 10 ** x
timeoffset = None
# Set the parameters appropriately for the simulation:
# Iterate over the globally fit parameters
for g_ix, p in enumerate(self.builder.global_params):
p.value = x[g_ix]
if p.name == 'timeoffset':
timeoffset = x[g_ix]
# Iterate over the locally fit parameters
for l_ix, p in enumerate(self.builder.local_params):
ix_offset = len(self.builder.global_params) + \
cond_ix * len(self.builder.local_params)
p.value = x[l_ix + ix_offset]
# Now fill in the initial condition parameters
if self.params is not None:
for p_name, values in self.params.iteritems():
p = self.builder.model.parameters[p_name]
p.value = values[cond_ix]
# Fill in the time offset, if there is one
if timeoffset:
self.solver.tspan = np.insert(self.time, 0, -timeoffset)
def get_residuals(self, x, obs_ix=0, plot_args=None):
"""Gets the residuals with the parameter values given by x.
Parameters
----------
x : np.array or list
The parameters to use for the simulation, in log10 space.
These should be in the same order used by the objective function:
globally fit parameters first, then a set of local parameters for
each of the timecourses being fit.
"""
if plot_args is None:
plot_args = {}
num_conditions = self.data.shape[0]
num_timepoints = self.data.shape[2]
residuals = np.zeros((num_conditions, num_timepoints))
for cond_ix in range(num_conditions):
self.set_parameters(x, obs_ix=obs_ix, cond_ix=cond_ix)
# Run the simulation
self.solver.run()
# Get the simulated timecourse
obs_name = self.obs_name[obs_ix]
if self.use_expr:
y = self.solver.yexpr[obs_name]
else:
y = self.solver.yobs[obs_name]
# Calculate the residuals
data = self.data[cond_ix, obs_ix, :]
residuals[cond_ix, :] = data - y
return residuals
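# Construction sketch for GlobalFit (shapes and names are illustrative): for
# two experimental conditions, one observable ('NBD'), and T timepoints, the
# inputs would be laid out roughly as
#
#     time       : np.array of shape (T,)
#     data       : np.array of shape (2, 1, T)
#     data_sigma : np.array of shape (2, 1)
#     params     : {'Bax_0': [50., 100.]}  # hypothetical, one value per condition
#     gf = GlobalFit(builder, time, data, data_sigma, params, ['NBD'],
#                    obs_type='Expression')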
def ens_sample(gf, nwalkers, burn_steps, sample_steps, threads=1,
pos=None, random_state=None):
"""Samples from the posterior function using emcee.EnsembleSampler.
The EnsembleSampler containing the chain is stored in gf.sampler.
Note that parameters are log10-transformed during fitting, so the
parameter values returned from the walk must be exponentiated to get
them back to untransformed values (e.g., 10 ** gf.sampler.flatchain)
Parameters
----------
gf : emcee_fit.GlobalFit
GlobalFit object containing the timepoints, data, builder object,
Solver, etc.
nwalkers : int
Number of walkers to use in the emcee sampler.
burn_steps : int
Number of burn-in steps.
sample_steps : int
Number of sampling steps.
threads : int
Number of threads to use for parallelization. Default is 1.
pos : numpy.array
Matrix of initial positions for the chain. If None (default) random
positions are chosen from the prior. Assigning a position allows
previously run chains to be extended.
random_state : random state for Mersenne Twister PRNG
The random state to use to initialize the sampler's pseudo-random
number generator. Can be used to continue runs from previous ones.
"""
# Initialize the parameter array with initial values (in log10 units)
# Number of parameters to estimate
ndim = (len(gf.builder.global_params) +
(len(gf.data) * len(gf.builder.local_params)))
# Initialize the walkers with starting positions drawn from the priors
# Note that the priors are in log10 scale already, so they don't
# need to be transformed here
if pos is None:
p0 = np.zeros((nwalkers, ndim))
for walk_ix in range(nwalkers):
for p_ix in range(ndim):
p0[walk_ix, p_ix] = gf.priors[p_ix].random()
else:
p0 = pos
# Create the sampler object
sampler = emcee.EnsembleSampler(nwalkers, ndim, posterior,
args=[gf],
threads=threads)
if random_state is not None:
sampler.random_state = random_state
print "Burn in sampling..."
pos, prob, state = sampler.run_mcmc(p0, burn_steps, storechain=False)
sampler.reset()
print "Main sampling..."
sampler.run_mcmc(pos, sample_steps)
print "Done sampling."
return sampler
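# Sampling sketch (settings are illustrative): given a GlobalFit object gf,
#
#     sampler = ens_sample(gf, nwalkers=100, burn_steps=200, sample_steps=500)
#     samples = 10 ** sampler.flatchain  # back-transform from log10 space
#
# The back-transform follows from the log10 parameterization noted in the
# docstring above.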
def ens_mpi_sample(gf, nwalkers, burn_steps, sample_steps, pos=None,
random_state=None):
pool = MPIPool(loadbalance=True)
if not pool.is_master():
pool.wait()
sys.exit(0)
# Initialize the parameter array with initial values (in log10 units)
# Number of parameters to estimate
ndim = (len(gf.builder.global_params) +
(len(gf.data) * len(gf.builder.local_params)))
# Initialize the walkers with starting positions drawn from the priors
# Note that the priors are in log10 scale already, so they don't
# need to be transformed here
if pos is None:
p0 = np.zeros((nwalkers, ndim))
for walk_ix in range(nwalkers):
for p_ix in range(ndim):
p0[walk_ix, p_ix] = gf.priors[p_ix].random()
else:
p0 = pos
# Create the sampler object
sampler = emcee.EnsembleSampler(nwalkers, ndim, posterior,
args=[gf], pool=pool)
if random_state is not None:
sampler.random_state = random_state
print "Burn in sampling..."
pos, prob, state = sampler.run_mcmc(p0, burn_steps, storechain=False)
sampler.reset()
print "Main sampling..."
sampler.run_mcmc(pos, sample_steps)
# Close the pool!
pool.close()
print "Done sampling."
return sampler
def pt_mpi_sample(gf, ntemps, nwalkers, burn_steps, sample_steps, thin=1,
pool=None, betas=None, pos=None, random_state=None,
pos_filename=None, convergence_interval=50):
pool = MPIPool(loadbalance=True)
if not pool.is_master():
pool.wait()
sys.exit(0)
return pt_sample(gf, ntemps, nwalkers, burn_steps, sample_steps,
thin=thin, pool=pool, betas=betas, pos=pos,
random_state=random_state, pos_filename=pos_filename,
convergence_interval=convergence_interval)
def pt_sample(gf, ntemps, nwalkers, burn_steps, sample_steps, thin=1,
pool=None, betas=None, pos=None, random_state=None,
pos_filename=None, convergence_interval=50):
"""Samples from the posterior function.
The emcee sampler containing the chain is stored in gf.sampler.
Note that parameters are log10-transformed during fitting, so the
parameter values returned from the walk must be exponentiated to get
them back to untransformed values (e.g., 10 ** gf.sampler.flatchain)
Parameters
----------
ntemps : int
        The number of temperatures to use in the temperature ladder.
nwalkers : int
Number of walkers to use in the emcee sampler.
burn_steps : int
Number of burn-in steps.
sample_steps : int
Number of sampling steps.
thin : int
Thinning interval; saves only every thin number of steps. Default
is 1 (saves all steps, no thinning).
pool : pool object
Pool object for parallelization. Can be instance of emcee.utils.MPIPool
or multiprocessing.pool (or other pool-like object implementing map
method).
betas : np.array
Array containing the values to use for beta = 1/temperature.
pos : numpy.array
Matrix of initial positions for the chain. If None (default) random
positions are chosen from the prior. Assigning a position allows
previously run chains to be extended.
random_state : random state for Mersenne Twister PRNG
The random state to use to initialize the sampler's pseudo-random
number generator. Can be used to continue runs from previous ones.
"""
if pos_filename is None:
warnings.warn('pos_filename was not specified, will not be able to '
'save intermediate burn-in positions.')
# Initialize the parameter array with initial values (in log10 units)
# Number of parameters to estimate
ndim = (len(gf.builder.global_params) +
(len(gf.data) * len(gf.builder.local_params)))
# Initialize the walkers with starting positions drawn from the priors
# Note that the priors are in log10 scale already, so they don't
# need to be transformed here
if pos is None:
p0 = np.zeros((ntemps, nwalkers, ndim))
for temp_ix in range(ntemps):
for walk_ix in range(nwalkers):
for p_ix in range(ndim):
p0[temp_ix, walk_ix, p_ix] = gf.priors[p_ix].random()
else:
        print "Initial position: shape %s, ntemps %d, nwalkers %d, ndim %d" % \
              (str(pos.shape), ntemps, nwalkers, ndim)
if pos.shape[-1] != ndim:
raise ValueError('The position file must contain %s ndim '
                             'parameters, not %s' % (ndim, pos.shape[-1]))
if pos.shape == (ntemps, nwalkers, ndim):
print("Setting initial position to contents of file.")
p0 = pos
else:
print("Setting initial position to low temp samples.")
p0 = np.zeros((ntemps, nwalkers, ndim))
pos_nwalkers = pos.shape[1]
pos_samples = pos[0]
for temp_ix in range(ntemps):
# Generate a set of random indices
walker_indices = np.random.randint(0, pos_nwalkers,
size=nwalkers)
for walk_ix in range(nwalkers):
p0[temp_ix, walk_ix, :] = \
pos_samples[walker_indices[walk_ix], :]
# Create the sampler
sampler = emcee.PTSampler(ntemps, nwalkers, ndim, likelihood, prior,
loglargs=[gf], logpargs=[gf], pool=pool,
betas=betas)
# The PTSampler is implemented as a generator, so it is called in a for
# loop
# If we're not doing any burn-in, jump straight to sampling
if burn_steps == 0:
print "Main sampling..."
nstep = 0
for p, lnprob, lnlike in sampler.sample(p0, iterations=sample_steps,
thin=thin):
if nstep % 10 == 0:
print "nstep %d of %d, MAP: %f" % (nstep, sample_steps,
np.max(lnprob[0]))
            nstep += 1
# Otherwise, do the burn-in first
else:
print "Burn in sampling..."
nstep = 0
done = False
last_ti = None
print_interval = 1
cur_start_position = p0
abs_tol = 3.0 # The maximum allowable difference for convergence
rel_tol = 0.1 # The fraction of the err allowable for convergence
# Run the chain for rounds of convergence_interval steps; at the end
# of each round, check for convergence. If converged, go on to main
# sampling. If not, reinitialize sampler and run again. Running the
# sampler in small rounds like this reduces the amount of memory
# needed to just enough to store the chain for 1 round.
while not done:
if (burn_steps - nstep) < convergence_interval:
num_iterations = burn_steps - nstep
else:
num_iterations = convergence_interval
# Don't run again if we've already run more than the prescribed
# number of burn in steps
if num_iterations <= 0:
                break
for p, lnprob, lnlike in sampler.sample(cur_start_position,
iterations=num_iterations, storechain=True):
# Increase the step counter by 1 since by the time we've gotten
# here we've run an iteration of the sampler
nstep += 1
if nstep % print_interval == 0:
print("nstep %d of %d, MAP: %f, mean post %f" %
(nstep, burn_steps, np.max(lnprob[0]),
np.mean(lnprob[0])))
print sampler.tswap_acceptance_fraction
# Save the current position
if pos_filename is not None:
with open(pos_filename, 'w') as f:
rs = np.random.get_state()
cPickle.dump((p, rs), f)
nstep += 1
# If this is our first time checking convergence, set the TI
# value and continue
if last_ti is None:
(last_ti, last_ti_err) = \
sampler.thermodynamic_integration_log_evidence()
print "-- Initial TI value: %f, %f" % (last_ti, last_ti_err)
else:
(cur_ti, cur_ti_err) = \
sampler.thermodynamic_integration_log_evidence()
diff = np.abs(last_ti - cur_ti)
print("-- Last: %f, %f Current: %f Diff: %f" %
(last_ti, last_ti_err, cur_ti, diff))
# Check for convergence
if diff < abs_tol and cur_ti_err < abs_tol and \
last_ti_err < abs_tol and \
diff < (last_ti_err * rel_tol) and \
check_convergence_corr(sampler, 0, None,
pval_threshold=0.001):
print "-- Converged!"
done = True
else:
last_ti = cur_ti
last_ti_err = cur_ti_err
# Reset the initial position to our last position
cur_start_position = p
# Reset the sampler
sampler.reset()
print "Main sampling..."
nstep = 0
for p, lnprob, lnlike in sampler.sample(p, lnprob0=lnprob,
lnlike0=lnlike,
iterations=sample_steps, thin=thin):
# Increase the step counter by 1 since by the time we've gotten here
# we've run an iteration of the sampler
nstep += 1
if nstep % 5 == 0:
print "nstep %d of %d, MAP: %f, mean post %f" % \
                  (nstep, sample_steps, np.max(lnprob[0]), np.mean(lnprob[0]))
print sampler.tswap_acceptance_fraction
# Save the current position
if pos_filename is not None:
with open(pos_filename, 'w') as f:
rs = np.random.get_state()
cPickle.dump((p, rs), f)
(final_ti, final_ti_err) = \
sampler.thermodynamic_integration_log_evidence()
print("-- Final TI: %f, %f --" % (final_ti, final_ti_err))
# Close the pool!
if pool is not None:
pool.close()
print "Done sampling."
return sampler
def geweke_convergence(sampler, burn_frac=0.1, sample_frac=0.5,
p_threshold=0.05):
# Define a few useful numbers
ntemps = sampler.chain.shape[0]
num_steps = sampler.chain.shape[2]
num_params = sampler.chain.shape[3]
burn_ubound = int(burn_frac * num_steps)
nburn = burn_ubound - 1
sample_lbound = int(sample_frac * num_steps)
nsample = num_steps - sample_lbound
converged = True # We've converged until we prove otherwise
# Calculation of the test statistic
def T_func(xm, xvar, nx, ym, yvar, ny):
T = (xm - ym) / np.sqrt(xvar / float(nx) + yvar / float(ny))
return np.abs(T)
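    # T_func computes the Geweke-style two-sample statistic
    #     |T| = |mean(x) - mean(y)| / sqrt(var(x)/n_x + var(y)/n_y),
    # used below to compare the burn-in segment against the sampling segment.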
# Iterate over all temperatures
for temp_ix in range(0, ntemps):
print "Temp %d" % temp_ix
# Iterate over all parameters
for p_ix in range(num_params):
# Get burn-in and sample steps
burn_steps = sampler.chain[temp_ix, :, 0:burn_ubound, p_ix]
sample_steps = sampler.chain[temp_ix, :, sample_lbound:, p_ix]
# Calculate means and variances
burn_mean = np.mean(burn_steps)
sample_mean = np.mean(sample_steps)
burn_var = np.var(burn_steps)
sample_var = np.var(sample_steps)
T = T_func(sample_mean, sample_var, nsample,
burn_mean, burn_var, nburn)
if T < p_threshold:
plt.ion()
plt.figure()
plt.plot(sampler.chain[temp_ix, :, :, p_ix].T, alpha=0.1)
plt.plot(np.mean(sampler.chain[temp_ix, :, :, p_ix], axis=0))
print("T = %f: not converged!" % T)
converged = False
else:
print("T = %f" % T)
# Now, check for convergence of product
# g = (x_i - X)(y_i - Y)
for q_ix in range(p_ix):
print "param %d by %d" % (p_ix, q_ix)
p_steps = sampler.chain[temp_ix, :, :, p_ix]
q_steps = sampler.chain[temp_ix, :, :, q_ix]
g_steps = (p_steps - np.mean(p_steps)) * \
(q_steps - np.mean(q_steps))
g_burn_steps = g_steps[:, 0:burn_ubound]
g_sample_steps = g_steps[:, sample_lbound:]
g_burn_mean = np.mean(g_burn_steps)
g_sample_mean = np.mean(g_sample_steps)
g_burn_var = np.var(g_burn_steps)
g_sample_var = np.var(g_sample_steps)
T = T_func(g_sample_mean, g_sample_var, nsample,
g_burn_mean, g_burn_var, nburn)
if T < p_threshold:
plt.ion()
plt.figure()
plt.plot(sampler.chain[temp_ix, :, :, p_ix].T, alpha=0.1)
plt.plot(np.mean(sampler.chain[temp_ix, :, :, p_ix],
axis=0))
print("T = %f: not converged!" % T)
converged = False
else:
print("T = %f" % T)
return converged
def check_convergence_corr(sampler, start_step, end_step, pval_threshold=0.2):
    # Can't do the check if the sampler hasn't stored any log-probabilities yet
if sampler.lnprobability is None:
print "sampler.lnprobability is None."
return
lnpost = sampler.lnprobability[:, :, start_step:end_step]
ntemps = lnpost.shape[0]
nwalkers = lnpost.shape[1]
nsteps = lnpost.shape[2]
step_indices = np.repeat(np.arange(0, nsteps), nwalkers)
pass_arr = np.zeros(ntemps)
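    # A temperature passes the check when the correlation between step index
    # and pooled log-posterior is negative or not significant (p > threshold),
    # i.e. no detectable upward trend in the posterior remains.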
for temp_ix in range(0, ntemps):
pts = lnpost[temp_ix].flatten(order='F')
(rval, pval) = pearsonr(step_indices, pts)
print "Temp %d: r = %f, p = %f" % (temp_ix, rval, pval)
if rval < 0 or pval > pval_threshold:
pass_arr[temp_ix] = True
passes = np.all(pass_arr)
print "Passes: ", passes
return np.all(pass_arr)
def global_fit_from_args(args):
### DATA
# Import the module containing the data
data_args = args['data']
__import__(data_args['module'])
data_module = sys.modules[data_args['module']]
# Get the relevant variables from the module containing the data
data_var = data_module.__dict__[data_args['data_var']]
data_sigma_var = data_module.__dict__[data_args['data_sigma_var']]
time_var = data_module.__dict__[data_args['time_var']]
# Get the name of the variable containing the initial conditions vector,
# which may not exist
ic_var_name = data_args['initial_condition_var']
if ic_var_name is None:
ic_var = None
else:
ic_var = data_module.__dict__[ic_var_name]
### MODEL
# Call the appropriate model-building macro
# If there is a multiconf attribute, that trumps any other attribute
# and determines that this is a multiconf model
if 'scaling_prior_type' in args['model']:
spt = args['model']['scaling_prior_type']
if spt not in ['normal', 'linear']:
raise ValueError('scaling_prior_type must be one of normal, '
'linear')
else:
spt = 'linear'
if 'multiconf' in args['model'] or 'multiconf_nbd_fret' in args['model']:
# Determine the multiconf type
multiconf_type = 'multiconf' if 'multiconf' in args['model'] \
else 'multiconf_nbd_fret'
bd = multiconf.Builder()
# Number of confs and whether the model is reversible
try:
num_confs = int(args['model'][multiconf_type])
reversible = False
# If it's not an int, assume it's two-element list with a flag
# specifying a reversible model, indicated by 'rev'
except TypeError:
num_confs = int(args['model'][multiconf_type][0])
rev = args['model'][multiconf_type][1]
if rev == 'rev':
reversible = True
else:
raise Exception('Unknown multiconf model flag %s' % rev)
norm_data = args['model']['normalized_nbd_data']
# Try to get the parameters for the bounds of the data
# (may not be present if the data is normalized)
try:
nbd_ubound = data_module.__dict__[data_args['nbd_ubound']]
except KeyError:
nbd_ubound = None
try:
nbd_lbound = data_module.__dict__[data_args['nbd_lbound']]
except KeyError:
nbd_lbound = None
try:
nbd_f0 = data_module.__dict__[data_args['nbd_f0']]
except KeyError:
nbd_f0 = 1.
if multiconf_type == 'multiconf':
bd.build_model_multiconf(num_confs, nbd_f0, nbd_lbound, nbd_ubound,
normalized_data=norm_data,
reversible=reversible,
scaling_prior_type=spt)
elif multiconf_type == 'multiconf_nbd_fret':
bd.build_model_multiconf_nbd_fret(num_confs, nbd_f0, nbd_lbound,
nbd_ubound,
normalized_data=norm_data,
reversible=reversible,
scaling_prior_type=spt)
# Check the builder: one_cpt or lipo_sites
#elif 'builder' in args['model'] and \
# args['model']['builder'] == 'lipo_sites':
# bd = lipo_sites.Builder()
# bd.build_model_from_dict(args['model'])
# If the builder is one_cpt or is not specified
else:
assert False
bd = one_cpt.Builder()
bd.build_model_from_dict(args['model'])
# Set the initial conditions
for ic_name, ic_value in args['global_initial_conditions'].iteritems():
bd.model.parameters[ic_name].value = ic_value
### PARAMETERS TO FIT
if args['global_params'] == 'all':
bd.global_params = bd.estimate_params
bd.local_params = []
else:
bd.global_params = [bd.model.parameters[p_name]
for p_name in args['global_params']]
bd.local_params = [bd.model.parameters[p_name]
for p_name in args['local_params']]
local_ic_name = args['local_initial_condition']
if ic_var is None or local_ic_name is None:
params = None
else:
params = {local_ic_name: ic_var}
# Create the global fit instance
gf = GlobalFit(bd, time_var, data_var, data_sigma_var, params,
args['model_observable'])
return gf
```
#### File: bax_insertion/util/error_propagation.py
```python
import numpy as np
def calc_ratio_sd(numer_mean, numer_sd, denom_mean, denom_sd,
num_samples=10000):
"""Calculates the variance of a ratio of two normal distributions with
the given means and standard deviations."""
numer_samples = numer_mean + (numer_sd * np.random.randn(num_samples))
denom_samples = denom_mean + (denom_sd * np.random.randn(num_samples))
ratio_samples = numer_samples / denom_samples
return np.std(ratio_samples)
def calc_ratio_mean_sd(numer_mean, numer_sd, denom_mean, denom_sd,
num_samples=10000):
"""Calculates the variance of a ratio of two normal distributions with
the given means and standard deviations."""
# If we're dealing with a numpy array:
if isinstance(numer_mean, np.ndarray) and \
isinstance(denom_mean, np.ndarray) and \
isinstance(numer_sd, np.ndarray) and \
isinstance(denom_sd, np.ndarray):
num_pts = numer_mean.shape[0]
numer_samples = numer_mean + (numer_sd *
np.random.randn(num_samples, num_pts))
denom_samples = denom_mean + (denom_sd *
np.random.randn(num_samples, num_pts))
# Otherwise, assume we're dealing with a number
else:
numer_samples = numer_mean + (numer_sd *
np.random.randn(num_samples))
denom_samples = denom_mean + (denom_sd *
np.random.randn(num_samples))
ratio_samples = numer_samples / denom_samples
return (np.mean(ratio_samples, axis=0), np.std(ratio_samples, axis=0))
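# Illustrative usage sketch (the numbers are arbitrary placeholders):
#     mean, sd = calc_ratio_mean_sd(2.0, 0.1, 4.0, 0.2)
#     # mean is close to 2.0 / 4.0 = 0.5; sd is the Monte Carlo estimate of
#     # the propagated uncertainty in the ratio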
``` |
{
"source": "johnbachman/bayessb",
"score": 3
} |
#### File: bayessb/bayessb/parallel_tempering.py
```python
from bayessb import MCMC, MCMCOpts
import numpy as np
import math
class PT_MCMC(object):
"""Implementation of parallel tempering algorithm.
    See Geyer, "Markov Chain Monte Carlo Maximum Likelihood", 1991.
In this algorithm, a series of chains are run at different temperatures in
parallel. They execute normally (following the Metropolis algorithm) for n
steps (defined by the parameter ``swap_period``) and then a swap is
proposed between two chains that neighbor each other in temperature. If it
is accepted, each chain adopts the position of the other. Since the swap is
symmetric, detailed balance and ergodicity is maintained (when one
considers that the parameter space is now the Cartesian product of the
parameter spaces for all chains).
In this implementation, a :py:class:`PT_MCMC` object is used to contain all
of the chains (as instances of :py:class:`bayessb.MCMC`) and manage their
execution, performing swaps when appropriate.
Temperatures are given as minimum and maximum values; the intermediate
values are interpolated on a log scale. So if ``min_temp`` is 1, and
``max_temp`` is 100, with three temperatures, the temperatures used are 1,
10, 100.
Parameters
----------
opts: bayessb.MCMCOpts
Used to initialize all of the :py:class:`bayessb.MCMC` chains in the
temperature series.
num_chains : int
The number of chains/temperatures to run. Too many temperatures will
make swapping inefficient; too few temperatures will make swaps
unlikely to be accepted.
max_temp : number
The highest temperature in the series.
min_temp : number
The lowest temperature in the series. Should usually be 1 (the default).
swap_period : int
Number of steps of "regular" (Metropolis) MCMC to perform for each
chain before proposing a swap.
"""
def __init__(self, opts, num_chains, max_temp, min_temp=1, swap_period=20):
self.options = opts
self.max_temp = max_temp
self.min_temp = min_temp
self.swap_period = swap_period
self.iter = 0
"""Current step iteration (runs to ``nsteps``)."""
self.chains = []
"""The set of chains in the temperature series."""
# Calculate the temperature series
temps = np.logspace(np.log10(min_temp), np.log10(max_temp),
num_chains)
# Initialize each of the chains
for i, temp in enumerate(temps):
chain = MCMC(opts)
chain.options.T_init = temp
chain.initialize()
chain.iter = chain.start_iter
self.chains.append(chain)
# Initialize arrays for storing swap info
num_swaps = self.options.nsteps / swap_period
self.swap_proposals = np.zeros((num_swaps, 2))
"""Each swap proposal is stored as [i, j] row in this array."""
self.pi_xi = np.zeros(num_swaps)
"""The posterior of chain i at the position of i."""
self.pi_xj = np.zeros(num_swaps)
"""The posterior of chain i at the position of j."""
self.pj_xi = np.zeros(num_swaps)
"""The posterior of chain j at the position of i."""
self.pj_xj = np.zeros(num_swaps)
"""The posterior of chain j at the position of j."""
self.delta_test_posteriors = np.zeros(num_swaps)
"""The posterior probability ratio for swap/noswap."""
self.swap_alphas = np.zeros(num_swaps)
"""The random number used to accept/reject the swap."""
self.swap_accepts = np.zeros(num_swaps, dtype=bool)
"""Booleans indicating accepted swaps."""
self.swap_rejects = np.zeros(num_swaps, dtype=bool)
"""Booleans indicating rejected swaps."""
self.swap_iter = 0
"""Current swap iteration (runs to ``nsteps / swap_period``)"""
def estimate(self):
"""Parallel tempering MCMC algorithm (see Geyer, 1991)."""
while self.iter < self.options.nsteps:
# Check if it's time to propose a swap
swap_index = None
if self.iter >= self.swap_period and \
self.iter % self.swap_period == 0:
swap_index = self.propose_swap()
# Perform Metropolis step for each chain
for i, chain in enumerate(self.chains):
# If we have swapped (or tried to swap) chains, skip the
# Metropolis step for the swapped chains for this round
if swap_index is not None and \
(i == swap_index or i == swap_index+1):
continue
else:
self.metropolis_step(chain)
# Call user-callback step function on the first chain in the series
# to track execution
if self.chains[0].options.step_fn:
self.chains[0].options.step_fn(self.chains[0])
self.iter += 1
def propose_swap(self):
"""Performs the temperature-swapping step of the PT algorithm.
Returns
-------
        The index of the lower-numbered of the two chains involved in the
        proposed swap, whether or not the swap was accepted.
"""
# Idea is to pick two neighboring chains to swap, so we
# randomly pick the one with the lower index
i = np.random.randint(len(self.chains)-1)
j = i+1
# First, we introduce a slight shorthand
chain_i = self.chains[i]
chain_j = self.chains[j]
# We treat the swap as just another way of making a jump proposal.
chain_i.test_position = chain_j.position
chain_j.test_position = chain_i.position
(chain_i.test_posterior, chain_i.test_prior, chain_i.test_likelihood) =\
chain_i.calculate_posterior(position=chain_j.position)
(chain_j.test_posterior, chain_j.test_prior, chain_j.test_likelihood) =\
chain_j.calculate_posterior(position=chain_i.position)
# We determine the "net" change in posterior for the two chains
# in the swapped position vs. the unswapped position.
# Since calculations are in logspace, the ratio becomes a difference:
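        # i.e., the Metropolis swap ratio
        #     [p_i(x_j) * p_j(x_i)] / [p_i(x_i) * p_j(x_j)]
        # is evaluated as the difference (pi_xj + pj_xi) - (pi_xi + pj_xj).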
pi_xi = chain_i.accept_posterior
pj_xj = chain_j.accept_posterior
pi_xj = chain_i.test_posterior
pj_xi = chain_j.test_posterior
delta_posterior = (pi_xj + pj_xi) - (pi_xi + pj_xj)
# Decide whether to accept the swap: similar to Metropolis
# If the swap is an increase in probability, do it.
if delta_posterior < 0:
self.accept_swap(i, j)
# Otherwise, choose a random sample to decide whether to swap:
else:
swap_alpha = self.chains[0].random.rand()
self.swap_alphas[self.swap_iter] = swap_alpha
# Accept the swap
if math.e ** -delta_posterior > swap_alpha:
self.accept_swap(i, j)
# Reject the swap
else:
#print "Reject %d, %d" % (i, j)
self.swap_rejects[self.swap_iter] = 1
chain_i.reject_move()
chain_j.reject_move()
# Log some interesting variables
for chain in (chain_i, chain_j):
chain.positions[chain.iter,:] = chain.position #chain.test_position
chain.priors[chain.iter] = chain.accept_prior
chain.likelihoods[chain.iter] = chain.accept_likelihood
chain.posteriors[chain.iter] = chain.accept_posterior
chain.delta_test_posteriors[chain.iter] = delta_posterior
chain.sigmas[chain.iter] = chain.sig_value
chain.ts[chain.iter] = chain.T
# Log some more interesting info just about the swaps.
# Ultimately, it could be possible to extract most of this information
# from the chains themselves, but this is certainly easier.
self.swap_proposals[self.swap_iter,:] = (i, j)
self.pi_xi[self.swap_iter] = pi_xi
self.pi_xj[self.swap_iter] = pi_xj
self.pj_xi[self.swap_iter] = pj_xi
self.pj_xj[self.swap_iter] = pj_xj
        self.delta_test_posteriors[self.swap_iter] = delta_posterior
# Increment the swap count
self.swap_iter += 1
chain_i.iter += 1
chain_j.iter += 1
return i
def accept_swap(self, i, j):
"""Do necessary bookkeeping for accepting the proposed swap."""
#print "Accept %d, %d" % (i, j)
self.chains[i].accept_move()
self.chains[j].accept_move()
self.swap_accepts[self.swap_iter] = 1
def metropolis_step(self, chain):
"""Perform a Metropolis update step on the given chain."""
# Get a new position
chain.test_position = chain.generate_new_position()
# Choose test position and calculate posterior there
(chain.test_posterior, chain.test_prior, chain.test_likelihood)\
= chain.calculate_posterior(chain.test_position)
# Decide whether to accept the step
chain.delta_posterior = chain.test_posterior - chain.accept_posterior
if chain.delta_posterior < 0:
chain.accept_move()
else:
alpha = chain.random.rand()
chain.alphas[chain.iter] = alpha; # log the alpha value
if math.e ** -chain.delta_posterior > alpha:
chain.accept_move()
else:
chain.reject_move()
# Log some interesting variables
chain.positions[chain.iter,:] = chain.position #chain.test_position
chain.priors[chain.iter] = chain.accept_prior
chain.likelihoods[chain.iter] = chain.accept_likelihood
chain.posteriors[chain.iter] = chain.accept_posterior
chain.delta_test_posteriors[chain.iter] = chain.delta_posterior
chain.sigmas[chain.iter] = chain.sig_value
chain.ts[chain.iter] = chain.T
chain.iter += 1
```
#### File: bayessb/report/__init__.py
```python
from texttable import Texttable
import TableFactory as tf
from inspect import ismodule
from bayessb.multichain import MCMCSet
import cPickle
import inspect
import scipy.cluster.hierarchy
from matplotlib import pyplot as plt
from matplotlib import cm
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
import numpy as np
from StringIO import StringIO
from collections import OrderedDict
reporter_dict = {}
class Report(object):
""".. todo:: document this class """
def __init__(self, chain_filenames, reporters, names=None, burn=0):
"""Create the Report object and run all reporter functions.
Parameters
----------
chain_filenames : OrderedDict of lists of MCMC filenames.
The keys in the dict are the names of the groups of chains, sorted
by name. These should ideally be descriptive abbreviations,
indicating the type of model, number of steps run in each chain,
etc. The entries in the dict are lists of filenames of pickled
MCMC objects, representing completed MCMC estimation runs for the
given model/data.
reporters : mixed list of reporter functions and/or modules
The reporter functions should take an instance of
bayessb.MCMCSet.multichain as an argument and return an
instance of pysb.report.Result. For inclusion in the report results
table. If a module is included in the list, any reporter functions
included in the module (i.e., functions decorated with
@pysb.report.reporter) will be identified and applied to the
chains.
names : optional, list of strings
Names to be used as the column headers in the report results
table. If not provided, the keys from the chains dict are used
as the column names.
burn : int
Number of steps to discard from the beginning of the chain.
"""
self.chain_filenames = OrderedDict()
"""dict of lists of MCMC filenames, sorted by name."""
for key in sorted(chain_filenames):
self.chain_filenames[key] = chain_filenames[key]
self.module_names = []
"""List of module names that parallels that of the reporter names."""
self.reporters = []
"""List of reporter functions to run on the chains."""
self.names = None
"""List of the different types of models/fits reported on."""
self.results = []
"""List of lists containing the results of reporter execution."""
self.burn = burn
"""Number of steps to discard from the beginning of the chain."""
# Unpack reporter modules, adding any reporter functions found
for reporter in reporters:
if ismodule(reporter):
self.reporters += reporter_dict[reporter.__name__]
if hasattr(reporter, 'reporter_group_name'):
module_name = reporter.reporter_group_name
else:
module_name = reporter.__name__
self.module_names += [module_name] * \
len(reporter_dict[reporter.__name__])
else:
self.reporters.append(reporter)
# FIXME FIXME Fix this to sniff out the module for the
# reporter function that was passed in
self.module_names.append("Not given")
# Initialize reporter names and module names
if names is None:
self.names = [n.replace('_', ' ')
for n in self.chain_filenames.keys()]
else:
self.names = names
# Run the reports
reporter_names = [r.reporter_name for r in self.reporters]
for chain_list_name, chain_list in self.chain_filenames.iteritems():
self.get_results_for_chain_set(chain_list_name, chain_list)
# Transpose the results list
self.results = zip(*self.results)
def get_results_for_chain_set(self, chain_list_name, chain_list):
"""Takes a list of filenames for a group of chains, initializes
an MCMCSet object, and calls all of the reporters on the MCMCSet.
Deferred the loading of MCMCSet objects to this function because
it means that only one set of chains needs to be included in memory
at any one time.
"""
print "Loading chains for %s..." % chain_list_name
mcmc_set = MCMCSet(chain_list_name)
# Load the chain files
mcmc_list = []
for filename in chain_list:
mcmc_list.append(cPickle.load(open(filename)))
# Prune and pool the chains in the list
mcmc_set.initialize_and_pool(mcmc_list, self.burn)
print "Running reporters for %s..." % chain_list_name
result = []
for reporter in self.reporters:
result.append(reporter(mcmc_set))
self.results.append(result)
def get_text_table(self, max_width=80):
"""Return the report results as a pretty-printed text table."""
# TODO This will have to be written because structure of results
# table has changed
tt = Texttable(max_width=max_width)
tt.header(self.header_names)
text_results = [[r.value if hasattr(r, 'value') else r for r in r_list]
for r_list in self.results]
tt.add_rows(text_results, header=False)
return tt.draw()
def write_pdf_table(self, filename):
"""Writes the results table to a PDF file.
Parameters
----------
filename : string
The name of the output filename.
"""
# TODO This will have to be written because structure of results
# table has changed
lines = []
for row in self.results:
lines.append(tf.TableRow(*map(tf.Cell, row)))
rowmaker = tf.RowSpec(*map(tf.ColumnSpec, self.header_names))
table = tf.PDFTable('Results', headers=rowmaker)
f = open(filename, 'wb')
f.write(table.render(lines))
def write_html_table(self, filename):
"""Writes the results table to a HTML file.
Parameters
----------
filename : string
The name of the output filename.
"""
lines = []
for i, row in enumerate(self.results):
html_row = []
html_row.append(self.reporters[i].name)
for result in row:
# Here we assume it's a pysb.report.Result object
if result.link is None:
html_row.append(result.value)
else:
html_row.append('<a href=\'%s\'>%s</a>' %
(result.link, result.value))
lines.append(tf.TableRow(*map(tf.Cell, html_row)))
rowmaker = tf.RowSpec(*map(tf.ColumnSpec, self.header_names))
table = tf.HTMLTable('Results', headers=rowmaker)
f = open(filename, 'wb')
f.write(table.render(lines))
def write_html_table_with_links(self, filename):
"""A manual re-write of HTML table export to allow inclusion of
hyperlinks (the TableFactory version escapes the markup)
"""
# Add some formatting for the overall page
lines = """<!DOCTYPE html>
<html>
<head>
<style type="text/css">
body { font-family: sans-serif; font-size: 10pt}
table { border-collapse: collapse; }
th { align: left; font-weight: bold;
vertical-align: top}
td, th { border: 1px solid #aaa; padding: 0.2em; }
</style>
</head>
<body>"""
lines += "<table>"
# Add two empty columns to headers to allow for reporter and module
headers = ['', ''] + self.names
header_string = "<tr><th>"
header_string += '</th><th>'.join(headers)
header_string += "</th></tr>"
lines += header_string
prev_module_name = None
for i, row in enumerate(self.results):
html_row = []
html_row_string = '<tr>'
cur_module_name = self.module_names[i]
# Group the results for a reporter group into a rowspan
if prev_module_name != cur_module_name:
rowspan = 1
while (i + rowspan) < len(self.module_names) and \
self.module_names[i+rowspan] == cur_module_name:
rowspan += 1
html_row_string += '<th rowspan="%d">%s</th>' % \
(rowspan, cur_module_name)
prev_module_name = cur_module_name
# Add the row header showing the name of the current reporter.
# If the reporter has an "evidence" field associated with it,
# create a link to a page describing the evidence
if (hasattr(self.reporters[i], 'reporter_evidence') and
self.reporters[i].reporter_evidence is not None):
evidence_filename = '%s_evidence.html' % \
self.reporters[i].__name__
evidence_str = """
<html>
<head><title>Evidence for %s</title>
<style type="text/css">
img { max-width : 400px;
max-height : 400px; }
body { font-family: sans-serif; font-size: 10pt}
h1 { font-weight : bold;
font-size : 14pt; }
</style>
</head>
<body>
<p><h1>Evidence that %s</h1>
""" % (self.reporters[i].reporter_name,
self.reporters[i].reporter_name)
evidence_str += self.reporters[i].reporter_evidence.get_html()
evidence_str += "</body></html>"
with open(evidence_filename, 'w') as f:
f.write(evidence_str)
reporter_html = '<th><a href="%s">%s</a></th>' % \
(evidence_filename,
self.reporters[i].reporter_name)
else:
reporter_html = '<th>%s</th>' % self.reporters[i].reporter_name
html_row_string += reporter_html
# Add the HTML-ified result
for result in row:
html_row.append(result.get_html())
html_row_string += '\n'.join(html_row)
html_row_string += '</tr>\n\n'
lines += html_row_string
lines += "</table>"
# Add closing tags
lines += "</body></html>"
f = open(filename, 'wb')
f.write(lines)
def write_tsv(self, filename):
"""Write the transposed results table as a tab-separated file.
Transposition of the results table puts the models in rows and the
attributes in the columns, which is more suitable for most
machine-learning/analysis algorithms.
"""
output = StringIO()
# Add the header line
output.write('model_name\t')
output.write('\t'.join([r.func_name for r in self.reporters]))
output.write('\n')
# Transpose the results list
results = zip(*self.results)
for model_name, result_row in zip(self.names, results):
output.write(model_name + '\t')
output.write('\t'.join([r.get_text() for r in result_row]))
output.write('\n')
with open(filename, 'w') as f:
f.write(output.getvalue())
def cluster_by_maximum_likelihood(self):
"""Cluster the models based on maximum_likelihood."""
# Get the maximum likelihood row from the results table
ml_results = None
for i, reporter in enumerate(self.reporters):
if reporter.func_name == "maximum_likelihood":
ml_results = self.results[i]
if ml_results is None:
raise Exception("Couldn't find the row in the "
"results table for the maximum likelihood "
"test.")
# Get the maximum likelihood row from the results table
tBidBax_monotonic_results = None
for i, reporter in enumerate(self.reporters):
if reporter.func_name == "tBid_Bax_monotonically_increasing":
tBidBax_monotonic_results = self.results[i]
if tBidBax_monotonic_results is None:
raise Exception("Couldn't find the row in the "
"results table for the tBidBax monotonically "
"increasing test.")
# Get the maximum likelihood row from the results table
iBax_monotonic_results = None
for i, reporter in enumerate(self.reporters):
if reporter.func_name == "iBax_monotonically_increasing":
iBax_monotonic_results = self.results[i]
if iBax_monotonic_results is None:
raise Exception("Couldn't find the row in the "
"results table for the iBax monotonically "
"increasing test.")
total_ml = np.sum([r.value for r in ml_results])
combined_results = []
# FIXME Need a better way of normalizing the max likelihood
#combined_results.append([r.value / total_ml for r in ml_results])
combined_results.append([np.log10(r.value/2) for r in ml_results])
combined_results.append([r.value for r in tBidBax_monotonic_results])
combined_results.append([r.value for r in iBax_monotonic_results])
combined_results = np.array(combined_results)
# Calculate distances between models based on tests
num_results = combined_results.shape[1]
D1 = scipy.zeros([num_results, num_results])
for i in range(num_results):
for j in range(num_results):
D1[i, j] = np.linalg.norm(combined_results[:,i] -
combined_results[:,j])
# Compute and plot first dendrogram
fig = plt.figure(figsize=(8,8))
ax1 = fig.add_axes([0.09, 0.1, 0.2, 0.6])
Y = scipy.cluster.hierarchy.linkage(D1, method='centroid')
Z1 = scipy.cluster.hierarchy.dendrogram(Y, orientation='right',
labels=[n.split(' ')[0] + ' ' + n.split(' ')[2]
for n in self.names])
ax1.set_xticks([])
ax1.yaxis.tick_left()
# Calculate distances between tests based on models
num_results = combined_results.shape[0]
D2 = scipy.zeros([num_results, num_results])
for i in range(num_results):
for j in range(num_results):
D2[i, j] = np.linalg.norm(combined_results[i,:] -
combined_results[j,:])
# Compute and plot second dendrogram
ax2 = fig.add_axes([0.3, 0.71, 0.6, 0.2])
Y = scipy.cluster.hierarchy.linkage(D2, method='centroid')
Z2 = scipy.cluster.hierarchy.dendrogram(Y,
labels=['Max likelihood', 'tBidBax monotonic',
'iBax monotonic'])
ax2.xaxis.tick_top()
ax2.set_yticks([])
# Plot distance matrix.
axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])
idx1 = Z1['leaves']
idx2 = Z2['leaves']
D = np.zeros(combined_results.shape)
for i in range(combined_results.shape[0]):
for j in range(combined_results.shape[1]):
D[i,j] = combined_results[idx2[i], idx1[j]]
im = axmatrix.matshow(D.T, aspect='auto', origin='lower',
cmap=cm.YlGnBu)
axmatrix.set_xticks([])
axmatrix.set_yticks([])
# Plot colorbar.
axcolor = fig.add_axes([0.91,0.1,0.02,0.6])
plt.colorbar(im, cax=axcolor)
fig.show()
fig.savefig('dendrogram.png')
class Result(object):
"""Stores the results associated with the execution of a reporter function.
Parameters
----------
value : anything
The return value of a reporter function.
link : string
String representing a hyperlink, e.g. to information or
visualizations supporting the reporter result.
expectation : anything (optional)
The expected value of the reporter.
"""
def __init__(self, value, link, expectation=None):
self.value = value
self.link = link
self.expectation = expectation
def get_text(self):
"""Returns a text representation of the result."""
result_str = ''
if isinstance(self.value, float):
result_str = '%-.2f' % self.value
elif isinstance(self.value, bool):
if self.value:
result_str = 'True'
else:
result_str = 'False'
else:
result_str = str(self.value)
return result_str
def get_html(self):
"""Returns the default HTML string for the table cell to contain the
result.
Returns
-------
string
A string containing the HTML for the table cell, including the
opening and closing (<td>...</td>) tags.
"""
# Format the result
result_str = self.get_text()
if self.link is not None:
result_str = '<a href="%s">%s</a>' % (self.link, result_str)
return '<td>%s</td>' % result_str
class FloatListResult(Result):
"""Implements formatting for a list of floating point values.
In particular, specifies the precision at which they should be displayed.
Parameters
----------
value : anything
The return value of a reporter function.
link : string
String representing a hyperlink, e.g. to information or
visualizations supporting the reporter result.
precision : int (optional)
The number of decimal places to display for each entry in the
list of values. Default is 2.
"""
def __init__(self, value, link, precision=2):
Result.__init__(self, value, link)
self.precision = precision
def get_text(self):
"""Returns the text representation of the floating point list.
The string representation of the floating point list is of the form
"<td>[xxx.xx, x.xx, xx.xx, ...]</td>, where the precision (number of
x's after decimal point) is controlled by the value assigned to the
property ``precision``.
"""
# FIXME Finish comments
if self.value is None:
result_str = str(self.value)
else:
format_str = '%%.%dg' % self.precision
result_str = '['
result_str += ', '.join([format_str % f for f in self.value])
result_str += ']'
return result_str
def get_html(self):
"""Returns the HTML string for the table cell to contain the result.
The string representation of the floating point list is of the form
"<td>[xxx.xx, x.xx, xx.xx, ...]</td>, where the precision (number of
x's after decimal point) is controlled by the value assigned to the
property ``precision``.
Returns
-------
string
A string containing the HTML for the table cell, including the
opening and closing (<td>...</td>) tags.
"""
result_str = self.get_text()
if self.link is not None:
result_str = '<a href="%s">%s</a>' % (self.link, result_str)
return '<td>%s</td>' % result_str
class ThumbnailResult(Result):
"""A result that is an img that should be displayed as a thumbnail.
Results of this type have no value associated with them, so the ``value``
field is set to ``None``.
"""
def __init__(self, thumbnail_link, img_link):
"""Create the FloatListResult object.
Parameters
----------
thumbnail_link : string
Path to the filename of the thumbnail image.
img_link : string
Path to the filename of the full-size image.
"""
if thumbnail_link is None or img_link is None:
raise ValueError("Arguments to ThumbnailResult.__init__() "
"cannot be None.")
Result.__init__(self, None, img_link)
self.thumbnail_link = thumbnail_link
def get_html(self):
"""Returns the HTML string for the table cell to contain the result.
The string representation for the thumbnail is of the form
``<td><a href="..."><img ...></a></td>``, with the anchor tag
linking to the full-size image.
Returns
-------
string
A string containing the HTML for the table cell, including the
opening and closing (<td>...</td>) tags.
"""
return '<td><a href="%s"><img src="%s" /></a></td>' % \
(self.link, self.thumbnail_link)
class MeanSdResult(Result):
"""A result whose value is expressed as a mean and standard deviation.
For example, to summarize a distribution which can be viewed by accessing
the associated link. For these results, the "value" attribute is set to
the mean; the SD is stored in the additional attribute ``sd``.
Parameters
----------
mean : float
The mean value associated with the result.
sd : float
The standard deviation associated with the result.
link : string
Path to the filename of any additional data.
precision : int (optional)
The number of decimal places to use when displaying the mean
and standard deviation. Default is 3.
"""
def __init__(self, mean, sd, link, precision=3):
if mean is None or sd is None or sd < 0:
raise ValueError("Invalid argument to MeanSdResult constructor.")
Result.__init__(self, mean, link)
self.sd = sd
self.precision = precision
def get_html(self):
"""Returns the HTML string for the table cell to contain the result.
        The string representation for the result is of the form
        ``<td><a href="...">mean ± sd</a></td>``, with the anchor tag
        linking to any additional supporting data.
Returns
-------
string
A string containing the HTML for the table cell, including the
opening and closing (<td>...</td>) tags.
"""
format_str = '<td><a href="%%s">%%.%dg ± %%.%dg</a></td>' % \
(self.precision, self.precision)
return format_str % (self.link, self.value, self.sd)
class FuzzyBooleanResult(Result):
"""Stores the result of a yes/no test applied to a chain by sampling.
Color-codes the result of the boolean test as red (bad) or green (good)
depending on its deviation from the expected value.
"""
def __init__(self, value, link, expectation):
if value is None or expectation is None:
raise ValueError("value and expectation arguments cannot be None.")
if not isinstance(value, float):
raise ValueError("value must be a float.")
if not isinstance(expectation, float):
raise ValueError("value must be a float.")
Result.__init__(self, value, link, expectation)
def get_html(self):
"""Returns the default HTML string for the table cell to contain the
result.
Returns
-------
string
A string containing the HTML for the table cell, including the
opening and closing (<td>...</td>) tags.
"""
# Format the result
result_str = '%-.2f' % self.value
if self.link is not None:
result_str = '<a href="%s">%s</a>' % (self.link, result_str)
error = abs(self.value - self.expectation)
c_map = cm.get_cmap('RdYlGn')
rgba = c_map(1 - error)
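        # RdYlGn maps 0 to red and 1 to green, so an error of 0 (value equal
        # to the expectation) renders green and an error of 1 renders red.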
color_str = "#%02x%02x%02x" % tuple([v*255 for v in rgba[0:3]])
return '<td style="background: %s">%s</td>' % (color_str, result_str)
# DECORATOR
def reporter(name, evidence=None):
"""Decorator for reporter functions.
Sets the ``name`` field of the function to indicate its name. The name of
the reporter function is meant to be a human-readable name for use in
results summaries.
The decorator also adds the reporter function to the package-level variable
``reporter_dict``, which keeps track of all reporter functions imported (and
decorated) thus far. The ``reporter_dict`` is indexed by the name of the
module containing the reporter function, and each key maps to a list of
reporter functions.
Parameters
----------
name : string
The human-readable name for the reporter function.
evidence : Evidence
The evidence for the reporter.
Returns
-------
The decorated reporter function.
"""
if callable(name):
raise TypeError("The reporter decorator requires a name argument.")
def wrap(f):
# Keep track of all reporters in the package level reporter_dict
reporter_mod_name = inspect.getmodule(f).__name__
reporter_list = reporter_dict.setdefault(reporter_mod_name, [])
reporter_list.append(f)
f.reporter_name = name
f.reporter_evidence = evidence
return f
return wrap
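# Illustrative usage sketch (mirrors the reporters defined in
# bayessb/report/reporters.py):
#     @reporter('Number of chains')
#     def num_chains(mcmc_set):
#         return Result(len(mcmc_set.chains), None)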
```
#### File: bayessb/report/reporters.py
```python
import numpy as np
from matplotlib import pyplot as plt
from bayessb.report import reporter, Result, FloatListResult, ThumbnailResult
from bayessb import convergence
from StringIO import StringIO
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib import cm
from matplotlib.font_manager import FontProperties
from bayessb.multichain import NoPositionsException
reporter_group_name = "Estimation"
num_samples = 100
@reporter('Number of chains')
def num_chains(mcmc_set):
return Result(len(mcmc_set.chains), None)
@reporter('Estimation parameters')
def estimation_parameters(mcmc_set):
chain = mcmc_set.chains[0]
opts = chain.options
output = StringIO()
output.write("<html><head /><body><pre>")
output.write("model: %s\n" % opts.model.name)
output.write("use_hessian: %s\n" % opts.use_hessian)
output.write("hessian_period: %s\n" % opts.hessian_period)
output.write("hessian_scale: %s\n" % opts.hessian_scale)
output.write("norm_step_size: %s\n" % opts.norm_step_size)
output.write("anneal_length: %s\n" % opts.anneal_length)
output.write("T_init: %s\n" % opts.T_init)
output.write("thermo_temp: %s\n" % opts.thermo_temp)
output.write("accept_rate_target: %s\n" % opts.accept_rate_target)
output.write("sigma_max: %s\n" % opts.sigma_max)
output.write("sigma_min: %s\n" % opts.sigma_min)
output.write("sigma_step: %s\n" % opts.sigma_step)
output.write("sigma_adj_interval: %s\n" % opts.sigma_adj_interval)
output.write("initial_values: %s\n" % opts.initial_values)
output.write("</pre></body></html>")
# Write the estimation parameter description to a file
param_file_name = '%s_estimation_params.html' % mcmc_set.name
with open(param_file_name, 'w') as f:
f.write(output.getvalue())
return Result(None, param_file_name)
@reporter('Convergence criterion')
def convergence_criterion(mcmc_set):
"""Returns the vector of Gelman-Rubin convergence criterion values and a
link to an HTML file containing plots of the traces of the walk for each
parameter fitted."""
# Prepare html for page showing plots of parameter traces
html_str = "<html><head><title>Parameter traces for %s</title></head>\n" \
% mcmc_set.name
html_str += "<body><p>Parameter traces for %s</p>\n" \
% mcmc_set.name
img_str_list = []
# Useful variables
num_estimate = mcmc_set.chains[0].num_estimate
fontP = FontProperties()
fontP.set_size('small')
legend_kwargs = {'prop':fontP, 'ncol':1, 'bbox_to_anchor':(1, 1),
'fancybox': True, 'shadow': True}
# Make plot of posterior
fig = Figure()
ax = fig.gca()
for i, chain in enumerate(mcmc_set.chains):
color = cm.jet(i/float(num_estimate - 1))
label = 's%d' % chain.options.seed
if chain.pruned:
line = ax.plot(chain.thinned_steps, chain.posteriors, color=color,
label=label)
else:
line = ax.plot(chain.posteriors, color=color, label=label)
ax.set_title("Posterior traces")
ax.legend(loc='upper left', **legend_kwargs)
posterior_plot_filename = '%s_posterior_trace.png' % mcmc_set.name
canvas = FigureCanvasAgg(fig)
fig.set_canvas(canvas)
fig.savefig(posterior_plot_filename)
# Make plots of parameter traces
for i in range(num_estimate):
param_name = mcmc_set.chains[0].options.estimate_params[i].name
fig = Figure()
ax = fig.gca()
for j, chain in enumerate(mcmc_set.chains):
color = cm.jet(j/float(num_estimate - 1))
label = 's%d' % chain.options.seed
if chain.pruned:
line = ax.plot(chain.thinned_steps, chain.positions[:,i],
color=color, label=label)
else:
line = ax.plot(chain.positions[:, i], color=color, label=label)
ax.set_title("Parameter: %s" % param_name)
ax.legend(loc='upper left', **legend_kwargs)
plot_filename = '%s_trace_%s.png' % (mcmc_set.name, param_name)
canvas = FigureCanvasAgg(fig)
fig.set_canvas(canvas)
fig.savefig(plot_filename)
img_str_list.append(plot_filename)
# Make the html file
html_str += '<a href="%s"><img src="%s" width=400 /></a>' % \
(posterior_plot_filename, posterior_plot_filename)
html_str += '\n'.join([
'<a href="%s"><img src="%s" width=400 /></a>' %
(i, i) for i in img_str_list])
html_str += "</body></html>"
html_filename = '%s_convergence.html' % mcmc_set.name
with open(html_filename, 'w') as f:
f.write(html_str)
return FloatListResult(convergence.convergence_criterion(mcmc_set),
html_filename)
@reporter('Maximum likelihood')
def maximum_likelihood(mcmc_set):
# Get the maximum likelihood
try:
(max_likelihood, max_likelihood_position) = mcmc_set.maximum_likelihood()
except NoPositionsException as npe:
return Result(None, None)
return show_fit_at_position(mcmc_set, max_likelihood,
max_likelihood_position, 'max_likelihood')
@reporter('Maximum posterior')
def maximum_posterior(mcmc_set):
# Get the maximum posterior
try:
(max_posterior, max_posterior_position) = mcmc_set.maximum_posterior()
except NoPositionsException as npe:
return Result(None, None)
return show_fit_at_position(mcmc_set, max_posterior,
max_posterior_position, 'max_posterior')
def show_fit_at_position(mcmc_set, fit_value, position, fit_name):
"""Create the result page showing the fit quality at the given position.
Parameters
----------
mcmc_set : MCMCSet object
The set of MCMC chains
fit_value : float
The quality of fit at the given position.
position : numpy.array
Array of (log10-transformed) parameter values at the given position.
fit_name : string
A shorthand name for the fit position, e.g., "max_likelihood". Should
conform to rules of Python variable naming (no spaces, doesn't
start with a number, etc.).
Returns
-------
A result object containing the fit value and the link to the accompanying
HTML plot page, if any.
"""
# If the MCMC object does not have a fit_plotting_function defined
# (for example, if it is a base MCMC object), then don't create a
# plot for visualization.
if not hasattr(mcmc_set.chains[0], 'fit_plotting_function'):
return Result(fit_value, None)
# Prepare html for page showing plots at position
html_str = "<html><head><title>Simulation of %s " \
"with %s parameter values</title></head>\n" \
% (mcmc_set.name, fit_name)
html_str += "<body><p>Simulation of %s with %s " \
"parameter values</p>\n" % (mcmc_set.name, fit_name)
# Show the plot vs. the data at the position
fig = mcmc_set.chains[0].fit_plotting_function(position=position)
img_filename = '%s_%s_plot.png' % (mcmc_set.name, fit_name)
fig.savefig(img_filename)
fig.savefig(img_filename.replace('.png', '.pdf'))
html_str += '<p><img src="%s" /></p>' % img_filename
chain0 = mcmc_set.chains[0]
"""
# Show the plot of all observables at the position
tspan = chain0.options.tspan
observables = chain0.options.model.observables
x = chain0.simulate(position=position, observables=True)
fig = Figure()
ax = fig.gca()
lines = []
for o in observables:
line = ax.plot(tspan, x[o.name])
lines += line
ax.set_title("Observables at %s" % fit_name)
fig.legend(lines, [o.name for o in observables], 'lower right')
canvas = FigureCanvasAgg(fig)
fig.set_canvas(canvas)
img_filename = '%s_%s_species.png' % (mcmc_set.name, fit_name)
fig.savefig(img_filename)
html_str += '<p><img src="%s" /></p>' % img_filename
"""
# Print the parameter values for the position as a dict that can be
# used to override the initial values
html_str += '<pre>%s_params = {\n' % fit_name
for i, p in enumerate(chain0.options.estimate_params):
html_str += "\t'%s': %.17g,\n" % \
(p.name, 10 ** position[i])
html_str += '}</pre>'
html_str += '</body></html>'
# Create the html file
html_filename = '%s_%s_plot.html' % (mcmc_set.name, fit_name)
with open(html_filename, 'w') as f:
f.write(html_str)
return Result(fit_value, html_filename)
@reporter('Sample fits')
def sample_fits(mcmc_set):
fig = Figure()
ax = fig.gca()
plot_filename = '%s_sample_fits.png' % mcmc_set.name
thumbnail_filename = '%s_sample_fits_th.png' % mcmc_set.name
# Make sure we can call the method 'get_observable_timecourses'
if not hasattr(mcmc_set.chains[0], 'get_observable_timecourses') or \
not hasattr(mcmc_set.chains[0], 'plot_data'):
return Result('None', None)
# Plot the original data
mcmc_set.chains[0].plot_data(ax)
# Plot a sampling of trajectories from the original parameter set
try:
for i in range(num_samples):
position = mcmc_set.get_sample_position()
timecourses = mcmc_set.chains[0].get_observable_timecourses(
position=position)
for obs_name, timecourse in timecourses.iteritems():
ax.plot(timecourse[0], timecourse[1], color='g', alpha=0.1,
label=obs_name)
except NoPositionsException as npe:
pass
canvas = FigureCanvasAgg(fig)
fig.set_canvas(canvas)
fig.savefig(plot_filename)
fig.savefig(plot_filename.replace('.png', '.pdf'))
fig.savefig(thumbnail_filename, dpi=10)
return ThumbnailResult(thumbnail_filename, plot_filename)
@reporter('Marginals')
def marginals(mcmc_set):
"""Returns the vector of Gelman-Rubin convergence criterion values and a
link to an HTML file containing plots of the traces of the walk for each
parameter fitted."""
# Prepare html for page showing plots of parameter traces
html_str = "<html><head><title>Marginal distributions for " \
"%s</title></head>\n" % mcmc_set.name
html_str += "<body><p>Marginal distributions for for %s</p>\n" \
% mcmc_set.name
img_str_list = []
# Make plots of parameter traces
for i in range(mcmc_set.chains[0].num_estimate):
param_name = mcmc_set.chains[0].options.estimate_params[i].name
fig = Figure()
ax = fig.gca()
# Build a list of arrays containing positions for this parameter
chains_for_param = []
for chain in mcmc_set.chains:
# Don't try to plot marginals for a chain with no accepted steps!
if len(chain.positions) > 0:
chains_for_param.append(chain.positions[:,i])
# Plot the marginals
ax.hist(chains_for_param, histtype='step', bins=25)
ax.set_title("Parameter: %s" % param_name)
plot_filename = '%s_marginal_%s.png' % (mcmc_set.name, param_name)
canvas = FigureCanvasAgg(fig)
fig.set_canvas(canvas)
fig.savefig(plot_filename)
img_str_list.append(plot_filename)
# Make the html file
html_str += '\n'.join([
'<a href="%s"><img src="%s" width=400 /></a>' %
(i, i) for i in img_str_list])
html_str += "</body></html>"
html_filename = '%s_marginals.html' % mcmc_set.name
with open(html_filename, 'w') as f:
f.write(html_str)
return Result(None, html_filename)
```
#### File: examples/earm/earm_1_3_standalone.py
```python
import numpy
import scipy.weave, scipy.integrate
import collections
import itertools
import distutils.errors
_use_inline = False
# try to inline a C statement to see if inline is functional
try:
scipy.weave.inline('int i;', force=1)
_use_inline = True
except distutils.errors.CompileError:
pass
Parameter = collections.namedtuple('Parameter', 'name value')
Observable = collections.namedtuple('Observable', 'name species coefficients')
Initial = collections.namedtuple('Initial', 'param_index species_index')
class Model(object):
def __init__(self):
self.y = None
self.yobs = None
self.integrator = scipy.integrate.ode(self.ode_rhs, )
self.integrator.set_integrator('vode', method='bdf', with_jacobian=True, rtol=1e-3, atol=1e-6)
self.y0 = numpy.empty(60)
self.ydot = numpy.empty(60)
self.sim_param_values = numpy.empty(206)
self.parameters = [None] * 206
self.observables = [None] * 6
self.initial_conditions = [None] * 19
self.parameters[0] = Parameter('L_0', 3000)
self.parameters[1] = Parameter('pR_0', 1000)
self.parameters[2] = Parameter('flip_0', 2000)
self.parameters[3] = Parameter('pC8_0', 10000)
self.parameters[4] = Parameter('BAR_0', 1000)
self.parameters[5] = Parameter('pC3_0', 10000)
self.parameters[6] = Parameter('pC6_0', 10000)
self.parameters[7] = Parameter('XIAP_0', 100000)
self.parameters[8] = Parameter('PARP_0', 1e+06)
self.parameters[9] = Parameter('Bid_0', 60000)
self.parameters[10] = Parameter('Mcl1_0', 20000)
self.parameters[11] = Parameter('Bax_0', 80000)
self.parameters[12] = Parameter('Bcl2_0', 30000)
self.parameters[13] = Parameter('Mito_0', 500000)
self.parameters[14] = Parameter('mCytoC_0', 500000)
self.parameters[15] = Parameter('mSmac_0', 100000)
self.parameters[16] = Parameter('pC9_0', 100000)
self.parameters[17] = Parameter('Apaf_0', 100000)
self.parameters[18] = Parameter('kf1', 4e-07)
self.parameters[19] = Parameter('kr1', 1e-06)
self.parameters[20] = Parameter('kc1', 0.01)
self.parameters[21] = Parameter('kf2', 1e-06)
self.parameters[22] = Parameter('kr2', 0.001)
self.parameters[23] = Parameter('kf3', 1e-07)
self.parameters[24] = Parameter('kr3', 0.001)
self.parameters[25] = Parameter('kc3', 1)
self.parameters[26] = Parameter('kf4', 1e-06)
self.parameters[27] = Parameter('kr4', 0.001)
self.parameters[28] = Parameter('kf5', 1e-07)
self.parameters[29] = Parameter('kr5', 0.001)
self.parameters[30] = Parameter('kc5', 1)
self.parameters[31] = Parameter('kf6', 1e-07)
self.parameters[32] = Parameter('kr6', 0.001)
self.parameters[33] = Parameter('kc6', 1)
self.parameters[34] = Parameter('kf7', 1e-07)
self.parameters[35] = Parameter('kr7', 0.001)
self.parameters[36] = Parameter('kc7', 1)
self.parameters[37] = Parameter('kf8', 2e-06)
self.parameters[38] = Parameter('kr8', 0.001)
self.parameters[39] = Parameter('kc8', 0.1)
self.parameters[40] = Parameter('kf9', 1e-06)
self.parameters[41] = Parameter('kr9', 0.001)
self.parameters[42] = Parameter('kc9', 20)
self.parameters[43] = Parameter('kf10', 1e-07)
self.parameters[44] = Parameter('kr10', 0.001)
self.parameters[45] = Parameter('kc10', 1)
self.parameters[46] = Parameter('kf11', 1e-06)
self.parameters[47] = Parameter('kr11', 0.001)
self.parameters[48] = Parameter('kf12', 1e-07)
self.parameters[49] = Parameter('kr12', 0.001)
self.parameters[50] = Parameter('kc12', 1)
self.parameters[51] = Parameter('kf13', 0.01)
self.parameters[52] = Parameter('kr13', 1)
self.parameters[53] = Parameter('kf14', 0.0001)
self.parameters[54] = Parameter('kr14', 0.001)
self.parameters[55] = Parameter('kf15', 0.0002)
self.parameters[56] = Parameter('kr15', 0.001)
self.parameters[57] = Parameter('kf16', 0.0001)
self.parameters[58] = Parameter('kr16', 0.001)
self.parameters[59] = Parameter('kf17', 0.0002)
self.parameters[60] = Parameter('kr17', 0.001)
self.parameters[61] = Parameter('kf18', 0.0001)
self.parameters[62] = Parameter('kr18', 0.001)
self.parameters[63] = Parameter('kf19', 0.0001)
self.parameters[64] = Parameter('kr19', 0.001)
self.parameters[65] = Parameter('kc19', 1)
self.parameters[66] = Parameter('kf20', 0.0002)
self.parameters[67] = Parameter('kr20', 0.001)
self.parameters[68] = Parameter('kc20', 10)
self.parameters[69] = Parameter('kf21', 0.0002)
self.parameters[70] = Parameter('kr21', 0.001)
self.parameters[71] = Parameter('kc21', 10)
self.parameters[72] = Parameter('kf22', 1)
self.parameters[73] = Parameter('kr22', 0.01)
self.parameters[74] = Parameter('kf23', 5e-07)
self.parameters[75] = Parameter('kr23', 0.001)
self.parameters[76] = Parameter('kc23', 1)
self.parameters[77] = Parameter('kf24', 5e-08)
self.parameters[78] = Parameter('kr24', 0.001)
self.parameters[79] = Parameter('kf25', 5e-09)
self.parameters[80] = Parameter('kr25', 0.001)
self.parameters[81] = Parameter('kc25', 1)
self.parameters[82] = Parameter('kf26', 1)
self.parameters[83] = Parameter('kr26', 0.01)
self.parameters[84] = Parameter('kf27', 2e-06)
self.parameters[85] = Parameter('kr27', 0.001)
self.parameters[86] = Parameter('kf28', 7e-06)
self.parameters[87] = Parameter('kr28', 0.001)
self.parameters[88] = Parameter('kf31', 0.001)
self.parameters[89] = Parameter('kdeg_Mcl1', 0.0001)
self.parameters[90] = Parameter('kdeg_AMito', 0.0001)
self.parameters[91] = Parameter('kdeg_C3_U', 0)
self.parameters[92] = Parameter('ks_L', 0)
self.parameters[93] = Parameter('kdeg_L', 2.9e-06)
self.parameters[94] = Parameter('kdeg_pR', 2.9e-06)
self.parameters[95] = Parameter('ks_pR', 0.000435)
self.parameters[96] = Parameter('kdeg_flip', 2.9e-06)
self.parameters[97] = Parameter('ks_flip', 0.00087)
self.parameters[98] = Parameter('kdeg_pC8', 2.9e-06)
self.parameters[99] = Parameter('ks_pC8', 0.00435)
self.parameters[100] = Parameter('kdeg_BAR', 2.9e-06)
self.parameters[101] = Parameter('ks_BAR', 0.000435)
self.parameters[102] = Parameter('kdeg_pC3', 2.9e-06)
self.parameters[103] = Parameter('ks_pC3', 0.00435)
self.parameters[104] = Parameter('kdeg_pC6', 2.9e-06)
self.parameters[105] = Parameter('ks_pC6', 0.00435)
self.parameters[106] = Parameter('kdeg_XIAP', 2.9e-06)
self.parameters[107] = Parameter('ks_XIAP', 0.0435)
self.parameters[108] = Parameter('kdeg_PARP', 2.9e-06)
self.parameters[109] = Parameter('ks_PARP', 0.435)
self.parameters[110] = Parameter('kdeg_Bid', 2.9e-06)
self.parameters[111] = Parameter('ks_Bid', 0.0261)
self.parameters[112] = Parameter('ks_Mcl1', 0.3)
self.parameters[113] = Parameter('kdeg_Bax', 2.9e-06)
self.parameters[114] = Parameter('ks_Bax', 0.0348)
self.parameters[115] = Parameter('kdeg_Bcl2', 2.9e-06)
self.parameters[116] = Parameter('ks_Bcl2', 0.01305)
self.parameters[117] = Parameter('kdeg_Mito', 2.9e-06)
self.parameters[118] = Parameter('ks_Mito', 0.2175)
self.parameters[119] = Parameter('kdeg_mCytoC', 2.9e-06)
self.parameters[120] = Parameter('ks_mCytoC', 0.2175)
self.parameters[121] = Parameter('kdeg_mSmac', 2.9e-06)
self.parameters[122] = Parameter('ks_mSmac', 0.0435)
self.parameters[123] = Parameter('kdeg_Apaf', 2.9e-06)
self.parameters[124] = Parameter('ks_Apaf', 0.0435)
self.parameters[125] = Parameter('kdeg_pC9', 2.9e-06)
self.parameters[126] = Parameter('ks_pC9', 0.0435)
self.parameters[127] = Parameter('kdeg_L_pR', 2.9e-06)
self.parameters[128] = Parameter('ks_L_pR', 0)
self.parameters[129] = Parameter('kdeg_DISC', 2.9e-06)
self.parameters[130] = Parameter('ks_DISC', 0)
self.parameters[131] = Parameter('kdeg_DISC_flip', 2.9e-06)
self.parameters[132] = Parameter('ks_DISC_flip', 0)
self.parameters[133] = Parameter('kdeg_DISC_pC8', 2.9e-06)
self.parameters[134] = Parameter('ks_DISC_pC8', 0)
self.parameters[135] = Parameter('kdeg_C8', 2.9e-06)
self.parameters[136] = Parameter('ks_C8', 0)
self.parameters[137] = Parameter('kdeg_BAR_C8', 2.9e-06)
self.parameters[138] = Parameter('ks_BAR_C8', 0)
self.parameters[139] = Parameter('kdeg_C8_pC3', 2.9e-06)
self.parameters[140] = Parameter('ks_C8_pC3', 0)
self.parameters[141] = Parameter('kdeg_Bid_C8', 2.9e-06)
self.parameters[142] = Parameter('ks_Bid_C8', 0)
self.parameters[143] = Parameter('kdeg_C3', 2.9e-06)
self.parameters[144] = Parameter('ks_C3', 0)
self.parameters[145] = Parameter('kdeg_tBid', 2.9e-06)
self.parameters[146] = Parameter('ks_tBid', 0)
self.parameters[147] = Parameter('kdeg_C3_pC6', 2.9e-06)
self.parameters[148] = Parameter('ks_C3_pC6', 0)
self.parameters[149] = Parameter('kdeg_C3_XIAP', 2.9e-06)
self.parameters[150] = Parameter('ks_C3_XIAP', 0)
self.parameters[151] = Parameter('kdeg_C3_PARP', 2.9e-06)
self.parameters[152] = Parameter('ks_C3_PARP', 0)
self.parameters[153] = Parameter('kdeg_Mcl1_tBid', 2.9e-06)
self.parameters[154] = Parameter('ks_Mcl1_tBid', 0)
self.parameters[155] = Parameter('kdeg_Bax_tBid', 2.9e-06)
self.parameters[156] = Parameter('ks_Bax_tBid', 0)
self.parameters[157] = Parameter('kdeg_C6', 2.9e-06)
self.parameters[158] = Parameter('ks_C6', 0)
self.parameters[159] = Parameter('ks_C3_U', 0)
self.parameters[160] = Parameter('kdeg_CPARP', 2.9e-06)
self.parameters[161] = Parameter('ks_CPARP', 0)
self.parameters[162] = Parameter('kdeg_aBax', 2.9e-06)
self.parameters[163] = Parameter('ks_aBax', 0)
self.parameters[164] = Parameter('kdeg_C6_pC8', 2.9e-06)
self.parameters[165] = Parameter('ks_C6_pC8', 0)
self.parameters[166] = Parameter('kdeg_MBax', 2.9e-06)
self.parameters[167] = Parameter('ks_MBax', 0)
self.parameters[168] = Parameter('kdeg_Bcl2_MBax', 2.9e-06)
self.parameters[169] = Parameter('ks_Bcl2_MBax', 0)
self.parameters[170] = Parameter('kdeg_Bax2', 2.9e-06)
self.parameters[171] = Parameter('ks_Bax2', 0)
self.parameters[172] = Parameter('kdeg_Bax2_Bcl2', 2.9e-06)
self.parameters[173] = Parameter('ks_Bax2_Bcl2', 0)
self.parameters[174] = Parameter('kdeg_Bax4', 2.9e-06)
self.parameters[175] = Parameter('ks_Bax4', 0)
self.parameters[176] = Parameter('kdeg_Bax4_Bcl2', 2.9e-06)
self.parameters[177] = Parameter('ks_Bax4_Bcl2', 0)
self.parameters[178] = Parameter('kdeg_Bax4_Mito', 2.9e-06)
self.parameters[179] = Parameter('ks_Bax4_Mito', 0)
self.parameters[180] = Parameter('ks_AMito', 0)
self.parameters[181] = Parameter('kdeg_AMito_mCytoC', 2.9e-06)
self.parameters[182] = Parameter('ks_AMito_mCytoC', 0)
self.parameters[183] = Parameter('kdeg_AMito_mSmac', 2.9e-06)
self.parameters[184] = Parameter('ks_AMito_mSmac', 0)
self.parameters[185] = Parameter('kdeg_ACytoC', 2.9e-06)
self.parameters[186] = Parameter('ks_ACytoC', 0)
self.parameters[187] = Parameter('kdeg_ASmac', 2.9e-06)
self.parameters[188] = Parameter('ks_ASmac', 0)
self.parameters[189] = Parameter('kdeg_cCytoC', 2.9e-06)
self.parameters[190] = Parameter('ks_cCytoC', 0)
self.parameters[191] = Parameter('kdeg_cSmac', 2.9e-06)
self.parameters[192] = Parameter('ks_cSmac', 0)
self.parameters[193] = Parameter('kdeg_Apaf_cCytoC', 2.9e-06)
self.parameters[194] = Parameter('ks_Apaf_cCytoC', 0)
self.parameters[195] = Parameter('kdeg_XIAP_cSmac', 2.9e-06)
self.parameters[196] = Parameter('ks_XIAP_cSmac', 0)
self.parameters[197] = Parameter('kdeg_aApaf', 2.9e-06)
self.parameters[198] = Parameter('ks_aApaf', 0)
self.parameters[199] = Parameter('kdeg_Apop', 2.9e-06)
self.parameters[200] = Parameter('ks_Apop', 0)
self.parameters[201] = Parameter('kdeg_Apop_pC3', 2.9e-06)
self.parameters[202] = Parameter('ks_Apop_pC3', 0)
self.parameters[203] = Parameter('kdeg_Apop_XIAP', 2.9e-06)
self.parameters[204] = Parameter('ks_Apop_XIAP', 0)
self.parameters[205] = Parameter('__source_0', 1)
self.observables[0] = Observable('Bid_unbound', [9], [1])
self.observables[1] = Observable('PARP_unbound', [8], [1])
self.observables[2] = Observable('mSmac_unbound', [15], [1])
self.observables[3] = Observable('tBid_total', [29, 33, 34], [1, 1, 1])
self.observables[4] = Observable('CPARP_total', [37], [1])
self.observables[5] = Observable('cSmac_total', [53, 55], [1, 1])
self.initial_conditions[0] = Initial(0, 0)
self.initial_conditions[1] = Initial(1, 1)
self.initial_conditions[2] = Initial(2, 2)
self.initial_conditions[3] = Initial(3, 3)
self.initial_conditions[4] = Initial(4, 4)
self.initial_conditions[5] = Initial(5, 5)
self.initial_conditions[6] = Initial(6, 6)
self.initial_conditions[7] = Initial(7, 7)
self.initial_conditions[8] = Initial(8, 8)
self.initial_conditions[9] = Initial(9, 9)
self.initial_conditions[10] = Initial(10, 10)
self.initial_conditions[11] = Initial(11, 11)
self.initial_conditions[12] = Initial(12, 12)
self.initial_conditions[13] = Initial(13, 13)
self.initial_conditions[14] = Initial(14, 14)
self.initial_conditions[15] = Initial(15, 15)
self.initial_conditions[16] = Initial(17, 16)
self.initial_conditions[17] = Initial(16, 17)
self.initial_conditions[18] = Initial(205, 18)
if _use_inline:
def ode_rhs(self, t, y, p):
ydot = self.ydot
scipy.weave.inline(r'''
ydot[0] = -p[93]*y[0] - p[18]*y[0]*y[1] + p[88]*y[21] + p[19]*y[19] + p[92]*y[18];
ydot[1] = -p[94]*y[1] - p[18]*y[0]*y[1] + p[88]*y[21] + p[19]*y[19] + p[95]*y[18];
ydot[2] = -p[96]*y[2] - p[21]*y[2]*y[21] + p[22]*y[22] + p[97]*y[18];
ydot[3] = -p[98]*y[3] - p[23]*y[21]*y[3] - p[34]*y[3]*y[35] + p[24]*y[23] + p[35]*y[39] + p[99]*y[18];
ydot[4] = -p[100]*y[4] - p[26]*y[24]*y[4] + p[27]*y[25] + p[101]*y[18];
ydot[5] = -p[102]*y[5] - p[79]*y[5]*y[57] - p[28]*y[24]*y[5] + p[80]*y[58] + p[29]*y[26] + p[103]*y[18];
ydot[6] = -p[104]*y[6] - p[31]*y[28]*y[6] + p[32]*y[30] + p[105]*y[18];
ydot[7] = p[39]*y[31] - p[106]*y[7] - p[84]*y[57]*y[7] - p[86]*y[53]*y[7] - p[37]*y[28]*y[7] + p[85]*y[59] + p[87]*y[55] + p[38]*y[31] + p[107]*y[18];
ydot[8] = -p[108]*y[8] - p[40]*y[28]*y[8] + p[41]*y[32] + p[109]*y[18];
ydot[9] = -p[110]*y[9] - p[43]*y[24]*y[9] + p[44]*y[27] + p[111]*y[18];
ydot[10] = -p[89]*y[10] - p[46]*y[10]*y[29] + p[47]*y[33] + p[112]*y[18];
ydot[11] = -p[113]*y[11] - p[48]*y[11]*y[29] + p[49]*y[34] + p[114]*y[18];
ydot[12] = -p[115]*y[12] - p[53]*y[12]*y[40] - p[57]*y[12]*y[42] - p[61]*y[12]*y[44] + p[54]*y[41] + p[58]*y[43] + p[62]*y[45] + p[116]*y[18];
ydot[13] = p[90]*y[47] - p[117]*y[13] - p[63]*y[13]*y[44] + p[64]*y[46] + p[118]*y[18];
ydot[14] = -p[119]*y[14] - p[66]*y[14]*y[47] + p[67]*y[48] + p[120]*y[18];
ydot[15] = -p[121]*y[15] - p[69]*y[15]*y[47] + p[70]*y[49] + p[122]*y[18];
ydot[16] = -p[123]*y[16] - p[74]*y[16]*y[52] + p[75]*y[54] + p[124]*y[18];
ydot[17] = -p[125]*y[17] - p[77]*y[17]*y[56] + p[78]*y[57] + p[126]*y[18];
ydot[18] = 0;
ydot[19] = -p[20]*y[19] - p[127]*y[19] + p[18]*y[0]*y[1] - p[19]*y[19] + p[128]*y[18];
ydot[20] = p[185]*y[50] + p[181]*y[48] + p[183]*y[49] + p[187]*y[51] + p[123]*y[16] + p[193]*y[54] + p[199]*y[57] + p[203]*y[59] + p[201]*y[58] + p[100]*y[4] + p[137]*y[25] + p[113]*y[11] + p[170]*y[42] + p[172]*y[43] + p[174]*y[44] + p[176]*y[45] + p[178]*y[46] + p[155]*y[34] + p[115]*y[12] + p[168]*y[41] + p[110]*y[9] + p[141]*y[27] + p[143]*y[28] + p[151]*y[32] + p[91]*y[36] + p[149]*y[31] + p[147]*y[30] + p[157]*y[35] + p[164]*y[39] + p[135]*y[24] + p[139]*y[26] + p[160]*y[37] + p[131]*y[22] + p[133]*y[23] + p[93]*y[0] + p[127]*y[19] + p[166]*y[40] + p[89]*y[10] + p[153]*y[33] + p[117]*y[13] + p[108]*y[8] + p[106]*y[7] + p[195]*y[55] + p[197]*y[56] + p[162]*y[38] + p[189]*y[52] + p[191]*y[53] + p[96]*y[2] + p[119]*y[14] + p[121]*y[15] + p[102]*y[5] + p[104]*y[6] + p[98]*y[3] + p[125]*y[17] + p[94]*y[1] + p[145]*y[29];
ydot[21] = p[20]*y[19] + p[25]*y[23] - p[21]*y[2]*y[21] - p[23]*y[21]*y[3] - p[88]*y[21] + p[22]*y[22] + p[24]*y[23] + p[130]*y[18];
ydot[22] = -p[131]*y[22] + p[21]*y[2]*y[21] - p[22]*y[22] + p[132]*y[18];
ydot[23] = -p[25]*y[23] - p[133]*y[23] + p[23]*y[21]*y[3] - p[24]*y[23] + p[134]*y[18];
ydot[24] = p[45]*y[27] + p[25]*y[23] + p[30]*y[26] + p[36]*y[39] - p[135]*y[24] - p[43]*y[24]*y[9] - p[26]*y[24]*y[4] - p[28]*y[24]*y[5] + p[44]*y[27] + p[27]*y[25] + p[29]*y[26] + p[136]*y[18];
ydot[25] = -p[137]*y[25] + p[26]*y[24]*y[4] - p[27]*y[25] + p[138]*y[18];
ydot[26] = -p[30]*y[26] - p[139]*y[26] + p[28]*y[24]*y[5] - p[29]*y[26] + p[140]*y[18];
ydot[27] = -p[45]*y[27] - p[141]*y[27] + p[43]*y[24]*y[9] - p[44]*y[27] + p[142]*y[18];
ydot[28] = p[81]*y[58] + p[30]*y[26] + p[33]*y[30] + p[42]*y[32] - p[143]*y[28] - p[31]*y[28]*y[6] - p[37]*y[28]*y[7] - p[40]*y[28]*y[8] + p[32]*y[30] + p[38]*y[31] + p[41]*y[32] + p[144]*y[18];
ydot[29] = p[45]*y[27] + p[50]*y[34] - p[145]*y[29] - p[46]*y[10]*y[29] - p[48]*y[11]*y[29] + p[47]*y[33] + p[49]*y[34] + p[146]*y[18];
ydot[30] = -p[33]*y[30] - p[147]*y[30] + p[31]*y[28]*y[6] - p[32]*y[30] + p[148]*y[18];
ydot[31] = -p[39]*y[31] - p[149]*y[31] + p[37]*y[28]*y[7] - p[38]*y[31] + p[150]*y[18];
ydot[32] = -p[42]*y[32] - p[151]*y[32] + p[40]*y[28]*y[8] - p[41]*y[32] + p[152]*y[18];
ydot[33] = -p[153]*y[33] + p[46]*y[10]*y[29] - p[47]*y[33] + p[154]*y[18];
ydot[34] = -p[50]*y[34] - p[155]*y[34] + p[48]*y[11]*y[29] - p[49]*y[34] + p[156]*y[18];
ydot[35] = p[33]*y[30] + p[36]*y[39] - p[157]*y[35] - p[34]*y[3]*y[35] + p[35]*y[39] + p[158]*y[18];
ydot[36] = p[39]*y[31] - p[91]*y[36] + p[159]*y[18];
ydot[37] = p[42]*y[32] - p[160]*y[37] + p[161]*y[18];
ydot[38] = p[50]*y[34] - p[162]*y[38] - p[51]*y[38] + p[52]*y[40] + p[163]*y[18];
ydot[39] = -p[36]*y[39] - p[164]*y[39] + p[34]*y[3]*y[35] - p[35]*y[39] + p[165]*y[18];
ydot[40] = -p[166]*y[40] + p[51]*y[38] - p[53]*y[12]*y[40] - 1.0*p[55]*pow(y[40], 2) - p[52]*y[40] + p[54]*y[41] + 2*p[56]*y[42] + p[167]*y[18];
ydot[41] = -p[168]*y[41] + p[53]*y[12]*y[40] - p[54]*y[41] + p[169]*y[18];
ydot[42] = -p[170]*y[42] + 0.5*p[55]*pow(y[40], 2) - p[57]*y[12]*y[42] - 1.0*p[59]*pow(y[42], 2) - p[56]*y[42] + p[58]*y[43] + 2*p[60]*y[44] + p[171]*y[18];
ydot[43] = -p[172]*y[43] + p[57]*y[12]*y[42] - p[58]*y[43] + p[173]*y[18];
ydot[44] = -p[174]*y[44] + 0.5*p[59]*pow(y[42], 2) - p[61]*y[12]*y[44] - p[63]*y[13]*y[44] - p[60]*y[44] + p[62]*y[45] + p[64]*y[46] + p[175]*y[18];
ydot[45] = -p[176]*y[45] + p[61]*y[12]*y[44] - p[62]*y[45] + p[177]*y[18];
ydot[46] = -p[65]*y[46] - p[178]*y[46] + p[63]*y[13]*y[44] - p[64]*y[46] + p[179]*y[18];
ydot[47] = p[65]*y[46] + p[68]*y[48] + p[71]*y[49] - p[90]*y[47] - p[66]*y[14]*y[47] - p[69]*y[15]*y[47] + p[67]*y[48] + p[70]*y[49] + p[180]*y[18];
ydot[48] = -p[68]*y[48] - p[181]*y[48] + p[66]*y[14]*y[47] - p[67]*y[48] + p[182]*y[18];
ydot[49] = -p[71]*y[49] - p[183]*y[49] + p[69]*y[15]*y[47] - p[70]*y[49] + p[184]*y[18];
ydot[50] = p[68]*y[48] - p[185]*y[50] - p[72]*y[50] + p[73]*y[52] + p[186]*y[18];
ydot[51] = p[71]*y[49] - p[187]*y[51] - p[82]*y[51] + p[83]*y[53] + p[188]*y[18];
ydot[52] = p[76]*y[54] - p[189]*y[52] + p[72]*y[50] - p[74]*y[16]*y[52] - p[73]*y[52] + p[75]*y[54] + p[190]*y[18];
ydot[53] = -p[191]*y[53] + p[82]*y[51] - p[86]*y[53]*y[7] - p[83]*y[53] + p[87]*y[55] + p[192]*y[18];
ydot[54] = -p[76]*y[54] - p[193]*y[54] + p[74]*y[16]*y[52] - p[75]*y[54] + p[194]*y[18];
ydot[55] = -p[195]*y[55] + p[86]*y[53]*y[7] - p[87]*y[55] + p[196]*y[18];
ydot[56] = p[76]*y[54] - p[197]*y[56] - p[77]*y[17]*y[56] + p[78]*y[57] + p[198]*y[18];
ydot[57] = p[81]*y[58] - p[199]*y[57] + p[77]*y[17]*y[56] - p[79]*y[5]*y[57] - p[84]*y[57]*y[7] - p[78]*y[57] + p[80]*y[58] + p[85]*y[59] + p[200]*y[18];
ydot[58] = -p[81]*y[58] - p[201]*y[58] + p[79]*y[5]*y[57] - p[80]*y[58] + p[202]*y[18];
ydot[59] = -p[203]*y[59] + p[84]*y[57]*y[7] - p[85]*y[59] + p[204]*y[18];
''', ['ydot', 't', 'y', 'p'])
return ydot
else:
def ode_rhs(self, t, y, p):
ydot = self.ydot
ydot[0] = -p[93]*y[0] - p[18]*y[0]*y[1] + p[88]*y[21] + p[19]*y[19] + p[92]*y[18]
ydot[1] = -p[94]*y[1] - p[18]*y[0]*y[1] + p[88]*y[21] + p[19]*y[19] + p[95]*y[18]
ydot[2] = -p[96]*y[2] - p[21]*y[2]*y[21] + p[22]*y[22] + p[97]*y[18]
ydot[3] = -p[98]*y[3] - p[23]*y[21]*y[3] - p[34]*y[3]*y[35] + p[24]*y[23] + p[35]*y[39] + p[99]*y[18]
ydot[4] = -p[100]*y[4] - p[26]*y[24]*y[4] + p[27]*y[25] + p[101]*y[18]
ydot[5] = -p[102]*y[5] - p[79]*y[5]*y[57] - p[28]*y[24]*y[5] + p[80]*y[58] + p[29]*y[26] + p[103]*y[18]
ydot[6] = -p[104]*y[6] - p[31]*y[28]*y[6] + p[32]*y[30] + p[105]*y[18]
ydot[7] = p[39]*y[31] - p[106]*y[7] - p[84]*y[57]*y[7] - p[86]*y[53]*y[7] - p[37]*y[28]*y[7] + p[85]*y[59] + p[87]*y[55] + p[38]*y[31] + p[107]*y[18]
ydot[8] = -p[108]*y[8] - p[40]*y[28]*y[8] + p[41]*y[32] + p[109]*y[18]
ydot[9] = -p[110]*y[9] - p[43]*y[24]*y[9] + p[44]*y[27] + p[111]*y[18]
ydot[10] = -p[89]*y[10] - p[46]*y[10]*y[29] + p[47]*y[33] + p[112]*y[18]
ydot[11] = -p[113]*y[11] - p[48]*y[11]*y[29] + p[49]*y[34] + p[114]*y[18]
ydot[12] = -p[115]*y[12] - p[53]*y[12]*y[40] - p[57]*y[12]*y[42] - p[61]*y[12]*y[44] + p[54]*y[41] + p[58]*y[43] + p[62]*y[45] + p[116]*y[18]
ydot[13] = p[90]*y[47] - p[117]*y[13] - p[63]*y[13]*y[44] + p[64]*y[46] + p[118]*y[18]
ydot[14] = -p[119]*y[14] - p[66]*y[14]*y[47] + p[67]*y[48] + p[120]*y[18]
ydot[15] = -p[121]*y[15] - p[69]*y[15]*y[47] + p[70]*y[49] + p[122]*y[18]
ydot[16] = -p[123]*y[16] - p[74]*y[16]*y[52] + p[75]*y[54] + p[124]*y[18]
ydot[17] = -p[125]*y[17] - p[77]*y[17]*y[56] + p[78]*y[57] + p[126]*y[18]
ydot[18] = 0
ydot[19] = -p[20]*y[19] - p[127]*y[19] + p[18]*y[0]*y[1] - p[19]*y[19] + p[128]*y[18]
ydot[20] = p[185]*y[50] + p[181]*y[48] + p[183]*y[49] + p[187]*y[51] + p[123]*y[16] + p[193]*y[54] + p[199]*y[57] + p[203]*y[59] + p[201]*y[58] + p[100]*y[4] + p[137]*y[25] + p[113]*y[11] + p[170]*y[42] + p[172]*y[43] + p[174]*y[44] + p[176]*y[45] + p[178]*y[46] + p[155]*y[34] + p[115]*y[12] + p[168]*y[41] + p[110]*y[9] + p[141]*y[27] + p[143]*y[28] + p[151]*y[32] + p[91]*y[36] + p[149]*y[31] + p[147]*y[30] + p[157]*y[35] + p[164]*y[39] + p[135]*y[24] + p[139]*y[26] + p[160]*y[37] + p[131]*y[22] + p[133]*y[23] + p[93]*y[0] + p[127]*y[19] + p[166]*y[40] + p[89]*y[10] + p[153]*y[33] + p[117]*y[13] + p[108]*y[8] + p[106]*y[7] + p[195]*y[55] + p[197]*y[56] + p[162]*y[38] + p[189]*y[52] + p[191]*y[53] + p[96]*y[2] + p[119]*y[14] + p[121]*y[15] + p[102]*y[5] + p[104]*y[6] + p[98]*y[3] + p[125]*y[17] + p[94]*y[1] + p[145]*y[29]
ydot[21] = p[20]*y[19] + p[25]*y[23] - p[21]*y[2]*y[21] - p[23]*y[21]*y[3] - p[88]*y[21] + p[22]*y[22] + p[24]*y[23] + p[130]*y[18]
ydot[22] = -p[131]*y[22] + p[21]*y[2]*y[21] - p[22]*y[22] + p[132]*y[18]
ydot[23] = -p[25]*y[23] - p[133]*y[23] + p[23]*y[21]*y[3] - p[24]*y[23] + p[134]*y[18]
ydot[24] = p[45]*y[27] + p[25]*y[23] + p[30]*y[26] + p[36]*y[39] - p[135]*y[24] - p[43]*y[24]*y[9] - p[26]*y[24]*y[4] - p[28]*y[24]*y[5] + p[44]*y[27] + p[27]*y[25] + p[29]*y[26] + p[136]*y[18]
ydot[25] = -p[137]*y[25] + p[26]*y[24]*y[4] - p[27]*y[25] + p[138]*y[18]
ydot[26] = -p[30]*y[26] - p[139]*y[26] + p[28]*y[24]*y[5] - p[29]*y[26] + p[140]*y[18]
ydot[27] = -p[45]*y[27] - p[141]*y[27] + p[43]*y[24]*y[9] - p[44]*y[27] + p[142]*y[18]
ydot[28] = p[81]*y[58] + p[30]*y[26] + p[33]*y[30] + p[42]*y[32] - p[143]*y[28] - p[31]*y[28]*y[6] - p[37]*y[28]*y[7] - p[40]*y[28]*y[8] + p[32]*y[30] + p[38]*y[31] + p[41]*y[32] + p[144]*y[18]
ydot[29] = p[45]*y[27] + p[50]*y[34] - p[145]*y[29] - p[46]*y[10]*y[29] - p[48]*y[11]*y[29] + p[47]*y[33] + p[49]*y[34] + p[146]*y[18]
ydot[30] = -p[33]*y[30] - p[147]*y[30] + p[31]*y[28]*y[6] - p[32]*y[30] + p[148]*y[18]
ydot[31] = -p[39]*y[31] - p[149]*y[31] + p[37]*y[28]*y[7] - p[38]*y[31] + p[150]*y[18]
ydot[32] = -p[42]*y[32] - p[151]*y[32] + p[40]*y[28]*y[8] - p[41]*y[32] + p[152]*y[18]
ydot[33] = -p[153]*y[33] + p[46]*y[10]*y[29] - p[47]*y[33] + p[154]*y[18]
ydot[34] = -p[50]*y[34] - p[155]*y[34] + p[48]*y[11]*y[29] - p[49]*y[34] + p[156]*y[18]
ydot[35] = p[33]*y[30] + p[36]*y[39] - p[157]*y[35] - p[34]*y[3]*y[35] + p[35]*y[39] + p[158]*y[18]
ydot[36] = p[39]*y[31] - p[91]*y[36] + p[159]*y[18]
ydot[37] = p[42]*y[32] - p[160]*y[37] + p[161]*y[18]
ydot[38] = p[50]*y[34] - p[162]*y[38] - p[51]*y[38] + p[52]*y[40] + p[163]*y[18]
ydot[39] = -p[36]*y[39] - p[164]*y[39] + p[34]*y[3]*y[35] - p[35]*y[39] + p[165]*y[18]
ydot[40] = -p[166]*y[40] + p[51]*y[38] - p[53]*y[12]*y[40] - 1.0*p[55]*pow(y[40], 2) - p[52]*y[40] + p[54]*y[41] + 2*p[56]*y[42] + p[167]*y[18]
ydot[41] = -p[168]*y[41] + p[53]*y[12]*y[40] - p[54]*y[41] + p[169]*y[18]
ydot[42] = -p[170]*y[42] + 0.5*p[55]*pow(y[40], 2) - p[57]*y[12]*y[42] - 1.0*p[59]*pow(y[42], 2) - p[56]*y[42] + p[58]*y[43] + 2*p[60]*y[44] + p[171]*y[18]
ydot[43] = -p[172]*y[43] + p[57]*y[12]*y[42] - p[58]*y[43] + p[173]*y[18]
ydot[44] = -p[174]*y[44] + 0.5*p[59]*pow(y[42], 2) - p[61]*y[12]*y[44] - p[63]*y[13]*y[44] - p[60]*y[44] + p[62]*y[45] + p[64]*y[46] + p[175]*y[18]
ydot[45] = -p[176]*y[45] + p[61]*y[12]*y[44] - p[62]*y[45] + p[177]*y[18]
ydot[46] = -p[65]*y[46] - p[178]*y[46] + p[63]*y[13]*y[44] - p[64]*y[46] + p[179]*y[18]
ydot[47] = p[65]*y[46] + p[68]*y[48] + p[71]*y[49] - p[90]*y[47] - p[66]*y[14]*y[47] - p[69]*y[15]*y[47] + p[67]*y[48] + p[70]*y[49] + p[180]*y[18]
ydot[48] = -p[68]*y[48] - p[181]*y[48] + p[66]*y[14]*y[47] - p[67]*y[48] + p[182]*y[18]
ydot[49] = -p[71]*y[49] - p[183]*y[49] + p[69]*y[15]*y[47] - p[70]*y[49] + p[184]*y[18]
ydot[50] = p[68]*y[48] - p[185]*y[50] - p[72]*y[50] + p[73]*y[52] + p[186]*y[18]
ydot[51] = p[71]*y[49] - p[187]*y[51] - p[82]*y[51] + p[83]*y[53] + p[188]*y[18]
ydot[52] = p[76]*y[54] - p[189]*y[52] + p[72]*y[50] - p[74]*y[16]*y[52] - p[73]*y[52] + p[75]*y[54] + p[190]*y[18]
ydot[53] = -p[191]*y[53] + p[82]*y[51] - p[86]*y[53]*y[7] - p[83]*y[53] + p[87]*y[55] + p[192]*y[18]
ydot[54] = -p[76]*y[54] - p[193]*y[54] + p[74]*y[16]*y[52] - p[75]*y[54] + p[194]*y[18]
ydot[55] = -p[195]*y[55] + p[86]*y[53]*y[7] - p[87]*y[55] + p[196]*y[18]
ydot[56] = p[76]*y[54] - p[197]*y[56] - p[77]*y[17]*y[56] + p[78]*y[57] + p[198]*y[18]
ydot[57] = p[81]*y[58] - p[199]*y[57] + p[77]*y[17]*y[56] - p[79]*y[5]*y[57] - p[84]*y[57]*y[7] - p[78]*y[57] + p[80]*y[58] + p[85]*y[59] + p[200]*y[18]
ydot[58] = -p[81]*y[58] - p[201]*y[58] + p[79]*y[5]*y[57] - p[80]*y[58] + p[202]*y[18]
ydot[59] = -p[203]*y[59] + p[84]*y[57]*y[7] - p[85]*y[59] + p[204]*y[18]
return ydot
def simulate(self, tspan, param_values=None, view=False):
if param_values is not None:
# accept vector of parameter values as an argument
if len(param_values) != len(self.parameters):
raise Exception("param_values must have length %d" % len(self.parameters))
self.sim_param_values[:] = param_values
else:
# create parameter vector from the values in the model
self.sim_param_values[:] = [p.value for p in self.parameters]
self.y0.fill(0)
for ic in self.initial_conditions:
self.y0[ic.species_index] = self.sim_param_values[ic.param_index]
if self.y is None or len(tspan) != len(self.y):
self.y = numpy.empty((len(tspan), len(self.y0)))
if len(self.observables):
self.yobs = numpy.ndarray(len(tspan), zip((obs.name for obs in self.observables),
itertools.repeat(float)))
else:
self.yobs = numpy.ndarray((len(tspan), 0))
self.yobs_view = self.yobs.view(float).reshape(len(self.yobs), -1)
# perform the actual integration
self.integrator.set_initial_value(self.y0, tspan[0])
self.integrator.set_f_params(self.sim_param_values)
self.y[0] = self.y0
t = 1
while self.integrator.successful() and self.integrator.t < tspan[-1]:
self.y[t] = self.integrator.integrate(tspan[t])
t += 1
for i, obs in enumerate(self.observables):
self.yobs_view[:, i] = \
(self.y[:, obs.species] * obs.coefficients).sum(1)
if view:
y_out = self.y.view()
yobs_out = self.yobs.view()
for a in y_out, yobs_out:
a.flags.writeable = False
else:
y_out = self.y.copy()
yobs_out = self.yobs.copy()
return (y_out, yobs_out)
```
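A minimal usage sketch for the generated model above; the module name `earm_1_3_standalone`, the time span, and the legacy Python 2 / scipy.weave environment are assumptions rather than part of the original file.
```python
# Illustrative only: assumes the file above is importable as
# earm_1_3_standalone in the legacy Python 2 / scipy.weave environment
# it targets; the time span is arbitrary.
import numpy
from earm_1_3_standalone import Model

model = Model()
tspan = numpy.linspace(0, 20000, 1000)   # seconds, chosen for illustration
y, yobs = model.simulate(tspan)          # species trajectories and observables
print(yobs['CPARP_total'][-1])           # final cleaved-PARP amount
```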
#### File: examples/earm/thermodynamic_integration.py
```python
from __future__ import division
import bayessb
import numpy as np
import scipy.integrate
import matplotlib.pyplot as plt
import itertools
import multiprocessing
import sys
from fit_1_3_standalone import build_opts
def run_chain(args):
temperature, sample = args
# Start with master option set, then override temperature and seed
opts = master_opts.copy()
opts.thermo_temp = temperature
opts.seed = sample
# Build an MCMC object and run it
mcmc = bayessb.MCMC(opts)
mcmc.run()
# Return likelihoods of accepted moves in the latter half of the walk
num_likelihoods = mcmc.acceptance // 2
return mcmc.likelihoods[mcmc.accepts][num_likelihoods:]
def print_progress_bar(fraction):
percentage = fraction * 100
bar_size = int(fraction * 50)
sys.stdout.write('%3d%% [%-51s]\r' % (percentage, '=' * bar_size + '>'))
sys.stdout.flush()
if __name__ == '__main__':
print "Performing thermodynamic integration:"
# Build master option set
master_opts = build_opts()
# Don't print anything out, as we'll have many simultaneous workers
master_opts.step_fn = None
# Choose the number of temperatures to sample and chains to run at each
num_temps = 8
num_chains = 3
# Sample temperatures on a log scale, from 1e-3 to 1
temperatures = np.logspace(-3, 0, num_temps)
# Produce tuples of input arguments to run_chain
inputs = itertools.product(temperatures, xrange(num_chains))
# Launch a parallel processing pool to run the chains
pool = multiprocessing.Pool()
result = pool.map_async(run_chain, inputs)
# Print a progress bar while the pool is still working
num_chunks = result._number_left
while not result.ready():
try:
outputs = result.get(timeout=1)
except multiprocessing.TimeoutError:
pass
except KeyboardInterrupt as e:
pool.terminate()
raise
print_progress_bar((num_chunks - result._number_left) / num_chunks),
print
pool.close()
pool.join()
# Calculate mean of likelihoods from all chains at each temperature, and
# standard deviation on the means from each chain at each temperature
likelihood_means = np.empty_like(temperatures)
likelihood_stds = np.empty_like(temperatures)
for i, temperature in enumerate(temperatures):
likelihood_sets = []
for c in xrange(num_chains):
# Extract the right likelihood vectors from the pool output,
# negating the values to obtain positive log-likelihood values
likelihood_sets.append(-1 * outputs[i * num_chains + c])
# Mean of all likelihood values
likelihood_means[i] = np.mean(np.hstack(likelihood_sets))
# Standard deviation on the means
likelihood_stds[i] = np.std(map(np.mean, likelihood_sets))
# Produce a number of sampled trajectories from the means and stds
num_samples = 1000
sample_iter = itertools.imap(np.random.normal, likelihood_means,
likelihood_stds, itertools.repeat(num_samples))
# FIXME this needlessly creates an intermediate list
samples = np.array(list(sample_iter)).T
# Integrate sampled trajectories to obtain log-evidence i.e.
# log(P(Data|Model))
log_evidences = scipy.integrate.simps(samples, temperatures)
# Plot histogram of evidence values
counts, bins, _ = plt.hist(np.exp(log_evidences), bins=40)
print 'Histogram of evidence values:'
for b, c in zip(bins, counts):
print '%-8.3g: %d' % (b, c)
plt.xlabel('Evidence')
plt.ylabel('Count')
plt.show()
``` |
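The script above estimates the log-evidence by thermodynamic integration, i.e. by integrating the mean log-likelihood over the temperature grid with Simpson's rule; the toy sketch below isolates just that step with made-up numbers.
```python
# Toy sketch of the integration step only; the likelihood values are made up.
import numpy as np
import scipy.integrate

temperatures = np.logspace(-3, 0, 8)        # same kind of grid as the script
mean_loglik = -50.0 + 45.0 * temperatures   # placeholder E_beta[log L] values
log_evidence = scipy.integrate.simps(mean_loglik, temperatures)  # same call as the script
print(log_evidence)
```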
{
"source": "johnbachman/belpy",
"score": 3
} |
#### File: assemblers/cx/hub_layout.py
```python
import json
import math
import random
import networkx
from collections import defaultdict
def get_aspect(cx, aspect_name):
"""Return an aspect given the name of the aspect"""
if isinstance(cx, dict):
return cx.get(aspect_name)
for entry in cx:
if list(entry.keys())[0] == aspect_name:
return entry[aspect_name]
def edge_type_to_class(edge_type):
"""Return the edge class for layout purposes based on the edge type"""
edge_type = edge_type.lower()
if 'amount' in edge_type:
return 'amount'
if edge_type in ('activation', 'inhibition'):
return 'activity'
if edge_type == 'complex':
return 'complex'
else:
return 'modification'
def classify_nodes(graph, hub: int):
"""Classify each node based on its type and relationship to the hub."""
node_stats = defaultdict(lambda: defaultdict(list))
for u, v, data in graph.edges(data=True):
# This means the node is downstream of the hub
if hub == u:
h, o = u, v
if data['i'] != 'complex':
node_stats[o]['up'].append(-1)
else:
node_stats[o]['up'].append(0)
# This means the node is upstream of the hub
elif hub == v:
h, o = v, u
if data['i'] != 'complex':
node_stats[o]['up'].append(1)
else:
node_stats[o]['up'].append(0)
else:
continue
node_stats[o]['interaction'].append(edge_type_to_class(data['i']))
node_classes = {}
for node_id, stats in node_stats.items():
up = max(set(stats['up']), key=stats['up'].count)
# Special case: if up is not 0 then we should exclude complexes
# from the edge_type states so that we don't end up with
# (-1, complex, ...) or (1, complex, ...) as the node class
interactions = [i for i in stats['interaction'] if
not (up != 0 and i == 'complex')]
edge_type = max(set(interactions), key=interactions.count)
node_type = graph.nodes[node_id]['type']
node_classes[node_id] = (up, edge_type, node_type)
return node_classes
def get_attributes(aspect, id):
"""Return the attributes pointing to a given ID in a given aspect."""
attributes = {}
for entry in aspect:
if entry['po'] == id:
attributes[entry['n']] = entry['v']
return attributes
def cx_to_networkx(cx):
"""Return a MultiDiGraph representation of a CX network."""
graph = networkx.MultiDiGraph()
for node_entry in get_aspect(cx, 'nodes'):
id = node_entry['@id']
attrs = get_attributes(get_aspect(cx, 'nodeAttributes'), id)
attrs['n'] = node_entry['n']
graph.add_node(id, **attrs)
for edge_entry in get_aspect(cx, 'edges'):
id = edge_entry['@id']
attrs = get_attributes(get_aspect(cx, 'edgeAttributes'), id)
attrs['i'] = edge_entry['i']
graph.add_edge(edge_entry['s'], edge_entry['t'], key=id, **attrs)
return graph
def get_quadrant_from_class(node_class):
"""Return the ID of the segment of the plane corresponding to a class."""
up, edge_type, _ = node_class
if up == 0:
return 0 if random.random() < 0.5 else 7
mappings = {(-1, 'modification'): 1,
(-1, 'amount'): 2,
(-1, 'activity'): 3,
(1, 'activity'): 4,
(1, 'amount'): 5,
(1, 'modification'): 6}
return mappings[(up, edge_type)]
def get_coordinates(node_class):
"""Generate coordinates for a node in a given class."""
quadrant_size = (2 * math.pi / 8.0)
quadrant = get_quadrant_from_class(node_class)
begin_angle = quadrant_size * quadrant
r = 200 + 800*random.random()
alpha = begin_angle + random.random() * quadrant_size
x = r * math.cos(alpha)
y = r * math.sin(alpha)
return x, y
def get_layout_aspect(hub, node_classes):
"""Get the full layout aspect with coordinates for each node."""
aspect = [{'node': hub, 'x': 0.0, 'y': 0.0}]
for node, node_class in node_classes.items():
if node == hub:
continue
x, y = get_coordinates(node_class)
aspect.append({'node': node, 'x': x, 'y': y})
return aspect
def get_node_by_name(graph, name):
"""Return a node ID given its name."""
for id, attrs in graph.nodes(data=True):
if attrs['n'] == name:
return id
def add_semantic_hub_layout(cx, hub: str):
"""Attach a layout aspect to a CX network given a hub node."""
graph = cx_to_networkx(cx)
hub_node = get_node_by_name(graph, hub)
node_classes = classify_nodes(graph, hub_node)
layout_aspect = get_layout_aspect(hub_node, node_classes)
cx['cartesianLayout'] = layout_aspect
if __name__ == '__main__':
with open('CDK13.cx', 'r') as fh:
cx = json.load(fh)
add_semantic_hub_layout(cx, 'CDK13')
```
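A sketch extending the `__main__` usage above to write the laid-out network back out; the file names are placeholders and the import path assumes the file lives at `indra/assemblers/cx/hub_layout.py`.
```python
# Illustrative only: file names are placeholders and the module path is assumed.
import json
from indra.assemblers.cx.hub_layout import add_semantic_hub_layout

with open('CDK13.cx', 'r') as fh:
    cx = json.load(fh)
add_semantic_hub_layout(cx, 'CDK13')      # attaches a cartesianLayout aspect
with open('CDK13_with_layout.cx', 'w') as fh:
    json.dump(cx, fh, indent=1)
```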
#### File: assemblers/pysb/export.py
```python
import logging
import networkx
from pysb.export import export
logger = logging.getLogger(__name__)
def export_sbgn(model):
"""Return an SBGN model string corresponding to the PySB model.
This function first calls generate_equations on the PySB model to obtain
a reaction network (i.e. individual species, reactions). It then iterates
over each reaction and and instantiates its reactants, products, and the
process itself as SBGN glyphs and arcs.
Parameters
----------
model : pysb.core.Model
A PySB model to be exported into SBGN
Returns
-------
sbgn_str : str
An SBGN model as string
"""
import lxml.etree
import lxml.builder
from pysb.bng import generate_equations
from indra.assemblers.sbgn import SBGNAssembler
logger.info('Generating reaction network with BNG for SBGN export. ' +
'This could take a long time.')
generate_equations(model)
sa = SBGNAssembler()
glyphs = {}
for idx, species in enumerate(model.species):
glyph = sa._glyph_for_complex_pattern(species)
if glyph is None:
continue
sa._map.append(glyph)
glyphs[idx] = glyph
for reaction in model.reactions:
# Get all the reactions / products / controllers of the reaction
reactants = set(reaction['reactants']) - set(reaction['products'])
products = set(reaction['products']) - set(reaction['reactants'])
controllers = set(reaction['reactants']) & set(reaction['products'])
# Add glyph for reaction
process_glyph = sa._process_glyph('process')
# Connect reactants with arcs
if not reactants:
glyph_id = sa._none_glyph()
sa._arc('consumption', glyph_id, process_glyph)
else:
for r in reactants:
glyph = glyphs.get(r)
if glyph is None:
glyph_id = sa._none_glyph()
else:
glyph_id = glyph.attrib['id']
sa._arc('consumption', glyph_id, process_glyph)
# Connect products with arcs
if not products:
glyph_id = sa._none_glyph()
sa._arc('production', process_glyph, glyph_id)
else:
for p in products:
glyph = glyphs.get(p)
if glyph is None:
glyph_id = sa._none_glyph()
else:
glyph_id = glyph.attrib['id']
sa._arc('production', process_glyph, glyph_id)
# Connect controllers with arcs
for c in controllers:
glyph = glyphs[c]
sa._arc('catalysis', glyph.attrib['id'], process_glyph)
sbgn_str = sa.print_model().decode('utf-8')
return sbgn_str
def export_kappa_im(model, fname=None):
"""Return a networkx graph representing the model's Kappa influence map.
Parameters
----------
model : pysb.core.Model
A PySB model to be exported into a Kappa IM.
fname : Optional[str]
A file name, typically with .png or .pdf extension in which
the IM is rendered using pygraphviz.
Returns
-------
networkx.MultiDiGraph
A graph object representing the influence map.
"""
from .kappa_util import im_json_to_graph
kappa = _prepare_kappa(model)
imap = kappa.analyses_influence_map()
im = im_json_to_graph(imap)
for param in model.parameters:
try:
im.remove_node(param.name)
        except Exception:
            pass
if fname:
agraph = networkx.nx_agraph.to_agraph(im)
agraph.draw(fname, prog='dot')
return im
def export_kappa_cm(model, fname=None):
"""Return a networkx graph representing the model's Kappa contact map.
Parameters
----------
model : pysb.core.Model
A PySB model to be exported into a Kappa CM.
fname : Optional[str]
A file name, typically with .png or .pdf extension in which
the CM is rendered using pygraphviz.
Returns
-------
    pygraphviz.AGraph
A graph object representing the contact map.
"""
from .kappa_util import cm_json_to_graph
kappa = _prepare_kappa(model)
cmap = kappa.analyses_contact_map()
cm = cm_json_to_graph(cmap)
if fname:
cm.draw(fname, prog='dot')
return cm
def export_cm_network(model):
"""Return a networkx graph of the model's Kappa contact map.
Parameters
----------
model : pysb.Model
A PySB model whose Kappa contact graph is to be generated.
Returns
-------
networkx.Graph
An undirected networkx graph representing the contact map.
"""
from .kappa_util import cm_json_to_networkx
kappa = _prepare_kappa(model)
cmap = kappa.analyses_contact_map()
g = cm_json_to_networkx(cmap)
return g
def _prepare_kappa(model):
"""Return a Kappa STD with the model loaded."""
import kappy
kappa = kappy.KappaStd()
model_str = export(model, 'kappa')
kappa.add_model_string(model_str)
kappa.project_parse()
return kappa
```
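A hedged usage sketch for the exporters above; it borrows a small model shipped with PySB, assumes the module path `indra.assemblers.pysb.export`, and needs BioNetGen (for SBGN) plus kappy/pygraphviz (for the Kappa maps) installed.
```python
# Illustrative only: the example model and output file name are arbitrary,
# and the heavy dependencies (BioNetGen, kappy, pygraphviz) must be present.
from pysb.examples.robertson import model
from indra.assemblers.pysb.export import export_sbgn, export_kappa_im

sbgn_xml = export_sbgn(model)            # SBGN-ML document as a string
im = export_kappa_im(model, 'im.pdf')    # networkx influence map, also rendered to PDF
print(len(sbgn_xml), im.number_of_nodes())
```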
#### File: indra/databases/efo_client.py
```python
from indra.databases.obo_client import OboClient
_client = OboClient(prefix='efo')
def get_efo_name_from_efo_id(efo_id):
"""Return the EFO name corresponding to the given EFO ID.
Parameters
----------
efo_id : str
The EFO identifier to be converted. Example: "0005557"
Returns
-------
efo_name : str
The EFO name corresponding to the given EFO identifier.
"""
return _client.get_name_from_id(efo_id)
def get_efo_id_from_efo_name(efo_name):
"""Return the EFO identifier corresponding to the given EFO name.
Parameters
----------
efo_name : str
The EFO name to be converted. Example: "gum cancer"
Returns
-------
efo_id : str
The EFO identifier corresponding to the given EFO name.
"""
return _client.get_id_from_name(efo_name)
```
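A quick sketch of the two lookups above, reusing the example values from the docstrings.
```python
# Round-trip sketch using the docstring examples; the outputs depend on the
# EFO resource version bundled with INDRA.
from indra.databases import efo_client

print(efo_client.get_efo_name_from_efo_id('0005557'))
print(efo_client.get_efo_id_from_efo_name('gum cancer'))
```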
#### File: indra/databases/identifiers.py
```python
import re
import logging
from indra.resources import load_resource_json
logger = logging.getLogger(__name__)
identifiers_url = 'https://identifiers.org'
# These are just special cases of namespaces where the mapping from INDRA to
# identifiers.org is not a question of simple capitalization.
identifiers_mappings = {
'UP': 'uniprot',
'UPPRO': 'uniprot.chain',
'UPISO': 'uniprot.isoform',
'REFSEQ_PROT': 'refseq',
'PF': 'pfam',
'IP': 'interpro',
'ECCODE': 'ec-code',
'NONCODE': 'noncodev4.rna',
'LNCRNADB': 'rnacentral',
'MIRBASEM': 'mirbase.mature',
'EGID': 'ncbigene',
    'NCBI': 'ncbigene',
'HGNC_GROUP': 'hgnc.genefamily',
'LINCS': 'lincs.smallmolecule',
'PUBCHEM': 'pubchem.compound',
'CHEMBL': 'chembl.compound',
'CTD': 'ctd.chemical',
'CVCL': 'cellosaurus',
}
# These are namespaces used by INDRA that don't have corresponding
# identifiers.org entries
non_registry = {
'SDIS', 'SCHEM', 'SFAM', 'SCOMP', 'SIGNOR', 'HMS-LINCS', 'NXPFA',
'OMIM', 'LSPCI', 'UPLOC', 'BFO', 'CCLE'
}
# These are namespaces that can appear in db_refs but are actually not
# representing grounding.
non_grounding = {
'TEXT', 'TEXT_NORM'
}
# These are reverse mappings from identifiers.org namespaces to INDRA
# namespaces
identifiers_reverse = {
v: k for k, v in identifiers_mappings.items()
}
# We have to patch this one because it is ambiguous
identifiers_reverse['ncbigene'] = 'EGID'
# These are only the URLs that are strictly prefixes and not more complicated
# patterns. This is because some downstream code uses these as prefixes
# rather than arbitrary patterns.
url_prefixes = {
# Biology namespaces
'NXPFA': 'https://www.nextprot.org/term/FA-',
'SIGNOR': 'https://signor.uniroma2.it/relation_result.php?id=',
'LSPCI': 'https://labsyspharm.github.io/lspci/',
# WM namespaces
'UN': 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/',
'WDI': 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/',
'FAO': 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/',
'HUME': ('https://github.com/BBN-E/Hume/blob/master/resource/ontologies'
'/hume_ontology/'),
'CWMS': 'http://trips.ihmc.us/',
'SOFIA': 'http://cs.cmu.edu/sofia/',
}
def get_ns_from_identifiers(identifiers_ns):
""""Return a namespace compatible with INDRA from an identifiers namespace.
For example, this function can be used to map 'uniprot' to 'UP'.
Parameters
----------
identifiers_ns : str
An identifiers.org standard namespace.
Returns
-------
str or None
The namespace compatible with INDRA's internal representation or
None if the given namespace isn't an identifiers.org standard.
"""
reg_entry = identifiers_registry.get(identifiers_ns.lower())
if not reg_entry:
return None
mapping = identifiers_reverse.get(identifiers_ns.lower())
if mapping:
return mapping
else:
return identifiers_ns.upper()
def get_ns_id_from_identifiers(identifiers_ns, identifiers_id):
"""Return a namespace/ID pair compatible with INDRA from identifiers.
Parameters
----------
identifiers_ns : str
An identifiers.org standard namespace.
identifiers_id : str
An identifiers.org standard ID in the given namespace.
Returns
-------
(str, str)
A namespace and ID that are valid in INDRA db_refs.
"""
reg_entry = identifiers_registry.get(identifiers_ns.lower())
db_ns = get_ns_from_identifiers(identifiers_ns)
if db_ns is None:
return None, None
db_id = identifiers_id
if reg_entry['namespace_embedded']:
if not identifiers_id.startswith(identifiers_ns.upper()):
db_id = '%s:%s' % (identifiers_ns.upper(), identifiers_id)
return db_ns, db_id
def get_identifiers_ns(db_name):
"""Map an INDRA namespace to an identifiers.org namespace when possible.
Example: this can be used to map 'UP' to 'uniprot'.
Parameters
----------
db_name : str
An INDRA namespace to map to identifiers.org
Returns
-------
str or None
An identifiers.org namespace or None if not available.
"""
mapped_db_name = identifiers_mappings.get(db_name, db_name.lower())
if mapped_db_name not in identifiers_registry:
return None
return mapped_db_name
def get_url_prefix(db_name):
"""Return the URL prefix for a given namespace."""
identifiers_ns = get_identifiers_ns(db_name)
if identifiers_ns:
identifiers_entry = identifiers_registry.get(identifiers_ns)
if not identifiers_entry['namespace_embedded']:
return '%s/%s:' % (identifiers_url, identifiers_ns.lower())
else:
return '%s/' % identifiers_url
else:
if db_name in url_prefixes:
return url_prefixes[db_name]
return None
def get_identifiers_url(db_name, db_id):
"""Return an identifiers.org URL for a given database name and ID.
Parameters
----------
db_name : str
An internal database name: HGNC, UP, CHEBI, etc.
db_id : str
An identifier in the given database.
Returns
-------
url : str
An identifiers.org URL corresponding to the given database name and ID.
"""
# This is the case where we have a prefix that we can simply attach the
# db_id to to get the desired URL.
if db_name == 'CHEMBL':
db_id = ensure_chembl_prefix(db_id)
elif db_name == 'CHEBI':
db_id = ensure_chebi_prefix(db_id)
prefix = get_url_prefix(db_name)
if prefix:
return '%s%s' % (prefix, db_id)
# Otherwise, we have to handle some special cases
bel_scai_url = 'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/'
if db_name == 'LINCS':
if db_id.startswith('LSM-'): # Lincs Small Molecule ID
url = identifiers_url + '/lincs.smallmolecule:%s' % db_id
elif db_id.startswith('LCL-'): # Lincs Cell Line ID
url = identifiers_url + '/lincs.cell:%s' % db_id
else: # Assume LINCS Protein
url = identifiers_url + '/lincs.protein:%s' % db_id
elif db_name == 'CHEMBL':
if not db_id.startswith('CHEMBL'):
db_id = 'CHEMBL%s' % db_id
url = identifiers_url + '/chembl.compound:%s' % db_id
elif db_name == 'HMS-LINCS':
url = 'http://lincs.hms.harvard.edu/db/sm/%s-101' % db_id
# Special cases with no identifiers entry
elif db_name == 'SCHEM':
url = bel_scai_url + 'selventa-legacy-chemicals/' + \
'selventa-legacy-chemicals-20150601.belns'
elif db_name == 'SCOMP':
url = bel_scai_url + 'selventa-named-complexes/' + \
'selventa-named-complexes-20150601.belns'
elif db_name == 'SFAM':
url = bel_scai_url + 'selventa-protein-families/' + \
'selventa-protein-families-20150601.belns'
elif db_name == 'TEXT' or db_name == 'TEXT_NORM':
return None
else:
logger.warning('Unhandled name space %s' % db_name)
url = None
return url
def parse_identifiers_url(url):
"""Retrieve database name and ID given the URL.
Parameters
----------
url : str
An identifiers.org URL to parse.
Returns
-------
db_name : str
An internal database name: HGNC, UP, CHEBI, etc. corresponding to the
given URL.
db_id : str
An identifier in the database.
"""
# Try matching by string pattern
db_ns, db_id = None, None
url_pattern = \
r'(?:https?)://identifiers.org/([A-Za-z0-9.-]+)(/|:)([A-Za-z0-9:_.-]+)'
match = re.match(url_pattern, url)
if match is not None:
g = match.groups()
if len(g) == 3:
pattern_ns, pattern_id = g[0], g[2]
db_ns, db_id = get_ns_id_from_identifiers(pattern_ns, pattern_id)
if db_ns == 'HGNC':
if db_id.startswith('HGNC:'):
db_id = db_id[5:]
# If we got UP and UPPRO, return UPPRO
if db_ns == 'UP' and '#PRO_' in url:
db_ns = 'UPPRO'
db_id = url[url.find('PRO_'):]
if db_ns and db_id:
return db_ns, db_id
for ns, prefix in url_prefixes.items():
if url.startswith(prefix):
return ns, url[len(prefix):]
# Handle other special cases
for part in ['/lincs.smallmolecule', '/lincs.cell', '/lincs.protein']:
if part in url:
return 'LINCS', url[(url.find(part) + len(part) + 1):]
if '/chembl.compound' in url:
return 'CHEMBL', url[
(url.find('/chembl.compound') + len('/chembl.compound:')):]
if 'lincs.hms.harvard.edu' in url:
return 'HMS-LINCS', url[len('http://lincs.hms.harvard.edu/db/sm/'):-4]
if 'selventa-legacy-chemicals/' in url:
return 'SCHEM', None
if 'selventa-named-complexes/' in url:
return 'SCOMP', None
if 'selventa-protein-families/' in url:
return 'SFAM', None
else:
logger.warning('Could not parse URL %s' % url)
return None, None
def namespace_embedded(db_ns: str) -> bool:
"""Return true if this namespace requires IDs to have namespace embedded.
This function first maps the given namespace to an identifiers.org
namespace and then checks the registry to see if namespaces need
to be embedded in IDs. If yes, it returns True. If not, or the ID can't
be mapped to identifiers.org, it returns False
Parameters
----------
db_ns :
The namespace to check.
Returns
-------
:
True if the namespace is known to be embedded in IDs of this
namespace. False if unknown or known not to be embedded.
"""
identifiers_ns = get_identifiers_ns(db_ns)
if identifiers_ns:
identifiers_entry = identifiers_registry.get(identifiers_ns)
if identifiers_entry['namespace_embedded']:
return True
return False
def ensure_prefix_if_needed(db_ns: str, db_id: str) -> str:
"""Return an ID ensuring a namespace prefix if known to be needed.
Parameters
----------
db_ns :
The namespace associated with the identifier.
db_id :
The original identifier.
Returns
-------
:
The identifier with namespace embedded if needed.
"""
if namespace_embedded(db_ns):
return ensure_prefix(db_ns, db_id)
return db_id
def ensure_prefix(db_ns, db_id, with_colon=True):
"""Return a valid ID that has the given namespace embedded.
This is useful for namespaces such as CHEBI, GO or BTO that require
the namespace to be part of the ID. Note that this function always
ensures that the given db_ns is embedded in the ID and can handle the
    case when it's already present.
Parameters
----------
db_ns : str
A namespace.
db_id : str
An ID within that namespace which should have the namespace
as a prefix in it.
with_colon: Optional[bool]
If True, the namespace prefix is followed by a colon in the ID (e.g.,
CHEBI:12345). Otherwise, no colon is added (e.g., CHEMBL1234).
Default: True
"""
if db_id is None:
return None
colon = ':' if with_colon else ''
if not db_id.startswith(f'{db_ns}{colon}'):
return f'{db_ns}{colon}{db_id}'
return db_id
def ensure_chebi_prefix(chebi_id):
"""Return a valid CHEBI ID that has the appropriate CHEBI: prefix."""
return ensure_prefix('CHEBI', chebi_id)
def ensure_chembl_prefix(chembl_id):
"""Return a valid CHEMBL ID that has the appropriate CHEMBL prefix."""
return ensure_prefix('CHEMBL', chembl_id, with_colon=False)
identifiers_registry = load_resource_json('identifiers_patterns.json')
```
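A short sketch exercising the URL helpers above; the UniProt and ChEBI identifiers are illustrative examples, and the exact URL strings depend on the registry file loaded at the bottom of the module.
```python
# Illustrative only: the example identifiers (BRAF, GTP) are not from the file above.
from indra.databases.identifiers import (
    get_identifiers_url, parse_identifiers_url, ensure_chebi_prefix)

url = get_identifiers_url('UP', 'P15056')   # human BRAF
print(url)                                  # e.g. https://identifiers.org/uniprot:P15056
print(parse_identifiers_url(url))           # ('UP', 'P15056')
print(ensure_chebi_prefix('15996'))         # 'CHEBI:15996'
```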
#### File: sources/bel/api.py
```python
import zlib
import json
import pybel
import logging
import requests
from functools import lru_cache
import pybel.constants as pc
from pybel.io.sbel import add_sbel_row
from .processor import PybelProcessor
logger = logging.getLogger(__name__)
version = 'v1.0.0'
branch = 'https://github.com/cthoyt/selventa-knowledge/raw/' \
'{}/selventa_knowledge/{}'
large_corpus_url = branch.format(version, 'large_corpus.bel.nodelink.json.gz')
small_corpus_url = branch.format(version, 'small_corpus.bel.nodelink.json.gz')
def process_small_corpus():
"""Return PybelProcessor with statements from Selventa Small Corpus.
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
its statements attribute.
"""
return process_pybel_network(network_type='graph_jsongz_url',
network_file=small_corpus_url)
def process_large_corpus():
"""Return PybelProcessor with statements from Selventa Large Corpus.
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
its statements attribute.
"""
return process_pybel_network(network_type='graph_jsongz_url',
network_file=large_corpus_url)
def process_pybel_network(network_type, network_file, **kwargs):
"""Return PybelProcessor by processing a given network file.
Parameters
----------
network_type : str
The type of network that network_file is. The options are:
belscript, json, cbn_jgif, graph_pickle, and graph_jsongz_url.
Default: graph_jsongz_url
network_file : str
Path to the network file/URL to process.
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
bp.statements.
"""
if network_type == 'belscript':
return process_belscript(network_file, **kwargs)
elif network_type == 'json':
return process_json_file(network_file)
elif network_type == 'cbn_jgif':
return process_cbn_jgif_file(network_file)
elif network_type == 'graph_jsongz_url':
if not network_file:
network_file = large_corpus_url
logger.info('Loading %s' % network_file)
res = requests.get(network_file)
res.raise_for_status()
contentb = zlib.decompress(res.content, zlib.MAX_WBITS | 32)
content = contentb.decode('utf-8')
graph = pybel.from_nodelink_jsons(content)
return process_pybel_graph(graph)
elif network_type == 'graph_pickle':
graph = pybel.from_pickle(network_file)
return process_pybel_graph(graph)
else:
raise ValueError('Unknown network type: %s' % network_type)
def process_pybel_neighborhood(entity_names, network_type='graph_jsongz_url',
network_file=None, **kwargs):
"""Return PybelProcessor around neighborhood of given genes in a network.
This function processes the given network file and filters the returned
Statements to ones that contain genes in the given list.
Parameters
----------
entity_names : list[str]
A list of entity names (e.g., gene names) which will be used as the
basis of filtering the result. If any of the Agents of an extracted
INDRA Statement has a name appearing in this list, the Statement is
retained in the result.
network_type : Optional[str]
The type of network that network_file is. The options are:
belscript, json, cbn_jgif, graph_pickle, and graph_jsongz_url.
Default: graph_jsongz_url
network_file : Optional[str]
Path to the network file/URL to process. If not given, by default, the
Selventa Large Corpus is used via a URL pointing to a gzipped PyBEL
Graph JSON file.
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
bp.statements.
"""
bp = process_pybel_network(network_type, network_file, **kwargs)
filtered_stmts = []
filter_names = set(entity_names)
for stmt in bp.statements:
found = False
for agent in stmt.agent_list():
if agent is not None:
if agent.name in filter_names:
found = True
if found:
filtered_stmts.append(stmt)
bp.statements = filtered_stmts
return bp
def process_bel_stmt(bel: str, squeeze: bool = False):
"""Process a single BEL statement and return the PybelProcessor
or a single statement if ``squeeze`` is True.
Parameters
----------
bel : str
A BEL statement. See example below.
squeeze : Optional[bool]
If squeeze and there's only one statement in the processor,
it will be unpacked.
Returns
-------
statements : Union[Statement, PybelProcessor]
        A list of INDRA Statements derived from the BEL statement.
If squeeze is true and there was only one statement, the
unpacked INDRA statement will be returned.
Examples
--------
>>> from indra.sources.bel import process_bel_stmt
>>> bel_s = 'kin(p(FPLX:MEK)) -> kin(p(FPLX:ERK))'
>>> process_bel_stmt(bel_s, squeeze=True)
Activation(MEK(kinase), ERK(), kinase)
"""
r = pybel.parse(bel)
# make sure activations in the right place
for a, b in [(pc.SOURCE, pc.SOURCE_MODIFIER), (pc.TARGET, pc.TARGET_MODIFIER)]:
side = r[a]
for c in [pc.MODIFIER, pc.EFFECT, pc.FROM_LOC, pc.TO_LOC, pc.LOCATION]:
if c in side:
r.setdefault(b, {})[c] = side.pop(c)
graph = pybel.BELGraph()
add_sbel_row(graph, r)
bp = process_pybel_graph(graph)
if squeeze and len(bp.statements) == 1:
return bp.statements[0]
return bp
@lru_cache(maxsize=100)
def process_pybel_graph(graph):
"""Return a PybelProcessor by processing a PyBEL graph.
Parameters
----------
graph : pybel.struct.BELGraph
A PyBEL graph to process
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
bp.statements.
"""
bp = PybelProcessor(graph)
bp.get_statements()
if bp.annot_manager.failures:
logger.warning('missing %d annotation pairs',
sum(len(v)
for v in bp.annot_manager.failures.values()))
return bp
def process_belscript(file_name, **kwargs):
"""Return a PybelProcessor by processing a BEL script file.
Key word arguments are passed directly to pybel.from_path,
for further information, see
pybel.readthedocs.io/en/latest/io.html#pybel.from_path
Some keyword arguments we use here differ from the defaults
of PyBEL, namely we set `citation_clearing` to False
and `no_identifier_validation` to True.
Parameters
----------
file_name : str
The path to a BEL script file.
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
bp.statements.
"""
if 'citation_clearing' not in kwargs:
kwargs['citation_clearing'] = False
if 'no_identifier_validation' not in kwargs:
kwargs['no_identifier_validation'] = True
pybel_graph = pybel.from_bel_script(file_name, **kwargs)
return process_pybel_graph(pybel_graph)
def process_json_file(file_name):
"""Return a PybelProcessor by processing a Node-Link JSON file.
For more information on this format, see:
http://pybel.readthedocs.io/en/latest/io.html#node-link-json
Parameters
----------
file_name : str
The path to a Node-Link JSON file.
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
bp.statements.
"""
pybel_graph = pybel.from_nodelink_file(file_name, check_version=False)
return process_pybel_graph(pybel_graph)
def process_cbn_jgif_file(file_name):
"""Return a PybelProcessor by processing a CBN JGIF JSON file.
Parameters
----------
file_name : str
The path to a CBN JGIF JSON file.
Returns
-------
bp : PybelProcessor
A PybelProcessor object which contains INDRA Statements in
bp.statements.
"""
with open(file_name, 'r') as jgf:
return process_pybel_graph(pybel.from_cbn_jgif(json.load(jgf)))
```
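A brief usage sketch for the module above; it downloads the Selventa small corpus (network access required) and filters to an arbitrary pair of genes.
```python
# Illustrative only: requires network access and the gene list is arbitrary.
from indra.sources.bel import api as bel_api

bp = bel_api.process_pybel_neighborhood(
    ['MAPK1', 'MAPK3'], network_file=bel_api.small_corpus_url)
print(len(bp.statements))
```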
#### File: sources/drugbank/api.py
```python
import logging
from typing import Optional, Sequence, Union
from xml.etree import ElementTree
from .processor import DrugbankProcessor
logger = logging.getLogger(__name__)
def process_from_web(
username: Optional[str] = None,
password: Optional[str] = None,
version: Optional[str] = None,
prefix: Union[None, str, Sequence[str]] = None,
) -> DrugbankProcessor:
"""Get a processor using :func:`process_xml` with :mod:`drugbank_downloader`.
Parameters
----------
username :
The DrugBank username. If not passed, looks up in the environment
``DRUGBANK_USERNAME``. If not found, raises a ValueError.
password :
        The DrugBank password. If not passed, looks up in the environment
``DRUGBANK_PASSWORD``. If not found, raises a ValueError.
version :
The DrugBank version. If not passed, uses :mod:`bioversions` to
look up the most recent version.
prefix :
The prefix and subkeys passed to :func:`pystow.ensure` to specify
a non-default location to download the data to.
Returns
-------
DrugbankProcessor
A DrugbankProcessor instance which contains a list of INDRA
Statements in its statements attribute that were extracted
from the given DrugBank version
"""
import drugbank_downloader
et = drugbank_downloader.parse_drugbank(
username=username,
password=password,
version=version,
prefix=prefix,
)
return process_element_tree(et)
def process_xml(fname):
"""Return a processor by extracting Statements from DrugBank XML.
Parameters
----------
fname : str
The path to a DrugBank XML file to process.
Returns
-------
DrugbankProcessor
A DrugbankProcessor instance which contains a list of INDRA
Statements in its statements attribute that were extracted
from the given XML file.
"""
logger.info('Loading %s...' % fname)
et = ElementTree.parse(fname)
return process_element_tree(et)
def process_element_tree(et):
"""Return a processor by extracting Statement from DrugBank XML.
Parameters
----------
et : xml.etree.ElementTree
An ElementTree loaded from the DrugBank XML file to process.
Returns
-------
DrugbankProcessor
A DrugbankProcessor instance which contains a list of INDRA
Statements in its statements attribute that were extracted
from the given ElementTree.
"""
logger.info('Extracting DrugBank statements...')
dp = DrugbankProcessor(et)
dp.extract_statements()
return dp
```
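A hedged usage sketch for the DrugBank entry points above; the XML path is a placeholder, and the web variant needs DrugBank credentials in the environment.
```python
# Illustrative only: the path is a placeholder; the web variant reads
# DRUGBANK_USERNAME / DRUGBANK_PASSWORD from the environment.
from indra.sources.drugbank.api import process_xml  # process_from_web also available

dp = process_xml('/path/to/drugbank_full_database.xml')
print(len(dp.statements))
```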
#### File: sources/eidos/server.py
```python
import sys
import json
from flask import Flask, request
from indra.sources.eidos.reader import EidosReader
app = Flask(__name__)
@app.route('/process_text', methods=['POST'])
def process_text():
text = request.json.get('text')
if not text:
return {}
res = er.process_text(text)
return json.dumps(res)
@app.route('/reground', methods=['POST'])
def reground():
text = request.json.get('text')
ont_yml = request.json.get('ont_yml')
if not ont_yml:
from indra_world.ontology import world_ontology
ont_yml = world_ontology.dump_yml_str()
topk = request.json.get('topk', 10)
is_canonicalized = request.json.get('is_canonicalized', False)
if not text:
return []
if isinstance(text, str):
text = [text]
res = er.reground_texts(text, ont_yml, topk=topk,
is_canonicalized=is_canonicalized)
return json.dumps(res)
if __name__ == '__main__':
port = int(sys.argv[1]) if len(sys.argv) > 1 else 6666
er = EidosReader()
er.process_text('hello') # This is done to initialize the system
app.run(host='0.0.0.0', port=port)
```
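A client-side sketch for the Flask service above, assuming it has been started locally (e.g. `python server.py 6666`); the example sentence, entity texts, and port are placeholders.
```python
# Sketch only: assumes the Eidos server above is running on localhost:6666.
import requests

# Ask Eidos to read a piece of text; the response body is the reader's JSON
# output serialized by the /process_text route.
resp = requests.post('http://localhost:6666/process_text',
                     json={'text': 'Rainfall causes flooding.'})
reading = resp.json()

# Reground a list of entity texts; if ont_yml is omitted, the server falls
# back to the World Modelers ontology as shown in the /reground route.
resp = requests.post('http://localhost:6666/reground',
                     json={'text': ['rainfall', 'flooding'], 'topk': 5})
groundings = resp.json()
```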
#### File: sources/hypothesis/api.py
```python
__all__ = ['process_annotations', 'get_annotations', 'upload_annotation',
'upload_statement_annotation', 'statement_to_annotations']
import logging
import requests
from indra.config import get_config
from .processor import HypothesisProcessor
from .annotator import statement_to_annotations
logger = logging.getLogger(__name__)
base_url = 'https://api.hypothes.is/api/'
api_key = get_config('HYPOTHESIS_API_KEY')
headers = {'Authorization': 'Bearer %s' % api_key,
'Accept': 'application/vnd.hypothesis.v1+json',
'content-type': 'application/json'}
indra_group = get_config('HYPOTHESIS_GROUP')
def send_get_request(endpoint, **params):
"""Send a request to the hypothes.is web service and return JSON response.
Note that it is assumed that `HYPOTHESIS_API_KEY` is set either as a
configuration entry or as an environment variable.
Parameters
----------
endpoint : str
The endpoint to call, e.g., `search`.
params : kwargs
A set of keyword arguments that are passed to the `requests.get` call
as `params`.
"""
if api_key is None:
raise ValueError('No API key set in HYPOTHESIS_API_KEY')
res = requests.get(base_url + endpoint, headers=headers,
params=params)
res.raise_for_status()
return res.json()
def send_post_request(endpoint, **params):
"""Send a post request to the hypothes.is web service and return JSON
response.
Note that it is assumed that `HYPOTHESIS_API_KEY` is set either as a
configuration entry or as an environment variable.
Parameters
----------
endpoint : str
The endpoint to call, e.g., `search`.
params : kwargs
A set of keyword arguments that are passed to the `requests.post` call
as `json`.
"""
if api_key is None:
raise ValueError('No API key set in HYPOTHESIS_API_KEY')
res = requests.post(base_url + endpoint, headers=headers,
json=params)
res.raise_for_status()
return res.json()
def upload_annotation(url, annotation, target_text=None, tags=None,
group=None):
"""Upload an annotation to hypothes.is.
Parameters
----------
url : str
The URL of the resource being annotated.
annotation : str
The text content of the annotation itself.
target_text : Optional[str]
The specific span of text that the annotation applies to.
tags : Optional[list[str]]
A list of tags to apply to the annotation.
group : Optional[str]
The hypothes.is key of the group (not its name). If not given, the
HYPOTHESIS_GROUP configuration in the config file or an environment
variable is used.
Returns
-------
json
The full response JSON from the web service.
"""
if group is None:
if indra_group:
group = indra_group
else:
raise ValueError('No group provided and HYPOTHESIS_GROUP '
'is not set.')
params = {
'uri': url,
'group': group,
'text': annotation,
}
if target_text:
params['target'] = [{
'source': [url],
'selector': [
{'type': 'TextQuoteSelector',
'exact': target_text}
]
}]
if tags:
params['tags'] = tags
permissions = {'read': ['group:%s' % group]}
params['permissions'] = permissions
res = send_post_request('annotations', **params)
return res
def upload_statement_annotation(stmt, annotate_agents=True):
"""Construct and upload all annotations for a given INDRA Statement.
Parameters
----------
stmt : indra.statements.Statement
An INDRA Statement.
annotate_agents : Optional[bool]
If True, the agents in the annotation text are linked to outside
databases based on their grounding. Default: True
Returns
-------
list of dict
A list of annotation structures that were uploaded to hypothes.is.
"""
annotations = statement_to_annotations(stmt,
annotate_agents=annotate_agents)
for annotation in annotations:
annotation['tags'].append('indra_upload')
upload_annotation(**annotation)
return annotations
def get_annotations(group=None):
"""Return annotations in hypothes.is in a given group.
Parameters
----------
group : Optional[str]
The hypothes.is key of the group (not its name). If not given, the
HYPOTHESIS_GROUP configuration in the config file or an environment
variable is used.
"""
if group is None:
if indra_group:
group = indra_group
else:
raise ValueError('No group provided and HYPOTHESIS_GROUP '
'is not set.')
# Note that this batch size is the maximum that the API allows, therefore
# it makes sense to run queries with this fixed limit.
limit = 200
offset = 0
annotations = []
while True:
logger.info('Getting up to %d annotations from offset %d' %
(limit, offset))
res = send_get_request('search', group=group, limit=limit, offset=offset)
rows = res.get('rows', [])
if not rows:
break
annotations += rows
offset += len(rows)
logger.info('Got a total of %d annotations' % len(annotations))
return annotations
def process_annotations(group=None, reader=None, grounder=None):
"""Process annotations in hypothes.is in a given group.
Parameters
----------
group : Optional[str]
The hypothes.is key of the group (not its name). If not given, the
HYPOTHESIS_GROUP configuration in the config file or an environment
variable is used.
reader : Optional[None, str, Callable[[str], Processor]]
A handle for a function which takes a single str argument
(text to process) and returns a processor object with a statements
attribute containing INDRA Statements. By default, the REACH reader's
process_text function is used with default parameters. Note that
if the function requires extra parameters other than the input text,
functools.partial can be used to set those. Can be alternatively
set to :func:`indra.sources.bel.process_text` by using the string
"bel".
grounder : Optional[function]
A handle for a function which takes a positional str argument (entity
text to ground) and an optional context key word argument and returns
a list of objects matching the structure of gilda.grounder.ScoredMatch.
By default, Gilda's ground function is used for grounding.
Returns
-------
HypothesisProcessor
A HypothesisProcessor object which contains a list of extracted
INDRA Statements in its statements attribute, and a list of extracted
grounding curations in its groundings attribute.
Example
-------
Process all annotations that have been written in BEL with:
.. code-block:: python
from indra.sources import hypothesis
processor = hypothesis.process_annotations(group='Z8RNqokY', reader='bel')
processor.statements
# returns: [Phosphorylation(AKT(), PCGF2(), T, 334)]
If this example doesn't work, try joining the group with this link:
https://hypothes.is/groups/Z8RNqokY/cthoyt-bel.
"""
annotations = get_annotations(group=group)
hp = HypothesisProcessor(annotations, reader=reader, grounder=grounder)
hp.extract_statements()
hp.extract_groundings()
return hp
```
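A usage sketch for the hypothes.is client above, assuming `HYPOTHESIS_API_KEY` (and, for group access, `HYPOTHESIS_GROUP`) are configured; the group key, URL, and annotation text below are placeholders.
```python
# Sketch only: the group key, URL, and text below are placeholders.
from indra.sources.hypothesis.api import (upload_annotation, get_annotations,
                                          process_annotations)

# Upload an annotation anchored to a specific span of text on a page.
upload_annotation(
    url='https://www.example.com/some_article',
    annotation='AKT phosphorylates GSK3B.',
    target_text='AKT phosphorylates GSK3B',
    tags=['indra'],
    group='abcd1234',  # the hypothes.is group key, not its display name
)

# Retrieve all raw annotations in the group...
annotations = get_annotations(group='abcd1234')

# ...or process them directly into INDRA Statements and grounding curations.
hp = process_annotations(group='abcd1234')
stmts, groundings = hp.statements, hp.groundings
```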
#### File: sources/indra_db_rest/api.py
```python
__all__ = ['get_statements', 'get_statements_for_papers',
'get_statements_for_paper', 'get_statements_by_hash',
'get_statements_from_query', 'submit_curation', 'get_curations']
from indra.util import clockit
from indra.statements import Complex, SelfModification, ActiveForm, \
Translocation, Conversion
from indra.sources.indra_db_rest.query import *
from indra.sources.indra_db_rest.processor import DBQueryStatementProcessor
from indra.sources.indra_db_rest.util import make_db_rest_request, get_url_base
@clockit
def get_statements(subject=None, object=None, agents=None, stmt_type=None,
use_exact_type=False, limit=None, persist=True, timeout=None,
strict_stop=False, ev_limit=10, sort_by='ev_count', tries=3,
use_obtained_counts=False, api_key=None):
"""Get Statements from the INDRA DB web API matching given agents and type.
You get a :py:class:`DBQueryStatementProcessor
<indra.sources.indra_db_rest.processor.DBQueryStatementProcessor>`
object, which allows Statements to be loaded in a background thread,
provides a sample of the "best" content promptly in the
``sample_statements`` attribute, and populates the ``statements`` attribute
when the paged load is complete. The "best" content is determined by the
``sort_by`` argument, which may be 'belief', 'ev_count', or None.
Parameters
----------
subject/object : str
Optionally specify the subject and/or object of the statements
you wish to get from the database. By default, the namespace is assumed
to be HGNC gene names, however you may specify another namespace by
including "@<namespace>" at the end of the name string. For example, if
you want to specify an agent by chebi, you could use "CHEBI:6801@CHEBI",
or if you wanted to use the HGNC id, you could use "6871@HGNC".
agents : list[str]
A list of agents, specified in the same manner as subject and object,
but without specifying their grammatical position.
stmt_type : str
Specify the types of interactions you are interested in, as indicated
by the sub-classes of INDRA's Statements. This argument is *not* case
sensitive. If the statement class given has sub-classes
(e.g. RegulateAmount has IncreaseAmount and DecreaseAmount), then both
the class itself, and its subclasses, will be queried, by default. If
you do not want this behavior, set use_exact_type=True. Note that if
max_stmts is set, it is possible only the exact statement type will
be returned, as this is the first searched. The processor then cycles
through the types, getting a page of results for each type and adding it
to the quota, until the max number of statements is reached.
use_exact_type : bool
If stmt_type is given, and you only want to search for that specific
statement type, set this to True. Default is False.
limit : Optional[int]
Select the maximum number of statements to return. When set to less than
500, the effect is much the same as setting persist to False, and
guarantees a faster response. Default is None.
persist : bool
Default is True. When False, if a query comes back limited (not all
results returned), just give up and pass along what was returned.
Otherwise, make further queries to get the rest of the data (which may
take some time).
timeout : positive int or None
If an int, block until the work is done and statements are retrieved, or
until the timeout has expired, in which case the results so far will be
returned in the response object, and further results will be added in
a separate thread as they become available. If None, block indefinitely
until all statements are retrieved. Default is None.
strict_stop : bool
If True, the query will only be given `timeout` time to complete before
being abandoned entirely. Otherwise the timeout will simply wait for the
thread to join for `timeout` seconds before returning, allowing other
work to continue while the query runs in the background. The default is
False.
ev_limit : Optional[int]
Limit the amount of evidence returned per Statement. Default is 10.
sort_by : Optional[str]
Options are currently 'ev_count' or 'belief'. Results will be returned in
order of the given parameter. If None, results will be returned in an
arbitrary order.
tries : Optional[int]
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can also
help gracefully handle an unreliable connection, if you're willing to
wait. Default is 3.
use_obtained_counts : Optional[bool]
If True, evidence counts and source counts are reported based
on the actual evidences returned for each statement in this query
(as opposed to all existing evidences, even if not all were returned).
Default: False
api_key : Optional[str]
Override or use in place of the API key given in the INDRA config file.
Returns
-------
processor : :py:class:`DBQueryStatementProcessor`
An instance of the DBQueryStatementProcessor, which has an attribute
``statements`` which will be populated when the query/queries are done.
"""
query = EmptyQuery()
def add_agent(ag_str, role):
if ag_str is None:
return
nonlocal query
if '@' in ag_str:
ag_id, ag_ns = ag_str.split('@')
else:
ag_id = ag_str
ag_ns = 'NAME'
query &= HasAgent(ag_id, ag_ns, role=role)
add_agent(subject, 'subject')
add_agent(object, 'object')
if agents is not None:
for ag in agents:
add_agent(ag, None)
if stmt_type is not None:
query &= HasType([stmt_type], include_subclasses=not use_exact_type)
if isinstance(query, EmptyQuery):
raise ValueError("No constraints provided.")
return DBQueryStatementProcessor(query, limit=limit, persist=persist,
ev_limit=ev_limit, timeout=timeout,
sort_by=sort_by, tries=tries,
strict_stop=strict_stop,
use_obtained_counts=use_obtained_counts,
api_key=api_key)
@clockit
def get_statements_by_hash(hash_list, limit=None, ev_limit=10,
sort_by='ev_count', persist=True, timeout=None,
strict_stop=False, tries=3, api_key=None):
"""Get Statements from a list of hashes.
Parameters
----------
hash_list : list[int or str]
A list of statement hashes.
limit : Optional[int]
Select the maximum number of statements to return. When set to less than
500, the effect is much the same as setting persist to False, and
guarantees a faster response. Default is None.
ev_limit : Optional[int]
Limit the amount of evidence returned per Statement. Default is 10.
sort_by : Optional[str]
Options are currently 'ev_count' or 'belief'. Results will be returned in
order of the given parameter. If None, results will be returned in an
arbitrary order.
persist : bool
Default is True. When False, if a query comes back limited (not all
results returned), just give up and pass along what was returned.
Otherwise, make further queries to get the rest of the data (which may
take some time).
timeout : positive int or None
If an int, return after `timeout` seconds, even if query is not done.
Default is None.
strict_stop : bool
If True, the query will only be given `timeout` time to complete before
being abandoned entirely. Otherwise the timeout will simply wait for the
thread to join for `timeout` seconds before returning, allowing other
work to continue while the query runs in the background. The default is
False.
tries : int > 0
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can
also help gracefully handle an unreliable connection, if you're
willing to wait. Default is 3.
api_key : Optional[str]
Override or use in place of the API key given in the INDRA config file.
Returns
-------
processor : :py:class:`DBQueryStatementProcessor`
An instance of the DBQueryStatementProcessor, which has an attribute
`statements` which will be populated when the query/queries are done.
"""
return DBQueryStatementProcessor(HasHash(hash_list), limit=limit,
ev_limit=ev_limit, sort_by=sort_by,
persist=persist, timeout=timeout,
tries=tries, strict_stop=strict_stop,
api_key=api_key)
def get_statements_for_paper(*args, **kwargs):
from warnings import warn
warn("`get_statements_for_paper` has been replaced with "
"`get_statements_for_papers`.", DeprecationWarning)
return get_statements_for_papers(*args, **kwargs)
@clockit
def get_statements_for_papers(ids, limit=None, ev_limit=10, sort_by='ev_count',
persist=True, timeout=None, strict_stop=False,
tries=3, filter_ev=True, api_key=None):
"""Get Statements extracted from the papers with the given ref ids.
Parameters
----------
ids : list[tuple[str, str]]
A list of tuples with ids and their type. For example:
``[('pmid', '12345'), ('pmcid', 'PMC12345')]``. The type can be any one
of 'pmid', 'pmcid', 'doi', 'pii', 'manuscript_id', or 'trid', which is
the primary key id of the text references in the database.
limit : Optional[int]
Select the maximum number of statements to return. When set to less than
500, the effect is much the same as setting persist to False, and
guarantees a faster response. Default is None.
ev_limit : Optional[int]
Limit the amount of evidence returned per Statement. Default is 10.
filter_ev : bool
Indicate whether evidence should have the same filters applied as
the statements themselves, where appropriate (e.g. in the case of a
filter by paper).
sort_by : Optional[str]
Options are currently 'ev_count' or 'belief'. Results will be returned in
order of the given parameter. If None, results will be returned in an
arbitrary order.
persist : bool
Default is True. When False, if a query comes back limited (not all
results returned), just give up and pass along what was returned.
Otherwise, make further queries to get the rest of the data (which may
take some time).
timeout : positive int or None
If an int, return after `timeout` seconds, even if query is not done.
Default is None.
strict_stop : bool
If True, the query will only be given `timeout` time to complete before
being abandoned entirely. Otherwise the timeout will simply wait for the
thread to join for `timeout` seconds before returning, allowing other
work to continue while the query runs in the background. The default is
False.
tries : int > 0
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can also
help gracefully handle an unreliable connection, if you're willing to
wait. Default is 3.
api_key : Optional[str]
Override or use in place of the API key given in the INDRA config file.
Returns
-------
processor : :py:class:`DBQueryStatementProcessor`
An instance of the DBQueryStatementProcessor, which has an attribute
`statements` which will be populated when the query/queries are done.
"""
return DBQueryStatementProcessor(FromPapers(ids), limit=limit,
ev_limit=ev_limit, sort_by=sort_by,
persist=persist, timeout=timeout,
tries=tries, filter_ev=filter_ev,
strict_stop=strict_stop, api_key=api_key)
@clockit
def get_statements_from_query(query, limit=None, ev_limit=10,
sort_by='ev_count', persist=True, timeout=None,
strict_stop=False, tries=3, filter_ev=True,
use_obtained_counts=False,
api_key=None):
"""Get Statements using a Query.
Example
-------
>>> from indra.sources.indra_db_rest.query import HasAgent, FromMeshIds
>>> query = HasAgent("MEK", "FPLX") & FromMeshIds(["D001943"])
>>> p = get_statements_from_query(query, limit=100)
>>> stmts = p.statements
Parameters
----------
query : :py:class:`Query`
The query to be evaluated in return for statements.
limit : Optional[int]
Select the maximum number of statements to return. When set to less than
500, the effect is much the same as setting persist to False, and
guarantees a faster response. Default is None.
ev_limit : Optional[int]
Limit the amount of evidence returned per Statement. Default is 10.
filter_ev : bool
Indicate whether evidence should have the same filters applied as
the statements themselves, where appropriate (e.g. in the case of a
filter by paper).
sort_by : Optional[str]
Options are currently 'ev_count' or 'belief'. Results will be returned in
order of the given parameter. If None, results will be returned in an
arbitrary order.
persist : bool
Default is True. When False, if a query comes back limited (not all
results returned), just give up and pass along what was returned.
Otherwise, make further queries to get the rest of the data (which may
take some time).
timeout : positive int or None
If an int, return after ``timeout`` seconds, even if query is not done.
Default is None.
strict_stop : bool
If True, the query will only be given `timeout` time to complete before
being abandoned entirely. Otherwise the timeout will simply wait for the
thread to join for `timeout` seconds before returning, allowing other
work to continue while the query runs in the background. The default is
False.
use_obtained_counts : Optional[bool]
If True, evidence counts and source counts are reported based
on the actual evidences returned for each statement in this query
(as opposed to all existing evidences, even if not all were returned).
Default: False
tries : Optional[int]
Set the number of times to try the query. The database often caches
results, so if a query times out the first time, trying again after a
timeout will often succeed fast enough to avoid a timeout. This can also
help gracefully handle an unreliable connection, if you're willing to
wait. Default is 3.
api_key : Optional[str]
Override or use in place of the API key given in the INDRA config file.
Returns
-------
processor : :py:class:`DBQueryStatementProcessor`
An instance of the DBQueryStatementProcessor, which has an attribute
`statements` which will be populated when the query/queries are done.
"""
return DBQueryStatementProcessor(query, limit=limit,
ev_limit=ev_limit, sort_by=sort_by,
persist=persist, timeout=timeout,
tries=tries, filter_ev=filter_ev,
strict_stop=strict_stop,
use_obtained_counts=use_obtained_counts,
api_key=api_key)
def submit_curation(hash_val, tag, curator_email, text=None,
source='indra_rest_client', ev_hash=None, pa_json=None,
ev_json=None, api_key=None, is_test=False):
"""Submit a curation for the given statement at the relevant level.
Parameters
----------
hash_val : int
The hash corresponding to the statement.
tag : str
A very short phrase categorizing the error or type of curation,
e.g. "grounding" for a grounding error, or "correct" if you are
marking a statement as correct.
curator_email : str
The email of the curator.
text : Optional[str]
A brief description of the problem.
source : str
The name of the access point through which the curation was performed.
The default is 'indra_rest_client', meaning this function was used
directly. Any higher-level application should identify itself here.
ev_hash : int
A hash of the sentence and other evidence information. Elsewhere
referred to as `source_hash`.
pa_json : None or dict
The JSON of a statement you wish to curate. If not given, it may be
inferred (best effort) from the given hash.
ev_json : None or dict
The JSON of an evidence you wish to curate. If not given, it cannot be
inferred.
api_key : Optional[str]
Override or use in place of the API key given in the INDRA config file.
is_test : bool
Used in testing. If True, no curation will actually be added to the
database.
"""
data = {'tag': tag, 'text': text, 'email': curator_email, 'source': source,
'ev_hash': ev_hash, 'pa_json': pa_json, 'ev_json': ev_json}
url = 'curation/submit/%s' % hash_val
if is_test:
qstr = '?test'
else:
qstr = ''
resp = make_db_rest_request('post', url, qstr, data=data, api_key=api_key)
return resp.json()
def get_curations(hash_val=None, source_hash=None, api_key=None):
"""Get the curations for a specific statement and evidence.
If neither hash_val nor source_hash are given, all curations will be
retrieved. This will require the user to have extra permissions, as
determined by their API key.
Parameters
----------
hash_val : Optional[int]
The hash of a statement whose curations you want to retrieve.
source_hash : Optional[int]
The hash generated for a piece of evidence for which you want curations.
The `hash_val` must be provided to use the `source_hash`.
api_key : Optional[str]
Override or use in place of the API key given in the INDRA config file.
Returns
-------
curations : list
A list of dictionaries containing the curation data.
"""
url = 'curation/list'
if hash_val is not None:
url += f'/{hash_val}'
if source_hash is not None:
url += f'/{source_hash}'
elif source_hash is not None:
raise ValueError("A hash_val must be given if a source_hash is given.")
resp = make_db_rest_request('get', url, api_key=api_key)
return resp.json()
def get_statement_queries(stmts, fallback_ns='NAME', pick_ns_fun=None,
**params):
"""Get queries used to search based on a statement.
In addition to the stmts, you can enter any parameters standard to the
query. See https://github.com/indralab/indra_db/rest_api for a full list.
Parameters
----------
stmts : list[Statement]
A list of INDRA statements.
fallback_ns : Optional[str]
The name space to search by when an Agent in a Statement is not
grounded to one of the standardized name spaces. Typically,
searching by 'NAME' (i.e., the Agent's name) is a good option if
(1) An Agent's grounding is missing but its name is
known to be standard in one of the name spaces. In this case the
name-based lookup will yield the same result as looking up by
grounding. Example: MAP2K1(db_refs={})
(2) Any Agent that is encountered with the same name as this Agent
is never standardized, so looking up by name yields the same result
as looking up by TEXT. Example: xyz(db_refs={'TEXT': 'xyz'})
Searching by TEXT is better in other cases e.g., when the given
specific Agent is not grounded but we have other Agents with the
same TEXT that are grounded and then standardized to a different name.
Example: Erk(db_refs={'TEXT': 'Erk'}).
Default: 'NAME'
pick_ns_fun : Optional[function]
An optional user-supplied function which takes an Agent as input and
returns a string of the form value@ns where 'value' will be looked
up in namespace 'ns' to search for the given Agent.
**params : kwargs
A set of keyword arguments that are added as parameters to the
query URLs.
"""
def pick_ns(ag):
# If the Agent has grounding, in order of preference, in any of these
# name spaces then we look it up based on grounding.
for ns in ['FPLX', 'HGNC', 'UP', 'CHEBI', 'GO', 'MESH']:
if ns in ag.db_refs:
dbid = ag.db_refs[ns]
return '%s@%s' % (dbid, ns)
# Otherwise we fall back on searching by NAME or TEXT
# (or any other given name space as long as the Agent name can be
# usefully looked up in that name space).
return '%s@%s' % (ag.name, fallback_ns)
pick_ns_fun = pick_ns if not pick_ns_fun else pick_ns_fun
queries = []
url_base = get_url_base('statements/from_agents')
non_binary_statements = (Complex, SelfModification, ActiveForm,
Translocation, Conversion)
for stmt in stmts:
kwargs = {}
if not isinstance(stmt, non_binary_statements):
for pos, ag in zip(['subject', 'object'], stmt.agent_list()):
if ag is not None:
kwargs[pos] = pick_ns_fun(ag)
else:
for i, ag in enumerate(stmt.agent_list()):
if ag is not None:
kwargs['agent%d' % i] = pick_ns_fun(ag)
kwargs['type'] = stmt.__class__.__name__
kwargs.update(params)
query_str = '?' + '&'.join(['%s=%s' % (k, v) for k, v in kwargs.items()
if v is not None])
queries.append(url_base + query_str)
return queries
```
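A usage sketch for the REST client above. The queries run against the INDRA DB web service, so they need network access and, depending on the deployment, an API key; the hash value below is a placeholder.
```python
# Sketch only: requires access to the INDRA DB REST service.
from indra.sources.indra_db_rest.api import (get_statements,
                                             get_statements_by_hash,
                                             get_statements_for_papers)

# Phosphorylation Statements with MEK (FamPlex) as subject and ERK as object.
p = get_statements(subject='MEK@FPLX', object='ERK@FPLX',
                   stmt_type='Phosphorylation', limit=50)
stmts = p.statements  # populated once the (possibly paged) query finishes

# Statements extracted from specific papers, given (id type, id) pairs.
p = get_statements_for_papers([('pmid', '12345'), ('pmcid', 'PMC12345')])

# Statements with known matches-key hashes.
p = get_statements_by_hash([1234567890])  # placeholder hash value
```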
#### File: tests/test_obo_clients/test_ido_client.py
```python
from indra.databases import ido_client
def test_ido_client_loaded():
"""Test the IDO client is loaded."""
assert 'ido' == ido_client._client.prefix
assert ido_client._client.entries
assert ido_client._client.name_to_id
def test_lookup():
"""Test IDO name and identifier lookup."""
name = ido_client.get_ido_name_from_ido_id("0000403")
assert "parasite role" == name, name
identifier = ido_client.get_ido_id_from_ido_name("parasite role")
assert "0000403" == identifier, identifier
if __name__ == '__main__':
test_lookup()
```
#### File: indra/tests/test_preassembler.py
```python
import os
import unittest
from indra.preassembler import Preassembler, render_stmt_graph, \
flatten_evidence, flatten_stmts
from indra.sources import reach
from indra.statements import *
from indra.ontology.bio import bio_ontology
try:
from indra_world.ontology import load_world_ontology
world_ontology = load_world_ontology(default_type='flat')
has_indra_world = True
except ImportError:
has_indra_world = False
from indra.preassembler import RefinementFilter, OntologyRefinementFilter
def test_duplicates():
src = Agent('SRC', db_refs={'HGNC': '11283'})
ras = Agent('RAS', db_refs={'FA': '03663'})
st1 = Phosphorylation(src, ras)
st2 = Phosphorylation(src, ras)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_duplicates()
assert len(pa.unique_stmts) == 1
def test_duplicates_copy():
src = Agent('SRC', db_refs={'HGNC': '11283'})
ras = Agent('RAS', db_refs={'FA': '03663'})
st1 = Phosphorylation(src, ras, evidence=[Evidence(text='Text 1')])
st2 = Phosphorylation(src, ras, evidence=[Evidence(text='Text 2')])
stmts = [st1, st2]
pa = Preassembler(bio_ontology, stmts=stmts)
pa.combine_duplicates()
assert len(pa.unique_stmts) == 1
assert len(stmts) == 2
assert len(stmts[0].evidence) == 1
assert len(stmts[1].evidence) == 1
def test_duplicates_sorting():
mc = ModCondition('phosphorylation')
map2k1_1 = Agent('MAP2K1', mods=[mc])
mc1 = ModCondition('phosphorylation', 'serine', '218')
mc2 = ModCondition('phosphorylation', 'serine', '222')
mc3 = ModCondition('phosphorylation', 'serine', '298')
map2k1_2 = Agent('MAP2K1', mods=[mc1, mc2, mc3])
mapk3 = Agent('MAPK3')
st1 = Phosphorylation(map2k1_1, mapk3, position='218')
st2 = Phosphorylation(map2k1_2, mapk3)
st3 = Phosphorylation(map2k1_1, mapk3, position='218')
stmts = [st1, st2, st3]
pa = Preassembler(bio_ontology, stmts=stmts)
pa.combine_duplicates()
assert len(pa.unique_stmts) == 2
def test_combine_duplicates():
raf = Agent('RAF1')
mek = Agent('MEK1')
erk = Agent('ERK2')
p1 = Phosphorylation(raf, mek,
evidence=Evidence(text='foo'))
p2 = Phosphorylation(raf, mek,
evidence=Evidence(text='bar'))
p3 = Phosphorylation(raf, mek,
evidence=Evidence(text='baz'))
p4 = Phosphorylation(raf, mek,
evidence=Evidence(text='beep'))
p5 = Phosphorylation(mek, erk,
evidence=Evidence(text='foo2'))
p6 = Dephosphorylation(mek, erk,
evidence=Evidence(text='bar2'))
p7 = Dephosphorylation(mek, erk,
evidence=Evidence(text='baz2'))
p8 = Dephosphorylation(mek, erk,
evidence=Evidence(text='beep2'))
p9 = Dephosphorylation(Agent('SRC'), Agent('KRAS'),
evidence=Evidence(text='beep'))
stmts = [p1, p2, p3, p4, p5, p6, p7, p8, p9]
pa = Preassembler(bio_ontology, stmts=stmts)
pa.combine_duplicates()
# The statements come out sorted by their matches_key
assert len(pa.unique_stmts) == 4, len(pa.unique_stmts)
num_evs = [len(s.evidence) for s in pa.unique_stmts]
assert pa.unique_stmts[0].matches(p6) # MEK dephos ERK
assert num_evs[0] == 3, num_evs[0]
assert pa.unique_stmts[1].matches(p9) # SRC dephos KRAS
assert num_evs[1] == 1, num_evs[1]
assert pa.unique_stmts[2].matches(p5) # MEK phos ERK
assert num_evs[2] == 1, num_evs[2]
assert pa.unique_stmts[3].matches(p1) # RAF phos MEK
assert num_evs[3] == 4, num_evs[3]
def test_combine_evidence_exact_duplicates():
raf = Agent('RAF1')
mek = Agent('MEK1')
p1 = Phosphorylation(raf, mek,
evidence=Evidence(text='foo'))
p2 = Phosphorylation(raf, mek,
evidence=Evidence(text='bar'))
p3 = Phosphorylation(raf, mek,
evidence=Evidence(text='bar'))
stmts = [p1, p2, p3]
pa = Preassembler(bio_ontology, stmts=stmts)
pa.combine_duplicates()
# The statements come out sorted by their matches_key
assert len(pa.unique_stmts) == 1
assert len(pa.unique_stmts[0].evidence) == 2
assert set(ev.text for ev in pa.unique_stmts[0].evidence) == \
set(['foo', 'bar'])
def test_combine_evidence_exact_duplicates_different_raw_text():
raf1 = Agent('RAF1', db_refs={'TEXT': 'Raf'})
raf2 = Agent('RAF1', db_refs={'TEXT': 'RAF'})
mek = Agent('MEK1')
p1 = Phosphorylation(raf1, mek,
evidence=Evidence(text='foo'))
p2 = Phosphorylation(raf1, mek,
evidence=Evidence(text='bar'))
p3 = Phosphorylation(raf2, mek,
evidence=Evidence(text='bar'))
stmts = [p1, p2, p3]
pa = Preassembler(bio_ontology, stmts=stmts)
pa.combine_duplicates()
# The statements come out sorted by their matches_key
assert len(pa.unique_stmts) == 1
assert len(pa.unique_stmts[0].evidence) == 3
assert set(ev.text for ev in pa.unique_stmts[0].evidence) == \
set(['foo', 'bar', 'bar'])
def test_superfamily_refinement():
"""A gene-level statement should be supported by a family-level
statement."""
src = Agent('SRC', db_refs={'HGNC': '11283'})
ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
nras = Agent('NRAS', db_refs={'HGNC': '7989'})
st1 = Phosphorylation(src, ras, 'tyrosine', '32')
st2 = Phosphorylation(src, nras, 'tyrosine', '32')
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# The top-level list should contain only one statement, the gene-level
# one, supported by the family one.
assert len(stmts) == 1, stmts
assert (stmts[0].equals(st2))
assert (len(stmts[0].supported_by) == 1)
assert (stmts[0].supported_by[0].equals(st1))
def test_superfamily_refinement_isa_or_partof():
src = Agent('SRC', db_refs={'HGNC': '11283'})
prkag1 = Agent('PRKAG1', db_refs={'HGNC': '9385'})
ampk = Agent('AMPK', db_refs={'FPLX': 'AMPK'})
st1 = Phosphorylation(src, ampk, 'tyrosine', '32')
st2 = Phosphorylation(src, prkag1, 'tyrosine', '32')
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# The top-level list should contain only one statement, the gene-level
# one, supported by the family one.
assert len(stmts) == 1
assert stmts[0].equals(st2)
assert len(stmts[0].supported_by) == 1
assert stmts[0].supported_by[0].equals(st1)
def test_modification_refinement():
"""A more specific modification statement should be supported by a more
generic modification statement."""
src = Agent('SRC', db_refs={'HGNC': '11283'})
nras = Agent('NRAS', db_refs={'HGNC': '7989'})
st1 = Phosphorylation(src, nras, 'tyrosine', '32')
st2 = Phosphorylation(src, nras)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# The top-level list should contain only one statement, the more specific
# modification, supported by the less-specific modification.
assert len(stmts) == 1
assert stmts[0].equals(st1)
assert len(stmts[0].supported_by) == 1
assert stmts[0].supported_by[0].equals(st2)
def test_modification_refinement_residue_noenz():
erbb3 = Agent('Erbb3')
st1 = Phosphorylation(None, erbb3)
st2 = Phosphorylation(None, erbb3, 'Y')
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_related()
assert len(pa.related_stmts) == 1
def test_modification_refinement_noenz():
"""A more specific modification statement should be supported by a more
generic modification statement."""
src = Agent('SRC', db_refs={'HGNC': '11283'})
nras = Agent('NRAS', db_refs={'HGNC': '7989'})
st1 = Phosphorylation(src, nras, 'tyrosine', '32')
st2 = Phosphorylation(None, nras, 'tyrosine', '32')
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# The top-level list should contain only one statement, the more specific
# modification, supported by the less-specific modification.
assert len(stmts) == 1
assert stmts[0].equals(st1)
assert len(stmts[0].supported_by) == 1
assert stmts[0].supported_by[0].equals(st2)
assert stmts[0].supported_by[0].supports[0].equals(st1)
def test_modification_refinement_noenz2():
"""A more specific modification statement should be supported by a more
generic modification statement.
Similar to test_modification_refinement_noenz for statements where one
argument is associated with a component in the hierarchy (SIRT1 in this
case) but the other is not (BECN1).
"""
sirt1 = Agent('SIRT1', db_refs={'HGNC':'14929', 'UP':'Q96EB6',
'TEXT':'SIRT1'})
becn1 = Agent('BECN1', db_refs={'HGNC': '1034', 'UP': 'Q14457',
'TEXT': 'Beclin 1'})
st1 = Deacetylation(sirt1, becn1)
st2 = Deacetylation(None, becn1)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# The top-level list should contain only one statement, the more specific
# modification, supported by the less-specific modification.
assert (len(stmts) == 1)
assert (stmts[0].equals(st1))
assert (len(stmts[0].supported_by) == 1)
assert (stmts[0].supported_by[0].equals(st2))
assert (stmts[0].supported_by[0].supports[0].equals(st1))
def test_modification_norefinement_noenz():
"""A more specific modification statement should be supported by a more
generic modification statement."""
src = Agent('SRC', db_refs={'HGNC': '11283'})
nras = Agent('NRAS', db_refs={'HGNC': '7989'})
st1 = Phosphorylation(src, nras)
st2 = Phosphorylation(None, nras, 'Y', '32',
evidence=[Evidence(text='foo')])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# Modification is less specific, enzyme more specific in st1, therefore
# these statements shouldn't be combined.
assert len(stmts) == 2
assert len(stmts[1].evidence) == 1
def test_modification_norefinement_subsfamily():
"""A more specific modification statement should be supported by a more
generic modification statement."""
src = Agent('SRC', db_refs={'HGNC': '11283'})
nras = Agent('NRAS', db_refs={'HGNC': '7989'})
ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
st1 = Phosphorylation(src, nras)
st2 = Phosphorylation(src, ras, 'Y', '32',
evidence=[Evidence(text='foo')])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# Modification is less specific, enzyme more specific in st1, therefore
# these statements shouldn't be combined.
assert len(stmts) == 2
assert len(stmts[0].evidence) == 1, stmts
def test_modification_norefinement_enzfamily():
"""A more specific modification statement should be supported by a more
generic modification statement."""
mek = Agent('MEK')
raf = Agent('RAF')
braf = Agent('BRAF')
st1 = Phosphorylation(raf, mek, 'Y', '32',
evidence=[Evidence(text='foo')])
st2 = Phosphorylation(braf, mek)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# Modification is less specific, enzyme more specific in st1, therefore
# these statements shouldn't be combined.
assert len(stmts) == 2
assert len(stmts[1].evidence) == 1
def test_bound_condition_refinement():
"""A statement with more specific bound context should be supported by a
less specific statement."""
src = Agent('SRC', db_refs={'HGNC': '11283'})
gtp = Agent('GTP', db_refs={'CHEBI': '15996'})
nras = Agent('NRAS', db_refs={'HGNC': '7989'})
nrasgtp = Agent('NRAS', db_refs={'HGNC': '7989'},
bound_conditions=[BoundCondition(gtp, True)])
st1 = Phosphorylation(src, nras, 'tyrosine', '32')
st2 = Phosphorylation(src, nrasgtp, 'tyrosine', '32')
# The top-level list should contain only one statement, the more specific
# modification, supported by the less-specific modification.
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
assert len(stmts) == 1
assert stmts[0].equals(st2)
assert len(stmts[0].supported_by) == 1
assert stmts[0].supported_by[0].equals(st1)
def test_bound_condition_norefinement():
"""A statement with more specific bound context should be supported by a
less specific statement."""
src = Agent('SRC', db_refs={'HGNC': '11283'})
gtp = Agent('GTP', db_refs={'CHEBI': '15996'})
nras = Agent('NRAS', db_refs={'HGNC': '7989'})
nrasgtp = Agent('NRAS', db_refs={'HGNC': '7989'},
bound_conditions=[BoundCondition(gtp, True)])
st1 = Phosphorylation(src, nras, 'tyrosine', '32')
st2 = Phosphorylation(src, nrasgtp)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# The bound condition is more specific in st2 but the modification is less
# specific. Therefore these statements should not be combined.
assert len(stmts) == 2
def test_bound_condition_deep_refinement():
"""A statement with more specific bound context should be supported by a
less specific statement."""
src = Agent('SRC', db_refs={'HGNC': '11283'})
gtp1 = Agent('GTP', db_refs={'CHEBI': '15996'})
gtp2 = Agent('GTP', mods=[ModCondition('phosphorylation')],
db_refs={'CHEBI': '15996'})
nrasgtp1 = Agent('NRAS', db_refs={'HGNC': '7989'},
bound_conditions=[BoundCondition(gtp1, True)])
nrasgtp2 = Agent('NRAS', db_refs={'HGNC': '7989'},
bound_conditions=[BoundCondition(gtp2, True)])
st1 = Phosphorylation(src, nrasgtp1, 'tyrosine', '32')
st2 = Phosphorylation(src, nrasgtp2, 'tyrosine', '32')
# The top-level list should contain only one statement, the more specific
# modification, supported by the less-specific modification.
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
assert len(stmts) == 1
assert stmts[0].equals(st2)
assert len(stmts[0].supported_by) == 1
assert stmts[0].supported_by[0].equals(st1)
def test_complex_refinement():
ras = Agent('RAS')
raf = Agent('RAF')
mek = Agent('MEK')
st1 = Complex([ras, raf])
st2 = Complex([mek, ras, raf])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_related()
assert len(pa.unique_stmts) == 2
assert len(pa.related_stmts) == 2
def test_complex_agent_refinement():
ras = Agent('RAS')
raf1 = Agent('RAF', mods=[ModCondition('ubiquitination', None, None, True)])
raf2 = Agent('RAF', mods=[ModCondition('ubiquitination', None, None, False)])
st1 = Complex([ras, raf1])
st2 = Complex([ras, raf2])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_related()
assert len(pa.unique_stmts) == 2
assert len(pa.related_stmts) == 2
def test_mod_sites_refinement():
"""A statement with more specific modification context should be supported
by a less-specific statement."""
# TODO
assert True
def test_binding_site_refinement():
"""A statement with information about a binding site for an interaction
between two proteins should be supported by a statement without this
information."""
# TODO
assert True
def test_activating_substitution_refinement():
"""Should only be refinement if entities are a refinement and all
fields match."""
mc1 = MutCondition('12', 'G', 'D')
mc2 = MutCondition('61', 'Q', 'L')
nras1 = Agent('NRAS', mutations=[mc1], db_refs={'HGNC': '7989'})
nras2 = Agent('NRAS', mutations=[mc2], db_refs={'HGNC': '7989'})
ras = Agent('RAS', mutations=[mc1], db_refs={'FPLX': 'RAS'})
st1 = ActiveForm(ras, 'gtpbound', True,
evidence=Evidence(text='bar'))
st2 = ActiveForm(nras1, 'gtpbound', True,
evidence=Evidence(text='foo'))
st3 = ActiveForm(nras2, 'gtpbound', True,
evidence=Evidence(text='bar'))
st4 = ActiveForm(nras1, 'phosphatase', True,
evidence=Evidence(text='bar'))
st5 = ActiveForm(nras1, 'gtpbound', False,
evidence=Evidence(text='bar'))
assert st2.refinement_of(st1, bio_ontology)
assert not st3.refinement_of(st1, bio_ontology)
assert not st4.refinement_of(st1, bio_ontology)
assert not st5.refinement_of(st1, bio_ontology)
assert not st1.refinement_of(st2, bio_ontology)
assert not st3.refinement_of(st2, bio_ontology)
assert not st4.refinement_of(st2, bio_ontology)
assert not st5.refinement_of(st2, bio_ontology)
assert not st1.refinement_of(st3, bio_ontology)
assert not st2.refinement_of(st3, bio_ontology)
assert not st4.refinement_of(st3, bio_ontology)
assert not st5.refinement_of(st3, bio_ontology)
assert not st1.refinement_of(st4, bio_ontology)
assert not st2.refinement_of(st4, bio_ontology)
assert not st3.refinement_of(st4, bio_ontology)
assert not st5.refinement_of(st4, bio_ontology)
assert not st1.refinement_of(st5, bio_ontology)
assert not st2.refinement_of(st5, bio_ontology)
assert not st3.refinement_of(st5, bio_ontology)
assert not st4.refinement_of(st5, bio_ontology)
def test_translocation():
st1 = Translocation(Agent('AKT'), None, None)
st2 = Translocation(Agent('AKT'), None, 'plasma membrane')
st3 = Translocation(Agent('AKT'), None, 'nucleus')
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3])
pa.combine_related()
assert len(pa.related_stmts) == 2, pa.related_stmts
def test_grounding_aggregation():
braf1 = Agent('BRAF', db_refs={'TEXT': 'braf', 'HGNC': '1097'})
braf2 = Agent('BRAF', db_refs={'TEXT': 'BRAF'})
braf3 = Agent('BRAF', db_refs={'TEXT': 'Braf', 'UP': 'P15056'})
braf4 = Agent('BRAF', db_refs={'TEXT': 'B-raf', 'UP': 'P15056',
'HGNC': '1097'})
st1 = Phosphorylation(None, braf1)
st2 = Phosphorylation(None, braf2)
st3 = Phosphorylation(None, braf3)
st4 = Phosphorylation(None, braf4)
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3, st4])
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 3, unique_stmts
def test_grounding_aggregation_complex():
mek = Agent('MEK')
braf1 = Agent('BRAF', db_refs={'TEXT': 'braf', 'HGNC': '1097'})
braf2 = Agent('BRAF', db_refs={'TEXT': 'BRAF', 'dummy': 'dummy'})
braf3 = Agent('BRAF', db_refs={'TEXT': 'Braf', 'UP': 'P15056'})
st1 = Complex([mek, braf1])
st2 = Complex([braf2, mek])
st3 = Complex([mek, braf3])
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3])
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 3, unique_stmts
def test_render_stmt_graph():
braf = Agent('BRAF', db_refs={'HGNC': '1097'})
mek1 = Agent('MAP2K1', db_refs={'HGNC': '6840'})
mek = Agent('MEK', db_refs={'FPLX':'MEK'})
# Statements
p0 = Phosphorylation(braf, mek)
p1 = Phosphorylation(braf, mek1)
p2 = Phosphorylation(braf, mek1, position='218')
p3 = Phosphorylation(braf, mek1, position='222')
p4 = Phosphorylation(braf, mek1, 'serine')
p5 = Phosphorylation(braf, mek1, 'serine', '218')
p6 = Phosphorylation(braf, mek1, 'serine', '222')
stmts = [p0, p1, p2, p3, p4, p5, p6]
pa = Preassembler(bio_ontology, stmts=stmts)
pa.combine_related()
graph = render_stmt_graph(pa.related_stmts, reduce=False)
# One node for each statement
assert len(graph.nodes()) == 7
# Edges:
# p0 supports p1-p6 = 6 edges
# p1 supports p2-p6 = 5 edges
# p2 supports p5 = 1 edge
# p3 supports p6 = 1 edge
# p4 supports p5-p6 = 2 edges
# (p5 and p6 support none--they are top-level)
# 6 + 5 + 1 + 1 + 2 = 15 edges
assert len(graph.edges()) == 15
def test_flatten_evidence_hierarchy():
braf = Agent('BRAF')
mek = Agent('MAP2K1')
st1 = Phosphorylation(braf, mek, evidence=[Evidence(text='foo')])
st2 = Phosphorylation(braf, mek, 'S', '218',
evidence=[Evidence(text='bar')])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_related()
assert len(pa.related_stmts) == 1
flattened = flatten_evidence(pa.related_stmts)
assert len(flattened) == 1
top_stmt = flattened[0]
assert len(top_stmt.evidence) == 2
assert 'bar' in [e.text for e in top_stmt.evidence]
assert 'foo' in [e.text for e in top_stmt.evidence]
assert len(top_stmt.supported_by) == 1
supporting_stmt = top_stmt.supported_by[0]
assert len(supporting_stmt.evidence) == 1
assert supporting_stmt.evidence[0].text == 'foo'
supporting_stmt.evidence[0].text = 'changed_foo'
assert supporting_stmt.evidence[0].text == 'changed_foo'
assert 'changed_foo' not in [e.text for e in top_stmt.evidence]
assert 'foo' in [e.text for e in top_stmt.evidence]
assert {ev.annotations.get('support_type') for ev in top_stmt.evidence} \
== {'direct', 'supported_by'}
def test_flatten_evidence_multilevel():
braf = Agent('BRAF')
mek = Agent('MAP2K1')
st1 = Phosphorylation(braf, mek, evidence=[Evidence(text='foo')])
st2 = Phosphorylation(braf, mek, 'S',
evidence=[Evidence(text='bar')])
st3 = Phosphorylation(braf, mek, 'S', '218',
evidence=[Evidence(text='baz')])
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3])
pa.combine_related()
assert len(pa.related_stmts) == 1
flattened = flatten_evidence(pa.related_stmts)
assert len(flattened) == 1
top_stmt = flattened[0]
assert len(top_stmt.evidence) == 3, len(top_stmt.evidence)
anns = [ev.annotations['support_type'] for ev in top_stmt.evidence]
assert anns.count('direct') == 1
assert anns.count('supported_by') == 2
def test_flatten_evidence_hierarchy_supports():
braf = Agent('BRAF')
mek = Agent('MAP2K1')
st1 = Phosphorylation(braf, mek, evidence=[Evidence(text='foo')])
st2 = Phosphorylation(braf, mek, 'S', '218',
evidence=[Evidence(text='bar')])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa_stmts = pa.combine_related(return_toplevel=False)
assert len(pa_stmts) == 2
flattened = flatten_evidence(pa_stmts, collect_from='supports')
assert len(flattened) == 2
top_stmt = flattened[1]
assert len(top_stmt.evidence) == 1
assert 'bar' in [e.text for e in top_stmt.evidence]
assert len(top_stmt.supported_by) == 1
supporting_stmt = top_stmt.supported_by[0]
assert len(supporting_stmt.evidence) == 2
assert set([e.text for e in supporting_stmt.evidence]) == {'foo', 'bar'}
def test_flatten_stmts():
st1 = Phosphorylation(Agent('MAP3K5'), Agent('RAF1'), 'S', '338')
st2 = Phosphorylation(None, Agent('RAF1'), 'S', '338')
st3 = Phosphorylation(None, Agent('RAF1'))
st4 = Phosphorylation(Agent('PAK1'), Agent('RAF1'), 'S', '338')
st5 = Phosphorylation(None, Agent('RAF1'), evidence=Evidence(text='foo'))
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3, st4, st5])
pa.combine_duplicates()
pa.combine_related()
assert len(pa.related_stmts) == 2
assert len(flatten_stmts(pa.unique_stmts)) == 4
assert len(flatten_stmts(pa.related_stmts)) == 4
def test_complex_refinement_order():
st1 = Complex([Agent('MED23'), Agent('ELK1')])
st2 = Complex([Agent('ELK1', mods=[ModCondition('phosphorylation')]),
Agent('MED23')])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_duplicates()
pa.combine_related()
assert len(pa.related_stmts) == 1
def test_activation_refinement():
subj = Agent('alcohol', db_refs={'CHEBI': 'CHEBI:16236',
'HMDB': 'HMDB00108',
'PUBCHEM': '702',
'TEXT': 'alcohol'})
obj = Agent('endotoxin', db_refs={'TEXT': 'endotoxin'})
st1 = Inhibition(subj, obj)
st2 = Activation(subj, obj)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_duplicates()
assert len(pa.unique_stmts) == 2
pa.combine_related()
assert len(pa.related_stmts) == 2
def test_homodimer_refinement():
egfr = Agent('EGFR')
erbb = Agent('ERBB2')
st1 = Complex([erbb, erbb])
st2 = Complex([erbb, egfr])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_duplicates()
assert len(pa.unique_stmts) == 2
pa.combine_related()
assert len(pa.related_stmts) == 2
def test_return_toplevel():
src = Agent('SRC', db_refs={'HGNC': '11283'})
nras = Agent('NRAS', db_refs={'HGNC': '7989'})
st1 = Phosphorylation(src, nras, 'tyrosine', '32')
st2 = Phosphorylation(src, nras)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related(return_toplevel=True)
assert len(stmts) == 1
assert len(stmts[0].supported_by) == 1
assert len(stmts[0].supported_by[0].supports) == 1
stmts = pa.combine_related(return_toplevel=False)
assert len(stmts) == 2
ix = 1 if stmts[0].residue else 0
assert len(stmts[1-ix].supported_by) == 1
assert len(stmts[1-ix].supported_by[0].supports) == 1
assert len(stmts[ix].supports) == 1
assert len(stmts[ix].supports[0].supported_by) == 1
def test_conversion_refinement():
ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
hras = Agent('HRAS', db_refs={'HGNC': '5173'})
gtp = Agent('GTP')
gdp = Agent('GDP')
st1 = Conversion(ras, gtp, gdp)
st2 = Conversion(hras, gtp, gdp)
st3 = Conversion(hras, [gtp, gdp], gdp)
st4 = Conversion(hras, [gdp, gtp], gdp)
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3, st4])
toplevel_stmts = pa.combine_related()
assert len(toplevel_stmts) == 2
@unittest.skipUnless(has_indra_world, 'indra_world not available')
def test_influence_duplicate():
gov = 'wm/concept/causal_factor/social_and_political/government'
agr = 'wm/concept/causal_factor/agriculture/crop_production'
cgov = Event(Concept('government', db_refs={'WM': [(gov, 1.0)]}))
cagr = Event(Concept('agriculture', db_refs={'WM': [(agr, 1.0)]}))
print(cgov.matches_key())
stmt1 = Influence(cgov, cagr, evidence=[Evidence(source_api='eidos1')])
stmt2 = Influence(cagr, cgov, evidence=[Evidence(source_api='eidos2')])
stmt3 = Influence(cgov, cagr, evidence=[Evidence(source_api='eidos3')])
pa = Preassembler(world_ontology, [stmt1, stmt2, stmt3])
unique_stmts = pa.combine_duplicates()
unique_stmts = sorted(unique_stmts, key=lambda x: len(x.evidence))
assert len(unique_stmts) == 2
assert len(unique_stmts[0].evidence) == 1
assert len(unique_stmts[1].evidence) == 2, unique_stmts
sources = [e.source_api for e in unique_stmts[1].evidence]
assert set(sources) == {'eidos1', 'eidos3'}
@unittest.skipUnless(has_indra_world, 'indra_world not available')
def test_influence_refinement():
tran = 'wm/concept/causal_factor/access/infrastructure_access/'\
'transportation'
ship = 'wm/concept/causal_factor/access/infrastructure_access/' \
'transportation/shipping'
agr = 'wm/concept/causal_factor/economic_and_commerce/' \
'economic_activity/livelihood'
ctran = Event(Concept('transportation', db_refs={'WM': [(tran, 1.0)]}))
cship = Event(Concept('trucking', db_refs={'WM': [(ship, 1.0)]}))
cagr = Event(Concept('agriculture', db_refs={'WM': [(agr, 1.0)]}))
stmt1 = Influence(ctran, cagr, evidence=[Evidence(source_api='eidos1')])
stmt2 = Influence(cship, cagr, evidence=[Evidence(source_api='eidos2')])
stmt3 = Influence(cagr, ctran, evidence=[Evidence(source_api='eidos3')])
pa = Preassembler(world_ontology, [stmt1, stmt2, stmt3])
rel_stmts = pa.combine_related()
assert len(rel_stmts) == 2, rel_stmts
truck_stmt = [st for st in rel_stmts if st.subj.concept.name ==
'trucking'][0]
assert len(truck_stmt.supported_by) == 1
assert truck_stmt.supported_by[0].subj.concept.name == 'transportation'
def test_find_contradicts():
st1 = Inhibition(Agent('a'), Agent('b'))
st2 = Activation(Agent('a'), Agent('b'))
st3 = IncreaseAmount(Agent('a'), Agent('b'))
st4 = DecreaseAmount(Agent('a'), Agent('b'))
st5 = ActiveForm(Agent('a',
mods=[ModCondition('phosphorylation', None, None, True)]),
'kinase', True)
st6 = ActiveForm(Agent('a',
mods=[ModCondition('phosphorylation', None, None, True)]),
'kinase', False)
pa = Preassembler(bio_ontology, [st1, st2, st3, st4, st5, st6])
contradicts = pa.find_contradicts()
assert len(contradicts) == 3
for s1, s2 in contradicts:
assert {s1.uuid, s2.uuid} in ({st1.uuid, st2.uuid},
{st3.uuid, st4.uuid},
{st5.uuid, st6.uuid})
def test_find_contradicts_refinement():
ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
kras = Agent('KRAS', db_refs={'HGNC': '6407'})
hras = Agent('HRAS', db_refs={'HGNC': '5173'})
st1 = Phosphorylation(Agent('x'), ras)
st2 = Dephosphorylation(Agent('x'), kras)
st3 = Dephosphorylation(Agent('x'), hras)
pa = Preassembler(bio_ontology, [st1, st2, st3])
contradicts = pa.find_contradicts()
assert len(contradicts) == 2
for s1, s2 in contradicts:
assert {s1.uuid, s2.uuid} in ({st1.uuid, st2.uuid},
{st1.uuid, st3.uuid})
def test_preassemble_related_complex():
ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
kras = Agent('KRAS', db_refs={'HGNC': '6407'})
hras = Agent('HRAS', db_refs={'HGNC': '5173'})
st1 = Complex([kras, hras])
st2 = Complex([kras, ras])
st3 = Complex([hras, kras])
st4 = Complex([ras, kras])
pa = Preassembler(bio_ontology, [st1, st2, st3, st4])
uniq = pa.combine_duplicates()
assert len(uniq) == 2
top = pa.combine_related()
assert len(top) == 1
@unittest.skipUnless(has_indra_world, 'indra_world not available')
def test_normalize_opposites():
concept1 = 'wm/concept/causal_factor/food_security/food_stability'
concept2 = 'wm/concept/causal_factor/food_insecurity/food_instability'
concept3 = ('wm/concept/causal_factor/crisis_and_disaster/'
'environmental_disasters/natural_disaster/flooding')
# First test the inherently positive being the main grounding
dbr = {'WM': [(concept1, 1.0), (concept2, 0.5), (concept3, 0.1)]}
ev = Event(Concept('x', db_refs=dbr),
delta=QualitativeDelta(polarity=1))
pa = Preassembler(world_ontology, stmts=[ev])
pa.normalize_opposites(ns='WM')
# We are normalizing to food stability since that is the inherently
# positive concept
assert pa.stmts[0].concept.db_refs['WM'][0] == \
(concept1, 1.0), pa.stmts[0].concept.db_refs['WM']
assert pa.stmts[0].concept.db_refs['WM'][1] == \
(concept1, 0.5), pa.stmts[0].concept.db_refs['WM']
assert pa.stmts[0].concept.db_refs['WM'][2] == \
(concept3, 0.1), pa.stmts[0].concept.db_refs['WM']
assert pa.stmts[0].delta.polarity == 1
# Next test the inherently negative being the main grounding
dbr = {'WM': [(concept2, 1.0), (concept1, 0.5), (concept3, 0.1)]}
ev = Event(Concept('x', db_refs=dbr),
delta=QualitativeDelta(polarity=1))
pa = Preassembler(world_ontology, stmts=[ev])
pa.normalize_opposites(ns='WM')
# We are normalizing to food stability since that is the inherently
# positive concept
assert pa.stmts[0].concept.db_refs['WM'][0] == \
(concept1, 1.0), pa.stmts[0].concept.db_refs['WM']
assert pa.stmts[0].concept.db_refs['WM'][1] == \
(concept1, 0.5), pa.stmts[0].concept.db_refs['WM']
assert pa.stmts[0].concept.db_refs['WM'][2] == \
(concept3, 0.1), pa.stmts[0].concept.db_refs['WM']
assert pa.stmts[0].delta.polarity == -1
@unittest.skipUnless(has_indra_world, 'indra_world not available')
def test_normalize_opposites_influence():
concept1 = 'wm/concept/causal_factor/food_security/food_stability'
concept2 = 'wm/concept/causal_factor/food_insecurity/food_instability'
dbr1 = {'WM': [(concept1, 1.0), (concept2, 0.5)]}
dbr2 = {'WM': [(concept2, 1.0), (concept1, 0.5)]}
stmt = Influence(Event(Concept('x', db_refs=dbr1),
delta=QualitativeDelta(polarity=1)),
Event(Concept('y', db_refs=dbr2),
delta=QualitativeDelta(polarity=-1)))
pa = Preassembler(world_ontology, stmts=[stmt])
pa.normalize_opposites(ns='WM')
assert pa.stmts[0].subj.delta.polarity == 1
assert pa.stmts[0].obj.delta.polarity == 1
@unittest.skipUnless(has_indra_world, 'indra_world not available')
def test_normalize_opposites_association():
concept1 = 'wm/concept/causal_factor/food_security/food_stability'
concept2 = 'wm/concept/causal_factor/food_insecurity/food_instability'
dbr1 = {'WM': [(concept1, 1.0), (concept2, 0.5)]}
dbr2 = {'WM': [(concept2, 1.0), (concept1, 0.5)]}
stmt = Association([Event(Concept('x', db_refs=dbr1),
delta=QualitativeDelta(polarity=1)),
Event(Concept('y', db_refs=dbr2),
delta=QualitativeDelta(polarity=-1))])
pa = Preassembler(world_ontology, stmts=[stmt])
pa.normalize_opposites(ns='WM')
assert pa.stmts[0].members[0].delta.polarity == 1
assert pa.stmts[0].members[1].delta.polarity == 1
def test_agent_text_storage():
A1 = Agent('A', db_refs={'TEXT': 'A'})
A2 = Agent('A', db_refs={'TEXT': 'alpha'})
B1 = Agent('B', db_refs={'TEXT': 'bag'})
B2 = Agent('B', db_refs={'TEXT': 'bug'})
C = Agent('C')
D = Agent('D')
inp = [
Complex([A1, B1], evidence=Evidence(text='A complex bag.')),
Complex([B2, A2], evidence=Evidence(text='bug complex alpha once.')),
Complex([B2, A2], evidence=Evidence(text='bug complex alpha again.')),
Complex([A1, C, B2], evidence=Evidence(text='A complex C bug.')),
Phosphorylation(A1, B1, evidence=Evidence(text='A phospo bags.')),
Phosphorylation(A2, B2, evidence=Evidence(text='alpha phospho bugs.')),
Conversion(D, [A1, B1], [C, D],
evidence=Evidence(text='D: A bag -> C D')),
Conversion(D, [B1, A2], [C, D],
evidence=Evidence(text='D: bag a -> C D')),
Conversion(D, [B2, A2], [D, C],
evidence=Evidence(text='D: bug a -> D C')),
Conversion(D, [B1, A1], [C, D],
evidence=Evidence(text='D: bag A -> C D')),
Conversion(D, [A1], [A1, C],
evidence=Evidence(text='D: A -> A C'))
]
pa = Preassembler(bio_ontology, inp)
unq1 = pa.combine_duplicates()
assert len(unq1) == 5, len(unq1)
assert all([len(ev.annotations['prior_uuids']) == 1
for s in unq1 for ev in s.evidence
if len(s.evidence) > 1]),\
'There can only be one prior evidence per uuid at this stage.'
ev_uuid_dict = {ev.annotations['prior_uuids'][0]: ev.annotations['agents']
for s in unq1 for ev in s.evidence}
for s in inp:
raw_text = [ag.db_refs.get('TEXT')
for ag in s.agent_list(deep_sorted=True)]
assert raw_text == ev_uuid_dict[s.uuid]['raw_text'],\
str(raw_text) + '!=' + str(ev_uuid_dict[s.uuid]['raw_text'])
# Now run pa on the above corpus plus another statement.
inp2 = unq1 + [
Complex([A1, C, B1], evidence=Evidence(text='A complex C bag.'))
]
pa2 = Preassembler(bio_ontology, inp2)
unq2 = pa2.combine_duplicates()
assert len(unq2) == 5, len(unq2)
old_ev_list = []
new_ev = None
for s in unq2:
for ev in s.evidence:
if ev.text == inp2[-1].evidence[0].text:
new_ev = ev
else:
old_ev_list.append(ev)
assert all([len(ev.annotations['prior_uuids']) == 2 for ev in old_ev_list])
assert new_ev
assert len(new_ev.annotations['prior_uuids']) == 1
def test_agent_coordinates():
path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'reach_coordinates.json')
stmts = reach.process_json_file(path).statements
pa = Preassembler(bio_ontology, stmts)
unique_stmt = pa.combine_duplicates()[0]
agent_annots = [ev.annotations['agents'] for ev in unique_stmt.evidence]
assert all(a['raw_text'] == ['MEK1', 'ERK2'] for a in agent_annots)
assert {tuple(a['coords']) for a in agent_annots} == {((21, 25), (0, 4)),
((0, 4), (15, 19))}
@unittest.skipUnless(has_indra_world, 'indra_world not available')
def test_association_duplicate():
ev1 = Event(Concept('a'))
ev2 = Event(Concept('b'))
ev3 = Event(Concept('c'))
# Order of members does not matter
st1 = Association([ev1, ev2], evidence=[Evidence(source_api='eidos1')])
st2 = Association([ev1, ev3], evidence=[Evidence(source_api='eidos2')])
st3 = Association([ev2, ev1], evidence=[Evidence(source_api='eidos3')])
st4 = Association([ev2, ev3], evidence=[Evidence(source_api='eidos4')])
st5 = Association([ev2, ev3], evidence=[Evidence(source_api='eidos5')])
eidos_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../sources/eidos/eidos_ontology.rdf')
pa = Preassembler(world_ontology, [st1, st2, st3, st4, st5])
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 3
assert len(unique_stmts[0].evidence) == 2
assert len(unique_stmts[1].evidence) == 1
assert len(unique_stmts[2].evidence) == 2
sources = [e.source_api for e in unique_stmts[0].evidence]
assert set(sources) == {'eidos1', 'eidos3'}
@unittest.skipUnless(has_indra_world, 'indra_world not available')
def test_association_refinement():
unrelated = 'wm/concept/causal_factor/wild_food_sources'
parent = 'wm/concept/causal_factor/health_and_life'
child = 'wm/concept/causal_factor/health_and_life/' \
'living_condition/food_safety'
parent_event = Event(Concept('parent', db_refs={'WM': [(parent, 1.0)]}))
unrelated_event = \
Event(Concept('unrelated', db_refs={'WM': [(unrelated, 1.0)]}))
child_event = Event(Concept('child',
db_refs={'WM': [(child, 1.0)]}))
st1 = Association([parent_event, unrelated_event],
evidence=[Evidence(source_api='eidos1')])
st2 = Association([unrelated_event, parent_event],
evidence=[Evidence(source_api='eidos2')])
st3 = Association([parent_event, child_event],
evidence=[Evidence(source_api='eidos3')])
st4 = Association([unrelated_event, child_event],
evidence=[Evidence(source_api='eidos4')])
pa = Preassembler(world_ontology, [st1, st2, st3, st4])
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 3
top_level_stmts = pa.combine_related()
assert len(top_level_stmts) == 2, top_level_stmts
names = {tuple(sorted(e.concept.name for e in stmt.members)): stmt
for stmt in top_level_stmts}
stmt = names[('child', 'unrelated')]
assert len(stmt.supported_by) == 1
assert {e.concept.name for e in stmt.supported_by[0].members} == \
{'parent', 'unrelated'}
@unittest.skipUnless(has_indra_world, 'indra_world not available')
def test_matches_key_fun():
from indra.statements import WorldContext, RefContext
def has_location(stmt):
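        """Return True if stmt has a geo_location grounded to a GEOID."""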
if not stmt.context or not stmt.context.geo_location or \
not stmt.context.geo_location.db_refs.get('GEOID'):
return False
return True
def event_location_matches(stmt):
if isinstance(stmt, Event):
if not has_location(stmt):
context_key = None
else:
context_key = stmt.context.geo_location.db_refs['GEOID']
matches_key = str((stmt.concept.matches_key(), context_key))
else:
matches_key = stmt.matches_key()
return matches_key
def event_location_refinement(st1, st2, ontology, entities_refined):
if isinstance(st1, Event) and isinstance(st2, Event):
ref = st1.refinement_of(st2, ontology)
if not ref:
return False
if not has_location(st2):
return True
elif not has_location(st1) and has_location(st2):
return False
else:
return st1.context.geo_location.db_refs['GEOID'] == \
st2.context.geo_location.db_refs['GEOID']
context1 = WorldContext(geo_location=RefContext('x',
db_refs={'GEOID': '1'}))
context2 = WorldContext(geo_location=RefContext('x',
db_refs={'GEOID': '2'}))
health = 'wm/concept/causal_factor/health_and_life'
e1 = Event(Concept('health', db_refs={'WM': [(health, 1.0)]}),
context=context1,
evidence=Evidence(text='1', source_api='eidos'))
e2 = Event(Concept('health', db_refs={'WM': [(health, 1.0)]}),
context=context2,
evidence=Evidence(text='2', source_api='eidos'))
e3 = Event(Concept('health', db_refs={'WM': [(health, 1.0)]}),
context=context2,
evidence=Evidence(text='3', source_api='eidos'))
pa = Preassembler(world_ontology, [e1, e2, e3],
matches_fun=event_location_matches,
refinement_fun=event_location_refinement)
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 2, unique_stmts
from indra.tools.assemble_corpus import run_preassembly
stmts = run_preassembly([e1, e2, e3], matches_fun=event_location_matches,
refinement_fun=event_location_refinement)
assert len(stmts) == 2, stmts
def test_uppro_assembly():
ag1 = Agent('x', db_refs={'UP': 'P01019', 'UPPRO': 'PRO_0000032457'})
ag2 = Agent('y', db_refs={'UP': 'P01019', 'UPPRO': 'PRO_0000032458'})
assert ag1.get_grounding() == ('UPPRO', ag1.db_refs['UPPRO'])
assert ag2.get_grounding() == ('UPPRO', ag2.db_refs['UPPRO'])
stmt1 = Phosphorylation(None, ag1)
stmt2 = Phosphorylation(None, ag2)
assert stmt1.matches_key() != stmt2.matches_key()
pa = Preassembler(bio_ontology, [stmt1, stmt2])
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 2, unique_stmts
from indra.tools import assemble_corpus as ac
stmts = ac.map_grounding([stmt1, stmt2])
pa = Preassembler(bio_ontology, stmts)
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 2
def test_split_idx():
ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
kras = Agent('KRAS', db_refs={'HGNC': '6407'})
hras = Agent('HRAS', db_refs={'HGNC': '5173'})
st1 = Phosphorylation(Agent('x'), ras)
st2 = Phosphorylation(Agent('x'), kras)
st3 = Phosphorylation(Agent('x'), hras)
pa = Preassembler(bio_ontology)
maps = pa._generate_id_maps([st1, st2, st3])
assert (1, 0) in maps, maps
assert (2, 0) in maps, maps
assert pa._comparison_counter == 2
pa = Preassembler(bio_ontology)
maps = pa._generate_id_maps([st1, st2, st3], split_idx=1)
assert (2, 0) in maps, maps
assert (1, 0) not in maps, maps
assert pa._comparison_counter == 1
def test_refinement_filters():
ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
kras = Agent('KRAS', db_refs={'HGNC': '6407'})
hras = Agent('HRAS', db_refs={'HGNC': '5173'})
st1 = Phosphorylation(Agent('x'), ras)
st2 = Phosphorylation(Agent('x'), kras)
st3 = Phosphorylation(Agent('x'), hras)
# This filters everything out so no comparisons will be done
class FilterEmpty(RefinementFilter):
def get_less_specifics(self, stmt, possibly_related):
return set()
# This is a superset of all comparisons constrained by the ontology
# so will not change what the preassembler does internally
class FilterAll(RefinementFilter):
def get_less_specifics(self, stmt, possibly_related):
shs = set(self.shared_data['stmts_by_hash']) - {stmt.get_hash()}
if possibly_related is not None:
shs &= possibly_related
return shs
# No comparisons here
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3])
pa.combine_related(
filters=[OntologyRefinementFilter(ontology=bio_ontology),
FilterEmpty()])
assert pa._comparison_counter == 0
# The same number of comparisons here as without the filter
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3])
pa.combine_related(
filters=[OntologyRefinementFilter(ontology=bio_ontology),
FilterAll()])
assert pa._comparison_counter == 2, pa._comparison_counter
# Just to make sure lists of more than one filter are correctly handled
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3])
pa.combine_related(
filters=[
FilterAll(),
FilterEmpty(),
OntologyRefinementFilter(bio_ontology)
])
assert pa._comparison_counter == 0, pa._comparison_counter
# Now try adding more statement types
st4 = Activation(Agent('x'), ras)
st5 = Activation(Agent('x'), kras)
st6 = Activation(Agent('x'), hras)
# The same number of comparisons here as without the filter
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3, st4, st5, st6])
pa.combine_related(
filters=[OntologyRefinementFilter(bio_ontology),
FilterAll()
])
assert pa._comparison_counter == 4, pa._comparison_counter
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3, st4, st5, st6])
pa.combine_related(
filters=[FilterAll(),
OntologyRefinementFilter(bio_ontology)
])
assert pa._comparison_counter == 4, pa._comparison_counter
# Just to make sure lists of more than one filter are correctly handled
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3, st4, st5, st6])
pa.combine_related(
filters=[
FilterAll(),
FilterEmpty(),
OntologyRefinementFilter(bio_ontology)
])
assert pa._comparison_counter == 0, pa._comparison_counter
```
#### File: indra/tests/test_pysb_assembler.py
```python
import xml.etree.ElementTree as ET
from indra.assemblers.pysb import PysbAssembler
import indra.assemblers.pysb.assembler as pa
from indra.assemblers.pysb.assembler import Policy, Param
from indra.assemblers.pysb.preassembler import PysbPreassembler
from indra.assemblers.pysb.export import export_cm_network
from indra.assemblers.pysb.kappa_util import get_cm_cycles
from indra.statements import *
from pysb import bng, WILD, Monomer, Annotation
from pysb.testing import with_model
from nose.tools import raises
def test_pysb_assembler_complex1():
member1 = Agent('BRAF')
member2 = Agent('MEK1')
stmt = Complex([member1, member2])
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 2
assert len(model.monomers) == 2
def test_pysb_assembler_complex2():
member1 = Agent('BRAF')
member2 = Agent('MEK1')
member3 = Agent('ERK1')
stmt = Complex([member1, member2, member3])
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 6
assert len(model.monomers) == 3
def test_pysb_assembler_complex3():
hras = Agent('HRAS')
member1 = Agent('BRAF', bound_conditions=[BoundCondition(hras, True)])
member2 = Agent('MEK1')
stmt = Complex([member1, member2])
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 2
assert len(model.monomers) == 3
def test_pysb_assembler_complex_twostep():
member1 = Agent('BRAF')
member2 = Agent('MEK1')
stmt = Complex([member1, member2])
pa = PysbAssembler([stmt])
model = pa.make_model(policies='two_step')
assert len(model.rules) == 2
assert len(model.monomers) == 2
def test_pysb_assembler_complex_multiway():
member1 = Agent('BRAF')
member2 = Agent('MEK1')
member3 = Agent('ERK1')
stmt = Complex([member1, member2, member3])
pa = PysbAssembler([stmt])
model = pa.make_model(policies='multi_way')
assert len(model.rules) == 2
assert len(model.monomers) == 3
def test_pysb_assembler_actsub():
stmt = ActiveForm(Agent('BRAF', mutations=[MutCondition('600', 'V', 'E')]),
'activity', True)
pa = PysbAssembler([stmt])
model = pa.make_model(policies='two_step')
assert len(model.rules) == 0
assert len(model.monomers) == 1
def test_pysb_assembler_phos_noenz():
enz = None
sub = Agent('MEK1')
stmt = Phosphorylation(enz, sub, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 0
assert len(model.monomers) == 0
def test_pysb_assembler_dephos_noenz():
enz = None
sub = Agent('MEK1')
    stmt = Dephosphorylation(enz, sub, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 0
assert len(model.monomers) == 0
def test_pysb_assembler_phos1():
enz = Agent('BRAF')
sub = Agent('MEK1')
stmt = Phosphorylation(enz, sub, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 2
def test_pysb_assembler_phos2():
hras = Agent('HRAS')
enz = Agent('BRAF', bound_conditions=[BoundCondition(hras, True)])
sub = Agent('MEK1')
stmt = Phosphorylation(enz, sub, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 3
def test_pysb_assembler_phos3():
hras = Agent('HRAS')
erk1 = Agent('ERK1')
enz = Agent('BRAF', bound_conditions=[BoundCondition(hras, True)])
sub = Agent('MEK1', bound_conditions=[BoundCondition(erk1, True)])
stmt = Phosphorylation(enz, sub, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 4
def test_pysb_assembler_phos4():
hras = Agent('HRAS')
erk1 = Agent('ERK1')
enz = Agent('BRAF', bound_conditions=[BoundCondition(hras, True)])
sub = Agent('MEK1', bound_conditions=[BoundCondition(erk1, False)])
stmt = Phosphorylation(enz, sub, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 4
def test_pysb_assembler_autophos1():
enz = Agent('MEK1')
stmt = Autophosphorylation(enz, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 1
def test_pysb_assembler_autophos2():
raf1 = Agent('RAF1')
enz = Agent('MEK1', bound_conditions=[BoundCondition(raf1, True)])
stmt = Autophosphorylation(enz, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 2
def test_pysb_assembler_autophos3():
egfr = Agent('EGFR')
enz = Agent('EGFR', bound_conditions=[BoundCondition(egfr, True)])
stmt = Autophosphorylation(enz, 'tyrosine')
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 1
def test_pysb_assembler_transphos1():
egfr = Agent('EGFR')
enz = Agent('EGFR', bound_conditions=[BoundCondition(egfr, True)])
stmt = Transphosphorylation(enz, 'tyrosine')
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 1
def test_pysb_assembler_act1():
egfr = Agent('EGFR')
subj = Agent('GRB2', bound_conditions=[BoundCondition(egfr, True)])
obj = Agent('SOS1')
stmt = Activation(subj, obj)
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 3
def test_pysb_assembler_dephos1():
phos = Agent('PP2A')
sub = Agent('MEK1')
stmt = Dephosphorylation(phos, sub, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 2
def test_pysb_assembler_dephos2():
phos = Agent('PP2A')
raf1 = Agent('RAF1')
sub = Agent('MEK1', bound_conditions=[BoundCondition(raf1, True)])
stmt = Dephosphorylation(phos, sub, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 3
def test_pysb_assembler_gef1():
gef = Agent('SOS1')
ras = Agent('HRAS')
stmt = Gef(gef, ras)
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 2
def test_pysb_assembler_gap1():
gap = Agent('NF1')
ras = Agent('HRAS')
stmt = Gap(gap, ras)
pa = PysbAssembler([stmt])
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 2
def test_pysb_assembler_actmod1():
mek = Agent('MEK')
erk = Agent('ERK')
stmts = []
mc1 = ModCondition('phosphorylation', 'serine', '218')
mc2 = ModCondition('phosphorylation', 'serine', '222')
stmts.append(ActiveForm(Agent('MEK', mods=[mc1, mc2]), 'activity', True))
stmts.append(Phosphorylation(mek, erk, 'threonine', '185'))
stmts.append(Phosphorylation(mek, erk, 'tyrosine', '187'))
pa = PysbAssembler(stmts)
model = pa.make_model()
assert len(model.rules) == 2
assert len(model.monomers) == 2
model = pa.make_model(policies='two_step')
assert len(model.rules) == 5
def test_pysb_assembler_actmod2():
mek = Agent('MEK', activity=ActivityCondition('activity', True))
erk = Agent('ERK')
stmts = []
stmts.append(ActiveForm(Agent(
'MEK', mods=[ModCondition('phosphorylation', 'serine', '218')]),
'activity', True))
stmts.append(ActiveForm(Agent(
'MEK', mods=[ModCondition('phosphorylation', 'serine', '222')]),
'activity', True))
stmts.append(Phosphorylation(mek, erk, 'threonine', '185'))
stmts.append(Phosphorylation(mek, erk, 'tyrosine', '187'))
pa = PysbAssembler(stmts)
model = pa.make_model()
assert len(model.rules) == 4
assert len(model.monomers) == 2
model = pa.make_model(policies='two_step')
assert len(model.rules) == 9
def test_pysb_assembler_phos_twostep1():
enz = Agent('BRAF')
sub = Agent('MEK1')
stmt = Phosphorylation(enz, sub, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model(policies='two_step')
assert len(model.rules) == 3
assert len(model.monomers) == 2
def test_pysb_assembler_twostep_mixed():
member1 = Agent('BRAF')
member2 = Agent('RAF1')
st1 = Complex([member1, member2])
st2 = Phosphorylation(Agent('MAP2K1'), Agent('MAPK3'))
pa = PysbAssembler([st1, st2])
pa.make_model(policies='two_step')
assert len(pa.model.rules) == 5
assert len(pa.model.monomers) == 4
def test_pysb_assembler_phos_twostep_local():
enz = Agent('BRAF')
sub = Agent('MEK1')
stmt = Phosphorylation(enz, sub, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model(policies='two_step')
assert len(model.rules) == 3
assert len(model.monomers) == 2
def test_pysb_assembler_phos_twostep_local_to_global():
enz = Agent('BRAF')
sub = Agent('MEK1')
stmt = Phosphorylation(enz, sub, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model(policies='two_step')
    # This second call without a policy argument reverts to the default policy
model = pa.make_model()
assert len(model.rules) == 1
assert len(model.monomers) == 2
def test_pysb_assembler_dephos_twostep1():
phos = Agent('PP2A')
sub = Agent('MEK1')
stmt = Dephosphorylation(phos, sub, 'serine', '222')
pa = PysbAssembler([stmt])
model = pa.make_model(policies='two_step')
assert len(model.rules) == 3
assert len(model.monomers) == 2
def test_statement_specific_policies():
enz = Agent('BRAF')
sub = Agent('MEK1')
phos = Agent('PP2A')
stmt1 = Phosphorylation(enz, sub, 'serine', '222')
stmt2 = Dephosphorylation(phos, sub, 'serine', '222')
policies = {'Phosphorylation': 'two_step',
'Dephosphorylation': 'interactions_only'}
pa = PysbAssembler([stmt1, stmt2])
model = pa.make_model(policies=policies)
assert len(model.rules) == 4
assert len(model.monomers) == 3
def test_unspecified_statement_policies():
enz = Agent('BRAF')
sub = Agent('MEK1')
phos = Agent('PP2A')
stmt1 = Phosphorylation(enz, sub, 'serine', '222')
stmt2 = Dephosphorylation(phos, sub, 'serine', '222')
policies = {'Phosphorylation': 'two_step',
'other': 'interactions_only'}
pa = PysbAssembler([stmt1, stmt2])
model = pa.make_model(policies=policies)
assert len(model.rules) == 4
assert len(model.monomers) == 3
def test_activity_activity():
subj = Agent('KRAS')
obj = Agent('BRAF')
stmt = Activation(subj, obj)
pa = PysbAssembler([stmt])
model = pa.make_model(policies='interactions_only')
assert len(model.rules) == 1
assert len(model.monomers) == 2
def test_activity_activity2():
subj = Agent('KRAS')
obj = Agent('BRAF')
stmt = Activation(subj, obj)
pa = PysbAssembler([stmt])
model = pa.make_model(policies='one_step')
assert len(model.rules) == 1
assert len(model.monomers) == 2
def test_activity_inhibition():
subj = Agent('Vemurafenib')
obj = Agent('BRAF')
stmt = Inhibition(subj, obj)
pa = PysbAssembler([stmt])
model = pa.make_model(policies='interactions_only')
assert len(model.rules) == 1
assert len(model.monomers) == 2
def test_activity_activity3():
subj = Agent('Vemurafenib')
obj = Agent('BRAF')
stmt = Inhibition(subj, obj)
pa = PysbAssembler([stmt])
model = pa.make_model(policies='one_step')
assert len(model.rules) == 1
assert len(model.monomers) == 2
def test_rule_name_str_1():
s = pa.get_agent_rule_str(Agent('BRAF'))
assert s == 'BRAF'
def test_rule_name_str_2():
a = Agent('GRB2',
bound_conditions=[BoundCondition(Agent('EGFR'), True)])
s = pa.get_agent_rule_str(a)
assert s == 'GRB2_EGFR'
def test_rule_name_str_3():
a = Agent('GRB2',
bound_conditions=[BoundCondition(Agent('EGFR'), False)])
s = pa.get_agent_rule_str(a)
assert s == 'GRB2_nEGFR'
def test_rule_name_str_4():
a = Agent('BRAF', mods=[ModCondition('phosphorylation', 'serine')])
s = pa.get_agent_rule_str(a)
assert s == 'BRAF_phosphoS'
def test_rule_name_str_5():
a = Agent('BRAF', mods=[ModCondition('phosphorylation', 'serine', '123')])
s = pa.get_agent_rule_str(a)
assert s == 'BRAF_phosphoS123'
def test_neg_act_mod():
mc = ModCondition('phosphorylation', 'serine', '123', False)
st1 = ActiveForm(Agent('BRAF', mods=[mc]), 'activity', True)
braf = Agent('BRAF', activity=ActivityCondition('active', True))
st2 = Phosphorylation(braf, Agent('MAP2K2'))
pa = PysbAssembler([st1, st2])
pa.make_model(policies='one_step')
assert len(pa.model.rules) == 1
r = pa.model.rules[0]
braf = r.reactant_pattern.complex_patterns[0].monomer_patterns[0]
assert braf.monomer.name == 'BRAF'
assert braf.site_conditions == {'S123': ('u', WILD)}
def test_pos_agent_mod():
mc = ModCondition('phosphorylation', 'serine', '123', True)
st = Phosphorylation(Agent('BRAF', mods=[mc]), Agent('MAP2K2'))
pa = PysbAssembler([st])
pa.make_model(policies='one_step')
assert len(pa.model.rules) == 1
r = pa.model.rules[0]
braf = r.reactant_pattern.complex_patterns[0].monomer_patterns[0]
assert braf.monomer.name == 'BRAF'
assert braf.site_conditions == {'S123': ('p', WILD)}
def test_neg_agent_mod():
mc = ModCondition('phosphorylation', 'serine', '123', False)
st = Phosphorylation(Agent('BRAF', mods=[mc]), Agent('MAP2K2'))
pa = PysbAssembler([st])
pa.make_model(policies='one_step')
assert len(pa.model.rules) == 1
r = pa.model.rules[0]
braf = r.reactant_pattern.complex_patterns[0].monomer_patterns[0]
assert braf.monomer.name == 'BRAF'
assert braf.site_conditions == {'S123': ('u', WILD)}
def test_mut():
mut = MutCondition('600', 'V', 'E')
st = Phosphorylation(Agent('BRAF', mutations=[mut]), Agent('MEK'))
pa = PysbAssembler([st])
pa.make_model()
assert len(pa.model.rules) == 1
r = pa.model.rules[0]
braf = r.reactant_pattern.complex_patterns[0].monomer_patterns[0]
assert braf.monomer.name == 'BRAF'
assert braf.site_conditions == {'V600': 'E'}
def test_mut_missing1():
mut = MutCondition('600', 'V', None)
st = Phosphorylation(Agent('BRAF', mutations=[mut]), Agent('MEK'))
pa = PysbAssembler([st])
pa.make_model()
assert len(pa.model.rules) == 1
r = pa.model.rules[0]
braf = r.reactant_pattern.complex_patterns[0].monomer_patterns[0]
assert braf.monomer.name == 'BRAF'
assert braf.site_conditions == {'V600': 'X'}
def test_mut_missing2():
mut = MutCondition('600', None, 'E')
st = Phosphorylation(Agent('BRAF', mutations=[mut]), Agent('MEK'))
pa = PysbAssembler([st])
pa.make_model()
assert len(pa.model.rules) == 1
r = pa.model.rules[0]
braf = r.reactant_pattern.complex_patterns[0].monomer_patterns[0]
assert braf.monomer.name == 'BRAF'
assert braf.site_conditions == {'mut600': 'E'}
def test_mut_missing3():
mut = MutCondition(None, 'V', 'E')
st = Phosphorylation(Agent('BRAF', mutations=[mut]), Agent('MEK'))
pa = PysbAssembler([st])
pa.make_model()
assert len(pa.model.rules) == 1
r = pa.model.rules[0]
braf = r.reactant_pattern.complex_patterns[0].monomer_patterns[0]
assert braf.monomer.name == 'BRAF'
assert braf.site_conditions == {'V': 'E'}
def test_mut_missing4():
mut = MutCondition(None, None, None)
st = Phosphorylation(Agent('BRAF', mutations=[mut]), Agent('MEK'))
pa = PysbAssembler([st])
pa.make_model()
assert len(pa.model.rules) == 1
r = pa.model.rules[0]
braf = r.reactant_pattern.complex_patterns[0].monomer_patterns[0]
assert braf.monomer.name == 'BRAF'
assert braf.site_conditions == {'mut': 'X'}
def test_agent_loc():
st = Phosphorylation(Agent('BRAF', location='cytoplasm'), Agent('MEK'))
pa = PysbAssembler([st])
pa.make_model()
assert len(pa.model.rules) == 1
r = pa.model.rules[0]
braf = r.reactant_pattern.complex_patterns[0].monomer_patterns[0]
assert braf.site_conditions == {'loc': 'cytoplasm'}
def test_translocation():
st = Translocation(Agent('FOXO3A'), 'nucleus', 'cytoplasm')
pa = PysbAssembler([st])
pa.make_model()
assert len(pa.model.rules) == 1
r = pa.model.rules[0]
f1 = r.reactant_pattern.complex_patterns[0].monomer_patterns[0]
assert f1.site_conditions == {'loc': 'nucleus'}
f2 = r.product_pattern.complex_patterns[0].monomer_patterns[0]
assert f2.site_conditions == {'loc': 'cytoplasm'}
assert r.rate_forward.name == 'kf_foxo3a_nucleus_cytoplasm_1'
def test_translocation_to():
st = Translocation(Agent('FOXO3A'), None, 'nucleus')
pa = PysbAssembler([st])
pa.make_model()
assert len(pa.model.rules) == 1
r = pa.model.rules[0]
f1 = r.reactant_pattern.complex_patterns[0].monomer_patterns[0]
assert f1.site_conditions == {'loc': 'cytoplasm'}
f2 = r.product_pattern.complex_patterns[0].monomer_patterns[0]
assert f2.site_conditions == {'loc': 'nucleus'}
assert r.rate_forward.name == 'kf_foxo3a_cytoplasm_nucleus_1'
def test_phos_atpdep():
st = Phosphorylation(Agent('BRAF'), Agent('MEK'), 'S', '222')
pa = PysbAssembler([st])
pa.make_model(policies='atp_dependent')
assert len(pa.model.rules) == 5
def test_set_context():
st = Phosphorylation(Agent('MAP2K1'), Agent('MAPK3'))
pa = PysbAssembler([st])
pa.make_model()
assert pa.model.parameters['MAP2K1_0'].value == pa.default_initial_amount
assert pa.model.parameters['MAPK3_0'].value == pa.default_initial_amount
pa.set_context('A375_SKIN')
assert pa.model.parameters['MAP2K1_0'].value > 10000
assert pa.model.parameters['MAPK3_0'].value > 10000
def test_set_context_monomer_notfound():
st = Phosphorylation(Agent('MAP2K1'), Agent('XYZ'))
pa = PysbAssembler([st])
pa.make_model()
assert pa.model.parameters['MAP2K1_0'].value == pa.default_initial_amount
assert pa.model.parameters['XYZ_0'].value == pa.default_initial_amount
pa.add_default_initial_conditions(100)
assert pa.model.parameters['MAP2K1_0'].value == 100
assert pa.model.parameters['XYZ_0'].value == 100
pa.set_context('A375_SKIN')
assert pa.model.parameters['MAP2K1_0'].value > 10000
assert pa.model.parameters['XYZ_0'].value == pa.default_initial_amount
def test_set_context_celltype_notfound():
st = Phosphorylation(Agent('MAP2K1'), Agent('MAPK3'))
pa = PysbAssembler([st])
pa.make_model()
pa.set_context('XYZ')
def test_annotation():
st = Phosphorylation(Agent('BRAF', db_refs = {'UP': 'P15056'}),
Agent('MAP2K2', db_refs = {'HGNC': '6842'}))
pa = PysbAssembler([st])
pa.make_model()
assert len(pa.model.annotations) == 5
def test_annotation_regamount():
st1 = IncreaseAmount(Agent('BRAF', db_refs = {'UP': 'P15056'}),
Agent('MAP2K2', db_refs = {'HGNC': '6842'}))
st2 = DecreaseAmount(Agent('BRAF', db_refs = {'UP': 'P15056'}),
Agent('MAP2K2', db_refs = {'HGNC': '6842'}))
pa = PysbAssembler([st1, st2])
pa.make_model()
assert len(pa.model.annotations) == 8
def test_print_model():
st = Phosphorylation(Agent('MAP2K1'), Agent('MAPK3'))
pa = PysbAssembler([st])
pa.make_model()
pa.save_model('/dev/null')
def test_save_rst():
st = Phosphorylation(Agent('MAP2K1'), Agent('MAPK3'))
pa = PysbAssembler([st])
pa.make_model()
pa.save_rst('/dev/null')
def test_export_model():
st = Phosphorylation(Agent('MAP2K1'), Agent('MAPK3'))
pa = PysbAssembler([st])
pa.make_model()
exp_str = pa.export_model('kappa')
assert exp_str
exp_str = pa.export_model('bngl')
assert exp_str
exp_str = pa.export_model('sbml', file_name='/dev/null')
assert exp_str
def test_assemble_export_sbgn():
# Add various statements to test their assembly
st = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'))
mc = ModCondition('phosphorylation', None, None, True)
st2 = Activation(Agent('MAP2K1', mods=[mc]), Agent('MAPK1'))
st3 = Complex([Agent('MAPK1'), Agent('DUSP6')])
st4 = DecreaseAmount(None, Agent('DUSP6'))
pa = PysbAssembler([st, st2, st3, st4])
pa.make_model()
# Export to SBGN
model = pa.export_model('sbgn')
assert model is not None
# Test that the right elements are there in the result
et = ET.fromstring(model)
from indra.assemblers.sbgn.assembler import sbgn_ns
sbgn_nss = {'s': sbgn_ns}
glyphs = et.findall('s:map/s:glyph', namespaces=sbgn_nss)
glyph_classes = [g.attrib.get('class') for g in glyphs]
assert glyph_classes.count('macromolecule') == 6
assert glyph_classes.count('complex') == 2
assert glyph_classes.count('process') == 10
return pa
def test_name_standardize():
n = pa._n('.*/- ^&#@$')
assert isinstance(n, str)
assert n == '__________'
n = pa._n('14-3-3')
assert isinstance(n, str)
assert n == 'p14_3_3'
n = pa._n('\U0001F4A9bar')
assert isinstance(n, str)
assert n == 'bar'
def test_generate_equations():
st = Phosphorylation(Agent('MAP2K1'), Agent('MAPK3'))
pa = PysbAssembler([st])
pa.make_model()
bng.generate_equations(pa.model)
def test_non_python_name_phos():
st = Phosphorylation(Agent('14-3-3'), Agent('BRAF kinase'))
pa = PysbAssembler([st])
pa.make_model()
names = [m.name for m in pa.model.monomers]
assert 'BRAF_kinase' in names
assert 'p14_3_3' in names
bng.generate_equations(pa.model)
def test_non_python_name_bind():
st = Complex([Agent('14-3-3'), Agent('BRAF kinase')])
pa = PysbAssembler([st])
pa.make_model()
bng.generate_equations(pa.model)
def test_decreaseamount_one_step():
subj = Agent('KRAS')
obj = Agent('BRAF')
st1 = DecreaseAmount(subj, obj)
st2 = DecreaseAmount(None, obj)
pa = PysbAssembler([st1, st2])
model = pa.make_model(policies='one_step')
assert len(model.rules) == 2
assert len(model.monomers) == 2
def test_decreaseamount_interactions_only():
subj = Agent('KRAS')
obj = Agent('BRAF')
st1 = DecreaseAmount(subj, obj)
st2 = DecreaseAmount(None, obj)
pa = PysbAssembler([st1, st2])
model = pa.make_model(policies='interactions_only')
assert len(model.rules) == 1
assert len(model.monomers) == 2
def test_increaseamount_one_step():
subj = Agent('KRAS')
obj = Agent('BRAF')
st1 = IncreaseAmount(subj, obj)
st2 = IncreaseAmount(None, obj)
pa = PysbAssembler([st1, st2])
model = pa.make_model(policies='one_step')
assert len(model.rules) == 2
assert len(model.monomers) == 2
def test_increaseamount_interactions_only():
subj = Agent('KRAS')
obj = Agent('BRAF')
st1 = IncreaseAmount(subj, obj)
st2 = IncreaseAmount(None, obj)
pa = PysbAssembler([st1, st2])
model = pa.make_model(policies='interactions_only')
assert len(model.rules) == 1
assert len(model.monomers) == 2
def test_missing_catalytic_default_site():
c8 = Agent('CASP8', activity=ActivityCondition('catalytic', True))
c3 = Agent('CASP3')
stmt = Activation(c8, c3, 'catalytic')
# Interactions only
pa = PysbAssembler([stmt])
model = pa.make_model(policies='interactions_only')
# One step
pa = PysbAssembler([stmt])
model = pa.make_model(policies='one_step')
# Two step
pa = PysbAssembler([stmt])
model = pa.make_model(policies='two_step')
def test_missing_transcription_default_site():
p53 = Agent('TP53', activity=ActivityCondition('transcription', True))
bax = Agent('BAX')
stmt = Activation(p53, bax)
# Interactions only
pa = PysbAssembler([stmt])
model = pa.make_model(policies='interactions_only')
# One step
pa = PysbAssembler([stmt])
model = pa.make_model(policies='one_step')
# Two step
pa = PysbAssembler([stmt])
model = pa.make_model(policies='two_step')
def test_translocation_loc_special_char():
st = Translocation(Agent('KSR1'), 'cytoplasm', 'cell surface')
pa = PysbAssembler([st])
pa.make_model()
assert len(pa.model.rules) == 1
r = pa.model.rules[0]
f1 = r.reactant_pattern.complex_patterns[0].monomer_patterns[0]
assert f1.site_conditions == {'loc': 'cytoplasm'}
f2 = r.product_pattern.complex_patterns[0].monomer_patterns[0]
assert f2.site_conditions == {'loc': 'cell_surface'}
assert r.rate_forward.name == 'kf_ksr1_cytoplasm_cell_surface_1'
@with_model
def test_get_mp_with_grounding():
foo = Agent('Foo', db_refs={'HGNC': 'foo'})
a = Agent('A', db_refs={'HGNC': '6840'})
b = Agent('B', db_refs={'HGNC': '6871'})
Monomer('A_monomer')
Monomer('B_monomer')
Annotation(A_monomer, 'https://identifiers.org/hgnc:6840')
Annotation(B_monomer, 'https://identifiers.org/hgnc:6871')
mps = list(pa.grounded_monomer_patterns(model, foo))
assert len(mps) == 0
mps = list(pa.grounded_monomer_patterns(model, a))
assert len(mps) == 1, mps
assert mps[0].monomer == A_monomer
mps = list(pa.grounded_monomer_patterns(model, b))
assert len(mps) == 1
assert mps[0].monomer == B_monomer
@with_model
def test_get_mp_with_grounding_2():
a1 = Agent('A', mods=[ModCondition('phosphorylation', None, None)],
db_refs={'HGNC': '6840'})
a2 = Agent('A', mods=[ModCondition('phosphorylation', 'Y', '187')],
db_refs={'HGNC': '6840'})
Monomer('A_monomer', ['phospho', 'T185', 'Y187'],
{'phospho': 'y', 'T185': ['u', 'p'], 'Y187': ['u', 'p']})
Annotation(A_monomer, 'https://identifiers.org/hgnc:6840')
A_monomer.site_annotations = [
Annotation(('phospho', 'y'), 'phosphorylation', 'is_modification'),
Annotation(('T185', 'p'), 'phosphorylation', 'is_modification'),
Annotation(('Y187', 'p'), 'phosphorylation', 'is_modification'),
Annotation('T185', 'T', 'is_residue'),
Annotation('T185', '185', 'is_position'),
Annotation('Y187', 'Y', 'is_residue'),
Annotation('Y187', '187', 'is_position')
]
mps_1 = list(pa.grounded_monomer_patterns(model, a1))
assert len(mps_1) == 3
mps_2 = list(pa.grounded_monomer_patterns(model, a2))
assert len(mps_2) == 1
mp = mps_2[0]
assert mp.monomer == A_monomer
assert mp.site_conditions == {'Y187': ('p', WILD)}
# TODO Add test for unmodified agent!
# TODO Add test involving multiple (possibly degenerate) modifications!
# TODO Add test for generic double phosphorylation
def test_phospho_assemble_grounding():
a = Agent('MEK1', db_refs={'HGNC': '6840'})
b = Agent('ERK2', db_refs={'HGNC': '6871'})
b_phos = Agent('Foo', mods=[ModCondition('phosphorylation', None, None)],
db_refs={'HGNC': '6871'})
st1 = Phosphorylation(a, b, 'T', '185')
# One step
def check_policy(policy):
pysb_asmb = pa.PysbAssembler([st1])
model = pysb_asmb.make_model(policies=policy)
mps = list(pa.grounded_monomer_patterns(model, b_phos))
assert len(mps) == 1
assert mps[0].monomer.name == 'ERK2'
assert mps[0].site_conditions == {'T185': ('p', WILD)}
for policy in ('one_step', 'interactions_only', 'two_step',
'atp_dependent'):
check_policy(policy)
def test_get_grounded_agents_from_model():
mek = Agent('MEK1', db_refs={'HGNC': '6840'})
erk = Agent('ERK2', db_refs={'HGNC': '6871'})
erk_phos = Agent('ERK2', db_refs={'HGNC': '6871'},
mods=[ModCondition('phosphorylation')])
erk_phos_y187 = Agent('ERK2', db_refs={'HGNC': '6871'},
mods=[ModCondition('phosphorylation', 'Y', '187')])
phos_stmt = Phosphorylation(mek, erk)
phos_y187_stmt = Phosphorylation(mek, erk, 'Y', '187')
pysba = pa.PysbAssembler([phos_stmt, phos_y187_stmt])
pysb_model = pysba.make_model()
agents_by_mp, mps_by_rule = pa.get_grounded_agents(pysb_model)
assert isinstance(agents_by_mp, dict)
assert isinstance(mps_by_rule, dict)
model_agents = agents_by_mp.values()
model_keys = set([ag.matches_key() for ag in model_agents])
# TODO add other types of agent conditions here
# TODO do we expect a different agent for af?
test_keys = set([mek.matches_key(), erk_phos.matches_key(),
erk_phos_y187.matches_key()])
assert len(model_keys.intersection(test_keys)) == 3
def test_phospho_mod_grounding():
a = Agent('MEK1', mods=[ModCondition('phosphorylation', 'S', '218'),
ModCondition('phosphorylation', 'S', '222')],
db_refs={'HGNC': '6840'})
b = Agent('ERK2', db_refs={'HGNC': '6871'})
a_phos = Agent('Foo', mods=[ModCondition('phosphorylation', None, None)],
db_refs={'HGNC': '6840'})
st1 = Phosphorylation(a, b, 'T', '185')
pysb_asmb = pa.PysbAssembler([st1])
model = pysb_asmb.make_model(policies='one_step')
mps = list(pa.grounded_monomer_patterns(model, a_phos))
assert len(mps) == 2
assert mps[0].monomer.name == 'MEK1'
assert mps[1].monomer.name == 'MEK1'
sc = [mp.site_conditions for mp in mps]
assert {'S218': ('p', WILD)} in sc
assert {'S222': ('p', WILD)} in sc
# Check if we get the doubly phosphorylated MonomerPattern
mps = list(pa.grounded_monomer_patterns(model, a))
assert len(mps) == 1
assert mps[0].monomer.name == 'MEK1'
assert mps[0].site_conditions == {'S218': ('p', WILD),
'S222': ('p', WILD)}
def test_multiple_grounding_mods():
mek = Agent('MEK1', db_refs={'HGNC': '6840'})
erk = Agent('ERK2', db_refs={'HGNC': '6871'})
cbl = Agent('CBL', db_refs={'HGNC': '1541'})
ub_phos_erk = Agent(
'ERK2',
mods=[ModCondition('phosphorylation', None, None),
ModCondition('ubiquitination', None, None)],
db_refs={'HGNC': '6871'})
st1 = Phosphorylation(mek, erk, 'T', '185')
st2 = Phosphorylation(mek, erk, 'Y', '187')
st3 = Ubiquitination(cbl, erk, 'K', '40')
st4 = Ubiquitination(cbl, erk, 'K', '50')
pysb_asmb = pa.PysbAssembler([st1, st2, st3, st4])
model = pysb_asmb.make_model(policies='one_step')
mps = list(pa.grounded_monomer_patterns(model, ub_phos_erk))
assert len(mps) == 4
assert mps[0].monomer.name == 'ERK2'
assert mps[1].monomer.name == 'ERK2'
assert mps[2].monomer.name == 'ERK2'
assert mps[3].monomer.name == 'ERK2'
def test_grounded_active_pattern():
a = Agent('A', db_refs={'HGNC': '1234'})
b = Agent('B', db_refs={'HGNC': '5678'})
b_phos = Agent('B', mods=[ModCondition('phosphorylation', 'S', '100')],
db_refs={'HGNC': '5678'})
b_act = Agent('B', activity=ActivityCondition('activity', True),
db_refs={'HGNC': '5678'})
st1 = Phosphorylation(a, b, 'S', '100')
st2 = ActiveForm(b_phos, 'activity', True)
pysba = PysbAssembler([st1, st2])
model = pysba.make_model(policies='one_step')
mps = list(pa.grounded_monomer_patterns(model, b_act))
def _check_mod_assembly(mod_class):
subj = Agent('KRAS')
obj = Agent('BRAF')
st1 = mod_class(subj, obj)
pa = PysbAssembler([st1])
model = pa.make_model(policies='interactions_only')
assert len(model.rules) == 1
assert len(model.monomers) == 2
pa = PysbAssembler([st1])
model = pa.make_model(policies='one_step')
assert len(model.rules) == 1
assert len(model.monomers) == 2
pa = PysbAssembler([st1])
model = pa.make_model(policies='two_step')
assert len(model.rules) == 3
assert len(model.monomers) == 2
def test_modification_assembly():
classes = AddModification.__subclasses__() + \
RemoveModification.__subclasses__()
for mod_class in classes:
_check_mod_assembly(mod_class)
def test_rule_annotation():
a = Agent('A', db_refs={'HGNC': '1234'})
b = Agent('B', db_refs={'HGNC': '5678'})
def check_rule_annotation(stmt, policy):
pa = PysbAssembler([stmt])
model = pa.make_model(policies=policy)
subj = [ann.object for ann in model.annotations
if ann.predicate == 'rule_has_subject']
obj = [ann.object for ann in model.annotations
if ann.predicate == 'rule_has_object']
assert len(subj) == 1
assert subj[0] == 'A'
assert len(obj) == 1
assert obj[0] == 'B'
classes = AddModification.__subclasses__() + \
RemoveModification.__subclasses__()
for mod_class in classes:
stmt = mod_class(a, b)
check_rule_annotation(stmt, 'one_step')
check_rule_annotation(stmt, 'two_step')
# Check ATP dependent phosphorylation
stmt = Phosphorylation(a, b)
check_rule_annotation(stmt, 'atp_dependent')
stmt = Activation(a, b)
check_rule_annotation(stmt, 'one_step')
    # Skip Autophosphorylation and Transphosphorylation for now
    # Gef
    # Gap
def test_activeform_site():
a = Agent('A', db_refs={'HGNC': '1234'})
b = Agent('B', db_refs={'HGNC': '5678'})
b_phos = Agent('B', mods=[ModCondition('phosphorylation', 'Y', '200')],
db_refs={'HGNC': '5678'})
st1 = Phosphorylation(a, b, 'S', '100')
st2 = ActiveForm(b_phos, 'kinase', True)
pa = PysbAssembler([st1, st2])
model = pa.make_model(policies='one_step')
# TODO Do the same for mutation condition
# TODO Localization condition
# TODO Bound condition
# TODO Unphosphorylated/unmodified forms (try ubiquitinated/acetylated lysine)
def test_activation_subj1():
"""No subject activity is defined."""
st = Activation(Agent('a'), Agent('b'))
pa = PysbAssembler([st])
pa.make_model()
assert pa.model.monomers['a'].sites == []
left = pa.model.rules[0].reactant_pattern
subj_left = left.complex_patterns[0].monomer_patterns[0]
right = pa.model.rules[0].product_pattern
subj_right = right.complex_patterns[0].monomer_patterns[0]
assert subj_left.site_conditions == {}
assert subj_right.site_conditions == {}
def test_activation_subj2():
"""Subject activity is defined explicitly."""
a_act = Agent('a', activity=ActivityCondition('activity', True))
st = Activation(a_act, Agent('b'))
st2 = ActiveForm(Agent('a', mods=[ModCondition('phosphorylation')]),
'activity', True)
pa = PysbAssembler([st, st2])
pa.make_model()
assert pa.model.monomers['a'].sites == ['phospho']
left = pa.model.rules[0].reactant_pattern
subj_left = left.complex_patterns[0].monomer_patterns[0]
right = pa.model.rules[0].product_pattern
subj_right = right.complex_patterns[0].monomer_patterns[0]
assert subj_left.site_conditions == {u'phospho': (u'p', WILD)}
assert subj_right.site_conditions == {u'phospho': (u'p', WILD)}
def test_activation_subj3():
"""Subject activity is defined implicitly by another statement."""
a_act = Agent('a', activity=ActivityCondition('activity', True))
st = Activation(a_act, Agent('b'))
st2 = Activation(Agent('c'), Agent('a'))
pa = PysbAssembler([st, st2])
pa.make_model()
assert len(pa.model.rules) == 2
assert pa.model.monomers['a'].sites == ['activity']
left = pa.model.rules[0].reactant_pattern
subj_left = left.complex_patterns[0].monomer_patterns[0]
right = pa.model.rules[0].product_pattern
subj_right = right.complex_patterns[0].monomer_patterns[0]
assert subj_left.site_conditions == {u'activity': (u'active')}
assert subj_right.site_conditions == {u'activity': (u'active')}
def test_activation_subj4():
"""Subject activity is defined both explicitly and implicitly."""
a_act = Agent('a', activity=ActivityCondition('activity', True))
st = Activation(a_act, Agent('b'))
st2 = Activation(Agent('c'), Agent('a'))
st3 = ActiveForm(Agent('a', mods=[ModCondition('phosphorylation')]),
'activity', True)
pa = PysbAssembler([st, st2, st3])
pa.make_model()
assert set(pa.model.monomers['a'].sites) == set(['activity', 'phospho'])
left = pa.model.rules[0].reactant_pattern
subj_left = left.complex_patterns[0].monomer_patterns[0]
right = pa.model.rules[0].product_pattern
subj_right = right.complex_patterns[0].monomer_patterns[0]
assert subj_left.site_conditions == {u'phospho': (u'p', WILD)}
assert subj_right.site_conditions == {u'phospho': (u'p', WILD)}
def test_pysb_preassembler_replace_activities1():
st1 = ActiveForm(Agent('a', location='nucleus'), 'activity', True)
st2 = Phosphorylation(Agent('a',
activity=ActivityCondition('activity', True)),
Agent('b'))
ppa = PysbPreassembler([st1, st2])
ppa.replace_activities()
assert len(ppa.statements) == 2
assert ppa.statements[1].enz.location == 'nucleus'
def test_pysb_preassembler_replace_activities2():
a_act = Agent('a', activity=ActivityCondition('activity', True))
st = Activation(a_act, Agent('b'))
st2 = Activation(Agent('c'), Agent('a'))
ppa = PysbPreassembler([st, st2])
ppa.replace_activities()
assert len(ppa.statements) == 2
def test_pysb_preassembler_replace_activities3():
p = Agent('PPP2CA')
bc = BoundCondition(p, False)
erk = Agent('ERK')
mek1 = Agent('MEK', mods=[ModCondition('phosphorylation',
None, None, True)])
mek2 = Agent('MEK', activity=ActivityCondition('activity', True),
bound_conditions=[bc])
st2 = ActiveForm(mek1, 'activity', True)
st1 = Phosphorylation(mek2, erk)
ppa = PysbPreassembler([st1, st2])
ppa.replace_activities()
assert len(ppa.statements) == 2
assert ppa.statements[0].enz.mods
assert ppa.statements[0].enz.bound_conditions
def test_phos_michaelis_menten():
stmt = Phosphorylation(Agent('MEK'), Agent('ERK'))
pa = PysbAssembler([stmt])
pa.make_model(policies='michaelis_menten')
assert len(pa.model.parameters) == 4
def test_deubiq_michaelis_menten():
stmt = Deubiquitination(Agent('MEK'), Agent('ERK'))
pa = PysbAssembler([stmt])
pa.make_model(policies='michaelis_menten')
assert len(pa.model.parameters) == 4
def test_act_michaelis_menten():
stmt = Activation(Agent('MEK'), Agent('ERK'))
stmt2 = Inhibition(Agent('DUSP'), Agent('ERK'))
pa = PysbAssembler([stmt, stmt2])
pa.make_model(policies='michaelis_menten')
assert len(pa.model.parameters) == 7
def test_increaseamount_hill():
stmt = IncreaseAmount(Agent('TP53'), Agent('MDM2'))
pa = PysbAssembler([stmt])
pa.make_model(policies='hill')
pa.save_model()
assert len(pa.model.parameters) == 5
def test_convert_nosubj():
stmt = Conversion(None, [Agent('PIP2')], [Agent('PIP3')])
pa = PysbAssembler([stmt])
pa.make_model()
assert len(pa.model.parameters) == 3
assert len(pa.model.rules) == 1
assert len(pa.model.monomers) == 2
# We need to make sure that these are Kappa-compatible, and the easiest
# way to do that is by making a ModelChecker and getting the IM without
# error
from indra.explanation.model_checker import PysbModelChecker
pmc = PysbModelChecker(pa.model)
pmc.get_im()
def test_convert_subj():
stmt = Conversion(Agent('PIK3CA'), [Agent('PIP2')], [Agent('PIP3')])
pa = PysbAssembler([stmt])
pa.make_model()
assert len(pa.model.parameters) == 4
assert len(pa.model.rules) == 1
assert len(pa.model.monomers) == 3
# We need to make sure that these are Kappa-compatible, and the easiest
# way to do that is by making a ModelChecker and getting the IM without
# error
from indra.explanation.model_checker import PysbModelChecker
pmc = PysbModelChecker(pa.model)
pmc.get_im()
def test_activity_agent_rule_name():
stmt = Phosphorylation(Agent('BRAF',
activity=ActivityCondition('kinase',
True)),
Agent('MAP2K1',
activity=ActivityCondition('activity',
False)))
pa = PysbAssembler([stmt])
pa.make_model()
assert pa.model.rules[0].name == \
'BRAF_kin_phosphorylation_MAP2K1_act_inact_phospho', \
pa.model.rules[0].name
def test_policy_object():
stmt = Phosphorylation(Agent('a'), Agent('b'))
pa = PysbAssembler([stmt])
pol = Policy('two_step')
model = pa.make_model(policies={stmt.uuid: pol})
assert len(model.rules) == 3
assert str(pol) == 'Policy(two_step)'
def test_policy_parameters():
pol = Policy('two_step', parameters={'kf': Param('a', 1.0),
'kr': Param('b', 2.0),
'kc': Param('c', 3.0)})
# Make sure we can correctly stringify here
assert str(pol)
stmt = Deubiquitination(Agent('a'), Agent('b'))
pa = PysbAssembler([stmt])
model = pa.make_model(policies={stmt.uuid: pol})
assert model.parameters['c'].value == 3.0
@raises(pa.UnknownPolicyError)
def test_policy_object_invalid():
stmt = Phosphorylation(Agent('a'), Agent('b'))
pa = PysbAssembler([stmt])
model = pa.make_model(policies={'xyz': Policy('two_step')})
assert len(model.rules) == 3
def test_mod_parameter():
stmt = Phosphorylation(Agent('a'), Agent('b'))
pol = Policy('one_step', parameters={'kf': Param('my_kf_param', 0.99)})
pa = PysbAssembler([stmt])
model = pa.make_model(policies={stmt.uuid: pol})
assert model.parameters['my_kf_param'].value == 0.99
def test_policy_multiple():
pol1 = Policy('michaelis_menten', parameters={'Km': Param('my_Km', 1.0),
'kc': Param('my_kc', 1e-1)})
pol2 = Policy('one_step', parameters={'kf': Param('d', 10.0)})
stmt1 = Inhibition(Agent('a'), Agent('b'))
stmt2 = Translocation(Agent('a'), 'cytoplasm', 'nucleus')
pa = PysbAssembler([stmt1, stmt2])
model = pa.make_model(policies={stmt1.uuid: pol1,
stmt2.uuid: pol2})
assert model.parameters['d'].value == 10.0
print(model.expressions['a_deactivates_b_activity_rate'])
print(model.rules)
def test_kappa_im_export():
stmts = [Activation(Agent('a'), Agent('b')),
Activation(Agent('b',
activity=ActivityCondition('activity', True)),
Agent('c'))]
pa = PysbAssembler(stmts)
pa.make_model()
graph = pa.export_model('kappa_im', '/dev/null')
assert len(graph.nodes) == 2
assert len(graph.edges) == 1
def test_kappa_cm_export():
stmts = [Complex([Agent('a'), Agent('b')])]
pa = PysbAssembler(stmts)
pa.make_model()
graph = pa.export_model('kappa_cm', '/dev/null')
assert len(graph.nodes()) == 2
assert len(graph.edges()) == 1
def test_contact_map_cycles_1():
stmts = [Complex([Agent('a'), Agent('b')]),
Complex([Agent('a'), Agent('c')]),
Complex([Agent('b'), Agent('c')])]
pa = PysbAssembler(stmts)
pa.make_model()
graph = export_cm_network(pa.model)
assert len(graph.nodes()) == 9, len(graph.nodes)
assert len(graph.edges()) == 9, len(graph.edges)
cycles = get_cm_cycles(graph)
assert len(cycles) == 1, cycles
assert cycles[0] == ['a(b)', 'b(a)', 'b(c)', 'c(b)', 'c(a)', 'a(c)']
def test_contact_map_cycles_2():
erk1 = Agent('MAPK1', db_refs={'HGNC': '6871'})
erk2 = Agent('MAPK3', db_refs={'HGNC': '6877'})
# In this case there will be no cycles because the binding site on x
# for ERK1 and ERK2 is generated to be competitive.
stmts = [Complex([Agent('x'), erk1]),
Complex([Agent('x'), erk2]),
Complex([erk1, erk2])]
pa = PysbAssembler(stmts)
pa.make_model()
graph = export_cm_network(pa.model)
assert len(graph.nodes()) == 8, len(graph.nodes)
assert len(graph.edges()) == 8, len(graph.edges)
cycles = get_cm_cycles(graph)
assert not cycles, cycles
``` |
{
"source": "johnbachman/covid-19",
"score": 2
} |
#### File: covid_19/disease_maps/minerva_client.py
```python
import json
import requests
default_map_name = 'covid19map'
base_url = 'https://%s.elixir-luxembourg.org/minerva/api/'
def get_config(map_name=default_map_name):
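    """Return the configuration JSON of a MINERVA map instance."""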
url = (base_url % map_name) + 'configuration/'
res = requests.get(url)
res.raise_for_status()
return res.json()
def get_project_id_from_config(config):
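    """Return the project id from the DEFAULT_MAP option, if available."""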
options = config.get('options', [])
for option in options:
if option.get('type') == 'DEFAULT_MAP':
return option.get('value')
return None
def get_models(project_id, map_name=default_map_name):
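    """Return the list of models (submaps) for a MINERVA project."""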
url = (base_url % map_name) + ('projects/%s/models/' % project_id)
res = requests.get(url)
res.raise_for_status()
return res.json()
def get_model_elements(model_id, project_id, map_name=default_map_name):
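    """Return a model's elements with their ids, names, types and references."""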
url = (base_url % map_name) + \
('projects/%s/models/%s/' % (project_id, model_id)) + \
'bioEntities/elements/?columns=id,name,type,references'
res = requests.get(url)
res.raise_for_status()
return res.json()
def get_all_model_elements(models, project_id, map_name=default_map_name):
all_elements = []
for model in models:
model_id = model['idObject']
model_elements = get_model_elements(model_id, project_id,
map_name)
all_elements += model_elements
return all_elements
def get_element_references(element):
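    """Return a list of (reference type, resource) tuples for an element."""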
refs = element.get('references', [])
return [(ref.get('type'), ref.get('resource')) for ref in refs]
def get_all_valid_element_refs(map_name=default_map_name):
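    """Return the non-empty reference lists of all elements across all models."""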
config = get_config(map_name)
project_id = get_project_id_from_config(config)
models = get_models(project_id, map_name)
all_model_elements = get_all_model_elements(models, project_id,
map_name)
element_refs = [get_element_references(element) for element
in all_model_elements]
valid_element_refs = [ref for ref in element_refs if ref]
return valid_element_refs
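# Example usage (a minimal sketch; it assumes network access to the public
# 'covid19map' MINERVA instance configured by default above):
#     refs = get_all_valid_element_refs()
#     print('Found %d elements with references' % len(refs))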
```
#### File: covid_19/eidos/eidos_indra_assembly.py
```python
import os
import pickle
import indra.tools.assemble_corpus as ac
from indra.tools.live_curation import Corpus
from collections import Counter
from indra.preassembler.custom_preassembly import agent_name_stmt_type_matches
def norm_name(name):
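    """Return a normalized name key, e.g. 'Food Security' -> 'food_security'."""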
return '_'.join(sorted(list(set(name.lower().split()))))
def make_fake_wm(stmts):
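    """Add a dummy WM grounding to each agent based on its normalized name."""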
for stmt in stmts:
for agent in stmt.agent_list():
agent.db_refs['WM'] = [(norm_name(agent.name), 1.0)]
def filter_name_frequency(stmts, k=2):
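    """Keep statements whose agents' normalized names all occur at least k times."""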
norm_names = []
for stmt in stmts:
for agent in stmt.agent_list():
norm_names.append(norm_name(agent.name))
cnt = Counter(norm_names)
names = {n for n, c in cnt.most_common() if c >= k}
new_stmts = []
for stmt in stmts:
found = True
for agent in stmt.agent_list():
if norm_name(agent.name) not in names:
found = False
break
if found:
new_stmts.append(stmt)
return new_stmts
if __name__ == '__main__':
stmts_pkl = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             os.pardir, os.pardir, 'stmts',
                             'eidos_statements_influence.pkl')
with open(stmts_pkl, 'rb') as fh:
stmts = pickle.load(fh)
make_fake_wm(stmts)
stmts = filter_name_frequency(stmts, k=2)
assembled_stmts = \
ac.run_preassembly(stmts, matches_fun=agent_name_stmt_type_matches)
meta_data = ('This corpus was assembled from ~30k '
'papers related to Covid-19.')
corpus = Corpus(assembled_stmts, raw_statements=stmts, meta_data=meta_data)
corpus_name = 'covid-20200319-ontfree'
corpus.s3_put(corpus_name)
```
#### File: covid-19/covid_19/preprocess.py
```python
import os
import csv
import sys
import json
import time
import re
import urllib
import logging
import tarfile
from os.path import abspath, dirname, join, isdir
import pandas as pd
from indra.util import zip_string
logger = logging.getLogger(__name__)
baseurl = 'https://ai2-semanticscholar-cord-19.s3-us-west-2.amazonaws.com/'
def get_latest_available_date():
"""Get the date of the latest CORD19 dataset upload."""
req = urllib.request.Request((baseurl + 'historical_releases.html'))
with urllib.request.urlopen(req) as response:
page_content = response.read()
latest_date = re.search(
r'<i>Latest release: (.*?)</i>', str(page_content)).group(1)
logger.info('Latest data release is %s' % latest_date)
return latest_date
latest_date = get_latest_available_date() # For processing latest data
# latest_date = '2020-06-15' # For processing a different date manually
data_dir = join(dirname(abspath(__file__)), '..', 'data')
basepath = join(data_dir, latest_date)
metadata_file = join(basepath, 'metadata.csv')
doc_gz_path = os.path.join(basepath, 'document_parses.tar.gz')
doc_df = None
def download_metadata():
"""Download metadata file only."""
# Create missing directories
if not os.path.exists(data_dir):
os.mkdir(data_dir)
if not os.path.exists(basepath):
os.mkdir(basepath)
if not os.path.exists(metadata_file):
logger.info('Downloading metadata')
md_url = baseurl + '%s/metadata.csv' % latest_date
urllib.request.urlretrieve(md_url, metadata_file)
logger.info('Latest metadata is available in %s' % metadata_file)
def download_latest_data():
"""Download metadata and document parses."""
download_metadata()
if not os.path.exists(doc_gz_path):
logger.info('Downloading document parses')
doc_url = baseurl + '%s/document_parses.tar.gz' % latest_date
urllib.request.urlretrieve(doc_url, doc_gz_path)
logger.info('Latest data is available in %s' % basepath)
def get_all_texts():
"""Return a dictionary mapping json filenames with full text contents."""
texts_by_file = {}
logger.info('Extracting full texts from all document json files...')
tar = tarfile.open(doc_gz_path)
members = tar.getmembers()
for m in members:
f = tar.extractfile(m)
doc_json = json.loads(f.read().decode('utf-8'))
text = get_text_from_json(doc_json)
texts_by_file[m.name] = text
tar.close()
return texts_by_file
def get_zip_texts_for_entry(md_entry, texts_by_file, zip=True):
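    """Return (text source, text type, content) tuples for a metadata row."""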
texts = []
if md_entry['pdf_json_files']:
filenames = [s.strip() for s in md_entry['pdf_json_files'].split(';')]
pdf_texts = []
for filename in filenames:
if texts_by_file.get(filename):
pdf_texts.append(texts_by_file[filename])
else:
logger.warning('Text for %s is missing' % filename)
combined_text = '\n'.join(pdf_texts)
if zip:
combined_text = zip_string(combined_text)
texts.append(('cord19_pdf', 'fulltext', combined_text))
if md_entry['pmc_json_files']:
filename = md_entry['pmc_json_files']
if texts_by_file.get(filename):
text = texts_by_file[filename]
else:
logger.warning('Text for %s is missing' % filename)
if zip:
text = zip_string(text)
texts.append(('cord19_pmc_xml', 'fulltext', text))
if md_entry['abstract']:
text = md_entry['abstract']
if zip:
text = zip_string(text)
texts.append(('cord19_abstract', 'abstract', text))
return texts
def get_metadata_df():
file_data = []
hashes = []
"""
for content_type, content_path in paths.items():
for filename in os.listdir(content_path):
if filename.endswith('.json'):
file_hash = filename.split('.')[0]
hashes.append(file_hash)
file_data.append((content_path, content_type))
file_df = pd.DataFrame(file_data, index=hashes, dtype='str',
columns=['content_path', 'content_type'])
"""
dtype_dict = {
'cord_uid': 'object',
'sha': 'object',
'source_x': 'object',
'title': 'object',
'doi': 'object',
'pmcid': 'object',
'pubmed_id': 'object',
'license': 'object',
'abstract': 'object',
'publish_time': 'object',
'authors': 'object',
'journal': 'object',
'mag_id': 'object',
'who_covidence_id': 'object',
'arxiv_id': 'object',
'pdf_json_files': 'object',
'pmc_json_files': 'object',
'url': 'object',
's2_id': 'object',
}
md = pd.read_csv(metadata_file, dtype=dtype_dict,
parse_dates=['publish_time'])
md = md.where(md.notnull(), None)
#file_data = metadata.join(file_df, 'sha')
#return file_data
return md
def get_ids(id_type):
"""Get unique article identifiers from the dataset.
Parameters
----------
id_type : str
Dataframe column name, e.g. 'pubmed_id', 'pmcid', 'doi'.
Returns
-------
list of str
List of unique identifiers in the dataset, e.g. all unique PMCIDs.
"""
global doc_df
if doc_df is None:
doc_df = get_metadata_df()
unique_ids = list(doc_df[~pd.isna(doc_df[id_type])][id_type].unique())
return unique_ids
def get_text_from_json(doc_json):
text = ''
text += doc_json['metadata']['title']
text += '.\n'
if 'abstract' in doc_json:
for p in doc_json['abstract']:
text += p['text']
text += '\n'
for p in doc_json['body_text']:
text += p['text']
text += '\n'
for cap_dict in doc_json['ref_entries'].values():
text += cap_dict['text']
text += '\n'
return text
def dump_text_files(output_dir, doc_df):
# TODO this needs to be updated with new df structure and code updates
sha_ix = 1
path_ix = 15
title_ix = 3
abs_ix = 8
# Start by dumping full texts
dumped_rows = set()
text_df = doc_df[~pd.isna(doc_df.content_path)]
ft_counter = 0
for row in text_df.itertuples():
ix = row[0]
json_file = f'{join(row[path_ix], row[sha_ix])}.json'
text = get_text_from_json(json_file)
output_file = join(output_dir, f'CORD19_DOC_{ix}.txt')
#output_file = f'{join(output_dir, row[sha_ix])}.txt'
with open(output_file, 'wt') as f:
f.write(text)
ft_counter += 1
dumped_rows.add(ix)
# Then look at the abstracts
abstract_df = doc_df[pd.isna(doc_df.content_path) &
~pd.isna(doc_df.abstract)]
for row in abstract_df.itertuples():
ix = row[0]
# If we've already dumped full text, skip it
if ix in dumped_rows:
continue
text = row[title_ix]
text += '.\n'
text += row[abs_ix]
output_file = join(output_dir, f'CORD19_DOC_{ix}.txt')
with open(output_file, 'wt') as f:
f.write(text)
# Finally, dump the metadata to a CSV file
doc_df.to_csv(join(output_dir, 'metadata.csv'))
def get_metadata_dict():
df = get_metadata_df()
return df.to_dict(orient='records')
def fix_doi(doi):
if doi is None:
return None
prefixes = ['http://dx.doi.org/', 'doi.org/']
for prefix in prefixes:
if doi.startswith(prefix):
doi = doi[len(prefix):]
return doi
def fix_pmid(pmid):
if pmid is None:
return None
if not pmid.isdigit():
pmid = None
return pmid
def get_text_refs_from_metadata(entry):
mappings = {
'cord_uid': 'CORD19_UID',
'sha': 'CORD19_SHA',
'doi': 'DOI',
'pmcid': 'PMCID',
'pubmed_id': 'PMID',
'who_covidence_id': 'WHO_COVIDENCE',
'mag_id': 'MICROSOFT'
}
text_refs = {}
for key, ref_key in mappings.items():
val = entry.get(key)
if key == 'doi':
val = fix_doi(val)
elif key == 'pubmed_id':
val = fix_pmid(val)
if val and not pd.isnull(val):
# Temporary patch to remove float suffixes
if val.endswith('.0'):
val = val[:-2]
text_refs[ref_key] = val
return text_refs
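# Hypothetical usage sketch (not part of the original module): the calls below
# only chain the functions defined above; actual call sites live elsewhere in
# the covid-19 repository, and the slice of 10 entries is an arbitrary example.
#
#     download_latest_data()
#     metadata = get_metadata_dict()
#     texts_by_file = get_all_texts()
#     for entry in metadata[:10]:
#         text_refs = get_text_refs_from_metadata(entry)
#         texts = get_zip_texts_for_entry(entry, texts_by_file, zip=False)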
``` |
{
"source": "johnbachman/deft",
"score": 3
} |
#### File: adeft/modeling/label.py
```python
from adeft.recognize import AdeftRecognizer
class AdeftLabeler(object):
"""Class for labeling corpora
Parameters
----------
grounding_dict : dict of dict of str
Dictionary mapping shortforms to grounding_map dictionaries mapping
longforms to groundings
Attributes
----------
recognizers : list of py:class`adeft.recognize.AdeftRecognizer`
List of recognizers for each shortform to be considered. Each
        recognizer identifies longforms for a shortform by finding matches
        to a defining pattern (DP)
"""
def __init__(self, grounding_dict):
self.grounding_dict = grounding_dict
self.recognizers = [AdeftRecognizer(shortform, grounding_map)
for shortform, grounding_map
in grounding_dict.items()]
def build_from_texts(self, texts):
"""Build labeled corpus from a list of texts
Labels texts based on defining patterns (DPs)
Parameters
----------
texts : list of str
List of texts to build corpus from
Returns
-------
corpus : list of tuple
Contains tuples for each text in the input list which contains
a defining pattern. Multiple tuples correspond to texts with
multiple defining patterns for longforms with different groundings.
The first element of each tuple contains the training text with all
defining patterns replaced with only the shortform. The second
element contains the groundings for longforms matched with a
defining pattern.
"""
corpus = []
for text in texts:
data_points = self._process_text(text)
if data_points:
corpus.extend(data_points)
return corpus
def _process_text(self, text):
"""Returns training data and label corresponding to text if found
The training text corresponding to an input text is obtained by
        stripping out all occurrences of (<shortform>). It is possible that
        multiple longforms are matched with the standard pattern. In this
        case, multiple datapoints are returned, each with a different label
        but the same training text.
Parameters
----------
text : str
Fulltext to build datapoint from, if possible.
Returns
-------
datapoints : list of tuple or None
Returns None if no label can be found by matching the standard
pattern. Otherwise, returns a list of pairs containing the training
text and a label for each label appearing in the input text
matching the standard pattern.
"""
groundings = set()
for recognizer in self.recognizers:
groundings.update(recognizer.recognize(text))
if not groundings:
return None
for recognizer in self.recognizers:
text = recognizer.strip_defining_patterns(text)
datapoints = [(text, grounding) for grounding in groundings]
return datapoints
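# Hypothetical usage sketch (not part of the original file): the shortform,
# longforms, and grounding identifiers below are made-up placeholders.
#
#     grounding_dict = {'ER': {'estrogen receptor': 'HGNC:3467',
#                              'endoplasmic reticulum': 'GO:0005783'}}
#     labeler = AdeftLabeler(grounding_dict)
#     corpus = labeler.build_from_texts(
#         ['The estrogen receptor (ER) is a transcription factor.'])
#     # corpus is a list of (training_text, grounding) pairs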
```
#### File: adeft/nlp/nlp.py
```python
import os
import re
import json
from collections import defaultdict
from nltk.stem.snowball import EnglishStemmer
class WatchfulStemmer(object):
"""Wraps the nltk.snow EnglishStemmer.
Keeps track of the number of times words have been mapped to particular
stems by the wrapped stemmer. Extraction of longforms works with stemmed
tokens but it is necessary to recover actual words from stems.
Attributes
----------
__snowball : :py:class:`nltk.stem.snowball.EnglishStemmer`
counts : defaultdict of defaultdict of int
Contains the count of the number of times a particular word has been
mapped to from a particular stem by the wrapped stemmer. Of the form
counts[stem:str][word:str] = count:int
"""
def __init__(self):
self.__snowball = EnglishStemmer()
self.counts = defaultdict(lambda: defaultdict(int))
def stem(self, word):
"""Returns stemmed form of word.
Adds one to count associated to the computed stem, word pair.
Parameters
----------
word : str
text to stem
Returns
-------
stemmed : str
stemmed form of input word
"""
stemmed = self.__snowball.stem(word)
self.counts[stemmed][word] += 1
return stemmed
def most_frequent(self, stemmed):
"""Return the most frequent word mapped to a given stem
Parameters
----------
stemmed : str
Stem that has previously been output by the wrapped snowball
stemmer.
Returns
-------
        output : str
            Most frequent word that has been mapped to the input stem. Ties
            are broken by lexicographic order. A ValueError is raised if the
            wrapped stemmer has never mapped a word to the input stem.
"""
words = list(self.counts[stemmed].items())
if words:
words.sort(key=lambda x: x[1], reverse=True)
candidates = [word[0] for word in words if word[1] == words[0][1]]
output = min(candidates)
else:
raise ValueError('stem %s has not been observed' % stemmed)
return output
def tokenize(text):
"""Simple word tokenizer based on a regular expression pattern
Everything that is not a block of alphanumeric characters is considered as
a separate token.
Parameters
----------
text : str
Text to tokenize
Returns
-------
tokens : list of tuple
Tokens in the input text along with their text coordinates. Each
tuple has a token as the first element and the tokens coordinates
as its second element.
"""
pattern = re.compile(r'\w+|[^\s\w]')
matches = re.finditer(pattern, text)
return [(m.group(), (m.start(), m.end()-1)) for m in matches]
def untokenize(tokens):
"""Return inverse of the Adeft word tokenizer
The inverse is inexact. For simplicity, all white space characters are
replaced with a space. An exact inverse is not necessary for adeft's
purposes.
Parameters
----------
tokens : list of tuple
List of tuples of the form (word, (start, end)) giving tokens
and coordinates as output by Adeft's word tokenizer
Returns
-------
output : str
The original string that produced the input tokens, with the caveat
that every white space character will be replaced with a space.
"""
# Edge cases: input text is empty string or only has one token
if len(tokens) == 0:
return ''
elif len(tokens) == 1:
return tokens[0][0]
# This looks messy but is simple conceptually.
# At each step add the current token and a number of spaces determined
# by the coordinates of the previous token and the current token.
output = [tokens[0][0]] + [' ']*(tokens[1][1][0] - tokens[0][1][1] - 1)
for index in range(1, len(tokens)-1):
output.append(tokens[index][0])
output.extend([' ']*(tokens[index+1][1][0]
- tokens[index][1][1] - 1))
output.append(tokens[-1][0])
return ''.join(output)
stopwords_min = set(['a', 'an', 'the', 'and', 'or', 'of', 'with', 'at',
'from', 'into', 'to', 'for', 'on', 'by', 'be', 'been',
'am', 'is', 'are', 'was', 'were', 'in', 'that', 'as'])
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'stopwords.json'), 'r') as f:
english_stopwords = json.load(f)
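# Hypothetical usage sketch (not part of the original file):
#
#     stemmer = WatchfulStemmer()
#     tokens = tokenize('Binding of estrogen receptors')
#     stems = [stemmer.stem(word) for word, _ in tokens]
#     text = untokenize(tokens)            # recovers the text up to whitespace
#     stemmer.most_frequent(stems[-1])     # most common surface form of a stem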
```
#### File: deft/adeft/util.py
```python
import re
from unicodedata import category
from adeft.nlp import tokenize
def get_candidate_fragments(text, shortform, window=100):
"""Return candidate longform fragments from text
Identifies candidate longforms by searching for defining patterns (DP)
in the text. Candidate longforms consist of non-punctuation tokens within
a specified range of characters before the DP up until either the start
of the text, the end of a previous DP or optionally a token from a set of
excluded tokens.
Parameters
----------
text : str
Text to search for defining patterns (DP)
shortform : str
Shortform to disambiguate
window : Optional[int]
Specifies range of characters before a defining pattern (DP)
to consider when finding longforms. If set to 30, candidate
longforms would be taken from the string
"ters before a defining pattern". Default: 100
"""
# Find defining patterns by matching a regular expression
matches = re.finditer(r'\(\s*%s\s*\)' % shortform, text)
    # Keep track of the index of the end of the previous match.
    # Longform candidates cannot contain a previous DP or any text
    # that precedes it
end_previous = -1
result = []
for match in matches:
# coordinates of current match
span = match.span()
# beginning of window containing longform candidate
left = max(end_previous+1, span[0]-window)
# fragment of text in this window
fragment = text[left:span[0]]
result.append(fragment)
end_previous = span[1]
return result
def get_candidate(fragment, exclude=None):
"""Return tokens in candidate fragment up until last excluded word
Parameters
----------
fragment : str
The fragment to return tokens from.
exclude : Optional[set of str]
Terms that are to be excluded from candidate longforms.
Default: None
"""
if exclude is None:
exclude = set()
tokens = [token.lower() for token, _
in tokenize(fragment)
if len(token) > 1 or not category(token).startswith('P')]
index = len(tokens)
# Take only tokens from end up to but not including the last
# excluded in the fragment
while index > 0:
index -= 1
if tokens[index] in exclude:
tokens = tokens[index+1:]
break
return tokens
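# Hypothetical usage sketch (not part of the original file): the text and
# exclusion set below are made-up placeholders.
#
#     text = 'The estrogen receptor (ER) is a transcription factor.'
#     fragments = get_candidate_fragments(text, 'ER')
#     candidates = [get_candidate(fragment, exclude={'the', 'of'})
#                   for fragment in fragments]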
``` |
{
"source": "johnbachman/famplex_site",
"score": 3
} |
#### File: famplex_site/famplex_web/load_data.py
```python
import csv
from os.path import join, dirname
def load_grounding_map(filename):
gm_rows = load_csv(filename)
gm_tuples = []
g_map = {}
for row in gm_rows:
gm_tuples.append(tuple(row))
key = row[0]
db_refs = {'TEXT': key}
keys = [entry for entry in row[1::2] if entry != '']
values = [entry for entry in row[2::2] if entry != '']
if len(keys) != len(values):
print('ERROR: Mismatched keys and values in row %s' % str(row))
continue
else:
db_refs.update(dict(zip(keys, values)))
if len(db_refs.keys()) > 1:
g_map[key] = db_refs
else:
g_map[key] = None
return g_map, tuple(gm_tuples)
def load_equivalences(filename):
equivalences = {}
rows = load_csv(filename)
for row in rows:
fplx_id = row[2]
equiv = (row[0], row[1])
if fplx_id not in equivalences:
equivalences[fplx_id] = [equiv]
else:
equivalences[fplx_id].append(equiv)
return equivalences
def load_relationships(filename):
relationships = []
rows = load_csv(filename)
for row in rows:
relationships.append(((row[0], row[1]), row[2], (row[3], row[4])))
return relationships
def load_entities(filename):
entities = []
rows = load_csv(filename)
for row in rows:
entities.append(row[0])
return entities
def load_csv(filename):
filename = join(dirname(__file__), filename)
with open(filename) as fh:
csvreader = csv.reader(fh, delimiter=',', quotechar='"')
rows = [row for row in csvreader]
return rows
def load_synonyms(gm):
synonyms = {}
for syn, db_refs in gm.items():
for db, db_id in db_refs.items():
if db == 'FPLX':
if db_id in synonyms:
synonyms[db_id].append(syn)
else:
synonyms[db_id] = [syn]
return synonyms
relationships = load_relationships('relations.csv')
equivalences = load_equivalences('equivalences.csv')
gm, gm_tuples = load_grounding_map('grounding_map.csv')
entities = load_entities('entities.csv')
synonyms = load_synonyms(gm)
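# Hypothetical usage sketch (not part of the original file): 'AKT' is used
# here only as a placeholder FamPlex identifier.
#
#     fplx_id = 'AKT'
#     synonyms.get(fplx_id, [])       # text synonyms grounded to this entry
#     equivalences.get(fplx_id, [])   # (namespace, id) pairs for this entry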
``` |
{
"source": "johnbachman/genewalk",
"score": 2
} |
#### File: genewalk/genewalk/cli.py
```python
import gc
import os
import sys
import copy
import pickle
import random
import logging
import argparse
import numpy as np
from genewalk import __version__
from genewalk.nx_mg_assembler import load_network
from genewalk.gene_lists import read_gene_list
from genewalk.deepwalk import run_walks
from genewalk.null_distributions import get_rand_graph, \
get_null_distributions
from genewalk.perform_statistics import GeneWalk
from genewalk import logger as root_logger, default_logger_format, \
default_date_format
from genewalk.resources import ResourceManager
logger = logging.getLogger('genewalk.cli')
default_base_folder = os.path.join(os.path.expanduser('~/'), 'genewalk')
def create_project_folder(base_folder, project):
project_folder = os.path.join(base_folder, project)
logger.info('Creating project folder at %s' % project_folder)
if not os.path.exists(project_folder):
os.makedirs(project_folder)
return project_folder
def save_pickle(obj, project_folder, prefix):
fname = os.path.join(project_folder, '%s.pkl' % prefix)
logger.info('Saving into %s...' % fname)
with open(fname, 'wb') as fh:
pickle.dump(obj, fh)
def load_pickle(project_folder, prefix):
fname = os.path.join(project_folder, '%s.pkl' % prefix)
logger.info('Loading %s...' % fname)
with open(fname, 'rb') as fh:
return pickle.load(fh)
def main():
parser = argparse.ArgumentParser(
description='Run GeneWalk on a list of genes provided in a text '
'file.')
parser.add_argument('--version', action='version',
version='GeneWalk %s' % __version__,
help='Print the version of GeneWalk and exit.')
parser.add_argument('--project', help='A name for the project which '
'determines the folder within the '
'base folder in which the '
'intermediate and final results '
'are written. Must contain only '
'characters that are valid in '
'folder names.',
required=True)
parser.add_argument('--genes', help='Path to a text file with a list of '
                                        'genes of interest, for example '
'differentially expressed genes. '
'The type of gene identifiers used in '
'the text file are provided in the '
'id_type argument.',
required=True)
parser.add_argument('--id_type',
help='The type of gene IDs provided in the text file '
'in the genes argument. Possible values are: '
'hgnc_symbol, hgnc_id, ensembl_id, and mgi_id.',
choices=['hgnc_symbol', 'hgnc_id',
'ensembl_id', 'mgi_id'],
required=True)
parser.add_argument('--stage', default='all',
help='The stage of processing to run. Default: '
'%(default)s',
choices=['all', 'node_vectors', 'null_distribution',
'statistics'])
parser.add_argument('--base_folder', default=default_base_folder,
help='The base folder used to store GeneWalk '
'temporary and result files for a given project.'
' Default: %(default)s')
parser.add_argument('--network_source', default='pc',
                        help='The source of the network to be used. '
'Possible values are: pc, indra, edge_list, and '
'sif. In case of indra, edge_list, and sif, '
'the network_file argument must be specified.'
' Default: %(default)s',
choices=['pc', 'indra', 'edge_list', 'sif'])
parser.add_argument('--network_file', default=None,
help='If network_source is indra, this argument '
'points to a Python pickle file in which a list '
'of INDRA Statements constituting the network '
'is contained. In case network_source is '
'edge_list or sif, '
'the network_file argument points to a text file '
'representing the network.')
parser.add_argument('--nproc', default=1, type=int,
help='The number of processors to use in a '
'multiprocessing environment. Default: '
'%(default)s')
parser.add_argument('--nreps_graph', default=3, type=int,
help='The number of repeats to run when calculating '
'node vectors on the GeneWalk graph. '
'Default: %(default)s')
parser.add_argument('--nreps_null', default=3, type=int,
help='The number of repeats to run when calculating '
'node vectors on the random network graphs '
'for constructing the null distribution. '
'Default: %(default)s')
parser.add_argument('--alpha_fdr', default=1, type=float,
help='The false discovery rate to use when '
'outputting the final statistics table. '
'If 1 (default), all similarities are output, '
'otherwise only the ones whose false discovery '
'rate are below this parameter are included. '
'Default: %(default)s')
parser.add_argument('--save_dw', default=False, type=bool,
help='If True, the full DeepWalk object for each '
'repeat is saved in the project folder. This can '
'be useful for debugging but the files are '
'typically very large. Default: %(default)s')
parser.add_argument('--random_seed', default=None, type=int,
help='If provided, the random number generator is '
'seeded with the given value. This should only '
'be used if the goal is to deterministically '
'reproduce a prior result obtained with the same '
'random seed.')
args = parser.parse_args()
# Now we run the relevant stage of processing
project_folder = create_project_folder(args.base_folder, args.project)
# Add a logger specific to the project and processing stage
log_file = os.path.join(project_folder, 'genewalk_%s.log' % args.stage)
formatter = logging.Formatter(default_logger_format,
datefmt=default_date_format)
project_log_handler = logging.FileHandler(log_file)
project_log_handler.setFormatter(formatter)
root_logger.addHandler(project_log_handler)
if args.random_seed:
logger.info('Running with random seed %d' % args.random_seed)
random.seed(a=int(args.random_seed))
# Make sure we have all the resource files
rm = ResourceManager(base_folder=args.base_folder)
rm.download_all()
if args.stage in ('all', 'node_vectors'):
genes = read_gene_list(args.genes, args.id_type)
save_pickle(genes, project_folder, 'genes')
MG = load_network(args.network_source, args.network_file, genes,
resource_manager=rm)
save_pickle(MG.graph, project_folder, 'multi_graph')
for i in range(args.nreps_graph):
logger.info('%s/%s' % (i + 1, args.nreps_graph))
DW = run_walks(MG.graph, workers=args.nproc)
# Pickle the node vectors (embeddings) and DW object
if args.save_dw:
save_pickle(DW, project_folder, 'deepwalk_%d' % (i + 1))
nv = copy.deepcopy(DW.model.wv)
save_pickle(nv, project_folder,
'deepwalk_node_vectors_%d' % (i + 1))
# Delete the DeepWalk object to clear memory
del DW, nv
gc.collect()
if args.stage in ('all', 'null_distribution'):
MG = load_pickle(project_folder, 'multi_graph')
srd = []
for i in range(args.nreps_null):
logger.info('%s/%s' % (i + 1, args.nreps_null))
RG = get_rand_graph(MG)
DW = run_walks(RG, workers=args.nproc)
# Pickle the node vectors (embeddings) and DW object
if args.save_dw:
save_pickle(DW, project_folder, 'deepwalk_rand_%d' % (i + 1))
nv = copy.deepcopy(DW.model.wv)
save_pickle(nv, project_folder, 'deepwalk_node_vectors_rand_%d'
% (i + 1))
# Delete the DeepWalk object to clear memory
del DW
gc.collect()
# Calculate the null distributions
srd += get_null_distributions(RG, nv)
del nv
gc.collect()
srd = np.asarray(sorted(srd))
save_pickle(srd, project_folder, 'genewalk_rand_simdists')
if args.stage in ('all', 'statistics'):
MG = load_pickle(project_folder, 'multi_graph')
genes = load_pickle(project_folder, 'genes')
nvs = [load_pickle(project_folder,
'deepwalk_node_vectors_%d' % (i + 1))
for i in range(args.nreps_graph)]
null_dist = load_pickle(project_folder, 'genewalk_rand_simdists')
GW = GeneWalk(MG, genes, nvs, null_dist)
df = GW.generate_output(alpha_fdr=args.alpha_fdr,
base_id_type=args.id_type)
fname = os.path.join(project_folder, 'genewalk_results.csv')
logger.info('Saving final results into %s' % fname)
df.to_csv(fname, index=False, float_format='%.3e')
if __name__ == '__main__':
main()
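# Hypothetical command-line usage (assuming the module is run with
# `python -m genewalk.cli` or exposed as a console script); the project name
# and gene list file are made-up placeholders:
#
#     python -m genewalk.cli --project my_project --genes de_genes.txt \
#         --id_type hgnc_symbol
#     python -m genewalk.cli --project my_project --genes de_genes.txt \
#         --id_type hgnc_symbol --stage statistics --alpha_fdr 0.05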
```
#### File: genewalk/genewalk/resources.py
```python
import os
import gzip
import shutil
import logging
import urllib.request
logger = logging.getLogger('genewalk.resources')
class ResourceManager(object):
def __init__(self, base_folder=None):
self.base_folder = base_folder if base_folder else \
os.path.join(os.path.expanduser('~'), 'genewalk')
self.resource_folder = self._get_resource_folder()
logger.info('Using %s as resource folder.' % self.resource_folder)
def get_go_obo(self):
fname = os.path.join(self.resource_folder, 'go.obo')
if not os.path.exists(fname):
download_go(fname)
return fname
def get_goa_gaf(self):
fname = os.path.join(self.resource_folder, 'goa_human.gaf')
if not os.path.exists(fname):
url_goa = ('http://geneontology.org/gene-associations/'
'goa_human.gaf.gz')
download_gz(fname, url_goa)
return fname
def get_pc(self):
fname = os.path.join(self.resource_folder,
'PathwayCommons11.All.hgnc.sif')
if not os.path.exists(fname):
url_pc = ('http://www.pathwaycommons.org/archives/PC2/v11/'
'PathwayCommons11.All.hgnc.sif.gz')
download_gz(fname, url_pc)
return fname
def _get_resource_folder(self):
resource_dir = os.path.join(self.base_folder, 'resources')
if not os.path.isdir(resource_dir):
try:
os.makedirs(resource_dir)
except Exception:
logger.warning(resource_dir + ' already exists')
return resource_dir
def download_all(self):
self.get_go_obo()
self.get_goa_gaf()
self.get_pc()
def download_go(fname):
url = 'http://snapshot.geneontology.org/ontology/go.obo'
logger.info('Downloading %s into %s' % (url, fname))
urllib.request.urlretrieve(url, fname)
def download_gz(fname, url):
logger.info('Downloading %s and extracting into %s' % (url, fname))
gz_file = fname + '.gz'
urllib.request.urlretrieve(url, gz_file)
with gzip.open(gz_file, 'rb') as fin:
with open(fname, 'wb') as fout:
shutil.copyfileobj(fin, fout)
if __name__ == '__main__':
# Download all the resources if this script is run directly
ResourceManager().download_all()
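# Hypothetical usage sketch (not part of the original file): individual
# resource files can also be fetched on demand.
#
#     rm = ResourceManager()
#     go_obo_path = rm.get_go_obo()
#     pc_sif_path = rm.get_pc()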
``` |
{
"source": "johnbachman/indra_network_search",
"score": 3
} |
#### File: indra_network_search/tests/test_api.py
```python
import logging
import unittest
from indra_network_search.api import app
logger = logging.getLogger('api test')
class IndraNetworkServiceTest(unittest.TestCase):
def setUp(self):
app.testing = True
self.app = app.test_client()
def tearDown(self):
pass
def query_test(self):
"""Test a query to empty network"""
query = {
'source': 'BRCA1',
'target': 'BRCA2',
'stmt_filter': [],
'edge_hash_blacklist': [],
'node_filter': ['hgnc', 'fplx'],
'node_blacklist': [],
'path_length': False,
'sign': 'no_sign',
'weighted': False,
'bsco': 0.0,
'curated_db_only': False,
'fplx_expand': False,
'k_shortest': 1,
'two_way': False
}
resp = self.app.post('query/submit', json=query)
resp_json = resp.get_json()['result']
logger.info('Response: %s' % resp_json)
assert resp.status_code == 200
assert resp_json.keys() == {'paths_by_node_count', 'common_targets',
'common_parents', 'timeout'}
assert isinstance(resp_json['paths_by_node_count'], dict)
assert isinstance(resp_json['common_targets'], list)
assert isinstance(resp_json['common_parents'], dict)
assert isinstance(resp_json['timeout'], bool)
def test_query_endpoint(self):
"""Test if endpoint responds
NOTE: only tests that the query was received by the API and that it
was properly formatted.
"""
resp = self.app.post('query/submit', json={'test': 'api'})
logger.info('api response: %s' % repr(resp))
assert resp.status_code == 200
assert resp.get_json()
assert resp.get_json().get('result', None)
assert resp.get_json().get('result') == 'api test passed'
```
#### File: indra_network_search/tests/test_indra_network.py
```python
import logging
import unittest
import numpy as np
import pandas as pd
from collections import defaultdict
from depmap_analysis.network_functions.net_functions import \
sif_dump_df_to_digraph
from indra_network_search.net import IndraNetwork
logger = logging.getLogger(__name__)
# Get dataframe
df = pd.DataFrame()
# Add custom row to df that can be checked later
test_edge = ('GENE_A', 'GENE_B')
test_medge = (*test_edge, 0)
test_node = test_edge[0]
test_hash: int = 1234567890
belief: float = 0.987654321
test_type = 'Activation'
test_row = {
'agA_ns': 'TEST', 'agA_id': '1234', 'agA_name': test_edge[0],
'agB_ns': 'TEST', 'agB_id': '2345', 'agB_name': test_edge[1],
'stmt_type': test_type, 'evidence_count': 1, 'stmt_hash': test_hash
}
test_source: str = 'pc11'
test_evidence = {test_hash: {test_source: 1}}
test_belief = {test_hash: belief}
df = df.append(test_row, ignore_index=True)
dg = sif_dump_df_to_digraph(
df, strat_ev_dict=test_evidence, belief_dict=test_belief,
include_entity_hierarchies=False
)
class TestNetwork(unittest.TestCase):
def setUp(self):
self.df = df
self.indra_network = IndraNetwork(indra_dir_graph=dg)
self.indra_network.verbose = 2
def test_network_search(self):
query = {
'source': test_edge[0],
'target': test_edge[1],
'stmt_filter': [],
'edge_hash_blacklist': [],
'node_filter': ['test'],
'node_blacklist': [],
'path_length': False,
'sign': 'no_sign',
'weighted': False,
'bsco': 0.0,
'curated_db_only': False,
'fplx_expand': False,
'k_shortest': 1,
'two_way': True,
'mesh_ids': [],
'strict_mesh_id_filtering': False,
'const_c': 1,
'const_tk': 10,
'terminal_ns': [],
}
result = self.indra_network.handle_query(**query)
logger.info('Got result: %s' % result)
assert result['timeout'] is False
assert isinstance(result['paths_by_node_count'], (dict, defaultdict))
assert 2 in result['paths_by_node_count']['forward']
assert not result['paths_by_node_count']['backward']
assert len(result['paths_by_node_count']['forward'][2]) == 1
assert isinstance(result['paths_by_node_count']['forward'][2][0],
(dict, defaultdict))
path_dict = result['paths_by_node_count']['forward'][2][0]
assert path_dict['path'] == list(test_edge)
assert isinstance(path_dict['cost'], str)
assert isinstance(path_dict['sort_key'], str)
stmts = path_dict['stmts']
assert isinstance(stmts, list)
assert isinstance(stmts[0][test_type], list)
assert isinstance(stmts[0][test_type][0], dict)
assert stmts[0]['subj'], stmts[0]['obj'] == test_edge
stmt_dict = stmts[0][test_type][0]
assert isinstance(stmt_dict['weight'], (np.longfloat, float))
assert stmt_dict['stmt_type'] == test_row['stmt_type']
assert str(stmt_dict['stmt_hash']) == str(test_row['stmt_hash']), \
f"stmt_dict['stmt_hash']={stmt_dict['stmt_hash']}, test_row[" \
f"'stmt_hash']={test_row['stmt_hash']}"
assert stmt_dict['evidence_count'] == test_row['evidence_count']
assert isinstance(stmt_dict['source_counts'], dict)
assert stmt_dict['source_counts'] == test_evidence
assert stmt_dict['source_counts'][test_source] == \
test_evidence[test_source]
assert isinstance(stmt_dict['curated'], bool)
assert stmt_dict['curated'] is True
assert stmt_dict['belief'] == test_belief
def test_query_handling(self):
result = self.indra_network.handle_query(test=True)
assert {'paths_by_node_count', 'common_targets', 'common_parents',
'timeout'} == set(result.keys())
assert isinstance(result['paths_by_node_count'], dict)
assert not result['paths_by_node_count'].get('forward', False)
assert not result['paths_by_node_count'].get('backward', False)
assert isinstance(result['common_targets'], list)
assert isinstance(result['common_parents'], dict)
assert result['timeout'] is False
def test_dir_edge_structure(self):
# Get an edge from test DB
e = None
for e in self.indra_network.dir_edges:
if e != test_edge:
break
# Check basic edge
assert isinstance(e, tuple)
assert len(e) == 2
# Check edge dict
edge_dict = self.indra_network.dir_edges[e]
edge_dict_test = self.indra_network.dir_edges[test_edge]
assert isinstance(edge_dict, dict)
assert isinstance(edge_dict_test, dict)
assert isinstance(edge_dict['belief'], (np.longfloat, float))
assert isinstance(edge_dict_test['belief'], (np.longfloat, float))
assert isinstance(edge_dict['weight'], np.longfloat)
assert isinstance(edge_dict_test['weight'], np.longfloat)
# Check stmt meta data list
stmt_list = edge_dict['statements']
test_stmt_list = edge_dict_test['statements']
assert isinstance(stmt_list, list)
assert isinstance(test_stmt_list, list)
assert isinstance(stmt_list[0], dict)
assert isinstance(test_stmt_list[0], dict)
# Check stmt meta data
assert isinstance(stmt_list[0]['weight'], (float, np.longfloat))
assert isinstance(test_stmt_list[0]['weight'], (float, np.longfloat))
assert isinstance(stmt_list[0]['stmt_type'], str)
assert test_stmt_list[0]['stmt_type'] == 'TestStatement'
assert isinstance(stmt_list[0]['stmt_hash'], int)
assert test_stmt_list[0]['stmt_hash'] == 1234567890
assert isinstance(stmt_list[0]['evidence_count'], int)
assert test_stmt_list[0]['evidence_count'] == 1
assert isinstance(stmt_list[0]['source_counts'], dict)
assert isinstance(test_stmt_list[0]['source_counts'], dict)
assert len(test_stmt_list[0]['source_counts']) == 1
assert test_source in test_stmt_list[0]['source_counts']
assert test_stmt_list[0]['source_counts'][test_source] == 1
assert isinstance(stmt_list[0]['curated'], bool)
assert test_stmt_list[0]['curated'] is True
assert isinstance(stmt_list[0]['belief'], (float, np.longfloat))
assert isinstance(test_stmt_list[0]['belief'], (float, np.longfloat))
assert test_stmt_list[0]['belief'] == 0.987654321
def test_multi_dir_edge_structure(self):
# Get an edge from test DB
e = None
for e in self.indra_network.mdg_edges:
if e != test_medge:
break
# Check basic edge
assert isinstance(e, tuple)
assert len(e) == 3
# Check edge dict
edge_dict = self.indra_network.mdg_edges[e]
edge_dict_test = self.indra_network.mdg_edges[test_medge]
assert isinstance(edge_dict, dict)
assert isinstance(edge_dict_test, dict)
assert isinstance(edge_dict['belief'], (np.longfloat, float))
assert isinstance(edge_dict_test['belief'], (np.longfloat, float))
assert edge_dict_test['belief'] == 0.987654321
assert isinstance(edge_dict['weight'], (np.longfloat, float))
assert isinstance(edge_dict_test['weight'], (np.longfloat, float))
assert isinstance(edge_dict['stmt_type'], str)
assert edge_dict_test['stmt_type'] == 'TestStatement'
assert isinstance(edge_dict['stmt_hash'], int)
assert edge_dict_test['stmt_hash'] == 1234567890
assert isinstance(edge_dict['evidence_count'], int)
assert edge_dict_test['evidence_count'] == 1
assert isinstance(edge_dict['source_counts'], dict)
assert isinstance(edge_dict_test['source_counts'], dict)
assert len(edge_dict_test['source_counts']) == 1
assert test_source in edge_dict_test['source_counts']
assert edge_dict_test['source_counts'][test_source] == 1
assert isinstance(edge_dict['curated'], bool)
assert edge_dict_test['curated'] is True
def test_nodes(self):
# Get a db node
node = None
for node in self.indra_network.nodes:
if node != test_node:
break
# Check nodes
node_dict = self.indra_network.nodes[node]
test_node_dict = self.indra_network.nodes[test_node]
assert isinstance(node_dict, dict)
assert isinstance(test_node_dict, dict)
assert isinstance(node_dict['ns'], str)
assert test_node_dict['ns'] == 'TEST'
assert isinstance(node_dict['id'], str)
assert test_node_dict['id'] == '1234'
``` |
{
"source": "johnbachman/nf_model",
"score": 2
} |
#### File: nf_model/mech_model/processor.py
```python
import os
import sys
from collections import OrderedDict, defaultdict
from indra.sources import trips
from pysb import Observable #kappa
from indra.tools import assemble_corpus as ac
#from indra.explanation.model_checker import remove_im_params
from indra.preassembler import flatten_evidence
from pysb.integrate import ScipyOdeSimulator
import numpy as np
from matplotlib import pyplot as plt
from indra.util import plot_formatting as pf
import pickle
from indra.statements import *
from indra.assemblers.english.assembler import _assemble_agent_str
#from indra.explanation.model_checker import _add_modification_to_agent
from indra.tools import assemble_corpus as ac
from pysb.export import export
from indra.sources import indra_db_rest as idr
from indra_db import client
import logging
logger = logging.getLogger(__name__)
class NlModelProcessor(object):
def __init__(self, model_file, cache_dir=None, trips_endpoint='drum-dev',
extra_stmts=None):
if cache_dir is None:
cache_dir = '_cache'
if extra_stmts is None:
extra_stmts = []
self.model_file = model_file
self.extra_stmts = extra_stmts
self.cache_dir = cache_dir
self.trips_endpoint = trips_endpoint
self.model_lines = {}
self.statements = []
self.stmts_by_group = {}
def scrape_model_text(self):
"""Get text from model .rst file grouped by subheading.
Returns
-------
dict
Dictionary mapping heading title to a list of lines.
"""
with open(self.model_file, 'rt') as f:
lines = [line.strip() for line in f.readlines()]
# Filter out empty lines and section headings
model_lines = OrderedDict()
for ix, line in enumerate(lines):
if not line:
continue
if line.startswith('===') or line.startswith('#'):
continue
if ix < len(lines) - 1 and lines[ix+1].startswith('==='):
group_name = line
continue
else:
if group_name in model_lines:
model_lines[group_name].append(line)
else:
model_lines[group_name] = [line]
self.model_lines = model_lines
return self.model_lines
def get_statements(self, output_file=None):
"""Get the full set of model statements including extra statements.
Optionally dumps a pickle of statements to given output file.
Parameters
----------
output_file : str
File to save the statements.
Returns
-------
list of INDRA Statements
"""
stmts_by_group = self.get_stmts_by_group()
self.statements = [s for stmts_by_line in stmts_by_group.values()
for stmt_list in stmts_by_line.values()
for s in stmt_list]
# Dump the statements
if output_file is not None:
ac.dump_statements(self.statements, output_file)
return self.statements
def get_stmts_by_group(self, output_file=None):
"""Read lines in model and store statements in dict keyed by group.
Optionally dumps the dictionary of statements by group to a file.
Parameters
----------
output_file : str
File to save the dictionary of statements indexed by group.
Returns
-------
dict of lists of statements
Dictionary mapping heading titles to lists of INDRA Statements.
"""
# Get the sentences from the model
model_lines = self.scrape_model_text()
# Read text
stmts_by_group = OrderedDict()
# Iterate over each group of sentences
for group_name, lines in model_lines.items():
stmts_by_line = OrderedDict()
print("Processing group '%s'..." % group_name)
for ix, line in enumerate(lines):
print("%d of %d: processing '%s'..." % (ix+1, len(lines), line))
stmts = self.process_text_with_cache(line)
stmts_by_line[line] = stmts
stmts_by_group[group_name] = stmts_by_line
# Add the extra statements
if self.extra_stmts:
stmts_by_group['extra'] = OrderedDict()
stmts_by_group['extra']['extra'] = self.extra_stmts
self.stmts_by_group = stmts_by_group
# Dump the statements
if output_file is not None:
with open(output_file, 'wb') as f:
pickle.dump(self.stmts_by_group, f)
return self.stmts_by_group
def process_text_with_cache(self, text):
"""Wrapper around trips.process_text that caches statements.
Parameters
----------
text : str
Text to be processed by TRIPS.
Returns
-------
list of INDRA Statements
List of INDRA Statements from processing the text.
"""
text_no_space = text.replace(' ', '')
text_no_space = text_no_space.replace('.', '')
cache_filename = text_no_space + '.pkl'
cache_path = os.path.join(self.cache_dir, cache_filename)
if os.path.isfile(cache_path):
print('Loading cached stmts: %s' % cache_filename)
with open(cache_path, 'rb') as f:
stmts = pickle.load(f)
else:
# Alternative use 'drum-dev'
tp = trips.process_text(text, service_endpoint=self.trips_endpoint)
stmts = tp.statements
# If no statements produced, emit warning
if not stmts:
logger.warning(f'No statements for "{text}"')
print(f'No statements for "{text}"')
else:
logger.info(f'Saving {len(stmts)} stmts in {cache_filename}')
with open(cache_path, 'wb') as f:
pickle.dump(stmts, f)
return stmts
# Reverse effect for translocation?
# Checks:
# -- growth factor stimulation increases:
# - phospho-EGFR
# - Active RAS
# - Phospho-ERK
# - FOS/JUN expression
# - phospho-S6
# Add complexes as binding statements
# Re-write statements grounded to BE complexes to contain bound conditions
# (PI3K, AP1)
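# Hypothetical usage sketch (not part of the original file): the model file
# and output file names are made-up placeholders.
#
#     nlp_proc = NlModelProcessor('mech_model.rst', cache_dir='_cache')
#     stmts_by_group = nlp_proc.get_stmts_by_group('stmts_by_group.pkl')
#     stmts = nlp_proc.get_statements('model_stmts.pkl')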
``` |
{
"source": "johnbachman/OmicsIntegrator",
"score": 3
} |
#### File: OmicsIntegrator/tests/test_load_graph.py
```python
import os, sys, tempfile, pytest
# Create the path to forest relative to the test_load_graph.py path
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'scripts'))
if not path in sys.path:
sys.path.insert(1, path)
del path
from forest_util import loadGraph
def write_valid_graph(tmpf):
'''Write a valid .sif file
INPUT: tmpf - a temporary file
'''
tmpf.write('A\tpp\tB\n')
tmpf.write('B\tpd\tC\n')
tmpf.write('C\tpd\tD\n')
tmpf.write('D\tpd\tC\n')
tmpf.write('A\tpp\tE\n')
class TestLoadGraph:
def test_valid_graph(self):
sifFilename= tempfile.NamedTemporaryFile(suffix='.sif', delete=False)
try:
# Write the network file
write_valid_graph(sifFilename)
finally:
sifFilename.close()
# Load the DiGraph
graph = loadGraph(sifFilename.name)
# Check that the DiGraph has the expected properties
assert graph.order() == 5, "Unexpected number of nodes"
assert graph.size() == 7, "Unexpected number of edges"
# Check that the DiGraph has the expected edges
assert graph.has_edge('A','B')
assert graph.has_edge('B','A')
assert graph.has_edge('B','C')
assert graph.has_edge('C','D')
assert graph.has_edge('D','C')
assert graph.has_edge('A','E')
assert graph.has_edge('E','A')
# Remove because delete=False above
os.remove(sifFilename.name)
def test_invalid_edge_type(self):
sifFilename= tempfile.NamedTemporaryFile(suffix='.sif', delete=False)
try:
# Write the network file
write_valid_graph(sifFilename)
# Add another line with an invalid edge
sifFilename.write('A\tdd\tF\n')
finally:
sifFilename.close()
# Load the DiGraph and expect an Exception for the invalid edge
with pytest.raises(Exception):
loadGraph(sifFilename.name)
# Remove because delete=False above
os.remove(sifFilename.name)
def test_invalid_columns(self):
sifFilename= tempfile.NamedTemporaryFile(suffix='.sif', delete=False)
try:
# Write the network file
write_valid_graph(sifFilename)
# Add another line with an extra column
sifFilename.write('A\tdd\tF\t0.85\n')
finally:
sifFilename.close()
# Load the DiGraph and expect an Exception for the extra column
with pytest.raises(Exception):
loadGraph(sifFilename.name)
# Remove because delete=False above
os.remove(sifFilename.name)
```
#### File: OmicsIntegrator/tests/test_score.py
```python
import os, sys
# Create the path to forest relative to the test_score.py path
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'scripts'))
if not path in sys.path:
sys.path.insert(1, path)
del path
from forest import score
class TestScore:
def test_muGreaterThanZero(self):
try:
score(2, 0, 0)
except ValueError:
assert 1
else:
assert 0
try:
score(2, -1, 0)
except ValueError:
assert 1
else:
assert 0
def test_whenValueOne(self):
assert score(1, 1, 0) == 0
def test_musquaredFalse(self):
assert score(2, 2, 0) == -4
def test_musquaredTrue(self):
assert score(2, 2, 1) == -8
``` |
{
"source": "johnbachman/protmapper",
"score": 3
} |
#### File: protmapper/protmapper/cli.py
```python
import csv
import argparse
from protmapper.api import *
def process_input(fname):
sites = []
with open(fname, 'r') as fh:
for idx, row in enumerate(csv.reader(fh)):
if len(row) != 4:
raise ValueError('Line %d of %s doesn\'t have 4 elements.' %
(idx, fname))
sites.append(row)
return sites
def dump_output(fname, mapped_sites):
rows = [mapped_sites[0].attrs]
rows += [ms.to_list() for ms in mapped_sites]
with open(fname, 'w') as fh:
writer = csv.writer(fh)
writer.writerows(rows)
def main():
parser = argparse.ArgumentParser(
description='Run Protmapper on a list of proteins with residues and '
'sites provided in a text file.')
parser.add_argument('input',
help=('Path to an input file. The input file is a text file in '
'which each row consists of four comma separated values, '
'with the first element being a protein ID, the second, '
'the namespace in which that ID is valid (uniprot or hgnc),'
'third, an amino acid represented as a single capital letter, '
'and fourth, a site position on the protein.'))
parser.add_argument('output',
help=('Path to the output file to be generated. Each line of the '
            'output file corresponds to a line in the input file. Each line '
'represents a mapped site produced by Protmapper.'))
parser.add_argument('--peptide',
help=('If given, the third element of each row of the input file is a '
'peptide (amino acid sequence) rather than a single amino acid '
'residue. In this case, peptide-oriented mappings are '
'applied. In this mode the following boolean arguments are '
'ignored.'), action='store_true')
parser.add_argument('--no_methionine_offset', help=(
'If given, will not check for off-by-one errors in site position ('
'possibly) attributable to site numbering from mature proteins after '
'cleavage of the initial methionine.'), action='store_true')
parser.add_argument('--no_orthology_mapping', help=(
'If given, will not check sequence positions for known modification '
'sites in mouse or rat sequences (based on PhosphoSitePlus data).'),
action='store_true')
parser.add_argument('--no_isoform_mapping', help=(
'If given, will not check sequence positions for known modifications '
'in other human isoforms of the protein (based on PhosphoSitePlus '
'data).'), action='store_true')
args = parser.parse_args()
# Separate call to make function testable
run_main(args)
def run_main(args):
mapping_kwargs = {
'do_methionine_offset': False if args.no_methionine_offset else True,
'do_orthology_mapping': False if args.no_orthology_mapping else True,
'do_isoform_mapping': False if args.no_isoform_mapping else True
}
pm = ProtMapper()
sites = process_input(args.input)
if args.peptide:
# We have to make the positions ints here
sites = [tuple(s[:3] + [int(s[3])]) for s in sites]
mapped_sites = [pm.map_peptide_to_human_ref(*site) for site in sites]
else:
mapped_sites = pm.map_sitelist_to_human_ref(sites, **mapping_kwargs)
dump_output(args.output, mapped_sites)
if __name__ == '__main__':
main()
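# Hypothetical command-line usage (assuming the module is run with
# `python -m protmapper.cli` or exposed as a console script); file names are
# made-up placeholders. Each input row is: id, namespace, residue, position.
#
#     python -m protmapper.cli sites_in.csv sites_out.csv
#     python -m protmapper.cli --peptide peptides_in.csv peptides_out.csv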
```
#### File: protmapper/tests/test_phosphosite_client.py
```python
from nose.plugins.attrib import attr
from protmapper.phosphosite_client import map_to_human_site, sites_only, \
PspMapping
def test_map_mouse_to_human():
mouse_up_id = 'Q61337'
psp = map_to_human_site(mouse_up_id, 'S', '112')
assert isinstance(psp, PspMapping)
assert psp.mapped_id == 'Q92934' # Human ref seq
assert psp.mapped_res == 'S'
assert psp.mapped_pos == '75'
assert psp.motif == 'EIRSRHSSYPAGTED'
assert psp.respos == 7
def test_isoform_mapping_from_human():
up_id = 'P29353'
psp = map_to_human_site(up_id, 'Y', '239')
assert isinstance(psp, PspMapping)
assert psp.mapped_id == 'P29353' # Human ref seq
assert psp.mapped_res == 'Y'
assert psp.mapped_pos == '349'
assert psp.motif == 'EEPPDHQYYNDFPGK'
assert psp.respos == 7
def test_mapping_from_human_isoform():
up_id = 'P29353-2'
psp = map_to_human_site(up_id, 'Y', '239')
assert isinstance(psp, PspMapping)
assert psp.mapped_id == 'P29353' # Human ref seq
assert psp.mapped_res == 'Y'
assert psp.mapped_pos == '349'
assert psp.motif == 'EEPPDHQYYNDFPGK'
assert psp.respos == 7
def test_isoform_mapping_from_mouse():
up_id = 'P98083' # Mouse SHC1
psp = map_to_human_site(up_id, 'Y', '239')
assert isinstance(psp, PspMapping)
assert psp.mapped_id == 'P29353' # Human ref seq
assert psp.mapped_res == 'Y'
assert psp.mapped_pos == '349'
assert psp.motif == 'EEPPDHQYYNDFPGK'
assert psp.respos == 7
def test_mapping_from_human_ref():
up_id = 'P29353' # SHC1
psp = map_to_human_site(up_id, 'Y', '349')
assert isinstance(psp, PspMapping)
assert psp.mapped_id == 'P29353' # Human ref seq
assert psp.mapped_res == 'Y'
assert psp.mapped_pos == '349'
assert psp.motif == 'EEPPDHQYYNDFPGK'
assert psp.respos == 7
def test_mapping_from_human_ref_iso_id():
up_id = 'P29353-1' # SHC1
psp = map_to_human_site(up_id, 'Y', '349')
assert isinstance(psp, PspMapping)
assert psp.mapped_id == 'P29353' # Human ref seq
assert psp.mapped_res == 'Y'
assert psp.mapped_pos == '349'
assert psp.motif == 'EEPPDHQYYNDFPGK'
assert psp.respos == 7
def test_mapping_from_mouse_isoform():
up_id = 'Q8CI51-3'
psp = map_to_human_site(up_id, 'S', '105')
assert isinstance(psp, PspMapping)
assert psp.mapped_id == 'Q96HC4' # Human ref seq
assert psp.mapped_res == 'S'
assert psp.mapped_pos == '214'
assert psp.motif == 'PTVTSVCSETSQELA'
assert psp.respos == 7
def test_no_site_in_human_ref():
psp = map_to_human_site('Q01105', 'S', '9')
assert isinstance(psp, PspMapping)
assert psp.mapped_id == 'Q01105-2'
assert psp.mapped_res == 'S'
assert psp.mapped_pos == '9'
assert psp.motif == 'SAPAAKVSKKELNSN'
assert psp.respos == 7
"""
def test_wrong_residue():
# SL6A3 T53 -> S53
psp = map_to_human_site('Q01959', 'T', '53')
assert isinstance(psp, PspMapping)
assert psp.mapped_id == 'Q01959'
assert psp.mapped_res == 'S'
assert psp.mapped_pos == '53'
assert psp.motif == 'TLTNPRQSPVEAQDR'
assert psp.respos == 7
"""
def test_smpd1_s508():
# The site is invalid, but PSP doesn't know that
psp = map_to_human_site('P17405', 'S', '508')
assert isinstance(psp, PspMapping)
assert psp.mapped_id == 'P17405'
assert psp.mapped_res == 'S'
assert psp.mapped_pos == '508'
assert psp.motif == 'DGNYSGSSHVVLDHE'
assert psp.respos == 7
def test_set_s9():
psp = map_to_human_site('Q01105', 'S', '9')
assert isinstance(psp, PspMapping)
assert psp.mapped_id == 'Q01105-2'
assert psp.mapped_res == 'S'
assert psp.mapped_pos == '9'
def test_h2afx_s139():
psp = map_to_human_site('P16104', 'S', '139')
assert isinstance(psp, PspMapping)
assert psp.mapped_id == 'P16104'
assert psp.mapped_res == 'S'
assert psp.mapped_pos == '139'
assert psp.motif == 'GKKATQASQEY'
assert psp.respos == 7
def test_motif_processing():
# Make sure that site motifs with prepended underscores have the residue
# position assigned accordingly
psp = map_to_human_site('P68431', 'T', '3')
assert isinstance(psp, PspMapping)
assert psp.mapped_id == 'P68431'
assert psp.mapped_res == 'T'
assert psp.mapped_pos == '3'
assert psp.motif == 'ARTKQTARKS'
assert psp.respos == 2
def test_sites_only():
sites = sites_only()
# These checks make sure that the sites are constructed from the data
# dictionaries as expected
assert ('Q8CI51-3', 'S', '105') in sites
assert ('Q8CI51', 'T', '80') in sites
assert ('Q8CI51-3', 'T', '80') not in sites
assert ('P29353', 'Y', '427') in sites
assert ('P29353', 'Y', '317') in sites
assert ('P29353-2', 'Y', '317') in sites
assert ('P29353-2', 'Y', '427') not in sites
# Check the -1 isoforms are also included
assert ('P28661-1', 'S', '28') in sites
def test_explicit_ref_isoforms():
psp = map_to_human_site('Q9Y2K2', 'S', '551')
assert psp.mapped_id == 'Q9Y2K2'
assert psp.mapped_res == 'S'
assert psp.mapped_pos == '493'
psp = map_to_human_site('Q14155', 'S', '672')
assert psp.mapped_id == 'Q14155'
assert psp.mapped_res == 'S'
assert psp.mapped_pos == '694'
psp = map_to_human_site('O15027', 'T', '220')
assert psp.mapped_id == 'O15027'
assert psp.mapped_res == 'T'
assert psp.mapped_pos == '415'
psp = map_to_human_site('Q16555', 'S', '627')
assert psp.mapped_id == 'Q16555'
assert psp.mapped_res == 'S'
assert psp.mapped_pos == '522'
def test_ref_seq_not_found():
psp = map_to_human_site('P10636', 'S', '202')
assert psp.mapped_id == 'P10636'
assert psp.mapped_res == 'S'
assert psp.mapped_pos == '519'
``` |
{
"source": "johnbachman/pykeen",
"score": 2
} |
#### File: pykeen/models/base.py
```python
import inspect
import logging
from abc import abstractmethod
from collections import defaultdict
from typing import Any, ClassVar, Collection, Dict, Iterable, List, Mapping, Optional, Set, Type, Union
import pandas as pd
import torch
from torch import nn
from ..losses import Loss, MarginRankingLoss, NSSALoss
from ..regularizers import NoRegularizer, Regularizer
from ..tqdmw import tqdm
from ..triples import TriplesFactory
from ..typing import MappedTriples
from ..utils import NoRandomSeedNecessary, get_embedding, resolve_device, set_random_seed
from ..version import get_version
__all__ = [
'Model',
'EntityEmbeddingModel',
'EntityRelationEmbeddingModel',
'MultimodalModel',
]
logger = logging.getLogger(__name__)
UNSUPPORTED_FOR_SUBBATCHING = ( # must be a tuple
nn.BatchNorm1d,
nn.BatchNorm2d,
nn.BatchNorm3d,
nn.SyncBatchNorm,
)
def _extend_batch(
batch: MappedTriples,
all_ids: List[int],
dim: int,
) -> MappedTriples:
"""Extend batch for 1-to-all scoring by explicit enumeration.
:param batch: shape: (batch_size, 2)
The batch.
:param all_ids: len: num_choices
The IDs to enumerate.
:param dim: in {0,1,2}
The column along which to insert the enumerated IDs.
:return: shape: (batch_size * num_choices, 3)
A large batch, where every pair from the original batch is combined with every ID.
"""
# Extend the batch to the number of IDs such that each pair can be combined with all possible IDs
extended_batch = batch.repeat_interleave(repeats=len(all_ids), dim=0)
# Create a tensor of all IDs
ids = torch.tensor(all_ids, dtype=torch.long, device=batch.device)
# Extend all IDs to the number of pairs such that each ID can be combined with every pair
extended_ids = ids.repeat(batch.shape[0])
# Fuse the extended pairs with all IDs to a new (h, r, t) triple tensor.
columns = [extended_batch[:, i] for i in (0, 1)]
columns.insert(dim, extended_ids)
hrt_batch = torch.stack(columns, dim=-1)
return hrt_batch
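# Hypothetical usage sketch (not part of the original file): extend a batch of
# two (head, relation) pairs with three candidate tail IDs, yielding a
# (6, 3) tensor of (h, r, t) triples.
#
#     batch = torch.tensor([[0, 1], [2, 3]], dtype=torch.long)
#     hrt = _extend_batch(batch, all_ids=[5, 6, 7], dim=2)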
class Model(nn.Module):
"""A base module for all of the KGE models."""
#: A dictionary of hyper-parameters to the models that use them
_hyperparameter_usage: ClassVar[Dict[str, Set[str]]] = defaultdict(set)
#: The default strategy for optimizing the model's hyper-parameters
hpo_default: ClassVar[Mapping[str, Any]]
#: The default loss function class
loss_default: ClassVar[Type[Loss]] = MarginRankingLoss
#: The default parameters for the default loss function class
loss_default_kwargs: ClassVar[Optional[Mapping[str, Any]]] = dict(margin=1.0, reduction='mean')
#: The instance of the loss
loss: Loss
#: The default regularizer class
regularizer_default: ClassVar[Type[Regularizer]] = NoRegularizer
#: The default parameters for the default regularizer class
regularizer_default_kwargs: ClassVar[Optional[Mapping[str, Any]]] = None
#: The instance of the regularizer
regularizer: Regularizer
def __init__(
self,
triples_factory: TriplesFactory,
loss: Optional[Loss] = None,
predict_with_sigmoid: bool = False,
automatic_memory_optimization: Optional[bool] = None,
preferred_device: Optional[str] = None,
random_seed: Optional[int] = None,
regularizer: Optional[Regularizer] = None,
) -> None:
"""Initialize the module.
:param triples_factory:
The triples factory facilitates access to the dataset.
:param loss:
The loss to use. If None is given, use the loss default specific to the model subclass.
:param predict_with_sigmoid:
Whether to apply sigmoid onto the scores when predicting scores. Applying sigmoid at prediction time may
lead to exactly equal scores for certain triples with very high, or very low score. When not trained with
applying sigmoid (or using BCEWithLogits), the scores are not calibrated to perform well with sigmoid.
:param automatic_memory_optimization:
If set to `True`, the model derives the maximum possible batch sizes for the scoring of triples during
            evaluation and also training (if no batch size was given). This allows full utilization of the
            hardware at hand and achieves the fastest calculations possible.
:param preferred_device:
The preferred device for model training and inference.
:param random_seed:
A random seed to use for initialising the model's weights. **Should** be set when aiming at reproducibility.
:param regularizer:
A regularizer to use for training.
"""
super().__init__()
# Initialize the device
self._set_device(preferred_device)
# Random seeds have to set before the embeddings are initialized
if random_seed is None:
logger.warning('No random seed is specified. This may lead to non-reproducible results.')
elif random_seed is not NoRandomSeedNecessary:
set_random_seed(random_seed)
if automatic_memory_optimization is None:
automatic_memory_optimization = True
# Loss
if loss is None:
self.loss = self.loss_default(**self.loss_default_kwargs)
else:
self.loss = loss
# TODO: Check loss functions that require 1 and -1 as label but only
self.is_mr_loss = isinstance(self.loss, MarginRankingLoss)
# Regularizer
if regularizer is None:
regularizer = self.regularizer_default(
device=self.device,
**(self.regularizer_default_kwargs or {}),
)
self.regularizer = regularizer
self.is_nssa_loss = isinstance(self.loss, NSSALoss)
# The triples factory facilitates access to the dataset.
self.triples_factory = triples_factory
'''
When predict_with_sigmoid is set to True, the sigmoid function is applied to the logits during evaluation and
also for predictions after training, but has no effect on the training.
'''
self.predict_with_sigmoid = predict_with_sigmoid
# This allows to store the optimized parameters
self.automatic_memory_optimization = automatic_memory_optimization
@property
def can_slice_h(self) -> bool:
"""Whether score_h supports slicing."""
return _can_slice(self.score_h)
@property
def can_slice_r(self) -> bool:
"""Whether score_r supports slicing."""
return _can_slice(self.score_r)
@property
def can_slice_t(self) -> bool:
"""Whether score_t supports slicing."""
return _can_slice(self.score_t)
@property
def modules_not_supporting_sub_batching(self) -> Collection[nn.Module]:
"""Return all modules not supporting sub-batching."""
return [
module
for module in self.modules()
if isinstance(module, UNSUPPORTED_FOR_SUBBATCHING)
]
@property
def supports_subbatching(self) -> bool: # noqa: D400, D401
"""Does this model support sub-batching?"""
return len(self.modules_not_supporting_sub_batching) == 0
@abstractmethod
def _reset_parameters_(self): # noqa: D401
"""Reset all parameters of the model in-place."""
raise NotImplementedError
def reset_parameters_(self) -> 'Model': # noqa: D401
"""Reset all parameters of the model and enforce model constraints."""
self._reset_parameters_()
self.to_device_()
self.post_parameter_update()
return self
def __init_subclass__(cls, **kwargs):
"""Initialize the subclass while keeping track of hyper-parameters."""
super().__init_subclass__(**kwargs)
# Keep track of the hyper-parameters that are used across all
# subclasses of BaseModule
for k in cls.__init__.__annotations__.keys():
if k not in Model.__init__.__annotations__:
Model._hyperparameter_usage[k].add(cls.__name__)
@property
def num_entities(self) -> int: # noqa: D401
"""The number of entities in the knowledge graph."""
return self.triples_factory.num_entities
@property
def num_relations(self) -> int: # noqa: D401
"""The number of unique relation types in the knowledge graph."""
return self.triples_factory.num_relations
def _set_device(self, device: Union[None, str, torch.device] = None) -> None:
"""Set the Torch device to use."""
self.device = resolve_device(device=device)
def to_device_(self) -> 'Model':
"""Transfer model to device."""
self.to(self.device)
self.regularizer.to(self.device)
torch.cuda.empty_cache()
return self
def to_cpu_(self) -> 'Model':
"""Transfer the entire model to CPU."""
self._set_device('cpu')
return self.to_device_()
def to_gpu_(self) -> 'Model':
"""Transfer the entire model to GPU."""
self._set_device('cuda')
return self.to_device_()
def predict_scores(self, triples: torch.LongTensor) -> torch.FloatTensor:
"""Calculate the scores for triples.
This method takes head, relation and tail of each triple and calculates the corresponding score.
Additionally, the model is set to evaluation mode.
:param triples: shape: (number of triples, 3), dtype: long
The indices of (head, relation, tail) triples.
:return: shape: (number of triples, 1), dtype: float
The score for each triple.
"""
# Enforce evaluation mode
self.eval()
scores = self.score_hrt(triples)
if self.predict_with_sigmoid:
scores = torch.sigmoid(scores)
return scores
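    # Minimal usage sketch (an illustration only: `model` stands for any trained subclass
    # of this class, and the index triples must be valid for its triples factory):
    #
    #     hrt = torch.tensor([[0, 1, 2], [3, 1, 4]], dtype=torch.long, device=model.device)
    #     scores = model.predict_scores(hrt)  # shape: (2, 1)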
def predict_scores_all_tails(
self,
hr_batch: torch.LongTensor,
slice_size: Optional[int] = None,
) -> torch.FloatTensor:
"""Forward pass using right side (tail) prediction for obtaining scores of all possible tails.
This method calculates the score for all possible tails for each (head, relation) pair.
Additionally, the model is set to evaluation mode.
:param hr_batch: shape: (batch_size, 2), dtype: long
The indices of (head, relation) pairs.
:param slice_size: >0
The divisor for the scoring function when using slicing.
:return: shape: (batch_size, num_entities), dtype: float
For each h-r pair, the scores for all possible tails.
"""
# Enforce evaluation mode
self.eval()
if slice_size is None:
scores = self.score_t(hr_batch)
else:
scores = self.score_t(hr_batch, slice_size=slice_size)
if self.predict_with_sigmoid:
scores = torch.sigmoid(scores)
return scores
def predict_heads(
self,
relation_label: str,
tail_label: str,
add_novelties: bool = True,
remove_known: bool = False,
) -> pd.DataFrame:
"""Predict tails for the given head and relation (given by label).
:param relation_label: The string label for the relation
:param tail_label: The string label for the tail entity
:param add_novelties: Should the dataframe include a column denoting if the ranked head entities correspond
to novel triples?
:param remove_known: Should non-novel triples (those appearing in the training set) be shown with the results?
On one hand, this allows you to better assess the goodness of the predictions - you want to see that the
non-novel triples generally have higher scores. On the other hand, if you're doing hypothesis generation, they
may pose as a distraction. If this is set to True, then non-novel triples will be removed and the column
denoting novelty will be excluded, since all remaining triples will be novel. Defaults to false.
The following example shows that after you train a model on the Nations dataset,
you can score all entities w.r.t a given relation and tail entity.
>>> from pykeen.pipeline import pipeline
>>> result = pipeline(
... dataset='Nations',
... model='RotatE',
... )
>>> df = result.model.predict_heads('accusation', 'brazil')
"""
tail_id = self.triples_factory.entity_to_id[tail_label]
relation_id = self.triples_factory.relation_to_id[relation_label]
rt_batch = torch.tensor([[relation_id, tail_id]], dtype=torch.long, device=self.device)
scores = self.predict_scores_all_heads(rt_batch)
scores = scores[0, :].tolist()
rv = pd.DataFrame(
[
(entity_id, entity_label, scores[entity_id])
for entity_label, entity_id in self.triples_factory.entity_to_id.items()
],
columns=['head_id', 'head_label', 'score'],
).sort_values('score', ascending=False)
if add_novelties or remove_known:
rv['novel'] = rv['head_id'].map(lambda head_id: self._novel(head_id, relation_id, tail_id))
if remove_known:
rv = rv[rv['novel']]
del rv['novel']
return rv
def predict_tails(
self,
head_label: str,
relation_label: str,
add_novelties: bool = True,
remove_known: bool = False,
) -> pd.DataFrame:
"""Predict tails for the given head and relation (given by label).
:param head_label: The string label for the head entity
:param relation_label: The string label for the relation
:param add_novelties: Should the dataframe include a column denoting if the ranked tail entities correspond
to novel triples?
:param remove_known: Should non-novel triples (those appearing in the training set) be shown with the results?
On one hand, this allows you to better assess the goodness of the predictions - you want to see that the
non-novel triples generally have higher scores. On the other hand, if you're doing hypothesis generation, they
may pose as a distraction. If this is set to True, then non-novel triples will be removed and the column
denoting novelty will be excluded, since all remaining triples will be novel. Defaults to false.
The following example shows that after you train a model on the Nations dataset,
you can score all entities w.r.t a given head entity and relation.
>>> from pykeen.pipeline import pipeline
>>> result = pipeline(
... dataset='Nations',
... model='RotatE',
... )
>>> df = result.model.predict_tails('brazil', 'accusation')
"""
head_id = self.triples_factory.entity_to_id[head_label]
relation_id = self.triples_factory.relation_to_id[relation_label]
batch = torch.tensor([[head_id, relation_id]], dtype=torch.long, device=self.device)
scores = self.predict_scores_all_tails(batch)
scores = scores[0, :].tolist()
rv = pd.DataFrame(
[
(entity_id, entity_label, scores[entity_id])
for entity_label, entity_id in self.triples_factory.entity_to_id.items()
],
columns=['tail_id', 'tail_label', 'score'],
).sort_values('score', ascending=False)
if add_novelties or remove_known:
rv['novel'] = rv['tail_id'].map(lambda tail_id: self._novel(head_id, relation_id, tail_id))
if remove_known:
rv = rv[rv['novel']]
del rv['novel']
return rv
def _novel(self, h, r, t) -> bool:
"""Return if the triple is novel with respect to the training triples."""
triple = torch.tensor(data=[[h, r, t]], dtype=torch.long, device=self.triples_factory.mapped_triples.device)
return (triple == self.triples_factory.mapped_triples).all(dim=1).any().item()
def predict_scores_all_relations(
self,
ht_batch: torch.LongTensor,
slice_size: Optional[int] = None,
) -> torch.FloatTensor:
"""Forward pass using middle (relation) prediction for obtaining scores of all possible relations.
This method calculates the score for all possible relations for each (head, tail) pair.
Additionally, the model is set to evaluation mode.
:param ht_batch: shape: (batch_size, 2), dtype: long
The indices of (head, tail) pairs.
:param slice_size: >0
The divisor for the scoring function when using slicing.
:return: shape: (batch_size, num_relations), dtype: float
For each h-t pair, the scores for all possible relations.
"""
# Enforce evaluation mode
self.eval()
if slice_size is None:
scores = self.score_r(ht_batch)
else:
scores = self.score_r(ht_batch, slice_size=slice_size)
if self.predict_with_sigmoid:
scores = torch.sigmoid(scores)
return scores
def predict_scores_all_heads(
self,
rt_batch: torch.LongTensor,
slice_size: Optional[int] = None,
) -> torch.FloatTensor:
"""Forward pass using left side (head) prediction for obtaining scores of all possible heads.
This method calculates the score for all possible heads for each (relation, tail) pair.
Additionally, the model is set to evaluation mode.
:param rt_batch: shape: (batch_size, 2), dtype: long
The indices of (relation, tail) pairs.
:param slice_size: >0
The divisor for the scoring function when using slicing.
:return: shape: (batch_size, num_entities), dtype: float
For each r-t pair, the scores for all possible heads.
"""
# Enforce evaluation mode
self.eval()
'''
In case the model was trained using inverse triples, the scoring of all heads is not handled by calculating
the scores for all heads based on a (relation, tail) pair, but instead all possible tails are calculated
for a (tail, inverse_relation) pair.
'''
if not self.triples_factory.create_inverse_triples:
if slice_size is None:
scores = self.score_h(rt_batch)
else:
scores = self.score_h(rt_batch, slice_size=slice_size)
if self.predict_with_sigmoid:
scores = torch.sigmoid(scores)
return scores
'''
The PyKEEN package handles _inverse relations_ by adding the number of relations to the index of the
_native relation_.
Example:
The triples/knowledge graph used to train the model contained 100 relations. Due to using inverse relations,
the model now has an additional 100 inverse relations. If the _native relation_ has the index 3, the index
of the _inverse relation_ is 4 (id of relation + 1).
'''
rt_batch_cloned = rt_batch.clone()
rt_batch_cloned.to(device=rt_batch.device)
# The number of relations stored in the triples factory includes the number of inverse relations
# Id of inverse relation: relation + 1
rt_batch_cloned[:, 0] = rt_batch_cloned[:, 0] + 1
# The score_t function requires (entity, relation) pairs instead of (relation, entity) pairs
rt_batch_cloned = rt_batch_cloned.flip(1)
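        # Toy illustration of the two steps above (ids are made up): for
        # rt_batch = [[2, 5]] (relation 2, tail 5), adding 1 yields [[3, 5]] (the inverse
        # relation), and .flip(1) yields [[5, 3]], i.e. a (head, relation) pair that
        # score_t can consume.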
if slice_size is None:
scores = self.score_t(rt_batch_cloned)
else:
scores = self.score_t(rt_batch_cloned, slice_size=slice_size)
if self.predict_with_sigmoid:
scores = torch.sigmoid(scores)
return scores
def post_parameter_update(self) -> None:
"""Has to be called after each parameter update."""
self.regularizer.reset()
def regularize_if_necessary(self, *tensors: torch.FloatTensor) -> None:
"""Update the regularizer's term given some tensors, if regularization is requested.
:param tensors: The tensors that should be passed to the regularizer to update its term.
"""
if self.training:
self.regularizer.update(*tensors)
def compute_mr_loss(
self,
positive_scores: torch.FloatTensor,
negative_scores: torch.FloatTensor,
) -> torch.FloatTensor:
"""Compute the mean ranking loss for the positive and negative scores.
:param positive_scores: shape: s, dtype: float
The scores for positive triples.
:param negative_scores: shape: s, dtype: float
The scores for negative triples.
:raises RuntimeError:
If the chosen loss function does not allow the calculation of margin ranking
:return: dtype: float, scalar
The margin ranking loss value.
"""
if not self.is_mr_loss:
raise RuntimeError(
'The chosen loss does not allow the calculation of margin ranking'
' losses. Please use the compute_loss method instead.'
)
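        # For torch.nn.MarginRankingLoss, a target of +1 means that the first input
        # (the positive scores) should be ranked higher than the second (the negative scores).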
y = torch.ones_like(negative_scores, device=self.device)
return self.loss(positive_scores, negative_scores, y) + self.regularizer.term
def compute_label_loss(
self,
predictions: torch.FloatTensor,
labels: torch.FloatTensor,
) -> torch.FloatTensor:
"""Compute the classification loss.
:param predictions: shape: s
The tensor containing predictions.
:param labels: shape: s
The tensor containing labels.
:return: dtype: float, scalar
The label loss value.
"""
return self._compute_loss(tensor_1=predictions, tensor_2=labels)
def compute_self_adversarial_negative_sampling_loss(
self,
positive_scores: torch.FloatTensor,
negative_scores: torch.FloatTensor,
) -> torch.FloatTensor:
"""Compute self adversarial negative sampling loss.
:param positive_scores: shape: s
The tensor containing the positive scores.
:param negative_scores: shape: s
Tensor containing the negative scores.
:raises RuntimeError:
If the chosen loss does not allow the calculation of self adversarial negative sampling losses.
:return: dtype: float, scalar
The loss value.
"""
if not self.is_nssa_loss:
raise RuntimeError(
'The chosen loss does not allow the calculation of self adversarial negative sampling'
                ' losses. Please use the compute_loss method instead.'
)
return self._compute_loss(tensor_1=positive_scores, tensor_2=negative_scores)
def _compute_loss(
self,
tensor_1: torch.FloatTensor,
tensor_2: torch.FloatTensor,
) -> torch.FloatTensor:
"""Compute the loss for functions requiring two separate tensors as input.
:param tensor_1: shape: s
The tensor containing predictions or positive scores.
:param tensor_2: shape: s
The tensor containing target values or the negative scores.
:raises RuntimeError:
If the chosen loss does not allow the calculation of margin label losses.
:return: dtype: float, scalar
The label loss value.
"""
if self.is_mr_loss:
raise RuntimeError(
'The chosen loss does not allow the calculation of margin label'
' losses. Please use the compute_mr_loss method instead.'
)
return self.loss(tensor_1, tensor_2) + self.regularizer.term
@abstractmethod
def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor:
"""Forward pass.
This method takes head, relation and tail of each triple and calculates the corresponding score.
:param hrt_batch: shape: (batch_size, 3), dtype: long
The indices of (head, relation, tail) triples.
:raises NotImplementedError:
If the method was not implemented for this class.
:return: shape: (batch_size, 1), dtype: float
The score for each triple.
"""
raise NotImplementedError
def score_t(self, hr_batch: torch.LongTensor) -> torch.FloatTensor:
"""Forward pass using right side (tail) prediction.
This method calculates the score for all possible tails for each (head, relation) pair.
:param hr_batch: shape: (batch_size, 2), dtype: long
The indices of (head, relation) pairs.
:return: shape: (batch_size, num_entities), dtype: float
For each h-r pair, the scores for all possible tails.
"""
logger.warning(
'Calculations will fall back to using the score_hrt method, since this model does not have a specific '
'score_t function. This might cause the calculations to take longer than necessary.'
)
# Extend the hr_batch such that each (h, r) pair is combined with all possible tails
hrt_batch = _extend_batch(batch=hr_batch, all_ids=list(self.triples_factory.entity_to_id.values()), dim=2)
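        # Toy illustration (assuming three entities with ids 0, 1 and 2): a single pair
        # hr_batch = [[0, 1]] is extended to hrt_batch = [[0, 1, 0], [0, 1, 1], [0, 1, 2]],
        # scored to shape (3, 1) by score_hrt, and reshaped below to (1, 3).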
# Calculate the scores for each (h, r, t) triple using the generic interaction function
expanded_scores = self.score_hrt(hrt_batch=hrt_batch)
# Reshape the scores to match the pre-defined output shape of the score_t function.
scores = expanded_scores.view(hr_batch.shape[0], -1)
return scores
def score_h(self, rt_batch: torch.LongTensor) -> torch.FloatTensor:
"""Forward pass using left side (head) prediction.
This method calculates the score for all possible heads for each (relation, tail) pair.
:param rt_batch: shape: (batch_size, 2), dtype: long
The indices of (relation, tail) pairs.
:return: shape: (batch_size, num_entities), dtype: float
For each r-t pair, the scores for all possible heads.
"""
logger.warning(
'Calculations will fall back to using the score_hrt method, since this model does not have a specific '
'score_h function. This might cause the calculations to take longer than necessary.'
)
# Extend the rt_batch such that each (r, t) pair is combined with all possible heads
hrt_batch = _extend_batch(batch=rt_batch, all_ids=list(self.triples_factory.entity_to_id.values()), dim=0)
# Calculate the scores for each (h, r, t) triple using the generic interaction function
expanded_scores = self.score_hrt(hrt_batch=hrt_batch)
# Reshape the scores to match the pre-defined output shape of the score_h function.
scores = expanded_scores.view(rt_batch.shape[0], -1)
return scores
def score_r(self, ht_batch: torch.LongTensor) -> torch.FloatTensor:
"""Forward pass using middle (relation) prediction.
This method calculates the score for all possible relations for each (head, tail) pair.
:param ht_batch: shape: (batch_size, 2), dtype: long
The indices of (head, tail) pairs.
:return: shape: (batch_size, num_relations), dtype: float
For each h-t pair, the scores for all possible relations.
"""
logger.warning(
'Calculations will fall back to using the score_hrt method, since this model does not have a specific '
'score_r function. This might cause the calculations to take longer than necessary.'
)
# Extend the ht_batch such that each (h, t) pair is combined with all possible relations
hrt_batch = _extend_batch(batch=ht_batch, all_ids=list(self.triples_factory.relation_to_id.values()), dim=1)
# Calculate the scores for each (h, r, t) triple using the generic interaction function
expanded_scores = self.score_hrt(hrt_batch=hrt_batch)
# Reshape the scores to match the pre-defined output shape of the score_r function.
scores = expanded_scores.view(ht_batch.shape[0], -1)
return scores
def get_grad_params(self) -> Iterable[nn.Parameter]:
"""Get the parameters that require gradients."""
# TODO: Why do we need that? The optimizer takes care of filtering the parameters.
return filter(lambda p: p.requires_grad, self.parameters())
def to_embeddingdb(self, session=None, use_tqdm: bool = False):
"""Upload to the embedding database.
:param session: Optional SQLAlchemy session
:param use_tqdm: Use :mod:`tqdm` progress bar?
:rtype: embeddingdb.sql.models.Collection
"""
from embeddingdb.sql.models import Embedding, Collection
if session is None:
from embeddingdb.sql.models import get_session
session = get_session()
collection = Collection(
package_name='pykeen',
package_version=get_version(),
dimensions=self.embedding_dim,
)
embeddings = self.entity_embeddings.weight.detach().cpu().numpy()
names = sorted(
self.triples_factory.entity_to_id,
key=self.triples_factory.entity_to_id.get,
)
if use_tqdm:
names = tqdm(names, desc='Building SQLAlchemy models')
for name, embedding in zip(names, embeddings):
embedding = Embedding(
collection=collection,
curie=name,
vector=list(embedding),
)
session.add(embedding)
session.add(collection)
session.commit()
return collection
@property
def num_parameter_bytes(self) -> int:
"""Calculate the number of bytes used for all parameters of the model."""
return sum(p.numel() * p.element_size() for p in self.parameters(recurse=True))
def save_state(self, path: str) -> None:
"""Save the state of the model.
:param path:
Path of the file where to store the state in.
"""
torch.save(self.state_dict(), path)
def load_state(self, path: str) -> None:
"""Load the state of the model.
:param path:
Path of the file where to load the state from.
"""
self.load_state_dict(torch.load(path, map_location=self.device))
class EntityEmbeddingModel(Model):
"""A base module for most KGE models that have one embedding for entities."""
def __init__(
self,
triples_factory: TriplesFactory,
embedding_dim: int = 50,
loss: Optional[Loss] = None,
predict_with_sigmoid: bool = False,
automatic_memory_optimization: Optional[bool] = None,
preferred_device: Optional[str] = None,
random_seed: Optional[int] = None,
regularizer: Optional[Regularizer] = None,
) -> None:
"""Initialize the entity embedding model.
:param embedding_dim:
The embedding dimensionality. Exact usages depends on the specific model subclass.
.. seealso:: Constructor of the base class :class:`pykeen.models.Model`
"""
super().__init__(
triples_factory=triples_factory,
automatic_memory_optimization=automatic_memory_optimization,
loss=loss,
preferred_device=preferred_device,
random_seed=random_seed,
regularizer=regularizer,
predict_with_sigmoid=predict_with_sigmoid,
)
self.embedding_dim = embedding_dim
self.entity_embeddings = get_embedding(
num_embeddings=triples_factory.num_entities,
embedding_dim=self.embedding_dim,
device=self.device,
)
class EntityRelationEmbeddingModel(EntityEmbeddingModel):
"""A base module for KGE models that have different embeddings for entities and relations."""
def __init__(
self,
triples_factory: TriplesFactory,
embedding_dim: int = 50,
relation_dim: Optional[int] = None,
loss: Optional[Loss] = None,
predict_with_sigmoid: bool = False,
automatic_memory_optimization: Optional[bool] = None,
preferred_device: Optional[str] = None,
random_seed: Optional[int] = None,
regularizer: Optional[Regularizer] = None,
) -> None:
"""Initialize the entity embedding model.
:param relation_dim:
The relation embedding dimensionality. If not given, defaults to same size as entity embedding
dimension.
.. seealso:: Constructor of the base class :class:`pykeen.models.Model`
.. seealso:: Constructor of the base class :class:`pykeen.models.EntityEmbeddingModel`
"""
super().__init__(
triples_factory=triples_factory,
automatic_memory_optimization=automatic_memory_optimization,
loss=loss,
preferred_device=preferred_device,
random_seed=random_seed,
regularizer=regularizer,
predict_with_sigmoid=predict_with_sigmoid,
embedding_dim=embedding_dim,
)
# Default for relation dimensionality
if relation_dim is None:
relation_dim = embedding_dim
self.relation_dim = relation_dim
self.relation_embeddings = get_embedding(
num_embeddings=triples_factory.num_relations,
embedding_dim=self.relation_dim,
device=self.device,
)
def _can_slice(fn) -> bool:
return 'slice_size' in inspect.getfullargspec(fn).args
class MultimodalModel(EntityRelationEmbeddingModel):
"""A multimodal KGE model."""
```
#### File: pykeen/stoppers/early_stopping.py
```python
import dataclasses
import logging
from dataclasses import dataclass
from typing import Any, Callable, List, Mapping, Optional, Union
import numpy
from .stopper import Stopper
from ..evaluation import Evaluator
from ..models.base import Model
from ..trackers import ResultTracker
from ..triples import TriplesFactory
from ..utils import fix_dataclass_init_docs
__all__ = [
'smaller_than_any_buffer_element',
'larger_than_any_buffer_element',
'EarlyStopper',
'StopperCallback',
]
logger = logging.getLogger(__name__)
def smaller_than_any_buffer_element(buffer: numpy.ndarray, result: float, delta: float = 0.) -> bool:
"""Decide if a result is better than at least one buffer element, where smaller is better.
:param buffer:
The last results to compare against (excluding the current result).
:param result:
The current result.
:param delta:
The minimum improvement.
:return:
Whether the result is at least delta better than at least one value in the buffer.
"""
worst_in_window = buffer.max()
baseline = worst_in_window - delta
return result < baseline
def larger_than_any_buffer_element(buffer: numpy.ndarray, result: float, delta: float = 0.) -> bool:
"""Decide if a result is better than at least one buffer element, where larger is better.
:param buffer:
The last results to compare against (excluding the current result).
:param result:
The current result.
:param delta:
The minimum improvement.
:return:
Whether the result is at least delta better than at least one value in the buffer.
"""
worst_in_window = buffer.min()
baseline = worst_in_window + delta
return result > baseline
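# Worked example with made-up numbers: for buffer = numpy.array([0.60, 0.62]), delta = 0.01
# and a larger-is-better metric, the baseline is min(buffer) + delta = 0.61, so a new result
# of 0.63 counts as an improvement while a result of 0.605 does not.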
StopperCallback = Callable[[Stopper, Union[int, float]], None]
@fix_dataclass_init_docs
@dataclass
class EarlyStopper(Stopper):
"""A harness for early stopping."""
#: The model
model: Model = dataclasses.field(repr=False)
#: The evaluator
evaluator: Evaluator
#: The triples to use for evaluation
evaluation_triples_factory: Optional[TriplesFactory]
#: Size of the evaluation batches
evaluation_batch_size: Optional[int] = None
#: Slice size of the evaluation batches
evaluation_slice_size: Optional[int] = None
#: The number of epochs after which the model is evaluated on validation set
frequency: int = 10
#: The number of iterations (one iteration can correspond to various epochs)
#: with no improvement after which training will be stopped.
patience: int = 2
#: The name of the metric to use
metric: str = 'hits_at_k'
#: The minimum improvement between two iterations
delta: float = 0.005
#: The metric results from all evaluations
results: List[float] = dataclasses.field(default_factory=list, repr=False)
#: A ring buffer to store the recent results
buffer: numpy.ndarray = dataclasses.field(init=False)
#: A counter for the ring buffer
number_evaluations: int = 0
#: Whether a larger value is better, or a smaller
larger_is_better: bool = True
#: The criterion. Set in the constructor based on larger_is_better
improvement_criterion: Callable[[numpy.ndarray, float, float], bool] = None
#: The result tracker
result_tracker: Optional[ResultTracker] = None
#: Callbacks when training gets continued
continue_callbacks: List[StopperCallback] = dataclasses.field(default_factory=list, repr=False)
#: Callbacks when training is stopped early
stopped_callbacks: List[StopperCallback] = dataclasses.field(default_factory=list, repr=False)
#: Did the stopper ever decide to stop?
stopped: bool = False
def __post_init__(self):
"""Run after initialization and check the metric is valid."""
# TODO: Fix this
# if all(f.name != self.metric for f in dataclasses.fields(self.evaluator.__class__)):
# raise ValueError(f'Invalid metric name: {self.metric}')
if self.evaluation_triples_factory is None:
raise ValueError('Must specify a validation_triples_factory or a dataset for using early stopping.')
if self.larger_is_better:
self.improvement_criterion = larger_than_any_buffer_element
else:
self.improvement_criterion = smaller_than_any_buffer_element
self.buffer = numpy.empty(shape=(self.patience,))
# Dummy result tracker
if self.result_tracker is None:
self.result_tracker = ResultTracker()
def should_evaluate(self, epoch: int) -> bool:
"""Decide if evaluation should be done based on the current epoch and the internal frequency."""
return 0 == ((epoch - 1) % self.frequency)
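    # With the default frequency of 10, evaluation is triggered at epochs 1, 11, 21, ...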
@property
def number_results(self) -> int:
"""Count the number of results stored in the early stopper."""
return len(self.results)
def should_stop(self) -> bool:
"""Evaluate on a metric and compare to past evaluations to decide if training should stop."""
# Evaluate
metric_results = self.evaluator.evaluate(
model=self.model,
mapped_triples=self.evaluation_triples_factory.mapped_triples,
use_tqdm=False,
batch_size=self.evaluation_batch_size,
slice_size=self.evaluation_slice_size,
)
# After the first evaluation pass the optimal batch and slice size is obtained and saved for re-use
self.evaluation_batch_size = self.evaluator.batch_size
self.evaluation_slice_size = self.evaluator.slice_size
self.result_tracker.log_metrics(
metrics=metric_results.to_flat_dict(),
step=self.number_evaluations,
prefix='validation',
)
result = metric_results.get_metric(self.metric)
# Only check if enough values are already collected
if self.number_evaluations >= self.patience:
# Stop if the result did not improve more than delta for patience epochs.
if not self.improvement_criterion(buffer=self.buffer, result=result, delta=self.delta):
logger.info(f'Stopping early after {self.number_evaluations} evaluations with {self.metric}={result}')
for stopped_callback in self.stopped_callbacks:
stopped_callback(self, result)
self.stopped = True
return True
# Update ring buffer
self.buffer[self.number_evaluations % self.patience] = result
self.number_evaluations += 1
# Append to history
self.results.append(result)
for continue_callback in self.continue_callbacks:
continue_callback(self, result)
return False
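    # Behaviour sketch with made-up numbers: with patience=2 and a larger-is-better metric,
    # the first two evaluations only fill the ring buffer; from the third evaluation on,
    # training stops as soon as the new result is not more than delta better than the worst
    # value currently held in the buffer.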
def get_summary_dict(self) -> Mapping[str, Any]:
"""Get a summary dict."""
return dict(
frequency=self.frequency,
patience=self.patience,
delta=self.delta,
metric=self.metric,
larger_is_better=self.larger_is_better,
results=self.results,
stopped=self.stopped,
)
```
#### File: pykeen/tests/test_leakage.py
```python
import itertools as itt
import unittest
import numpy as np
from pykeen.triples import TriplesFactory
from pykeen.triples.leakage import Sealant, get_candidate_inverse_relations
class TestLeakage(unittest.TestCase):
"""Tests for identifying inverse relationships and leakage."""
def test_count_inverse_frequencies(self):
"""Test counting inverse frequencies.
Note, for r3, there are three triples, but the inverse triples are only counted once.
"""
t = [
['a', 'r1', 'b'],
#
['b', 'r2', 'c'],
['c', 'r2_inverse', 'b'],
['d', 'r2', 'e'],
['e', 'r2_inverse', 'd'],
#
['g', 'r3', 'h'],
['h', 'r3_inverse', 'g'],
['i', 'r3', 'j'],
['k', 'r3', 'l'],
]
triples_factory = TriplesFactory(triples=np.array(t, dtype=np.str))
frequencies = get_candidate_inverse_relations(triples_factory, minimum_frequency=0.0, symmetric=False)
self.assertEqual(
{
('r2', 'r2_inverse'): (2 / 2),
('r2_inverse', 'r2'): (2 / 2),
('r3', 'r3_inverse'): (1 / 3),
('r3_inverse', 'r3'): (1 / 1),
},
dict(frequencies),
)
def test_find_leak_assymetric(self):
"""Test finding test leakages with an asymmetric metric."""
n = 100
test_relation, test_relation_inverse = 'r', 'r_inverse'
train_generated = list(itt.chain.from_iterable((
[
[str(i), test_relation, str(j + 1 + n)],
[str(j + 1 + n), test_relation_inverse, str(i)],
]
for i, j in zip(range(n), range(n))
)))
train_non_inverses = [
['a', 'fine', 'b'],
['b', 'fine', 'c'],
]
forwards_extras = [
['-1', test_relation, '-2'], # this one leaks!
['-3', test_relation, '-4'],
]
inverse_extras = [
['-5', test_relation_inverse, '-6'],
]
train = train_generated + train_non_inverses + forwards_extras + inverse_extras
test = [
['-2', test_relation_inverse, '-1'], # this one was leaked!
]
train_factory = TriplesFactory(triples=np.array(train, dtype=np.str))
test_factory = TriplesFactory(triples=np.array(test, dtype=np.str))
sealant = Sealant(train_factory, symmetric=False)
expected_forwards_frequency = n / (n + len(forwards_extras))
expected_inverse_frequency = n / (n + len(inverse_extras))
self.assertGreater(len(forwards_extras), len(inverse_extras))
        self.assertLess(expected_forwards_frequency, expected_inverse_frequency,
                        msg='Forwards frequency should be lower than inverse frequency')
self.assertEqual(
{
(test_relation, test_relation_inverse): expected_forwards_frequency,
(test_relation_inverse, test_relation): expected_inverse_frequency,
},
dict(sealant.candidate_inverse_relations),
)
self.assertIn(test_relation, sealant.inverses)
self.assertEqual(test_relation_inverse, sealant.inverses[test_relation])
self.assertIn(test_relation_inverse, sealant.inverses)
self.assertEqual(test_relation, sealant.inverses[test_relation_inverse])
self.assertIn(
test_relation_inverse,
sealant.inverse_relations_to_delete,
msg='The wrong relation was picked for deletion',
)
test_leaked = sealant.get_inverse_triples(test_factory)
self.assertEqual(1, len(test_leaked))
self.assertEqual(('-2', test_relation_inverse, '-1'), tuple(test_leaked[0]))
```
#### File: pykeen/tests/test_pipeline.py
```python
import unittest
from pykeen.models import TransE
from pykeen.models.base import Model
from pykeen.pipeline import PipelineResult, pipeline
from pykeen.regularizers import NoRegularizer, PowerSumRegularizer
class TestPipeline(unittest.TestCase):
"""Test the pipeline."""
def test_pipeline(self):
"""Test the pipeline on TransE and nations."""
pipeline_result = pipeline(
model='TransE',
dataset='nations',
)
self.assertIsInstance(pipeline_result, PipelineResult)
self.assertIsInstance(pipeline_result.model, Model)
self.assertIsInstance(pipeline_result.model.regularizer, NoRegularizer)
def test_specify_regularizer(self):
"""Test a pipeline that uses a regularizer."""
pipeline_result = pipeline(
model=TransE,
dataset='nations',
regularizer='powersum',
)
self.assertIsInstance(pipeline_result, PipelineResult)
self.assertIsInstance(pipeline_result.model, Model)
self.assertIsInstance(pipeline_result.model.regularizer, PowerSumRegularizer)
``` |
{
"source": "John-Backwell/Crypto_price_prediction",
"score": 3
} |
#### File: John-Backwell/Crypto_price_prediction/combined_models.py
```python
from math import comb
from sys import exc_info
import numpy as np
from numpy.core.numeric import count_nonzero
import pandas as pd
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from binary_classification_models import get_lasso_weights_scores
from binary_classification_models import find_strongest_weighted_features
from sklearn import linear_model
from sklearn.ensemble import RandomForestClassifier
from regression_models import plot_price_predictions
from sklearn import tree
def calculate_combined_model_accuracy(excel_file_1, excel_file_2, model_1,model_2,df_name,threshhold_func,func_arg2):
"""Combines a binary classification model with a regression model, and creates a combined decisions array
    that only holds entries where the two models agreed: i.e. the binary model predicted a price rise and the
    regression model predicted a price above the current close, or both predicted a price fall.
    Args:
        excel_file_1: path to the CSV used by the binary classification model (contains the "is higher lower" label)
        excel_file_2: path to the CSV used by the regression model (contains the "next close" target)
        model_1: sklearn classifier for the binary up/down prediction
        model_2: sklearn regressor for predicting the next closing price
        df_name: dataframe name passed through to get_lasso_weights_scores
        threshhold_func: feature-selection function passed to get_lasso_weights_scores
        func_arg2: second argument forwarded to threshhold_func
    Returns:
        combined decisions array, accuracy of the combined decisions, the percentage of total trading opportunities
        taken by the combined model, the binary classifier's predictions, and the regressor's predictions.
"""
indices = get_lasso_weights_scores(excel_file_1,df_name,threshhold_func,func_arg2)
df_1 = pd.read_csv(excel_file_1)
df_2 = pd.read_csv(excel_file_2)
X_1 = df_1[df_1.columns[10:]]
X_1 = X_1.drop(X_1.columns[indices], axis=1)
Y_1 = df_1["is higher lower"]
X_2 = df_2[df_2.columns[10:]]
Y_2 = df_2["next close"]
split_num = round(0.9*len(X_1))
X_train_1, X_test_1 = X_1[0:split_num], X_1[split_num:]
Y_train_1, Y_test_1 = Y_1[0:split_num], Y_1[split_num:]
model_1.fit(X_train_1,Y_train_1)
Y_predict_1 = model_1.predict(X_test_1)
#print(accuracy_score(Y_test_1, Y_predict_1)*100)
X_train_2, X_test_2 = X_2[0:split_num], X_2[split_num:]
Y_train_2, Y_test_2 = Y_2[0:split_num], Y_2[split_num:]
model_2.fit(X_train_2,Y_train_2)
Y_predict_2 = model_2.predict(X_test_2)
#print(mean_squared_error(Y_test_2, Y_predict_2,squared= False))
combined_decisions = []
close = df_2['close']
close = close[split_num:].tolist()
for index, value in enumerate(Y_predict_1):
if Y_predict_1[index] == 1 and Y_predict_2[index]>= close[index]:
combined_decisions.append((index,1))
elif Y_predict_1[index] == 0 and Y_predict_2[index]<= close[index]:
combined_decisions.append((index,0))
return combined_decisions, calculate_accuracy(combined_decisions,Y_test_1.tolist()), (len(combined_decisions)/len(Y_test_1)*100),Y_predict_1,Y_predict_2
def calculate_accuracy(combined_decisions, Y_test_1):
"""calculates percentage accuracy of a combined decisions array
Args:
combined_decisions: predicted values for combined model
Y_test_1: True values
Returns:
percentage accuracy of predictions
"""
total_decisions = len(combined_decisions)
correct_decisions = 0
for index, decision in combined_decisions:
if decision == Y_test_1[index]:
correct_decisions +=1
return correct_decisions / total_decisions * 100
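# Example with made-up values: combined_decisions = [(0, 1), (3, 0)] and Y_test_1 = [1, 0, 1, 1]
# gives one correct decision out of two, so the function returns 50.0.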
def simulate_trading_period_naive(decisions, excel_file, starting_cash, starting_BTC):
"""Comparing the percentage profitability of a naive trading strategy (in this case buying and
selling 1 BTC at a time) against just holding the starting BTC and cash throughout the test period.
Args:
decisions: decisions array
excel_file
starting_cash: how much cash the model starts with
starting_BTC: how much btc the model starts with
Returns:
the percentage profit of the model compared to holding, how much BTC it ends test period with, how much cash
"""
running_BTC = starting_BTC
running_cash = starting_cash
price_df = pd.read_csv(excel_file)
split_num = round(0.9*len(price_df))
close = price_df['close']
close = close[split_num:].tolist()
for index, value in decisions:
if value == 1:
running_BTC, running_cash = buy_BTC(running_BTC,running_cash,close[index],1)
elif value == 0:
running_BTC, running_cash = sell_BTC(running_BTC,running_cash,close[index],1)
percentage_profit = (((running_BTC * close[-1]) + running_cash) - ((starting_BTC * close[-1]) + starting_cash))/((starting_BTC * close[-1]) + starting_cash)
return percentage_profit * 100, running_BTC,running_cash
def buy_BTC(current_BTC, current_cash, price,num):
if current_cash >= price * num:
current_BTC = current_BTC + num
current_cash = current_cash - (num *price)
return current_BTC,current_cash
def sell_BTC(current_BTC, current_cash, price,num):
if current_BTC >= num:
current_BTC = current_BTC - num
current_cash = current_cash + (num*price)
return current_BTC, current_cash
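# Example with made-up values: buy_BTC(10, 1000, 300, 1) returns (11, 700), while
# sell_BTC(0, 1000, 300, 1) cannot sell and returns the position unchanged as (0, 1000).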
if __name__ == "__main__":
results = calculate_combined_model_accuracy("preprocessed_price_data_robust_features.csv","non_binary_all_features_minmax.csv",RandomForestClassifier(),tree.DecisionTreeRegressor(),"minMax",find_strongest_weighted_features,10)
print(results[1])
    print(results[2])  # percentage of trading opportunities taken
decisions = results[0]
print(simulate_trading_period_naive(decisions,"preprocessed_price_data_robust_features.csv",10000000,10))
single_model_decisions = enumerate(results[-2])
regressor_predictions = enumerate(results[-1])
    price_df = pd.read_csv("non_binary_all_features_minmax.csv")
close = price_df['close']
split_num = round(0.9*len(price_df))
close = close[split_num:].tolist()
regressor_decisions = []
for index, value in regressor_predictions:
if value >= close[index]:
regressor_decisions.append((index, 1))
else:
regressor_decisions.append((index,0))
print(simulate_trading_period_naive(single_model_decisions,"preprocessed_price_data_robust_features.csv",10000000,10))
print(simulate_trading_period_naive(regressor_decisions,"non_binary_all_features_minmax.csv",10000000,10))
```
#### File: John-Backwell/Crypto_price_prediction/regression_models.py
```python
from textwrap import indent
import numpy as np
import pandas as pd
from sklearn import neighbors
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn import kernel_ridge
from sklearn import tree
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from binary_classification_models import get_lasso_weights_scores
from binary_classification_models import find_strongest_weighted_features
def generate_models():
    # same format as in binary classification but different models used
model_names = ["KNeighborsRegressor","LinearRegression","Lasso",
"BayesRidge","KernalRidge","SGD","DecisionTree"]
models = [neighbors.KNeighborsRegressor(),linear_model.LinearRegression(),
linear_model.Lasso(alpha = 0.1,max_iter=100000),linear_model.BayesianRidge(),
kernel_ridge.KernelRidge(alpha=0.1),
linear_model.SGDRegressor(max_iter=100000),tree.DecisionTreeRegressor()]
models_and_names = zip(model_names, models)
return models_and_names
def run_models(X_train, X_test, Y_train, Y_test, models_list, dataframe_name):
results = []
for type, model in models_list:
model.fit(X_train, Y_train)
Y_predict = model.predict(X_test)
mse = mean_squared_error(Y_test, Y_predict,squared= False)
results.append([type, mse, dataframe_name])
plot_price_predictions(Y_predict, Y_test,type,type)
title = type + " focused"
plot_price_predictions(Y_predict[-100:],Y_test[-100:],type,title)
return results
def test_models_all_features(excel_file: str, dfs_name: str):
models_and_names = generate_models()
all_results = []
df = pd.read_csv(excel_file)
X = df[df.columns[10:]]
Y = df["next close"]
split_num = round(0.9*len(X)) # 90/10 split on train / test
X_train, X_test = X[0:split_num], X[split_num:]
Y_train, Y_test = Y[0:split_num], Y[split_num:]
all_results.append(run_models(X_train, X_test, Y_train, Y_test,
models_and_names, dfs_name))
return all_results
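# Note: each inner result row has the form [model_name, rmse, dataframe_name], where the
# rmse comes from mean_squared_error(..., squared=False) in run_models.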
def plot_price_predictions(Y_predict, Y_true,title, saveName):
x = [i for i in range(len(Y_predict))]
plt.plot(x,Y_predict,'r-',label = 'Predicted Price')
plt.plot(x,Y_true,'b-',label = 'Actual Price')
plt.xlabel("Time")
plt.ylabel("Price (USD)")
    plt.title(title)
plt.legend(loc="upper left")
plt.savefig(saveName)
plt.close()
if __name__ == "__main__":
dfs_name = "MinMax normalisation"
print(test_models_all_features("non_binary_all_features_minmax.csv",dfs_name))
``` |
{
"source": "johnbaillieul/Vision_based_Navigation_TTT",
"score": 2
} |
#### File: Vision_based_Navigation_TTT/nodes/optical_flow.py
```python
import rospy
from sensor_msgs.msg import Image
from Vision_based_Navigation_TTT.msg import OpticalFlow
from cv_bridge import CvBridgeError, CvBridge
import cv2
import sys
import numpy as np
################################################################################
# Extreme left and extreme right
x_init_el = 0
y_init_el = 0
y_end_el = 0
y_init_er = 0
x_end_er = 0
y_end_er = 0
# Left and right
x_end_l = 0
y_end_l = 0
y_init_l = 0
y_end_r = 0
x_init_r = 0
y_init_r = 0
def set_limit(img_width, img_height):
# Extreme left and extreme right
global x_init_el
global y_init_el
global y_end_el
x_init_el = 0
y_init_el = 0
y_end_el = int(7.5 * img_height / 12)
global x_end_er
global y_end_er
global y_init_er
x_end_er = int(img_width)
y_end_er = int(7.5 * img_height / 12)
y_init_er = 0
# Left and right
global x_end_l
global y_end_l
global y_init_l
x_end_l = int(4 * img_width / 12)
y_end_l = int(7 * img_height / 12)
y_init_l = int(1 * img_height / 12)
global x_init_r
global y_init_r
global y_end_r
x_init_r = int(8 * img_width / 12)
y_init_r = int(1 * img_height / 12)
y_end_r = int(7 * img_height / 12)
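# Worked example (assuming a 640x480 input image): the extreme-left ROI spans columns [0, 213)
# and rows [0, 300), the extreme-right ROI spans columns [426, 640) and rows [0, 300), and the
# central ROI spans columns [213, 426) and rows [40, 280).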
################################################################################
def draw_optical_flow_field(gray_image, points_old, points_new, flow, dt):
color_img = cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR)
    color_green = [0, 255, 0]  # green in BGR colorspace
linewidth = 3
print("Old points " + str(len(points_old)))
print("New points " + str(len(points_new)))
for i in range(len(points_new)):
        x_init = int(points_old[i, 0])
        y_init = int(points_old[i, 1])
        x_end = int(points_new[i, 0])
        y_end = int(points_new[i, 1])
        cv2.line(color_img, (x_init, y_init), (x_end, y_end), color_green, linewidth)
cv2.namedWindow('Optical Flow', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Optical Flow', (600, 600))
cv2.imshow('Optical Flow', color_img)
cv2.waitKey(10)
################################################################################
class OFCalculator:
def __init__(self, param):
########## IMPORTANT PARAMETERS: ##########
self.image_sub_name = "front/image_raw"
self.num_ext_features = 250
self.num_cen_features = 100
self.min_feat_threshold = 1.0
###########################################
# Initialize Image acquisition
self.bridge = CvBridge()
# Verbose
self.show = int(param)
# Previous Image
self.prev_image = None
# Previous key points
self.prev_kps = np.array([], dtype='f')
# Previous time instant
self.prev_time = 0
# Masks
self.roi_el = np.array([])
self.roi_er = np.array([])
self.roi_c = np.array([])
# Params for ShiTomasi corner detection
# self.feature_params = dict(maxCorners=600,
# qualityLevel=0.15,
# minDistance=0,
# blockSize=10)
        # Lucas-Kanade optical flow parameters
self.lk_params = dict(winSize=(15, 15),
maxLevel=3,
criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# ORB Detector/Descriptor initialization
self.orb_extreme = cv2.ORB_create(self.num_ext_features)
self.orb_center = cv2.ORB_create(self.num_cen_features)
# To enable the tracking
self.tracking = False
self.min_num_features = (2*self.num_ext_features + self.num_cen_features)/2
# Raw Image Subscriber Jackal PointGrey
self.image_sub = rospy.Subscriber(self.image_sub_name, Image, self.callback)
# Optical flow message Publisher
self.optic_flow_pub = rospy.Publisher("optical_flow", OpticalFlow, queue_size=10)
def callback(self, data):
rospy.loginfo(rospy.get_caller_id() + "ok1")
try:
curr_image = self.bridge.imgmsg_to_cv2(data, "mono8")
except CvBridgeError as e:
print(e)
return
# Get time stamp
secs = data.header.stamp.secs
nsecs = data.header.stamp.nsecs
curr_time = float(secs) + float(nsecs) * 1e-9
frequency = 1.0 / (curr_time - self.prev_time)
print("Frequency: " + str(frequency))
if self.prev_image is None:
self.prev_image = curr_image
self.prev_time = curr_time
set_limit(data.width, data.height)
# creating ROI
self.roi_el = curr_image[y_init_el:y_end_el, x_init_el:x_end_l]
self.roi_er = curr_image[y_init_er:y_end_er, x_init_r:x_end_er]
self.roi_c = curr_image[y_init_l:y_end_r, x_end_l:x_init_r]
keypoints_el = np.array([])
keypoints_el = np.append(keypoints_el, self.orb_extreme.detect(self.roi_el))
if (x_init_el != 0) or (y_init_el != 0):
for i in range(np.size(keypoints_el)):
tmp = list(keypoints_el[i].pt)
tmp[0] += x_init_el
tmp[1] += y_init_el
keypoints_el[i].pt = tuple(tmp)
keypoints_er = np.array([])
keypoints_er = np.append(keypoints_er, self.orb_extreme.detect(self.roi_er))
if (x_init_r != 0) or (y_init_er != 0):
for i in range(np.size(keypoints_er)):
tmp = list(keypoints_er[i].pt)
tmp[0] += x_init_r
tmp[1] += y_init_er
keypoints_er[i].pt = tuple(tmp)
keypoints_c = np.array([])
keypoints_c = np.append(keypoints_c, self.orb_center.detect(self.roi_c))
if (x_end_l != 0) or (y_init_l != 0):
for i in range(np.size(keypoints_c)):
tmp = list(keypoints_c[i].pt)
tmp[0] += x_end_l
tmp[1] += y_init_l
keypoints_c[i].pt = tuple(tmp)
keypoints = np.array([])
keypoints = np.append(keypoints, keypoints_el)
keypoints = np.append(keypoints, keypoints_er)
keypoints = np.append(keypoints, keypoints_c)
if np.size(keypoints) > 0:
p0 = cv2.KeyPoint_convert(keypoints)
self.prev_kps = np.float32(p0.reshape(-1, 1, 2))
self.tracking = True
else:
self.prev_kps = np.array([], dtype='f')
print("Features detected: 0")
return
if self.tracking:
tracked_features, status, error = cv2.calcOpticalFlowPyrLK(self.prev_image, curr_image,
self.prev_kps, None,
**self.lk_params)
# Select good points
good_kps_new = tracked_features[status == 1]
good_kps_old = self.prev_kps[status == 1]
print("len matches "+ str(len(good_kps_new)))
            if len(good_kps_new) < self.min_feat_threshold * self.prev_kps.shape[0]:  # compare tracked count to previous keypoint count
self.tracking = False
self.prev_kps = np.array([], dtype='f')
elif np.size(good_kps_new) <= self.min_num_features:
self.tracking = False
self.prev_kps = np.array([], dtype='f')
else:
# Get time between images
dt = curr_time - self.prev_time
# Calculate flow field
flow = good_kps_new - good_kps_old
# print("Flow: " + str(flow))
# Draw the flow field
if self.show == 1:
draw_optical_flow_field(curr_image, good_kps_old, good_kps_new, flow, dt)
# Publish Optical Flow data to rostopic
msg = OpticalFlow()
msg.header.stamp.secs = secs
msg.header.stamp.nsecs = nsecs
msg.height = data.height
msg.width = data.width
msg.dt = dt # in msec
msg.x = good_kps_old[:, 0]
msg.y = good_kps_old[:, 1]
msg.vx = flow[:, 0] / dt
msg.vy = flow[:, 1] / dt
self.optic_flow_pub.publish(msg)
self.prev_image = curr_image
self.prev_kps = np.float32(good_kps_new.reshape(-1, 1, 2))
self.prev_time = curr_time
else:
print("new keyframe!")
# creating ROI
self.roi_el = curr_image[y_init_el:y_end_el, x_init_el:x_end_l]
self.roi_er = curr_image[y_init_er:y_end_er, x_init_r:x_end_er]
self.roi_c = curr_image[y_init_l:y_end_r, x_end_l:x_init_r]
keypoints_el = np.array([])
keypoints_el = np.append(keypoints_el, self.orb_extreme.detect(self.roi_el))
if (x_init_el != 0) or (y_init_el != 0):
for i in range(np.size(keypoints_el)):
tmp = list(keypoints_el[i].pt)
tmp[0] += x_init_el
tmp[1] += y_init_el
keypoints_el[i].pt = tuple(tmp)
keypoints_er = np.array([])
keypoints_er = np.append(keypoints_er, self.orb_extreme.detect(self.roi_er))
if (x_init_r != 0) or (y_init_er != 0):
for i in range(np.size(keypoints_er)):
tmp = list(keypoints_er[i].pt)
tmp[0] += x_init_r
tmp[1] += y_init_er
keypoints_er[i].pt = tuple(tmp)
keypoints_c = np.array([])
keypoints_c = np.append(keypoints_c, self.orb_center.detect(self.roi_c))
if (x_end_l != 0) or (y_init_l != 0):
for i in range(np.size(keypoints_c)):
tmp = list(keypoints_c[i].pt)
tmp[0] += x_end_l
tmp[1] += y_init_l
keypoints_c[i].pt = tuple(tmp)
keypoints = np.array([])
keypoints = np.append(keypoints, keypoints_el)
keypoints = np.append(keypoints, keypoints_er)
keypoints = np.append(keypoints, keypoints_c)
if np.size(keypoints) > 0:
p0 = cv2.KeyPoint_convert(keypoints)
self.prev_kps = np.float32(p0.reshape(-1, 1, 2))
self.tracking = True
else:
self.prev_kps = np.array([], dtype='f')
print("Features detected: 0")
self.prev_image = curr_image
self.prev_time = curr_time
def optical_flow(param):
rospy.init_node("optical_flow", anonymous=False)
OFCalculator(param)
rospy.spin()
if __name__ == '__main__':
if len(sys.argv) < 2:
parameter = str(0)
print("Parameter = 1, verbose mode")
else:
parameter = sys.argv[1]
optical_flow(parameter)
```
#### File: Vision_based_Navigation_TTT/scripts/Bernoulli_Textures.py
```python
import random
import numpy as np
import cv2
def create_image(p):
# let's create a heigth x width matrix with all pixels in black color
heigth = 1080
width = 1920
diameter = 50
x_correction = int(0.7 * diameter / 2)
y_correction = int(0.7 * diameter / 2)
img = np.ones((heigth, width, 3), np.uint8)*255
hcount = int(diameter/2)
while hcount < (heigth-3):
wcount = int(diameter/2)
while wcount < (width-3):
if random.uniform(0, 1) >= (1-p):
shape = random.uniform(0, 3)
if shape < 1.0:
cv2.circle(img, (wcount, hcount), int(diameter/2), [0, 0, 0], -1)
elif shape < 2.0:
cv2.rectangle(img, (wcount - x_correction, hcount - y_correction), (wcount + x_correction, hcount +
y_correction), [0, 0, 0], -1)
else:
pt1 = (wcount, hcount-y_correction)
pt2 = (wcount-x_correction, hcount+y_correction)
pt3 = (wcount+x_correction, hcount+y_correction)
triangle_cnt = np.array([pt1, pt2, pt3])
cv2.drawContours(img, [triangle_cnt], 0, (0, 0, 0), -1)
# img[hcount, wcount] = [255, 255, 255]
wcount += diameter
hcount += diameter
p = int(p * 100)
# save our image as a "jpg" image
cv2.imwrite("bernoulli" + str(p) + "M" + ".png", img)
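# Rough expected-count sketch: the loops above visit a 22 x 38 grid of cell centres (836 cells
# for the 1080x1920 canvas with diameter 50), and each cell receives a shape with probability p,
# so create_image(0.08) draws roughly 0.08 * 836, i.e. about 67 shapes.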
if __name__ == '__main__':
create_image(0.08)
``` |
{
"source": "johnbanq/minecraft-symbol-analyzer",
"score": 3
} |
#### File: johnbanq/minecraft-symbol-analyzer/analyzer_viewer.py
```python
import pickle
from pprint import pprint
from my_analyzer.analysis_result import AnalysisResult, AnalysisClass, FunctionDecl, VariableDecl, SimpleSymbolDecl
def view_my_analyzed(analyzed: AnalysisResult):
for var in analyzed.variables:
print(var.class_name+' '+var.symbol[2])
pprint("total of %i variables"%len(analyzed.variables))
def main():
with open("data/analyzed.pk", 'rb') as af:
result = pickle.load(af)
view_my_analyzed(result)
if __name__ == '__main__':
main()
``` |
{
"source": "johnbanq/modl",
"score": 2
} |
#### File: modl/decomposition/image.py
```python
from math import sqrt
import time
from modl.feature_extraction.image import LazyCleanPatchExtractor
from modl.input_data.image import scale_patches
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state, gen_batches
from .dict_fact import DictFact
class ImageDictFact(BaseEstimator):
methods = {'masked': {'G_agg': 'masked', 'Dx_agg': 'masked'},
'dictionary only': {'G_agg': 'full', 'Dx_agg': 'full'},
'gram': {'G_agg': 'masked', 'Dx_agg': 'masked'},
# 1st epoch parameters
'average': {'G_agg': 'average', 'Dx_agg': 'average'},
'reducing ratio': {'G_agg': 'masked', 'Dx_agg': 'masked'}}
settings = {'dictionary learning': {'comp_l1_ratio': 0,
'code_l1_ratio': 1,
'comp_pos': False,
'code_pos': False,
'with_std': True,
'with_mean': True},
'NMF': {'comp_l1_ratio': 0,
'code_l1_ratio': 1,
'comp_pos': True,
'code_pos': True,
'with_std': True,
'with_mean': False}}
def __init__(self, method='masked',
setting='dictionary learning',
patch_size=(8, 8),
batch_size=100,
buffer_size=None,
step_size=1e-3,
n_components=50,
alpha=0.1,
learning_rate=0.92,
reduction=10,
n_epochs=1,
random_state=None,
callback=None,
max_patches=None,
verbose=0,
n_threads=1,
):
self.n_threads = n_threads
self.step_size = step_size
self.verbose = verbose
self.callback = callback
self.random_state = random_state
self.n_epochs = n_epochs
self.reduction = reduction
self.learning_rate = learning_rate
self.alpha = alpha
self.n_components = n_components
self.batch_size = batch_size
self.method = method
self.setting = setting
self.patch_size = patch_size
self.buffer_size = buffer_size
self.max_patches = max_patches
def fit(self, image, y=None):
self.random_state = check_random_state(self.random_state)
if self.method != 'sgd':
method = ImageDictFact.methods[self.method]
G_agg = method['G_agg']
Dx_agg = method['Dx_agg']
reduction = self.reduction
optimizer = 'variational'
else:
optimizer = 'sgd'
reduction = 1
G_agg = 'full'
Dx_agg = 'full'
setting = ImageDictFact.settings[self.setting]
comp_l1_ratio = setting['comp_l1_ratio']
code_l1_ratio = setting['code_l1_ratio']
comp_pos = setting['comp_pos']
code_pos = setting['code_pos']
with_std = setting['with_std']
with_mean = setting['with_mean']
if self.buffer_size is None:
buffer_size = self.batch_size * 10
else:
buffer_size = self.buffer_size
self.dict_fact_ = DictFact(n_epochs=self.n_epochs,
random_state=self.random_state,
n_components=self.n_components,
comp_l1_ratio=comp_l1_ratio,
learning_rate=self.learning_rate,
comp_pos=comp_pos,
optimizer=optimizer,
step_size=self.step_size,
code_pos=code_pos,
batch_size=self.batch_size,
G_agg=G_agg,
Dx_agg=Dx_agg,
reduction=reduction,
code_alpha=self.alpha,
code_l1_ratio=code_l1_ratio,
tol=1e-2,
callback=self._callback,
verbose=self.verbose,
n_threads=self.n_threads)
if self.verbose:
print('Preparing patch extraction')
patch_extractor = LazyCleanPatchExtractor(
patch_size=self.patch_size, max_patches=self.max_patches,
random_state=self.random_state)
patch_extractor.fit(image)
n_patches = patch_extractor.n_patches_
self.patch_shape_ = patch_extractor.patch_shape_
if self.verbose:
print('Fitting dictionary')
init_patches = patch_extractor.partial_transform(batch=
self.n_components)
init_patches = _flatten_patches(init_patches, with_std=with_std,
with_mean=with_mean, copy=False)
self.dict_fact_.prepare(n_samples=n_patches, X=init_patches)
for i in range(self.n_epochs):
if self.verbose:
print('Epoch %i' % (i + 1))
if i >= 1:
if self.verbose:
print('Shuffling dataset')
permutation = self.dict_fact_.shuffle()
patch_extractor.shuffle(permutation)
buffers = gen_batches(n_patches, buffer_size)
if self.method == 'gram' and i == 4:
self.dict_fact_.set_params(G_agg='full', Dx_agg='average')
if self.method == 'reducing ratio':
reduction = 1 + (self.reduction - 1) / sqrt(i + 1)
self.dict_fact_.set_params(reduction=reduction)
for j, buffer in enumerate(buffers):
buffer_size = buffer.stop - buffer.start
patches = patch_extractor.partial_transform(batch=buffer)
patches = _flatten_patches(patches, with_mean=with_mean,
with_std=with_std, copy=False)
self.dict_fact_.partial_fit(patches, buffer)
return self
def transform(self, patches):
with_std = ImageDictFact.settings[self.setting]['with_std']
with_mean = ImageDictFact.settings[self.setting]['with_mean']
patches = _flatten_patches(patches, with_mean=with_mean,
with_std=with_std, copy=True)
return self.dict_fact_.transform(patches)
def score(self, patches):
with_std = ImageDictFact.settings[self.setting]['with_std']
with_mean = ImageDictFact.settings[self.setting]['with_mean']
patches = _flatten_patches(patches, with_mean=with_mean,
with_std=with_std, copy=True)
return self.dict_fact_.score(patches)
@property
def n_iter_(self):
# Property for callback purpose
return self.dict_fact_.n_iter_
@property
def time_(self):
# Property for callback purpose
return self.dict_fact_.time_
@property
def components_(self):
# Property for callback purpose
components_shape = (self.n_components,) + self.patch_shape_
return self.dict_fact_.components_.reshape(
components_shape)
def _callback(self, *args):
if self.callback is not None:
self.callback(self)
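    # Minimal usage sketch (assuming `image` is a 2-D grayscale or 3-D colour numpy array;
    # the variable name is only an illustration):
    #
    #     dict_fact = ImageDictFact(n_components=50, patch_size=(8, 8), n_epochs=1)
    #     dict_fact.fit(image)
    #     dictionary = dict_fact.components_  # shape: (n_components,) + patch_shape_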
def _flatten_patches(patches, with_mean=True,
with_std=True, copy=False):
n_patches = patches.shape[0]
patches = scale_patches(patches, with_mean=with_mean,
with_std=with_std, copy=copy)
patches = patches.reshape((n_patches, -1))
return patches
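# Shape sketch: with the default patch_size of (8, 8) on an RGB image, a batch of patches of
# shape (n_patches, 8, 8, 3) is scaled patch-wise and flattened to (n_patches, 192).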
class DictionaryScorer:
def __init__(self, test_data, info=None):
self.start_time = time.clock()
self.test_data = test_data
self.test_time = 0
self.time = []
self.cpu_time = []
self.score = []
self.iter = []
self.info = info
def __call__(self, dict_fact):
test_time = time.clock()
score = dict_fact.score(self.test_data)
self.test_time += time.clock() - test_time
this_time = time.clock() - self.start_time - self.test_time
self.time.append(this_time)
self.score.append(score)
self.iter.append(dict_fact.n_iter_)
self.cpu_time.append(dict_fact.time_)
if self.info is not None:
self.info['time'] = self.cpu_time
self.info['score'] = self.score
self.info['iter'] = self.iter
```
#### File: modl/plotting/image.py
```python
import numpy as np
import matplotlib.cm as cm
from math import ceil, sqrt
def plot_patches(fig, patches):
if patches.ndim == 4:
channel_step = patches.shape[3] // 3
# patches = np.concatenate([np.sum(patches[:, :, :, i * channel_step:
# (i + 1) * channel_step],
# axis=3)[..., np.newaxis]
# for i in range(3)], axis=3)
if patches.shape[3] == 1:
patches = patches[:, :, :, 0]
elif patches.shape[3] >= 3:
patches = patches[:, :, :, :3]
patches = np.rollaxis(patches, 3, 2).reshape(
(patches.shape[0], patches.shape[1], patches.shape[2] * 3))
patches = patches[:256]
    side_size = ceil(sqrt(patches.shape[0]))
for i, patch in enumerate(patches):
ax = fig.add_subplot(side_size, side_size, i + 1)
ax.imshow(
patch,
interpolation='nearest')
ax.set_xticks(())
ax.set_yticks(())
fig.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
return fig
def plot_single_patch(ax, patch, x=3, y=3, positive=True, average=False):
if not positive:
patch -= patch.mean(axis=(0, 1))[np.newaxis, np.newaxis, :]
n_channel = x * y
patch = patch.copy()
std = np.sqrt((patch ** 2).sum(axis=(0, 1)))
    std[std == 0] = 1  # avoid dividing by zero for all-zero channels
patch /= std[np.newaxis, np.newaxis, :]
channel_step = patch.shape[2] // n_channel
if average:
patch = np.concatenate([np.sum(patch[:, :, i * channel_step:
(i + 1) * channel_step],
axis=2)[..., np.newaxis]
for i in range(n_channel)], axis=2)
patch = patch[:, :, np.linspace(0, patch.shape[2] - 1, n_channel).astype('int')]
squares_patch = np.zeros(
(x * patch.shape[0], y * patch.shape[1]))
idx = 0
for i in range(x):
for j in range(y):
if idx < patch.shape[2]:
squares_patch[i * patch.shape[0]:(i + 1) * patch.shape[0],
j * patch.shape[1]:(j + 1) * patch.shape[1]] = patch[:, :, idx]
idx += 1
ax.imshow(squares_patch,
interpolation='nearest')
for j in range(1, y):
ax.axvline(j * patch.shape[1], c='white', linewidth=1)
for i in range(1, x):
ax.axhline(i * patch.shape[0] - 1, c='white', linewidth=1)
ax.set_xticks(())
ax.set_yticks(())
for side in ["top", "right", "left", "bottom"]:
ax.spines[side].set_visible(False)
return ax
```
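A short usage sketch for `plot_patches`: it expects an array shaped `(n_patches, height, width[, channels])` and tiles at most the first 256 patches on a square grid. The random data below is purely illustrative, and the import assumes the module path matches the file header above.
```python
import numpy as np
import matplotlib.pyplot as plt
from modl.plotting.image import plot_patches

# Illustrative data: 64 RGB patches of size 8x8.
patches = np.random.rand(64, 8, 8, 3)

fig = plt.figure(figsize=(6, 6))
plot_patches(fig, patches)
plt.show()
```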
#### File: math/tests/test_enet.py
```python
import numpy as np
from numpy import sqrt
from numpy.testing import assert_array_almost_equal, assert_almost_equal
from sklearn.utils import check_random_state
from modl.utils.math.enet import enet_norm, enet_projection, enet_scale
def _enet_norm_for_projection(v, gamma):
return np.sum(v * (1 + gamma / 2 * v))
def enet_norm_slow(v, l1_ratio=0.1):
if l1_ratio == 0:
return sqrt(np.sum(v ** 2))
b_abs = np.abs(v)
return np.sum(b_abs * (l1_ratio + b_abs * (1 - l1_ratio)))
def enet_projection_slow(v, radius=1, l1_ratio=0.1):
"""Projection on the elastic-net ball
**References:**
<NAME>, <NAME>, <NAME>, <NAME>, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
"""
random_state = check_random_state(None)
if l1_ratio == 0:
return v / sqrt(np.sum(v ** 2))
gamma = 2 / l1_ratio - 2
radius /= l1_ratio
m = v.shape[0]
b_abs = np.abs(v)
norm = _enet_norm_for_projection(b_abs, gamma)
if norm <= radius:
return v
else:
s = 0
rho = 0
U = np.arange(m)
mask = np.ones(m, dtype=np.bool)
mask_non_zero = mask.nonzero()[0]
while mask_non_zero.shape[0] != 0:
k = random_state.randint(mask_non_zero.shape[0])
idx = mask_non_zero[k]
k = U[idx]
sel = b_abs < b_abs[k]
G = U[~sel * mask]
d_rho = G.shape[0]
d_s = _enet_norm_for_projection(b_abs[G], gamma)
if s + d_s - (rho + d_rho) * (1 + gamma / 2 * b_abs[k]) * b_abs[k]\
< radius * (1 + gamma * b_abs[k]) ** 2:
s += d_s
rho += d_rho
mask *= sel
else:
mask *= ~sel
mask[idx] = False
mask_non_zero = mask.nonzero()[0]
if gamma != 0:
a = gamma ** 2 * radius + gamma * rho * 0.5
b_ = 2 * radius * gamma + rho
c = radius - s
l = (-b_ + np.sqrt(b_ ** 2 - 4 * a * c)) / (2*a)
else:
l = (s - radius) / rho
b_sign = np.sign(v)
b_sign[b_sign == 0] = 1
return b_sign * np.maximum(np.zeros_like(b_abs), b_abs - l)\
/ (1 + l * gamma)
def test_slow_enet_norm():
norms = np.zeros(10)
norms2 = np.zeros(10)
random_state = check_random_state(0)
for i in range(10):
a = random_state.randn(10000)
norms[i] = enet_norm_slow(a, l1_ratio=0.1)
norms2[i] = (1 - 0.1) * (a ** 2).sum() + 0.1 * np.abs(a).sum()
assert_array_almost_equal(norms, norms2)
def test_slow_enet_projection_norm():
norms = np.zeros(10)
random_state = check_random_state(0)
for i in range(10):
a = random_state.randn(10000)
b = np.asarray(enet_projection_slow(a, radius=1, l1_ratio=0.1))
norms[i] = enet_norm_slow(b, l1_ratio=0.1)
assert_array_almost_equal(norms, np.ones(10))
def test_enet_projection_norm():
random_state = check_random_state(0)
norms = np.zeros(10)
for i in range(10):
a = random_state.randn(20000)
a /= np.sqrt(np.sum(a ** 2))
c = np.zeros(20000)
enet_projection(a, c, 1, 0.15)
norms[i] = enet_norm(c, l1_ratio=0.15)
assert_array_almost_equal(norms, np.ones(10))
def test_enet_projection():
c = np.empty((10, 100))
b = np.empty((10, 100))
random_state = check_random_state(0)
for i in range(10):
a = random_state.randn(100)
b[i, :] = enet_projection_slow(a, radius=1, l1_ratio=0.1)
enet_projection(a, c[i], radius=1, l1_ratio=0.1)
assert_array_almost_equal(c, b, 4)
def test_fast_enet_l2_ball():
random_state = check_random_state(0)
norms = np.zeros(10)
for i in range(10):
a = random_state.randn(100)
c = np.zeros(100)
enet_projection(a, c, 2, 0.0)
norms[i] = np.sqrt(np.sum(c ** 2))
assert_array_almost_equal(norms, np.ones(10) * sqrt(2))
for i in range(10):
a = random_state.randn(100)
a /= np.sqrt(np.sum(a ** 2)) * 10
enet_projection(a, c, 2, 0.0)
assert_array_almost_equal(a, c)
def test_enet_l1_ball():
random_state = check_random_state(0)
norms = np.zeros(10)
for i in range(10):
a = random_state.randn(100)
b = np.zeros(100)
enet_projection(a, b, 1, 1.0)
norms[i] = np.sum(np.abs(b))
assert_array_almost_equal(norms, np.ones(10))
def test_enet_scale():
random_state = check_random_state(0)
a = random_state.randn(100)
for r in [1., 2.]:
for l1_ratio in [0., 0.5, 1.]:
enet_scale(a, l1_ratio, r)
norm = enet_norm(a, l1_ratio)
assert_almost_equal(norm, r)
```
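For reference, the quantity these tests exercise is the elastic-net penalty $\mathrm{enet}_\alpha(v) = \alpha \lVert v \rVert_1 + (1-\alpha) \lVert v \rVert_2^2$ with $\alpha$ = `l1_ratio`; `enet_projection` maps a vector onto the ball $\{v : \mathrm{enet}_\alpha(v) \le \text{radius}\}$, which is why the tests check that projected vectors come out with elastic-net norm 1. A tiny stand-alone check of the identity used in `test_slow_enet_norm`:
```python
import numpy as np

v = np.array([1.0, -2.0, 0.5])
l1_ratio = 0.1

# enet_norm_slow computes sum(|v| * (l1_ratio + |v| * (1 - l1_ratio)))
lhs = np.sum(np.abs(v) * (l1_ratio + np.abs(v) * (1 - l1_ratio)))
rhs = l1_ratio * np.abs(v).sum() + (1 - l1_ratio) * (v ** 2).sum()
assert np.isclose(lhs, rhs)
```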
#### File: recsys/tests/test_cross_validation.py
```python
import scipy.sparse as sp
from numpy.testing import assert_equal
from modl.utils.recsys.cross_validation import ShuffleSplit
def test_shuffle_split():
X = [[3, 0, 0, 1],
[2, 0, 5, 0],
[0, 4, 3, 0],
[0, 0, 2, 0]]
X = sp.coo_matrix(X)
cv = ShuffleSplit(n_iter=10)
for X_tr, X_te in cv.split(X):
assert_equal(X.shape, X_tr.shape)
assert_equal(X.shape, X_te.shape)
assert_equal(X.data.shape[0],
X_tr.data.shape[0] + X_te.data.shape[0])
```
#### File: modl/utils/system.py
```python
import os
def get_cache_dirs(cache_dir=None):
""" Returns the directories in which modl stores its cache
This is typically useful for the end-user to check where the cache is stored.
Parameters
----------
cache_dir: string, optional
Path of the cache directory. Used to force cache storage in a specified
location. Default: None
Returns
-------
paths: list of strings
        Paths of the cache directories.
    Notes
    -----
    This function retrieves the cache directories using the following
    priority :
    1. the keyword argument cache_dir
    2. the global environment variable SHARED_CACHE
    3. the user environment variable CACHE
    4. the ~/cache folder in the user home directory
"""
paths = []
# Check data_dir which force storage in a specific location
if cache_dir is not None:
paths.extend(cache_dir.split(os.pathsep))
# If data_dir has not been specified, then we crawl default locations
if cache_dir is None:
global_data = os.getenv('SHARED_CACHE')
if global_data is not None:
paths.extend(global_data.split(os.pathsep))
local_data = os.getenv('CACHE')
if local_data is not None:
paths.extend(local_data.split(os.pathsep))
paths.append(os.path.expanduser('~/cache'))
return paths
def get_output_dir(data_dir=None):
""" Returns the directories in which cogspaces store results.
Parameters
----------
data_dir: string, optional
Path of the data directory. Used to force data storage in a specified
location. Default: None
Returns
-------
    path: string
        Path of the output directory.
    Notes
    -----
    This function retrieves the output directory using the following
    priority :
    1. the keyword argument data_dir
    2. the environment variable MODL_OUTPUT
    3. ~/output/modl in the user home folder
"""
paths = []
# Check data_dir which force storage in a specific location
if data_dir is not None:
return str(data_dir)
else:
# If data_dir has not been specified, then we crawl default locations
output_dir = os.getenv('MODL_OUTPUT')
if output_dir is not None:
return str(output_dir)
return os.path.expanduser('~/output/modl')
``` |
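A quick sketch of how the two helpers resolve their paths; the environment variables and directories below are only examples, and the import assumes the module path matches the file header above.
```python
import os
from modl.utils.system import get_cache_dirs, get_output_dir

# Forcing a location short-circuits the environment lookup.
print(get_cache_dirs(cache_dir="/tmp/modl_cache"))   # ['/tmp/modl_cache']

# Otherwise SHARED_CACHE, then CACHE, then ~/cache are consulted in order.
os.environ["SHARED_CACHE"] = "/data/shared_cache"
print(get_cache_dirs())

# MODL_OUTPUT wins over the ~/output/modl default.
print(get_output_dir())
```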
{
"source": "johnbanq/psbody-mesh-build-script",
"score": 3
} |
#### File: psbody-mesh-build-script/install_psbody/infra.py
```python
import argparse
import contextlib
import logging
import os
import re
import shutil
import stat
import subprocess
import sys
from logging import getLogger
from typing import List
# global variables #
log = getLogger("install_script")
do_not_cleanup = False
yes_everything = False
# functions #
def get_do_not_cleanup():
# TODO: do it the right way
return do_not_cleanup
def install_script_main(
package_name,
prepare_environment,
execute_build,
validate_build
):
"""
entry point(main function) of the entire build script.
this function accepts:
    * a context manager function: prepare_environment
* a function: execute_build
* a function: validate_build
ideally, you should:
* put git clone & dependency install in prepare_environment
* put build logic in execute_build
* put tests in validate_build
the build process is structured as follows:
* detect conda environment
* run prepare_environment()
* re-activate conda environment to refresh environment variables
* run execute_build()
* run cleanup in prepare_environment()
* re-activate conda environment to refresh environment variables
* run validate_build()
    note: the re-activation sequence gives conda a chance to refresh all environment variables
    note: it is done via a trampoline script, and the re-activated stage is signalled
    note: through the internal --environment flag
"""
global do_not_cleanup, yes_everything
# parse arguments #
parser = argparse.ArgumentParser(description='%s installation script' % package_name)
parser.add_argument(
'--no-cleanup', action='store_true',
help='do not cleanup the dependencies & files when exiting, helpful for debugging'
)
parser.add_argument(
'--verbose', action='store_true',
help='print debug log along the way'
)
parser.add_argument(
'--yes', action='store_true',
help='say yes to all options in the install'
)
parser.add_argument(
'--environment', type=str, default="prepare_environment",
help='INTERNAL FLAG: DO NOT TOUCH, used to indicate reactivated environment'
)
args = parser.parse_args()
# apply arguments #
do_not_cleanup = args.no_cleanup
yes_everything = args.yes
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
script_path = os.path.abspath(__file__)
segments = os.path.normpath(script_path).split(os.path.sep)
has_pyz = any([s.endswith(".pyz") for s in segments])
if has_pyz:
# because os.path treats C:// as 'C:', '', concat segments will lead to wrong path!
while not segments[-1].endswith(".pyz"):
script_path = os.path.dirname(script_path)
segments.pop()
# main #
if args.environment == "prepare_environment":
env_name = detect_conda_environment()
log.debug("setting up prepare_environment")
with prepare_environment():
run_with_reactivated_environment(
env_name, [
"python", script_path,
*sys.argv[1:], "--environment", "execute_build"
],
cleanup=not do_not_cleanup
)
log.debug("tearing down prepare_environment")
run_with_reactivated_environment(
env_name, [
"python", script_path,
*sys.argv[1:], "--environment", "validate_build"
],
cleanup=not do_not_cleanup
)
elif args.environment == "execute_build":
log.debug("running execute_build")
execute_build()
elif args.environment == "validate_build":
log.debug("running validate_build")
validate_build()
def detect_conda_environment():
"""
detect the current conda environment, and return its name
"""
log.info("detecting conda environment")
env_name = parse_conda_info("active environment")
log.debug("detected environment name: %s", env_name)
if env_name == "None":
log.fatal("you are not in a conda environment! Try conda activate base to enter the base environment!")
raise RuntimeError("cannot run the script outside a conda environment!")
else:
log.info("detected environment: %s", env_name)
return env_name
def detect_conda_activate_script():
"""
detect the path to conda activate script, used in trampoline to activate environment
"""
log.debug("detecting conda activation script location")
base_folder = parse_conda_info("base environment")
if base_folder.endswith(")"):
base_folder = base_folder[:base_folder.rfind("(")]
base_folder = base_folder.strip()
if os.name != "nt":
script = os.path.join(base_folder, "bin", "activate")
else:
script = os.path.join(base_folder, "Scripts", "activate.bat")
log.debug("detected: %s", script)
return script
def parse_conda_info(key: str):
"""
parse value of a key in the output of conda info
:rtype: str
"""
try:
result = run(["conda", "info"], stdout=subprocess.PIPE)
except subprocess.CalledProcessError as e:
log.fatal("could not run conda info, do you have conda installed?")
raise e
lines = result.stdout.decode(encoding=sys.getdefaultencoding()).splitlines()
lines = [re.match("%s +: +(?P<value>.*)" % key, line.strip()) for line in lines]
lines = [line for line in lines if line]
assert len(lines) == 1, "exactly 1 %s line expected, but got %i !" % (key, len(lines))
value = lines[0].group("value").strip()
return value
TRAMPOLINE_SCRIPT_WINDOWS = """\
@echo off
@call %(activate_script_path)s %(environment)s
if errorlevel 1 exit 1
%(command)s
if errorlevel 1 exit 1
"""
TRAMPOLINE_SCRIPT_BASH = """\
#!/usr/bin/env bash
source %(activate_script_path)s %(environment)s
%(command)s
"""
def run_with_reactivated_environment(env_name: str, commands: List[str], cleanup=True):
"""
run with re-activated conda environment
"""
if os.name == "nt":
script_name = ".bqinstall.trampoline.bat"
else:
script_name = ".bqinstall.trampoline.sh"
try:
# write script #
with open(script_name, "w") as f:
log.debug("writing trampoline script: %s", f.name)
template = TRAMPOLINE_SCRIPT_WINDOWS if os.name == "nt" else TRAMPOLINE_SCRIPT_BASH
template = template % {
"activate_script_path": detect_conda_activate_script(),
"environment": env_name,
"command": (" ".join(commands))
}
for line in template.splitlines():
line = line.strip()
f.write(line+os.linesep)
# run script #
log.debug("jumping into the trampoline, wee!")
if os.name == "nt":
run([script_name], stdout=None, stderr=None) # force stdout & stderr
else:
run(["chmod", "+x", script_name])
run(["./" + script_name], stdout=None, stderr=None) # force stdout & stderr
finally:
if cleanup and os.path.exists(script_name):
os.unlink(script_name)
def run(*args, **kwargs):
"""
utils for running subprocess.run,
    will remain silent unless something goes wrong
    note: will auto enable shell on windows as most commands seem
    to require it to function on Github CI
"""
try:
# enable shell on windows
if os.name == "nt":
kwargs["shell"] = True
# override-able stdout/stderr config
normal_pipe_or_not = None if log.getEffectiveLevel() == logging.DEBUG else subprocess.PIPE
kwargs["stdout"] = kwargs.get("stdout", normal_pipe_or_not)
kwargs["stderr"] = kwargs.get("stderr", normal_pipe_or_not)
return subprocess.run(*args, **kwargs, check=True)
except subprocess.CalledProcessError as e:
log.error("error while executing: %s", str(e.args))
log.error("stdout: \n%s", e.stdout.decode("UTF-8") if e.stdout else "None")
log.error("stderr: \n%s", e.stderr.decode("UTF-8") if e.stderr else "None")
raise e
@contextlib.contextmanager
def inside_git_repository(repo_url, repo_hash=None, dir_name=".bqinstall.repo", cleanup=True):
"""
clone a git repo into the specified directory and cd into it, then cleanup on exit
:type cleanup: bool
:type dir_name: str
:type repo_url: str
:type repo_hash: str | None
"""
if os.path.exists(dir_name):
log.debug("path exists, removing it")
rmtree_git_repo(dir_name)
run(["git", "clone", repo_url, dir_name])
os.chdir(dir_name)
run(["git", "checkout", repo_hash if repo_hash else ""])
try:
yield
finally:
os.chdir("..")
if cleanup:
rmtree_git_repo(dir_name)
def rmtree_git_repo(dirpath: str):
# note: because you can't programmatically delete .git on windows in the naive way
# see: https://my.oschina.net/hechunc/blog/3078597
def readonly_handler(func, path, execinfo):
exc, exc_inst, _ = execinfo
if os.name == "nt" and isinstance(exc_inst, PermissionError) and exc_inst.args[0] == 13:
os.chmod(path, stat.S_IWRITE)
func(path)
else:
raise
shutil.rmtree(dirpath, onerror=readonly_handler)
def upgrade_pip():
def enhance_on_win(lst):
if os.name == "nt":
# make windows happy and stop blocking us
lst.insert(-2, "--user")
return lst
run(enhance_on_win(["python", "-m", "pip", "install", "--upgrade", "pip"]))
```
#### File: psbody-mesh-build-script/install_psbody/__main__.py
```python
import contextlib
import os
import shutil
from infra import log, install_script_main, run, inside_git_repository, upgrade_pip, \
get_do_not_cleanup
from install_pyopengl import install_pyopengl
# preparing environment #
REPO_URL = "https://github.com/johnbanq/mesh.git"
REPO_REVISION = "0d876727d5184161ed085bd3ef74967441b0a0e8"
REPO_DIR = ".bqinstall.mpi-is.mesh"
@contextlib.contextmanager
def psbody_prepare_environment():
with inside_git_repository(
repo_url=REPO_URL, repo_hash=REPO_REVISION, dir_name=REPO_DIR,
cleanup=not get_do_not_cleanup()
):
install_cxx_compiler()
install_boost()
install_pyopengl()
yield
def install_cxx_compiler():
    # note: this has to be permanently installed as uninstalling it caused a regression
run(["conda", "install", "-y", "-c", "conda-forge", "cxx-compiler"])
def install_boost():
log.info("installing boost")
run(["conda", "install", "-y", "boost"])
# execute build #
def psbody_execute_build():
log.info("installing python dependencies")
# we need a newer pip to do all the installation
upgrade_pip()
run([
"pip", "install",
"--upgrade",
"-r", "requirements.txt"
])
log.info("running setup.py")
if os.name == "nt":
boost_location = os.path.join(os.environ["CONDA_PREFIX"], "Library", "include")
else:
boost_location = os.path.join(os.environ["CONDA_PREFIX"], "include")
run([
"pip", "install",
"--no-deps",
'--install-option=--boost-location=%s' % boost_location,
"--verbose",
"--no-cache-dir",
"."
])
# run tests #
def psbody_validate_build():
log.info("running tests")
with inside_git_repository(
repo_url=REPO_URL, repo_hash=REPO_REVISION, dir_name=REPO_DIR,
cleanup=not get_do_not_cleanup()
):
# fix the stupid CRLF issue
shutil.rmtree("data")
run(["git", "checkout", "data"])
log.info("running tests")
if os.name == "nt":
run(["python", "-m", "unittest", "-v"])
else:
run(["make", "tests"])
log.info("all test passed, installation successful!")
# main #
if __name__ == '__main__':
install_script_main(
package_name="psbody",
prepare_environment=psbody_prepare_environment,
execute_build=psbody_execute_build,
validate_build=psbody_validate_build
)
``` |
{
"source": "johnbarneta/netmiko",
"score": 3
} |
#### File: examples/configuration_changes/config_changes_to_device_list.py
```python
from __future__ import print_function, unicode_literals
import sys
from netmiko import ConnectHandler
from getpass import getpass
def usage(ext):
# exit with description and command line example
    print("\nInput file should contain a list of switch IP addresses.")
print("Commands should be the commands you wish to run on your")
print('network devices enclosed in "quotes".')
print(
"Results key: # = enable mode, * = successful command",
"w = write mem, ! = command failure",
)
print("\nusage:")
print(
("\n%s <input file>" % sys.argv[0]),
'"command1"',
'"command2"',
'"command3"',
"wr",
)
sys.exit(ext)
def get_cmd_line():
if len(sys.argv) < 2:
usage(0)
cmdlist = sys.argv[2:]
try:
        with open(sys.argv[1], "r") as f:
            switchip = f.read().splitlines()
except (IndexError, IOError):
usage(0)
return switchip, cmdlist
def main():
inputfile, config_commands = get_cmd_line()
print("Switch configuration updater. Please provide login information.\n")
# Get username and password information.
username = input("Username: ")
password = getpass("Password: ")
enasecret = getpass("Enable Secret: ")
print("{}{:<20}{:<40}{:<20}".format("\n", "IP Address", "Name", "Results"), end="")
for switchip in inputfile:
ciscosw = {
"device_type": "cisco_ios",
"ip": switchip.strip(),
"username": username.strip(),
"password": password.strip(),
"secret": enasecret.strip(),
}
print()
print("{:<20}".format(switchip.strip()), end="", flush=True)
try:
# Connect to switch and enter enable mode.
net_connect = ConnectHandler(**ciscosw)
except Exception:
print("** Failed to connect.", end="", flush=True)
continue
prompt = net_connect.find_prompt()
# Print out the prompt/hostname of the device
print("{:<40}".format(prompt), end="", flush=True)
try:
# Ensure we are in enable mode and can make changes.
if "#" not in prompt[-1]:
net_connect.enable()
print("#", end="", flush=True)
except Exception:
print("Unable to enter enable mode.", end="", flush=True)
continue
else:
for cmd in config_commands:
# Make requested configuration changes.
try:
if cmd in ("w", "wr"):
output = net_connect.save_config()
print("w", end="", flush=True)
else:
output = net_connect.send_config_set(cmd)
if "Invalid input" in output:
# Unsupported command in this IOS version.
print("Invalid input: ", cmd, end="", flush=True)
print("*", end="", flush=True)
except Exception:
# Command failed! Stop processing further commands.
print("!")
break
net_connect.disconnect()
if __name__ == "__main__":
main()
```
#### File: use_cases/case18_structured_data_genie/genie_show_mac_nxos.py
```python
from getpass import getpass
from pprint import pprint
from netmiko import ConnectHandler
PASSWORD = getpass()
def main():
conn = ConnectHandler(
host="nxos1.lasthop.io",
device_type="cisco_nxos",
username="username",
password=PASSWORD,
)
output = conn.send_command("show mac address-table", use_genie=True)
pprint(output)
if __name__ == "__main__":
main()
```
#### File: use_cases/case18_structured_data_genie/genie_textfsm_combined_ex1.py
```python
from getpass import getpass
from pprint import pprint
from netmiko import ConnectHandler
PASSWORD = getpass()
def main():
conn = ConnectHandler(
host="cisco1.lasthop.io",
# "cisco_xe" device type will cause textfsm to not return structured data
device_type="cisco_xe",
username="username",
password=PASSWORD,
)
# Setting both `use_textfsm` and `use_genie` to True will try textfsm first
# if structured data is returned genie will be ignored. If textfsm does not
# return structured data netmiko will try to parse with genie
output = conn.send_command("show version", use_textfsm=True, use_genie=True)
# genie structured data returned
pprint(output)
if __name__ == "__main__":
main()
```
#### File: netmiko/cisco/cisco_nxos_ssh.py
```python
import re
import time
import os
from netmiko.cisco_base_connection import CiscoSSHConnection
from netmiko.cisco_base_connection import CiscoFileTransfer
class CiscoNxosSSH(CiscoSSHConnection):
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self._test_channel_read(pattern=r"[>#]")
self.ansi_escape_codes = True
self.set_base_prompt()
self.disable_paging()
self.set_terminal_width(command="terminal width 511")
# Clear the read buffer
time.sleep(0.3 * self.global_delay_factor)
self.clear_buffer()
    def normalize_linefeeds(self, a_string):
        """Convert '\r\n' or '\r\r\n' to '\n', and remove extra '\r's in the text."""
newline = re.compile(r"(\r\r\n|\r\n)")
# NX-OS fix for incorrect MD5 on 9K (due to strange <enter> patterns on NX-OS)
return newline.sub(self.RESPONSE_RETURN, a_string).replace("\r", "\n")
def check_config_mode(self, check_string=")#", pattern="#"):
"""Checks if the device is in configuration mode or not."""
return super().check_config_mode(check_string=check_string, pattern=pattern)
class CiscoNxosFileTransfer(CiscoFileTransfer):
"""Cisco NXOS SCP File Transfer driver."""
def __init__(
self,
ssh_conn,
source_file,
dest_file,
file_system="bootflash:",
direction="put",
socket_timeout=10.0,
):
self.ssh_ctl_chan = ssh_conn
self.source_file = source_file
self.dest_file = dest_file
self.direction = direction
if file_system:
self.file_system = file_system
else:
raise ValueError("Destination file system must be specified for NX-OS")
if direction == "put":
self.source_md5 = self.file_md5(source_file)
self.file_size = os.stat(source_file).st_size
elif direction == "get":
self.source_md5 = self.remote_md5(remote_file=source_file)
self.file_size = self.remote_file_size(remote_file=source_file)
else:
raise ValueError("Invalid direction specified")
self.socket_timeout = socket_timeout
def check_file_exists(self, remote_cmd=""):
"""Check if the dest_file already exists on the file system (return boolean)."""
if self.direction == "put":
if not remote_cmd:
remote_cmd = f"dir {self.file_system}{self.dest_file}"
remote_out = self.ssh_ctl_chan.send_command_expect(remote_cmd)
search_string = r"{}.*Usage for".format(self.dest_file)
if "No such file or directory" in remote_out:
return False
elif re.search(search_string, remote_out, flags=re.DOTALL):
return True
else:
raise ValueError("Unexpected output from check_file_exists")
elif self.direction == "get":
return os.path.exists(self.dest_file)
def remote_file_size(self, remote_cmd="", remote_file=None):
"""Get the file size of the remote file."""
if remote_file is None:
if self.direction == "put":
remote_file = self.dest_file
elif self.direction == "get":
remote_file = self.source_file
if not remote_cmd:
remote_cmd = f"dir {self.file_system}/{remote_file}"
remote_out = self.ssh_ctl_chan.send_command(remote_cmd)
# Match line containing file name
escape_file_name = re.escape(remote_file)
pattern = r".*({}).*".format(escape_file_name)
match = re.search(pattern, remote_out)
if match:
file_size = match.group(0)
file_size = file_size.split()[0]
if "No such file or directory" in remote_out:
raise IOError("Unable to find file on remote system")
else:
return int(file_size)
@staticmethod
def process_md5(md5_output, pattern=r"= (.*)"):
"""Not needed on NX-OS."""
raise NotImplementedError
def remote_md5(self, base_cmd="show file", remote_file=None):
if remote_file is None:
if self.direction == "put":
remote_file = self.dest_file
elif self.direction == "get":
remote_file = self.source_file
remote_md5_cmd = f"{base_cmd} {self.file_system}{remote_file} md5sum"
return self.ssh_ctl_chan.send_command(remote_md5_cmd, max_loops=1500).strip()
def enable_scp(self, cmd=None):
raise NotImplementedError
def disable_scp(self, cmd=None):
raise NotImplementedError
``` |
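A hedged sketch of driving the NX-OS SCP helper directly, using only the constructor and methods shown above; the host, credentials and file names are placeholders, the import path is assumed from the file header, and in practice netmiko's higher-level file-transfer helpers are usually used instead.
```python
from netmiko import ConnectHandler
from netmiko.cisco.cisco_nxos_ssh import CiscoNxosFileTransfer

conn = ConnectHandler(
    host="nxos1.example.com",      # placeholder
    device_type="cisco_nxos",
    username="admin",
    password="password",
)

transfer = CiscoNxosFileTransfer(
    ssh_conn=conn,
    source_file="image.bin",
    dest_file="image.bin",
    file_system="bootflash:",
    direction="put",
)

if not transfer.check_file_exists():
    print("local md5:", transfer.source_md5)
    # ...perform the SCP copy, then compare against transfer.remote_md5()
```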
{
"source": "johnbarney/Securitas",
"score": 3
} |
#### File: johnbarney/Securitas/securitas.py
```python
import datetime
import os
import re
import boto3
# Get today's date
TODAY = datetime.datetime.utcnow().date()
# AWS Administrator's group
AWS_ADMIN = os.environ['AWS_ADMIN']
# Boto3 Clients
IAM_CLIENT = boto3.client('iam')
IAM_RESOURCE = boto3.resource('iam')
SES_CLIENT = boto3.client('ses')
# Regex that verifies a valid email address
EMAIL_REGEX = re.compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)")
# Creates a list of users with valid emails as usernames
USERS = []
if not re.match(EMAIL_REGEX, AWS_ADMIN):
raise ValueError('AWS Admin address is not a valid email!')
for u in IAM_CLIENT.list_users()['Users']:
if re.match(EMAIL_REGEX, u['UserName']):
USERS.append(u)
else:
        print(f"User {u['UserName']} is not a valid email username. Skipping...")
# Checks for API Key compliance
def keyrotation(event, context):
for u in USERS:
# Get all key pairs for a user
key_pairs = IAM_CLIENT.list_access_keys(UserName=u['UserName'])
for key_pair in key_pairs['AccessKeyMetadata']:
# Only audit key pairs that are Active
if key_pair['Status'] == 'Active':
Id = key_pair['AccessKeyId']
# 60 day warning
if (key_pair['CreateDate'].date() - TODAY) == datetime.timedelta(-60):
__compose_email(recipient=u['UserName'],
subject="AWS Key expire in 30 days!",
body=f"Your AWS Key {Id} will expire and "
"will be deleted in 30 days. You can "
"create a new key pair at any time via "
"the console at any time. Please email "
f"{AWS_ADMIN} with any questions.")
# 5 day warning
elif (key_pair['CreateDate'].date() - TODAY) == datetime.timedelta(-85):
__compose_email(recipient=u['UserName'],
subject="AWS Key expire in 5 days!",
body=f"Your AWS Key {Id} will expire and "
"will be deleted in 5 days. You can "
"create a new key pair at any time via "
"the console at any time. Please email "
f"{AWS_ADMIN} with any questions.")
# one day warning
elif (key_pair['CreateDate'].date() - TODAY) == datetime.timedelta(-89):
__compose_email(recipient=u['UserName'],
subject="AWS Key expire tomorrow!",
body=f"Your AWS Key {Id} will expire and "
"will be deleted tomorrow. You can "
"create a new key pair at any time via "
"the console at any time. Please email "
f"{AWS_ADMIN} with any questions.")
# Delete key
elif (key_pair['CreateDate'].date() - TODAY) < datetime.timedelta(-90):
delete = IAM_RESOURCE.AccessKey(
u['UserName'],
key_pair['AccessKeyId']).delete()
__compose_email(recipient=u['UserName'],
subject="AWS Key expired!",
body=f"Your AWS Key {Id} was over 90 days "
"old and has been deleted. You can create "
"a new key pair at any time via the "
"console at any time. Please email "
f"{AWS_ADMIN} with any questions.")
return {"message": "Finished key pair audit successfully."}
# Checks for MFA Compliance
def mfacheck(event, context):
for u in USERS:
device_list = IAM_CLIENT.list_mfa_devices(UserName=u['UserName'])
if len(device_list['MFADevices']) == 0:
__compose_email(recipient=u['UserName'],
subject="No MFA Device found!",
body="Your AWS account has no MFA device "
"associated. Please create one as soon as possible"
f"! Please email {AWS_ADMIN} with any questions. "
"https://docs.aws.amazon.com/IAM/latest/UserGuide"
"/id_credentials_mfa_enable_virtual.html#enable-virt-mfa-for-iam-user")
return {"message": "Finished MFA audit successfully."}
# Pre-built email call
def __compose_email(recipient, subject, body):
return SES_CLIENT.send_email(
Source=AWS_ADMIN,
Destination={
'ToAddresses': [
recipient
],
'BccAddresses': [
AWS_ADMIN
]
},
Message={
'Subject': {
'Charset': 'UTF-8',
'Data': subject
},
'Body': {
'Text': {
'Charset': 'UTF-8',
'Data': body
},
'Html': {
'Charset': 'UTF-8',
'Data': body
}
}
},
ReplyToAddresses=[
AWS_ADMIN
],
ReturnPath=AWS_ADMIN
)
``` |
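The age checks compare `CreateDate - TODAY` against negative timedeltas, which can read backwards at first glance. A small worked example of that arithmetic with made-up dates:
```python
import datetime

today = datetime.date(2024, 3, 31)

created = today - datetime.timedelta(days=60)       # key is 60 days old
print(created - today == datetime.timedelta(-60))   # True -> "expires in 30 days" warning

old = today - datetime.timedelta(days=91)           # key is 91 days old
print(old - today < datetime.timedelta(-90))        # True -> deletion branch
```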
{
"source": "johnbartels/deploy",
"score": 3
} |
#### File: johnbartels/deploy/baseScript.py
```python
import os
import sys
import subprocess
import argparse
import json
'''
This script will house various common methods
'''
#- General Purpose print command:
def println(s):
print (str(s))
#- Call .flush(); to see output in jenkins job's console output while script is executing as opposed to on completion:
sys.stdout.flush()
#- General purpose run command:
def runcommand(command, show_command=True, throwOnFailure=True):
if show_command is True:
println('command: %s' % (str(command)))
output = None
returncode = None
try:
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
output = p.communicate()[0]
returncode = p.returncode
    except Exception as exc:
        println('***Command Error: %s' % (str(command)))
        println(str(exc))
if returncode != 0 and throwOnFailure is True:
println('***Error in command, exiting. Command: %s\n' % (command))
println('Output: %s\n' % (str(output)))
raise Exception("Command_Error")
return output, returncode
def parse_args():
#- Parse command line args:
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num_items', help='Number of Items.', default=5, type=int)
parser.add_argument('--job_dir', help='Directory to get size of.', default='/var/build/jenkins/jobs/myjobname')
args = parser.parse_args()
return args
args = parse_args()
``` |
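A brief usage sketch of `runcommand`, assuming it runs in the same script so `args` and `println` are in scope; the commands themselves are only examples.
```python
# Returns (stdout, returncode) and raises on a non-zero exit
# unless throwOnFailure=False is passed.
output, rc = runcommand('df -h %s' % args.job_dir)
println('exit code: %d' % rc)

output, rc = runcommand('exit 1', throwOnFailure=False)
if rc != 0:
    println('command failed, but execution continues')
```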
{
"source": "johnbartholomew/bookwyrm",
"score": 3
} |
#### File: fedireads/activitypub/status.py
```python
from uuid import uuid4
from fedireads.settings import DOMAIN
def get_rating(review):
''' activitypub serialize rating activity '''
status = get_status(review)
status['inReplyToBook'] = review.book.local_id
status['fedireadsType'] = review.status_type
status['rating'] = review.rating
status['content'] = '%d star rating of "%s"' % (
review.rating, review.book.title)
return status
def get_quotation(quotation):
''' fedireads json for quotations '''
status = get_status(quotation)
status['inReplyToBook'] = quotation.book.local_id
status['fedireadsType'] = quotation.status_type
status['quote'] = quotation.quote
return status
def get_quotation_article(quotation):
    ''' a book quotation formatted for a non-fedireads instance (mastodon) '''
status = get_status(quotation)
content = '"%s"<br>-- <a href="%s">"%s"</a>)<br><br>%s' % (
quotation.quote,
quotation.book.local_id,
quotation.book.title,
quotation.content,
)
status['content'] = content
return status
def get_review(review):
''' fedireads json for book reviews '''
status = get_status(review)
status['inReplyToBook'] = review.book.local_id
status['fedireadsType'] = review.status_type
status['name'] = review.name
status['rating'] = review.rating
return status
def get_comment(comment):
    ''' fedireads json for book comments '''
status = get_status(comment)
status['inReplyToBook'] = comment.book.local_id
status['fedireadsType'] = comment.status_type
return status
def get_rating_note(review):
    ''' simple rating, send it as a note not an article '''
status = get_status(review)
status['content'] = 'Rated "%s": %d stars' % (
review.book.title,
review.rating,
)
status['type'] = 'Note'
return status
def get_review_article(review):
    ''' a book review formatted for a non-fedireads instance (mastodon) '''
status = get_status(review)
if review.rating:
status['name'] = 'Review of "%s" (%d stars): %s' % (
review.book.title,
review.rating,
review.name
)
else:
status['name'] = 'Review of "%s": %s' % (
review.book.title,
review.name
)
return status
def get_comment_article(comment):
    ''' a book comment formatted for a non-fedireads instance (mastodon) '''
status = get_status(comment)
status['content'] += '<br><br>(comment on <a href="%s">"%s"</a>)' % \
(comment.book.local_id, comment.book.title)
return status
def get_status(status):
''' create activitypub json for a status '''
user = status.user
uri = status.remote_id
reply_parent_id = status.reply_parent.remote_id \
if status.reply_parent else None
image_attachments = []
books = list(status.mention_books.all()[:3])
if hasattr(status, 'book'):
books.append(status.book)
for book in books:
if book and book.cover:
image_path = book.cover.url
image_type = image_path.split('.')[-1]
image_attachments.append({
'type': 'Document',
'mediaType': 'image/%s' % image_type,
'url': 'https://%s%s' % (DOMAIN, image_path),
'name': 'Cover of "%s"' % book.title,
})
status_json = {
'id': uri,
'url': uri,
'inReplyTo': reply_parent_id,
'published': status.published_date.isoformat(),
'attributedTo': user.remote_id,
# TODO: assuming all posts are public -- should check privacy db field
'to': ['https://www.w3.org/ns/activitystreams#Public'],
'cc': ['%s/followers' % user.remote_id],
'sensitive': status.sensitive,
'content': status.content,
'type': status.activity_type,
'attachment': image_attachments,
'replies': {
'id': '%s/replies' % uri,
'type': 'Collection',
'first': {
'type': 'CollectionPage',
'next': '%s/replies?only_other_accounts=true&page=true' % uri,
'partOf': '%s/replies' % uri,
'items': [], # TODO: populate with replies
}
}
}
return status_json
def get_replies(status, replies):
''' collection of replies '''
id_slug = status.remote_id + '/replies'
return {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': id_slug,
'type': 'Collection',
'first': {
'id': '%s?page=true' % id_slug,
'type': 'CollectionPage',
'next': '%s?only_other_accounts=true&page=true' % id_slug,
'partOf': id_slug,
'items': [get_status(r) for r in replies],
}
}
def get_replies_page(status, replies):
''' actual reply list content '''
id_slug = status.remote_id + '/replies?page=true&only_other_accounts=true'
items = []
for reply in replies:
if reply.user.local:
items.append(get_status(reply))
else:
items.append(reply.remote_id)
return {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': id_slug,
'type': 'CollectionPage',
'next': '%s&min_id=%d' % (id_slug, replies[len(replies) - 1].id),
'partOf': status.remote_id + '/replies',
'items': [items]
}
def get_favorite(favorite):
''' like a post '''
return {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': favorite.remote_id,
'type': 'Like',
'actor': favorite.user.remote_id,
'object': favorite.status.remote_id,
}
def get_unfavorite(favorite):
''' like a post '''
return {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': '%s/undo' % favorite.remote_id,
'type': 'Undo',
'actor': favorite.user.remote_id,
'object': {
'id': favorite.remote_id,
'type': 'Like',
'actor': favorite.user.remote_id,
'object': favorite.status.remote_id,
}
}
def get_boost(boost):
''' boost/announce a post '''
return {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': boost.remote_id,
'type': 'Announce',
'actor': boost.user.remote_id,
'object': boost.boosted_status.remote_id,
}
def get_add_tag(tag):
''' add activity for tagging a book '''
uuid = uuid4()
return {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': str(uuid),
'type': 'Add',
'actor': tag.user.remote_id,
'object': {
'type': 'Tag',
'id': tag.remote_id,
'name': tag.name,
},
'target': {
'type': 'Book',
'id': tag.book.local_id,
}
}
def get_remove_tag(tag):
''' add activity for tagging a book '''
uuid = uuid4()
return {
'@context': 'https://www.w3.org/ns/activitystreams',
'id': str(uuid),
'type': 'Remove',
'actor': tag.user.remote_id,
'object': {
'type': 'Tag',
'id': tag.remote_id,
'name': tag.name,
},
'target': {
'type': 'Book',
'id': tag.book.local_id,
}
}
```
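To make the output shape of these serializers concrete, here is a small sketch feeding `get_favorite` stand-in objects that carry only the attributes it reads; the IDs are placeholders and the import assumes the module path matches the file header above.
```python
from types import SimpleNamespace
from fedireads.activitypub.status import get_favorite

favorite = SimpleNamespace(
    remote_id='https://example.com/user/mouse/favorite/1',
    user=SimpleNamespace(remote_id='https://example.com/user/mouse'),
    status=SimpleNamespace(remote_id='https://example.com/user/mouse/status/7'),
)

print(get_favorite(favorite))
# {'@context': 'https://www.w3.org/ns/activitystreams',
#  'id': 'https://example.com/user/mouse/favorite/1',
#  'type': 'Like',
#  'actor': 'https://example.com/user/mouse',
#  'object': 'https://example.com/user/mouse/status/7'}
```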
#### File: fedireads/models/user.py
```python
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.dispatch import receiver
from fedireads import activitypub
from fedireads.models.shelf import Shelf
from fedireads.settings import DOMAIN
from fedireads.signatures import create_key_pair
from .base_model import FedireadsModel
class User(AbstractUser):
''' a user who wants to read books '''
private_key = models.TextField(blank=True, null=True)
public_key = models.TextField(blank=True, null=True)
inbox = models.CharField(max_length=255, unique=True)
shared_inbox = models.CharField(max_length=255, blank=True, null=True)
federated_server = models.ForeignKey(
'FederatedServer',
on_delete=models.PROTECT,
null=True,
)
outbox = models.CharField(max_length=255, unique=True)
summary = models.TextField(blank=True, null=True)
local = models.BooleanField(default=True)
fedireads_user = models.BooleanField(default=True)
localname = models.CharField(
max_length=255,
null=True,
unique=True
)
# name is your display name, which you can change at will
name = models.CharField(max_length=100, blank=True, null=True)
avatar = models.ImageField(upload_to='avatars/', blank=True, null=True)
following = models.ManyToManyField(
'self',
symmetrical=False,
through='UserFollows',
through_fields=('user_subject', 'user_object'),
related_name='followers'
)
follow_requests = models.ManyToManyField(
'self',
symmetrical=False,
through='UserFollowRequest',
through_fields=('user_subject', 'user_object'),
related_name='follower_requests'
)
blocks = models.ManyToManyField(
'self',
symmetrical=False,
through='UserBlocks',
through_fields=('user_subject', 'user_object'),
related_name='blocked_by'
)
favorites = models.ManyToManyField(
'Status',
symmetrical=False,
through='Favorite',
through_fields=('user', 'status'),
related_name='favorite_statuses'
)
remote_id = models.CharField(max_length=255, null=True, unique=True)
created_date = models.DateTimeField(auto_now_add=True)
updated_date = models.DateTimeField(auto_now=True)
manually_approves_followers = models.BooleanField(default=False)
@property
def activitypub_serialize(self):
return activitypub.get_actor(self)
class UserRelationship(FedireadsModel):
''' many-to-many through table for followers '''
user_subject = models.ForeignKey(
'User',
on_delete=models.PROTECT,
related_name='%(class)s_user_subject'
)
user_object = models.ForeignKey(
'User',
on_delete=models.PROTECT,
related_name='%(class)s_user_object'
)
# follow or follow_request for pending TODO: blocking?
relationship_id = models.CharField(max_length=100)
class Meta:
abstract = True
constraints = [
models.UniqueConstraint(
fields=['user_subject', 'user_object'],
name='%(class)s_unique'
),
models.CheckConstraint(
check=~models.Q(user_subject=models.F('user_object')),
name='%(class)s_no_self'
)
]
def get_remote_id(self):
        ''' use the relationship status in the remote_id '''
base_path = self.user_subject.remote_id
return '%s#%s/%d' % (base_path, self.status, self.id)
class UserFollows(UserRelationship):
@property
def status(self):
return 'follows'
@classmethod
def from_request(cls, follow_request):
return cls(
user_subject=follow_request.user_subject,
user_object=follow_request.user_object,
relationship_id=follow_request.relationship_id,
)
class UserFollowRequest(UserRelationship):
@property
def status(self):
return 'follow_request'
class UserBlocks(UserRelationship):
@property
def status(self):
return 'blocks'
class FederatedServer(FedireadsModel):
''' store which server's we federate with '''
server_name = models.CharField(max_length=255, unique=True)
# federated, blocked, whatever else
status = models.CharField(max_length=255, default='federated')
# is it mastodon, fedireads, etc
application_type = models.CharField(max_length=255, null=True)
application_version = models.CharField(max_length=255, null=True)
@receiver(models.signals.pre_save, sender=User)
def execute_before_save(sender, instance, *args, **kwargs):
''' populate fields for new local users '''
# this user already exists, no need to poplate fields
if instance.id or not instance.local:
return
# populate fields for local users
instance.remote_id = 'https://%s/user/%s' % (DOMAIN, instance.username)
instance.localname = instance.username
instance.username = '%s@%s' % (instance.username, DOMAIN)
instance.actor = instance.remote_id
instance.inbox = '%s/inbox' % instance.remote_id
instance.shared_inbox = 'https://%s/inbox' % DOMAIN
instance.outbox = '%s/outbox' % instance.remote_id
if not instance.private_key:
instance.private_key, instance.public_key = create_key_pair()
@receiver(models.signals.post_save, sender=User)
def execute_after_save(sender, instance, created, *args, **kwargs):
''' create shelves for new users '''
if not instance.local or not created:
return
shelves = [{
'name': 'To Read',
'identifier': 'to-read',
}, {
'name': 'Currently Reading',
'identifier': 'reading',
}, {
'name': 'Read',
'identifier': 'read',
}]
for shelf in shelves:
Shelf(
name=shelf['name'],
identifier=shelf['identifier'],
user=instance,
editable=False
).save()
```
#### File: bookwyrm/fedireads/outgoing.py
```python
from datetime import datetime
from urllib.parse import urlencode
from django.db import IntegrityError, transaction
from django.http import HttpResponseNotFound, JsonResponse
from django.views.decorators.csrf import csrf_exempt
import requests
from fedireads import activitypub
from fedireads import models
from fedireads.broadcast import broadcast
from fedireads.status import create_review, create_status
from fedireads.status import create_quotation, create_comment
from fedireads.status import create_tag, create_notification, create_rating
from fedireads.remote_user import get_or_create_remote_user
@csrf_exempt
def outbox(request, username):
''' outbox for the requested user '''
if request.method != 'GET':
return HttpResponseNotFound()
try:
user = models.User.objects.get(localname=username)
except models.User.DoesNotExist:
return HttpResponseNotFound()
# paginated list of messages
if request.GET.get('page'):
limit = 20
min_id = request.GET.get('min_id')
max_id = request.GET.get('max_id')
# filters for use in the django queryset min/max
filters = {}
# params for the outbox page id
params = {'page': 'true'}
if min_id is not None:
params['min_id'] = min_id
filters['id__gt'] = min_id
if max_id is not None:
params['max_id'] = max_id
filters['id__lte'] = max_id
page_id = user.outbox + '?' + urlencode(params)
statuses = models.Status.objects.filter(
user=user,
**filters
).select_subclasses().all()[:limit]
return JsonResponse(
activitypub.get_outbox_page(user, page_id, statuses, max_id, min_id)
)
# collection overview
size = models.Status.objects.filter(user=user).count()
return JsonResponse(activitypub.get_outbox(user, size))
def handle_account_search(query):
''' webfingerin' other servers '''
user = None
domain = query.split('@')[1]
try:
user = models.User.objects.get(username=query)
except models.User.DoesNotExist:
url = 'https://%s/.well-known/webfinger?resource=acct:%s' % \
(domain, query)
response = requests.get(url)
if not response.ok:
response.raise_for_status()
data = response.json()
for link in data['links']:
if link['rel'] == 'self':
try:
user = get_or_create_remote_user(link['href'])
except KeyError:
return HttpResponseNotFound()
return user
def handle_follow(user, to_follow):
''' someone local wants to follow someone '''
activity = activitypub.get_follow_request(user, to_follow)
broadcast(user, activity, direct_recipients=[to_follow])
def handle_unfollow(user, to_unfollow):
    ''' someone local wants to unfollow someone '''
relationship = models.UserFollows.objects.get(
user_subject=user,
user_object=to_unfollow
)
activity = activitypub.get_unfollow(relationship)
broadcast(user, activity, direct_recipients=[to_unfollow])
to_unfollow.followers.remove(user)
def handle_accept(user, to_follow, follow_request):
''' send an acceptance message to a follow request '''
with transaction.atomic():
relationship = models.UserFollows.from_request(follow_request)
follow_request.delete()
relationship.save()
activity = activitypub.get_accept(to_follow, follow_request)
broadcast(to_follow, activity, privacy='direct', direct_recipients=[user])
def handle_reject(user, to_follow, relationship):
''' a local user who managed follows rejects a follow request '''
relationship.delete()
activity = activitypub.get_reject(to_follow, relationship)
broadcast(to_follow, activity, privacy='direct', direct_recipients=[user])
def handle_shelve(user, book, shelf):
''' a local user is getting a book put on their shelf '''
# update the database
models.ShelfBook(book=book, shelf=shelf, added_by=user).save()
activity = activitypub.get_add(user, book, shelf)
broadcast(user, activity)
# tell the world about this cool thing that happened
verb = {
'to-read': 'wants to read',
'reading': 'started reading',
'read': 'finished reading'
}[shelf.identifier]
message = '%s "%s"' % (verb, book.title)
status = create_status(user, message, mention_books=[book])
status.status_type = 'Update'
status.save()
if shelf.identifier == 'reading':
read = models.ReadThrough(
user=user,
book=book,
start_date=datetime.now())
read.save()
elif shelf.identifier == 'read':
read = models.ReadThrough.objects.filter(
user=user,
book=book,
finish_date=None).order_by('-created_date').first()
if not read:
read = models.ReadThrough(
user=user,
book=book,
start_date=datetime.now())
read.finish_date = datetime.now()
read.save()
activity = activitypub.get_status(status)
create_activity = activitypub.get_create(user, activity)
broadcast(user, create_activity)
def handle_unshelve(user, book, shelf):
    ''' a local user is removing a book from their shelf '''
# update the database
row = models.ShelfBook.objects.get(book=book, shelf=shelf)
row.delete()
activity = activitypub.get_remove(user, book, shelf)
broadcast(user, activity)
def handle_import_books(user, items):
''' process a goodreads csv and then post about it '''
new_books = []
for item in items:
if item.shelf:
desired_shelf = models.Shelf.objects.get(
identifier=item.shelf,
user=user
)
if isinstance(item.book, models.Work):
item.book = item.book.default_edition
if not item.book:
continue
_, created = models.ShelfBook.objects.get_or_create(
book=item.book, shelf=desired_shelf, added_by=user)
if created:
new_books.append(item.book)
activity = activitypub.get_add(user, item.book, desired_shelf)
broadcast(user, activity)
if item.rating or item.review:
review_title = "Review of {!r} on Goodreads".format(
item.book.title,
) if item.review else ""
handle_review(
user,
item.book,
review_title,
item.review,
item.rating,
)
for read in item.reads:
read.book = item.book
read.user = user
read.save()
if new_books:
message = 'imported {} books'.format(len(new_books))
status = create_status(user, message, mention_books=new_books)
status.status_type = 'Update'
status.save()
create_activity = activitypub.get_create(
user, activitypub.get_status(status))
broadcast(user, create_activity)
return status
def handle_rate(user, book, rating):
''' a review that's just a rating '''
builder = create_rating
fr_serializer = activitypub.get_rating
ap_serializer = activitypub.get_rating_note
handle_status(user, book, builder, fr_serializer, ap_serializer, rating)
def handle_review(user, book, name, content, rating):
''' post a review '''
    # validates and saves the review in the database so it has an id
builder = create_review
fr_serializer = activitypub.get_review
ap_serializer = activitypub.get_review_article
handle_status(
user, book, builder, fr_serializer,
ap_serializer, name, content, rating)
def handle_quotation(user, book, content, quote):
    ''' post a quotation '''
    # validates and saves the quotation in the database so it has an id
builder = create_quotation
fr_serializer = activitypub.get_quotation
ap_serializer = activitypub.get_quotation_article
handle_status(
user, book, builder, fr_serializer, ap_serializer, content, quote)
def handle_comment(user, book, content):
''' post a comment '''
    # validates and saves the comment in the database so it has an id
builder = create_comment
fr_serializer = activitypub.get_comment
ap_serializer = activitypub.get_comment_article
handle_status(
user, book, builder, fr_serializer, ap_serializer, content)
def handle_status(user, book_id, \
builder, fr_serializer, ap_serializer, *args):
''' generic handler for statuses '''
book = models.Edition.objects.get(id=book_id)
status = builder(user, book, *args)
activity = fr_serializer(status)
create_activity = activitypub.get_create(user, activity)
broadcast(user, create_activity, software='fedireads')
# re-format the activity for non-fedireads servers
remote_activity = ap_serializer(status)
remote_create_activity = activitypub.get_create(user, remote_activity)
broadcast(user, remote_create_activity, software='other')
def handle_tag(user, book, name):
''' tag a book '''
tag = create_tag(user, book, name)
tag_activity = activitypub.get_add_tag(tag)
broadcast(user, tag_activity)
def handle_untag(user, book, name):
    ''' untag a book '''
book = models.Book.objects.get(id=book)
tag = models.Tag.objects.get(name=name, book=book, user=user)
tag_activity = activitypub.get_remove_tag(tag)
tag.delete()
broadcast(user, tag_activity)
def handle_reply(user, review, content):
''' respond to a review or status '''
    # validates and saves the reply in the database so it has an id
reply = create_status(user, content, reply_parent=review)
if reply.reply_parent:
create_notification(
reply.reply_parent.user,
'REPLY',
related_user=user,
related_status=reply,
)
reply_activity = activitypub.get_status(reply)
create_activity = activitypub.get_create(user, reply_activity)
broadcast(user, create_activity)
def handle_favorite(user, status):
''' a user likes a status '''
try:
favorite = models.Favorite.objects.create(
status=status,
user=user
)
except IntegrityError:
# you already fav'ed that
return
fav_activity = activitypub.get_favorite(favorite)
broadcast(
user, fav_activity, privacy='direct', direct_recipients=[status.user])
def handle_unfavorite(user, status):
    ''' a user un-likes a status '''
try:
favorite = models.Favorite.objects.get(
status=status,
user=user
)
except models.Favorite.DoesNotExist:
# can't find that status, idk
return
fav_activity = activitypub.get_unfavorite(favorite)
broadcast(user, fav_activity, direct_recipients=[status.user])
def handle_boost(user, status):
''' a user wishes to boost a status '''
if models.Boost.objects.filter(
boosted_status=status, user=user).exists():
# you already boosted that.
return
boost = models.Boost.objects.create(
boosted_status=status,
user=user,
)
boost.save()
boost_activity = activitypub.get_boost(boost)
broadcast(user, boost_activity)
def handle_update_book(user, book):
''' broadcast the news about our book '''
book_activity = activitypub.get_book(book)
update_activity = activitypub.get_update(user, book_activity)
broadcast(user, update_activity)
def handle_update_user(user):
''' broadcast editing a user's profile '''
actor = activitypub.get_actor(user)
update_activity = activitypub.get_update(user, actor)
broadcast(user, update_activity)
```
#### File: tests/connectors/test_fedireads_connector.py
```python
from dateutil import parser
from django.test import TestCase
import json
import pathlib
from fedireads import models
from fedireads.connectors.fedireads_connector import Connector
from fedireads.connectors.abstract_connector import SearchResult, get_date
class FedireadsConnector(TestCase):
def setUp(self):
models.Connector.objects.create(
identifier='example.com',
connector_file='fedireads_connector',
base_url='https://example.com',
books_url='https://example.com',
covers_url='https://example.com/images/covers',
search_url='https://example.com/search?q=',
)
self.connector = Connector('example.com')
work_file = pathlib.Path(__file__).parent.joinpath(
'../data/fr_work.json')
edition_file = pathlib.Path(__file__).parent.joinpath(
'../data/fr_edition.json')
self.work_data = json.loads(work_file.read_bytes())
self.edition_data = json.loads(edition_file.read_bytes())
def test_is_work_data(self):
self.assertEqual(self.connector.is_work_data(self.work_data), True)
self.assertEqual(self.connector.is_work_data(self.edition_data), False)
def test_get_edition_from_work_data(self):
edition = self.connector.get_edition_from_work_data(self.work_data)
self.assertEqual(edition['url'], 'https://example.com/book/122')
def test_get_work_from_edition_data(self):
work = self.connector.get_work_from_edition_date(self.edition_data)
self.assertEqual(work['url'], 'https://example.com/book/121')
def test_format_search_result(self):
datafile = pathlib.Path(__file__).parent.joinpath('../data/fr_search.json')
search_data = json.loads(datafile.read_bytes())
results = self.connector.parse_search_data(search_data)
self.assertIsInstance(results, list)
result = self.connector.format_search_result(results[0])
self.assertIsInstance(result, SearchResult)
self.assertEqual(result.title, '<NAME> and <NAME>')
self.assertEqual(result.key, 'https://example.com/book/122')
self.assertEqual(result.author, '<NAME>')
self.assertEqual(result.year, 2017)
def test_get_date(self):
date = get_date(self.edition_data['published_date'])
expected = parser.parse("2017-05-10T00:00:00+00:00")
self.assertEqual(date, expected)
```
#### File: tests/status/test_quotation.py
```python
from django.test import TestCase
from fedireads import models
from fedireads import status as status_builder
class Quotation(TestCase):
''' we have hecka ways to create statuses '''
def setUp(self):
self.user = models.User.objects.create_user(
'mouse', '<EMAIL>', 'mouseword')
self.book = models.Edition.objects.create(title='Example Edition')
def test_create_quotation(self):
quotation = status_builder.create_quotation(
self.user, self.book, 'commentary', 'a quote')
self.assertEqual(quotation.quote, 'a quote')
self.assertEqual(quotation.content, 'commentary')
def test_quotation_from_activity(self):
activity = {
'id': 'https://example.com/user/mouse/quotation/13',
'url': 'https://example.com/user/mouse/quotation/13',
'inReplyTo': None,
'published': '2020-05-10T02:38:31.150343+00:00',
'attributedTo': 'https://example.com/user/mouse',
'to': [
'https://www.w3.org/ns/activitystreams#Public'
],
'cc': [
'https://example.com/user/mouse/followers'
],
'sensitive': False,
'content': 'commentary',
'type': 'Note',
'attachment': [
{
'type': 'Document',
'mediaType': 'image//images/covers/2b4e4712-5a4d-4ac1-9df4-634cc9c7aff3jpg',
'url': 'https://example.com/images/covers/2b4e4712-5a4d-4ac1-9df4-634cc9c7aff3jpg',
'name': 'Cover of \'This Is How You Lose the Time War\''
}
],
'replies': {
'id': 'https://example.com/user/mouse/quotation/13/replies',
'type': 'Collection',
'first': {
'type': 'CollectionPage',
'next': 'https://example.com/user/mouse/quotation/13/replies?only_other_accounts=true&page=true',
'partOf': 'https://example.com/user/mouse/quotation/13/replies',
'items': []
}
},
'inReplyToBook': self.book.remote_id,
'fedireadsType': 'Quotation',
'quote': 'quote body'
}
quotation = status_builder.create_quotation_from_activity(
self.user, activity)
self.assertEqual(quotation.content, 'commentary')
self.assertEqual(quotation.quote, 'quote body')
self.assertEqual(quotation.book, self.book)
self.assertEqual(
quotation.published_date, '2020-05-10T02:38:31.150343+00:00')
```
#### File: tests/status/test_review.py
```python
from django.test import TestCase
from fedireads import models
from fedireads import status as status_builder
class Review(TestCase):
''' we have hecka ways to create statuses '''
def setUp(self):
self.user = models.User.objects.create_user(
'mouse', '<EMAIL>', 'mouseword')
self.book = models.Edition.objects.create(title='Example Edition')
def test_create_review(self):
review = status_builder.create_review(
self.user, self.book, 'review name', 'content', 5)
self.assertEqual(review.name, 'review name')
self.assertEqual(review.content, 'content')
self.assertEqual(review.rating, 5)
review = status_builder.create_review(
self.user, self.book, '<div>review</div> name', '<b>content', 5)
self.assertEqual(review.name, 'review name')
self.assertEqual(review.content, 'content')
self.assertEqual(review.rating, 5)
def test_review_rating(self):
review = status_builder.create_review(
self.user, self.book, 'review name', 'content', -1)
self.assertEqual(review.name, 'review name')
self.assertEqual(review.content, 'content')
self.assertEqual(review.rating, None)
review = status_builder.create_review(
self.user, self.book, 'review name', 'content', 6)
self.assertEqual(review.name, 'review name')
self.assertEqual(review.content, 'content')
self.assertEqual(review.rating, None)
def test_review_from_activity(self):
activity = {
'id': 'https://example.com/user/mouse/review/9',
'url': 'https://example.com/user/mouse/review/9',
'inReplyTo': None,
'published': '2020-05-04T00:00:00.000000+00:00',
'attributedTo': 'https://example.com/user/mouse',
'to': [
'https://www.w3.org/ns/activitystreams#Public'
],
'cc': [
'https://example.com/user/mouse/followers'
],
'sensitive': False,
'content': 'review content',
'type': 'Article',
'attachment': [],
'replies': {
'id': 'https://example.com/user/mouse/review/9/replies',
'type': 'Collection',
'first': {
'type': 'CollectionPage',
'next': 'https://example.com/user/mouse/review/9/replies?only_other_accounts=true&page=true',
'partOf': 'https://example.com/user/mouse/review/9/replies',
'items': []
}
},
'inReplyToBook': self.book.remote_id,
'fedireadsType': 'Review',
'name': 'review title',
'rating': 3
}
review = status_builder.create_review_from_activity(
self.user, activity)
self.assertEqual(review.content, 'review content')
self.assertEqual(review.name, 'review title')
self.assertEqual(review.rating, 3)
self.assertEqual(review.book, self.book)
self.assertEqual(
review.published_date, '2020-05-04T00:00:00.000000+00:00')
``` |
{
"source": "JohnBarton27/MovieMalarkey",
"score": 3
} |
#### File: MovieMalarkey/src/main.py
```python
from flask import Flask, make_response, render_template, request
from flask_socketio import SocketIO, join_room, leave_room, send, emit
import os
from urllib.parse import unquote
from movie import Movie
from room import Room
from user import User
app = Flask(__name__, template_folder=os.path.abspath('../templates'))
socketio = SocketIO(app)
rooms = []
users = []
def _get_user(username):
for user in users:
if user.name == username:
return user
def _get_user_from_room(username, room):
for user in room.users:
if user.name == username:
return user
def _get_room(room_code):
for room in rooms:
if room.code == room_code:
return room
@app.route('/')
def index():
return render_template('index.html')
@app.route('/roomSearch', methods=['POST'])
def new_user():
user = User(request.args.get('nickname'))
resp = make_response(render_template('roomSearch.html', user=user))
resp.set_cookie('user_name', user.name)
return resp
@app.route('/newRoom', methods=['POST'])
def new_room():
user = User(request.cookies.get('user_name'))
room = Room(user)
rooms.append(room)
resp = make_response(room.code)
resp.set_cookie('room', room.code)
return resp
@app.route('/submitGuess', methods=['POST'])
def submit_guess():
room = _get_room(request.cookies.get('room'))
# Handle non-existent room
if not room:
return 'false'
data = unquote(str(request.data))
# Grab everything after '=' and strip off trailing single quote (')
guess = data.split('=')[-1][:-1]
user = _get_user_from_room(request.cookies.get('user_name'), room)
user.current_answer = guess
# Acknowledge submission to other users
for guesser in room.users:
socketio.send({'event': 'user-answered', 'room': room.serialize()}, json=True,
to=guesser.socket_client)
# Send the host the guess
socketio.send({'event': 'user-guess', 'room': room.serialize(full=True)}, json=True,
to=room.current_round.judge.socket_client)
# All users have submitted
if room.all_guesses_submitted:
# Progress room to the VOTING phase
room.open_voting()
# Send non-full room to guessers
for user in room.current_round.guessers:
socketio.send({'event': 'all-guesses-submitted', 'room': room.serialize()}, json=True,
to=user.socket_client)
# Send full room to judge
socketio.send({'event': 'all-guesses-submitted', 'room': room.serialize(full=True)}, json=True,
to=room.current_round.judge.socket_client)
return 'Success'
@app.route('/revealGuess', methods=['POST'])
def reveal_guess():
room = _get_room(request.cookies.get('room'))
user = _get_user_from_room(request.args.get('username'), room)
answer_to_reveal = room.current_round.movie.plot if user == room.current_round.judge else user.current_answer
print(f'Revealing "{answer_to_reveal}"...')
socketio.send({'event': 'guess-reveal', 'plot': answer_to_reveal}, json=True, to=room.code)
return "Success"
@app.route('/vote', methods=['POST'])
def vote():
room = _get_room(request.cookies.get('room'))
user = _get_user_from_room(request.cookies.get('user_name'), room)
data = unquote(str(request.data))
user.current_vote = data.split('=')[-1][:-1]
if user.current_vote == room.current_round.movie.plot:
# Correct Guess - voter gets 2 points
room.current_round.give_points(2, user)
else:
# Incorrect Guess
for guesser in room.current_round.guessers:
if user.current_vote == guesser.current_answer and user != guesser:
                # 'guesser' gets one point for 'tricking' the voter (unless they voted for their
                # own answer, in which case they get nothing)
room.current_round.give_points(1, guesser)
break
if room.current_round.all_votes_in:
# This was the last voter - move to the reveal
socketio.send({'event': 'full-reveal', 'room': room.serialize(full=True)}, json=True, to=room.code)
room.end_round()
return "Success"
@app.route('/checkRoomCode')
def check_room_code():
room = _get_room(request.args.get('code'))
# Handle non-existent room
if not room:
return 'false'
resp = make_response('true')
resp.set_cookie('room', room.code)
return resp
@app.route('/startGame')
def start_game():
# TODO add handling to make sure at least 3 people have joined the room
room = _get_room(request.args.get('code'))
# Handle non-existent room
if not room:
return 'Room not found'
room.start()
resp = make_response(room.serialize())
socketio.send({'event': 'start-game', 'room': room.serialize()}, json=True, to=room.code)
# Start the first round
start_round()
return resp
@app.route('/startRound', methods=['POST'])
def start_round():
room_code = request.args.get('code')
for room in rooms:
if room.code == room_code:
# Found the room
room.start_round()
# TODO Add handling to ensure the room doesn't ever see the same movie twice
movie = Movie.get_random()
room.current_round.movie = movie
# Send title & plot to host
socketio.send({'event': 'movie', 'room': room.serialize(), 'title': movie.title, 'plot': movie.plot}, json=True, to=room.current_round.judge.socket_client)
# Send notification to guessers that the judge is selecting a movie title
for guesser in room.current_round.guessers:
socketio.send({'event': 'judge-selecting', 'room': room.serialize()}, json=True,
to=guesser.socket_client)
return 'Started round!'
return 'Room not found'
@app.route('/skipMovie', methods=['POST'])
def skip_movie():
"""
The judge didn't like the movie they were presented with, so give them a new one.
Returns:
str: response string
"""
room = _get_room(request.args.get('code'))
movie = Movie.get_random()
room.current_round.movie = movie
# Send title & plot to host
socketio.send({'event': 'movie', 'room': room.serialize(), 'title': movie.title, 'plot': movie.plot}, json=True, to=room.current_round.judge.socket_client)
return 'Started round!'
@app.route('/openGuesses', methods=['POST'])
def open_guesses():
"""
"Opens" the guessing for other users - this means the judge has found a suitable title/plot and is ready for users
to start guessing.
Returns:
Response
"""
room = _get_room(request.cookies.get('room'))
room.open_guessing()
# Send title to rest of users
for guesser in room.current_round.guessers:
socketio.send({'event': 'movie-title', 'title': room.current_round.movie.title}, json=True,
to=guesser.socket_client)
return 'Opened guessing!'
@app.route('/play')
def enter_room():
room_code = request.cookies.get('room')
for room in rooms:
if room.code == room_code:
resp = make_response(render_template('room.html', room=room.serialize()))
return resp
return f'<h1>No room {room_code} found!</h1>'
@socketio.on('join_room')
def connect(data):
user = User(request.cookies.get('user_name'))
user.socket_client = request.sid
current_code = data['room']
for open_room in rooms:
if open_room.code == current_code:
open_room.add_user(user)
users.append(user)
room = open_room
join_room(room.code)
# TODO remove 'plot' from movies if not sending to judge
if room.current_round and room.current_round.judge and user == room.current_round.judge:
print(f'This user ({user.name}) is the current judge!')
# Send full (including current answers) to judge - this handles the judge refreshing their page and
# "re-joining" the game
send({'event': 'new-user', 'username': user.name, 'room': room.serialize(full=True)}, json=True, to=room.current_round.judge.socket_client)
# Send the judge joining event to all other users (but not the judge, or this would wipe the answers
# the judge has)
for guesser in room.current_round.guessers:
send({'event': 'new-user', 'username': user.name, 'room': room.serialize()}, json=True,
to=guesser.socket_client)
else:
send({'event': 'new-user', 'username': user.name, 'room': room.serialize()}, json=True, to=room.code)
print(f'{user.name} joined {room.code} ({user.socket_client})')
break
@socketio.on('disconnect')
def disconnect():
print('Disconnected!')
if __name__ == "__main__":
socketio.run(app, host="0.0.0.0", port=8010, debug=True)
```
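The guess and vote handlers above recover the submitted text from a URL-encoded body with `data.split('=')[-1][:-1]`. A minimal standalone sketch of that parsing step, using a made-up `raw_body` in place of Flask's `request.data`:
```python
from urllib.parse import unquote

# Hypothetical raw body as Flask's request.data would expose it (bytes),
# e.g. from a form field named "guess".
raw_body = b"guess=A%20shark%20terrorizes%20a%20beach%20town"

# str() on bytes yields "b'...'", so the trailing "'" is stripped with [:-1],
# mirroring the parsing in submit_guess() and vote() above.
data = unquote(str(raw_body))
guess = data.split('=')[-1][:-1]
print(guess)  # A shark terrorizes a beach town
```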
#### File: MovieMalarkey/src/room.py
```python
import random
import string
from enum import Enum
from round import Round
from user import User
class Phase(Enum):
JOINING = 'JOINING'
SELECTING = 'SELECTING'
GUESSING = 'GUESSING'
VOTING = 'VOTING'
class Room:
"""
Class for representing "Rooms"
"""
def __init__(self, creator: User):
"""
Initializer for a Room object
Args:
creator (User): User who created the Room
"""
self.host = creator
self.users = [creator]
self.rounds = []
self.code = Room.generate_code()
self.started = False
self.phase = Phase.JOINING
def __repr__(self):
return self.code
def __str__(self):
return f'{self.code} (Users: {len(self.users)})'
def __eq__(self, other):
return self.code == other.code
def __hash__(self):
return hash(self.code)
@property
def all_guesses_submitted(self):
"""
Checks to see if all guesses for the round have been submitted.
Returns:
bool: True if all 'guessers' have submitted an answer; False if at least one has not guessed
"""
return all(user.current_answer for user in self.current_round.guessers)
@property
def current_round(self) -> Round:
"""
Gets the current (or most recent) round, if there is one.
Returns:
Round: Current (or most recent) round
"""
if len(self.rounds) == 0:
# No rounds have been started
return None
# Return last round
return self.rounds[-1]
@property
def previous_round(self) -> Round:
"""
Gets the previous round, if there is one.
Returns:
Round: Previous round
"""
if len(self.rounds) <= 1:
# No previous rounds
return None
# Return previous round
return self.rounds[-2]
def add_user(self, user: User):
"""
Add the given user to this Room (handles ensuring no duplicate users)
Args:
user (User): User to add to this room
Returns:
None
"""
# TODO add handling to stop users from joining an already-started game ('Spectator' mode?)
if user not in self.users:
self.users.append(user)
return
# Ensure user is fully populated
if user.socket_client:
for existing_user in self.users:
if existing_user == user:
existing_user.socket_client = user.socket_client
return
def start_round(self):
"""
Starts a new round in this room
Returns:
None
"""
self.rounds.append(Round(self, len(self.rounds) + 1))
self.current_round.judge = self.select_next_judge()
self.phase = Phase.SELECTING
def open_guessing(self):
"""
Opens the guessing for users in the room. Progresses the phase to GUESSING.
Returns:
None
"""
self.phase = Phase.GUESSING
def open_voting(self):
"""
        Opens the voting for users in the room. Progresses the phase to VOTING.
Returns:
None
"""
self.phase = Phase.VOTING
def end_round(self):
"""
Cleanup method for when a round ends (clears all answers, etc.)
Returns:
None
"""
self.current_round.end()
# Wipe all answers
for user in self.users:
user.current_answer = None
@staticmethod
def generate_code():
"""
Generates a random room code
Returns:
str: Unique room code
"""
# TODO ensure certain words (curse words, etc.) are not given as room codes
chars = string.ascii_uppercase
for i in range(0, 10):
chars += str(i)
code = ''.join(random.choice(chars) for i in range(4))
# TODO ensure code is not already in use
return code
def serialize(self, full=False):
return {
'code': self.code,
'host': self.host.serialize(),
'round': self.current_round.serialize(full=full) if self.current_round else '',
'started': str(self.started),
'users': [user.serialize(full=full) for user in self.users],
'phase': self.phase.name
}
def start(self):
"""
Starts a game in this room
Returns:
None
"""
self.started = True
def stop(self):
"""
Stops/Ends the game in this room
Returns:
None
"""
self.started = False
def select_next_judge(self):
"""
Selects the next judge for this room. This normally selects judges in a round-robin pattern.
        If there has not been a previous round (and this is therefore picking the 'first' judge), it selects one
randomly.
Returns:
User: The next judge for this Room
"""
if not self.previous_round:
# If this is our first round, pick a random user from the room
return random.choice(self.users)
index = self.users.index(self.previous_round.judge)
index += 1
if index == len(self.users):
# We were at the 'last' user, so loop around & restart
return self.users[0]
return self.users[index]
``` |
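The judge rotation in `Room.select_next_judge` is round-robin with a random first pick. A minimal standalone sketch of the same idea, with made-up user names and no dependency on the `Room`/`Round` classes:
```python
import random

# "users" and "previous_judge" are made-up names for this sketch.
users = ['mouse', 'rat', 'hamster']

def next_judge(users, previous_judge=None):
    if previous_judge is None:
        # First round: pick anyone at random.
        return random.choice(users)
    index = users.index(previous_judge) + 1
    # Wrap around after the last user (same effect as the explicit check above).
    return users[index % len(users)]

judge = None
for round_number in range(1, 5):
    judge = next_judge(users, judge)
    print(round_number, judge)
```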
{
"source": "JohnBat26/dp-agent",
"score": 2
} |
#### File: dp-agent/core/transform_config.py
```python
import sys
from os import getenv
from itertools import chain
from copy import deepcopy
from pathlib import Path
import yaml
from config import *
ANNOTATORS = [ANNOTATORS_1, ANNOTATORS_2, ANNOTATORS_3]
# generate component url
for service in chain(*ANNOTATORS, SKILL_SELECTORS, SKILLS, RESPONSE_SELECTORS,
POSTPROCESSORS):
if 'url' not in service:
host = service['name'] if getenv('DPA_LAUNCHING_ENV') == 'docker' else service['host']
service['url'] = f"{service['protocol']}://{host}:{service['port']}/{service['endpoint']}"
DB_HOST = 'mongo' if getenv('DPA_LAUNCHING_ENV') == 'docker' else DB_HOST
def _get_config_path(component_config: dict) -> dict:
component_config = deepcopy(component_config)
raw_path = component_config.get('path', None)
if not raw_path:
return component_config
config_path = Path(raw_path)
if not config_path.is_absolute():
config_path = Path(__file__).resolve().parents[2] / config_path
if isinstance(config_path, Path) and config_path.is_file():
component_config['path'] = config_path
else:
        raise FileNotFoundError(f'config {raw_path} does not exist')
return component_config
_run_config_path: Path = Path(__file__).resolve().parent / 'config.yaml'
_component_groups = ['SKILLS', 'ANNOTATORS', 'SKILL_SELECTORS', 'RESPONSE_SELECTORS', 'POSTPROCESSORS']
_module = sys.modules[__name__]
if _run_config_path.is_file():
with _run_config_path.open('r', encoding='utf-8') as f:
config: dict = yaml.safe_load(f)
if config.get('use_config', False) is True:
config = config.get('agent_config', {})
MAX_WORKERS = config.get('MAX_WORKERS', MAX_WORKERS)
DB_NAME = config.get('DB_NAME', DB_NAME)
DB_HOST = config.get('HOST', DB_HOST)
DB_PORT = config.get('PORT', DB_PORT)
for group in _component_groups:
setattr(_module, group, list(map(_get_config_path, config.get(group, []))))
```
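A small sketch of the URL-assembly loop at the top of `transform_config.py`, run against one hypothetical service entry (the field values are made up):
```python
from os import getenv

# Hypothetical service entry in the shape transform_config.py expects.
service = {
    'name': 'ner',
    'host': 'localhost',
    'port': 8021,
    'protocol': 'http',
    'endpoint': 'model',
}

# Same rule as above: inside docker-compose the container name doubles as the host.
host = service['name'] if getenv('DPA_LAUNCHING_ENV') == 'docker' else service['host']
service['url'] = f"{service['protocol']}://{host}:{service['port']}/{service['endpoint']}"
print(service['url'])  # http://localhost:8021/model (outside docker)
```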
#### File: dp-agent/state_formatters/dp_formatters.py
```python
from typing import List, Any
def base_input_formatter(state: List):
"""This state_formatter takes the most popular fields from Agent state and returns them as dict values:
    * last_utterances: a list of the last utterance from each dialog in the state
    * last_annotations: a list of the last annotation from each last utterance
    * utterances_histories: a list of lists of all utterances from all dialogs
    * annotations_histories: a list of lists of all annotations from all dialogs
    * dialog_ids: a list of all dialog ids
    * user_ids: a list of all user ids; each dialog has a unique human participant id
Args:
state: dialog state
Returns: formatted dialog state
"""
utterances_histories = []
last_utts = []
annotations_histories = []
last_annotations = []
dialog_ids = []
user_ids = []
for dialog in state:
utterances_history = []
annotations_history = []
for utterance in dialog['utterances']:
utterances_history.append(utterance['text'])
annotations_history.append(utterance['annotations'])
last_utts.append(utterances_history[-1])
utterances_histories.append(utterances_history)
last_annotations.append(annotations_history[-1])
annotations_histories.append(annotations_history)
dialog_ids.append(dialog['id'])
user_ids.extend([utt['user']['id'] for utt in state[0]['utterances']])
return {'dialogs': state,
'last_utterances': last_utts,
'last_annotations': last_annotations,
'utterances_histories': utterances_histories,
'annotation_histories': annotations_histories,
'dialog_ids': dialog_ids,
'user_ids': user_ids}
def last_utterances(payload, model_args_names):
utterances = base_input_formatter(payload)['last_utterances']
return {model_args_names[0]: utterances}
def base_skill_output_formatter(payload):
"""Works with a single batch instance
Args:
payload: one batch instance
Returns: a formatted batch instance
"""
return payload
def base_annotator_formatter(payload: Any, model_args_names=('x',), mode='in'):
if mode == 'in':
return last_utterances(payload, model_args_names)
if mode == 'out':
return payload
def ner_formatter(payload: Any, model_args_names=('x',), mode='in'):
if mode == 'in':
return last_utterances(payload, model_args_names)
if mode == 'out':
return {'tokens': payload[0],
'tags': payload[1]}
def sentiment_formatter(payload: Any, model_args_names=('x',), mode='in'):
if mode == 'in':
return last_utterances(payload, model_args_names)
if mode == 'out':
return [el for el in payload]
def chitchat_odqa_formatter(payload: Any, model_args_names=('x',), mode='in'):
if mode == 'in':
return last_utterances(payload, model_args_names)
if mode == 'out':
class_name = payload[0]
if class_name in ['speech', 'negative']:
response = ['chitchat']
else:
response = ['odqa']
return response
def odqa_formatter(payload: Any, model_args_names=('question_raw',), mode='in'):
if mode == 'in':
return last_utterances(payload, model_args_names)
if mode == 'out':
return [{"text": payload[0],
"confidence": 0.5}]
def chitchat_formatter(payload: Any, model_args_names=('q',), mode='in'):
if mode == 'in':
return last_utterances(payload, model_args_names)
if mode == 'out':
return [{"text": payload[0],
"confidence": 0.5}]
def chitchat_example_formatter(payload: Any,
model_args_names=("utterances", 'annotations', 'u_histories', 'dialogs'),
mode='in'):
if mode == 'in':
parsed = base_input_formatter(payload)
return {model_args_names[0]: parsed['last_utterances'],
model_args_names[1]: parsed['last_annotations'],
model_args_names[2]: parsed['utterances_histories'],
model_args_names[3]: parsed['dialogs']}
if mode == 'out':
return {"text": payload[0],
"confidence": payload[1],
"name": payload[2]}
``` |
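A minimal sketch of the dialog-state shape `base_input_formatter` expects and what `last_utterances` would hand to a model; the sample `state` is made up and the extraction is restated inline rather than imported:
```python
# Made-up dialog state in the shape base_input_formatter() expects.
state = [{
    'id': 'dialog-1',
    'utterances': [
        {'text': 'hello', 'annotations': {'ner': {}}, 'user': {'id': 'u1'}},
        {'text': 'what is the capital of France?', 'annotations': {'ner': {}}, 'user': {'id': 'u1'}},
    ],
}]

# Equivalent of last_utterances(state, ('x',)) without importing the module.
last_utts = [dialog['utterances'][-1]['text'] for dialog in state]
payload = {'x': last_utts}
print(payload)  # {'x': ['what is the capital of France?']}
```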
{
"source": "johnbayko/hicp",
"score": 2
} |
#### File: python/default_app/reception.py
```python
from hicp import HICP, newLogger, EventType, Message, Panel, Window, Label, Button, TextField, Selection, SelectionItem
from hicp import App, AppInfo
from apps.test.test import TestApp
from apps.testml.testml import TestAppML
class ButtonSwitchAppHandler:
def __init__(self, app_name=None):
self.logger = newLogger(type(self).__name__)
self.__app_name = app_name
def set_app_name(self, app_name):
self.__app_name = app_name
def update(self, hicp, event_message, component):
if self.__app_name is not None:
hicp.switch_app(self.__app_name)
class AppSelectionHandler:
def __init__(self, buttonHandler, start_button, app_desc_label, app_text_ids):
self.__button_handler = buttonHandler
self.__start_button = start_button
self.__app_desc_label = app_desc_label
self.__app_text_ids = app_text_ids
def update(self, hicp, event_message, selection):
items = selection.get_selected_item_list()
# Should be one selected item, but check to be sure.
if 0 == len(items):
return
item = items[0]
app = item.item
self.__button_handler.set_app_name(app.app_name)
(app_name_id, app_desc_id) = self.__app_text_ids[app]
self.__start_button.set_text_id(app_name_id)
self.__start_button.update()
self.__app_desc_label.set_text_id(app_desc_id)
self.__app_desc_label.update()
class Reception(App):
def __init__(self):
self.__logger = newLogger(type(self).__name__)
@classmethod
def get_app_name(cls):
return 'reception'
@classmethod
def get_app_info(cls):
app_name = cls.get_app_name()
display_name = [('Reception', 'en')]
desc = [('List apps for user to choose.', 'en')]
return AppInfo(app_name, display_name, desc)
def connected(self, hicp):
self.__logger.debug("reception connected")
hicp.text_direction(hicp.RIGHT, hicp.DOWN) # debug
WINDOW_TITLE_ID = hicp.add_text_get_id("App list")
SELECT_APP_ID = hicp.add_text_get_id("Select app:")
TEST_APP_ID = hicp.add_text_get_id("Test")
TEST_APP_ML_ID = hicp.add_text_get_id("Test Multi-language")
window = self.new_app_window()
window.set_text_id(WINDOW_TITLE_ID)
hicp.add(window)
select_app_label = Label()
select_app_label.set_text_id(SELECT_APP_ID)
window.add(select_app_label, 0, 0)
# Show found apps
app_panel = Panel()
window.add(app_panel, 0, 1)
(group, subgroup) = hicp.get_text_group()
app_text_ids = {}
# Sort them first
unsorted_app_info = []
for app_info in hicp.get_all_app_info().values():
app_name = app_info.display_name.get_text(group, subgroup)
app_name_id = hicp.add_text_selector_get_id(app_info.display_name)
unsorted_app_info.append((app_name_id, app_info))
app_desc = app_info.description.get_text(group, subgroup)
app_desc_id = hicp.add_text_selector_get_id(app_info.description)
app_text_ids[app_info] = (app_name_id, app_desc_id)
sorted_app_info = hicp.sort(unsorted_app_info)
# Make app description label to add later.
app_desc = Label()
# Make start button to add later
# Selection handler updates this:
start_button_handler = ButtonSwitchAppHandler()
start_button = Button()
# Add app selection list.
app_selection = Selection()
app_selection.set_presentation(Selection.SCROLL)
app_selection.set_selection_mode(Selection.SINGLE)
# Add app names to selection list, using sorted app info index as id.
app_items = []
item_id = 0
for (app_name_id, app_info) in sorted_app_info:
# Skip adding button for this app.
if app_info.app_name != self.get_app_name():
app_item = SelectionItem(item_id, app_name_id, item=app_info)
app_items.append(app_item)
item_id += 1
app_selection.add_items(app_items)
app_selection.set_selected_list([0])
app_selection.set_handler(
EventType.CHANGED,
AppSelectionHandler(start_button_handler, start_button, app_desc, app_text_ids)
)
app_panel.add(app_selection, 0, 0)
items = app_selection.get_selected_item_list()
# Should be one selected item, but check to be sure.
if 0 < len(items):
initial_item = items[0]
initial_app = initial_item.item
start_button_handler.set_app_name(initial_app.app_name)
(initial_name_id, initial_desc_id) = app_text_ids[initial_app]
else:
initial_item = None
initial_name_id = \
initial_desc_id = hicp.add_text_get_id('None')
# Add app description label here.
app_desc.set_text_id(initial_desc_id)
app_panel.add(app_desc, 1, 0)
# Add start button here.
start_button.set_text_id(initial_name_id)
start_button.set_handler(
EventType.CLICK, start_button_handler
)
start_button.set_size(1, 1)
app_panel.add(start_button, 0, 1)
```
#### File: python/hicp/message.py
```python
import re
from hicp.logger import newLogger
class Message:
# Constants
EVENT = "event"
COMMAND = "command"
# Events
CHANGED = "changed"
CLICK = "click"
CLOSE = "close"
# Commands
ADD = "add"
MODIFY = "modify"
REMOVE = "remove"
# Events and commands
AUTHENTICATE = "authenticate"
CONNECT = "connect"
DISCONNECT = "disconnect"
# Headers
# TODO: Maybe make individual event classes?
ATTRIBUTES = "attributes"
APPLICATION = "application"
CATEGORY = "category"
CONTENT = "content"
EVENTS = "events"
GUI = "gui"
HEIGHT = "height"
ID = "id"
ITEMS = "items"
METHOD = "method"
MODE = "mode"
PRESENTATION = "presentation"
PARENT = "parent"
PASSWORD = "password"
PLAIN = "plain"
POSITION = "position"
SELECTED = "selected"
SIZE = "size"
TEXT = "text"
TEXT_DIRECTION = "text-direction"
USER = "user"
VISIBLE = "visible"
WIDTH = "width"
# These should be user visible. Move to Component or TextField (or parent,
# when TextPanel is added).
# ATTRIBUTES attributes
# CONTENT - already defined.
BOLD = "bold"
FONT = "font"
ITALIC = "italic"
LAYOUT = "layout"
SIZE = "size"
UNDERLINE = "underline"
# ATTRIBUTES FONT attributes
SERIF = "serif"
SANS_SERIF = "sans-serif"
SERIF_FIXED = "serif-fixed"
SANS_SERIF_FIXED = "sans-serif-fixed"
# ATTRIBUTES LAYOUT attributes
BLOCK = "block"
INDENT_FIRST = "indent-first"
INDENT_REST = "indent-rest"
LIST = "list"
# EVENTS attributes
ENABLED = "enabled"
DISABLED = "disabled"
UNSELECT = "unselect"
# MODE attributes
SINGLE = "single"
MULTIPLE = "multiple"
# PRESENTATION attributes
SCROLL = "scroll"
TOGGLE = "toggle"
DROPDOWN = "dropdown"
    # TEXT_DIRECTION attributes
LEFT = "left"
RIGHT = "right"
UP = "up"
DOWN = "down"
    LENGTH_RE = re.compile(r"(length) *= *(\d+)")
BOUNDARY_RE = re.compile("(boundary) *=(.*)\r\n")
ESC_RE = re.compile("(\033)(.)")
LAST_ESC_RE = re.compile("(\033.[^\033]*)*(\033.)")
def __init__(self, in_stream=None):
# These may be null.
self.logger = newLogger(type(self).__name__)
self.disconnected = False
# May be EVENT or COMMAND.
self.__type = None
# "event: " or "command: " value.
self.__type_value = None
self.__headers = {}
if in_stream is None:
# No message to read in - probably making one to send.
return
# Read headers into the header dict.
line_cnt = 0
try:
while True:
line = self.readline(in_stream)
if line == "\r\n" or line == "":
# End of message or EOF.
if 0 >= line_cnt:
# Message not even started, nothing to read.
# Probably disconnected.
self.disconnected = True
self.set_type(self.EVENT, self.DISCONNECT)
return
break
line_cnt = line_cnt + 1
# Split into key and value.
header_key = ""
header_value = ""
# Check for ":: " multi-line data section.
header_split_idx = line.find(":: ")
if 0 < header_split_idx:
header_key = line[0:header_split_idx]
# Skip ":: " 3 chracters.
termination_criterion = line[header_split_idx + 3:]
# Length or boundary termination criterion?
if 0 <= termination_criterion.find("length"):
length_match = \
self.LENGTH_RE.search(termination_criterion)
length = int(length_match.group(2), 10)
# header_value is the next length bytes (unless
# EOF is encountered).
header_value = in_stream.read(length)
# There must be a terminating CR LF, so read to
# the end of the input line (extra is discarded).
self.readline(in_stream)
elif 0 <= termination_criterion.find("boundary"):
boundary_match = \
self.BOUNDARY_RE.search(termination_criterion)
boundary = boundary_match.group(2)
# Boundary is either CR LF <text> or <text>.
if '' == boundary:
# Boundary is CR LF plus next line.
# The boundary excludes the next CR LF in the
# string returned by readline(), but it's
# easier to find the boundary if they are
# included.
boundary = self.readline(in_stream)
# This lets us compare the full line to the
# boundary string, so indicate that.
full_line_boundary = True
else:
full_line_boundary = False
# Read until boundary + CR LF is read.
header_value_list = []
prev_line_eol_esc = False
while 1:
header_value_part = self.readline(in_stream)
# Remove any escapes, but keep track of
# index of last one.
(header_value_unescaped, esc_cnt) = \
self.ESC_RE.subn("\\2", header_value_part)
if 0 < esc_cnt:
# String had escapes.
last_esc_match = \
self.LAST_ESC_RE.search(header_value_part)
after_last_esc_index = last_esc_match.end(2)
else:
# No esc in string.
after_last_esc_index = 0
if full_line_boundary:
if header_value_unescaped == boundary \
and False == prev_line_eol_esc \
and 0 == esc_cnt:
# Found the boundary. header value doesn't
# include this line.
# The final CR LF is also not included,
# Remove it from the last header value list
# string.
header_value_list[-1] = \
header_value_list[-1][:-2]
break
else:
# Check for boundary. Should always be at end
# of the line, but accept it if not - discard
# rest of line.
boundary_index = \
header_value_unescaped.find(boundary)
if 0 <= boundary_index \
and after_last_esc_index <= boundary_index:
# Found end of line boundary. Remove
# boundary and add to header value list.
header_value_no_boundary = \
header_value_unescaped[ : boundary_index]
header_value_list = \
header_value_list + [header_value_no_boundary]
break
# No boundary found, add to header value list
header_value_list = \
header_value_list + [header_value_unescaped]
prev_line_eol_esc = \
(after_last_esc_index >= len(header_value_part) - 2)
# Convert list to single string.
header_value = ''.join(header_value_list)
else:
# No termination criterion. Leave header value
# blank.
pass
else:
# Check for ": " single line data value.
header_split_idx = line.find(": ")
if 0 < header_split_idx:
# This is a valid header.
header_key = line[:header_split_idx]
# Skip ": " 2 chracters, and omit CR LF at the end.
header_value = line[header_split_idx + 2:-2]
else:
# No ": " or ":: ". Let header key be input line
# (without CR LF) and leave value as "".
header_key = line[:-2]
if header_key:
if 1 == line_cnt:
# First line always "event: " or "command: "
self.set_type(header_key, header_value)
else:
self.add_header(header_key, header_value)
else:
# Ignore non headers. Maybe log an error in the
# future.
self.logger.debug("non-header") # debug
pass
except ConnectionResetError:
# Connection closed, interpret as diconnect event.
self.disconnected = True
self.set_type(self.EVENT, self.DISCONNECT)
# Read until CR LF is found.
def readline(self, in_stream):
# Some systems may stop at CR (or LF), doesn't guarantee next is
# LF (or previous is CR), so loop until both are read.
line = ""
while True:
line_part = in_stream.readline()
if not line_part:
# EOF
break
if line_part[-1] == "\x0d":
# Ended at CR, try to get a LF as well. At EOF, read
# will return "", and this will loop back up to the
# readline above where the EOF will be detected.
line = line + line_part + in_stream.read(1)
else:
line = line + line_part
if line[-2] == "\x0d" and line[-1] == "\x0a":
# Found a CR LF combination, this is the end of the line.
break
return line
def write(self, out_stream):
if out_stream is None:
raise UnboundLocalError("out_stream required, not defined")
out_stream.write(self.__type + ": " + self.__type_value + "\r\n")
# Write all headers
for header_key in list(self.__headers.keys()):
header_value = self.__headers[header_key]
# If value has "\r\n" within it, output as data block,
# otherwise as simple header.
if -1 == header_value.find("\r\n"):
# Simple header field.
out_stream.write(header_key + ": " + header_value + "\r\n")
else:
# Data block.
out_stream.write(header_key + ":: boundary=\r\n--\r\n")
# Escape boundary ("\r\n--"), and single ESC.
esc_header_value = \
header_value.\
replace('\033', '\033\033').\
replace('\r\n--', '\033\r\n--')
out_stream.write(esc_header_value)
# Write out terminator sequence and extra "\r\n" as
# block terminator.
out_stream.write("\r\n--\r\n")
# Write end of message blank line
out_stream.write("\r\n")
out_stream.flush()
def set_type(self, message_type, value):
self.__type = message_type
self.__type_value = value
def get_type(self):
return self.__type
def get_type_value(self):
return self.__type_value
def clear(self):
self.__headers.clear()
def get_header(self, header_key):
"Return value for key, or None if no header with that key, or key is None."
if header_key is None:
return None
try:
# Header keys are stored as lower case.
header_value = self.__headers[header_key.lower()]
except KeyError:
self.logger.debug("Header not found key: " + header_key) # debug
for header_key in list(self.__headers.keys()): # debug
header_value = self.__headers[header_key] # debug
self.logger.debug("get_header " + header_key + ": " + header_value) # debug
header_value = None
return header_value
def add_header(self, header_key, header_value):
if not isinstance(header_key, str):
raise TypeError("header key must be a string")
if not isinstance(header_value, str):
raise TypeError("header value must be a string")
# Store keys as lower case.
header_key = header_key.lower()
# Special case check: EVENT and COMMAND are not headers, they're
# message types, so redirect them.
if Message.EVENT == header_key or Message.COMMAND == header_key:
self.set_type(header_key, header_value)
else:
self.__headers[header_key.lower()] = header_value
def log(self, msg):
self.logger.debug(msg)
``` |
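A hedged sketch of the wire format `Message.write()` emits, showing a simple header next to a multi-line data block with boundary escaping; the header names and values are made up, not the full set a real command needs:
```python
# Made-up headers: one single-line value, one multi-line value containing
# something that looks like the boundary and therefore needs escaping.
headers = {
    'id': 'label-1',
    'text': 'first line\r\n-- looks like a boundary\r\nsecond line',
}

wire = 'command: add\r\n'
for key, value in headers.items():
    if '\r\n' not in value:
        # Simple single-line header field.
        wire += f'{key}: {value}\r\n'
    else:
        # Multi-line value: emit a data block terminated by a CR LF "--" boundary,
        # escaping ESC and any embedded "\r\n--" exactly as write() does.
        escaped = value.replace('\033', '\033\033').replace('\r\n--', '\033\r\n--')
        wire += f'{key}:: boundary=\r\n--\r\n' + escaped + '\r\n--\r\n'
wire += '\r\n'  # blank line terminates the message
print(repr(wire))
```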
{
"source": "johnbeard/doc-convert",
"score": 3
} |
#### File: common/pydevhelp/devhelp.py
```python
import lxml.etree as ET
class Keyword(object):
def __init__(self, name, link, type):
self.name = name
self.link = link
self.type = type
def getXml(self):
e = ET.Element('keyword', attrib={'name':self.name, 'link':self.link, 'type':self.type})
return e
class Subsection(object):
def __init__(self, name, link):
self.name = name
self.link = link
self.subs = []
def addSub(self, sub):
self.subs.append(sub)
def getXml(self):
e = ET.Element('sub', attrib={'name':self.name, 'link':self.link})
for s in self.subs:
e.append(s.getXml());
return e
class FunctionList(object):
def __init__(self):
# list of keyword items
self.functions = []
def addKeyword(self, kw):
self.functions.append(kw)
def getXml(self):
e = ET.Element('functions')
for f in self.functions:
e.append(f.getXml())
return e
class ChapterList(object):
def __init__(self):
# list of keyword items
self.chapters = []
def addChapter(self, sub):
self.chapters.append(sub)
def getXml(self):
e = ET.Element('chapters')
for f in self.chapters:
e.append(f.getXml())
return e
class DevhelpBook(object):
xmlVersion = 2
xmlns="http://www.devhelp.net/book"
def __init__(self, name, title, base, link, language=None, author=""):
self.name = name
self.title = title
self.base = base
self.link = link
self.language = language
self.author = author
self.funcs = FunctionList()
self.chaps = ChapterList()
def getXml(self):
tree = ET.Element('book', attrib= {
'language':self.language,
'author':self.author,
'name':self.name,
'title':self.title,
'base':self.base,
'link':self.link,
'version':str(self.xmlVersion)
})
tree.append(self.chaps.getXml())
tree.append(self.funcs.getXml())
return ET.ElementTree(tree)
def addChapter(self, sub):
self.chaps.addChapter(sub)
def addKeyword(self, kw):
self.funcs.addKeyword(kw)
def write(self, fn):
tree = self.getXml()
tree.write(fn, encoding='utf-8', standalone=False, pretty_print=True)
```
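A possible usage sketch for the classes above, assuming `pydevhelp` is importable as laid out in the repo and `lxml` is installed; all names, links and the output filename are made up:
```python
from pydevhelp.devhelp import DevhelpBook, Subsection, Keyword

# Build a tiny book: one chapter with one sub-entry, plus one keyword.
book = DevhelpBook(name='demo-doc', title='Demo Reference',
                   base='/usr/share/doc/demo', link='index.html',
                   language='c++')

chapter = Subsection('Classes', 'classes.html')
chapter.addSub(Subsection('Widget', 'classWidget.html'))
book.addChapter(chapter)

book.addKeyword(Keyword('Widget::paint', 'classWidget.html#paint', 'function'))

# Writes the devhelp2 XML index to disk.
book.write('demo-doc.devhelp2')
```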
#### File: juce/python/juce_dox.py
```python
import pydevhelp.devhelp as devhelp
import pydevhelp.dox_tagfile
import os
import argparse
import lxml.html as HTML
def getModules(fileList):
"""
These aren't in the tag file, and they don't even come out properly
in Doxygen XML, just just manually mess with the files
"""
modList = {}
for d in fileList:
readModuleCrumbs(d, modList)
mod = devhelp.Subsection("Modules", modList['modules']['href'])
addModules(mod, modList['modules'])
return mod
def addModules(parent, module):
for m in module['sub']:
submod = module['sub'][m]
s = devhelp.Subsection(m, submod['href'])
parent.addSub(s)
addModules(s, submod)
def readModuleCrumbs(file, modList):
html = HTML.parse(file)
# <li class="navelem"><a class="el" href="dir_e05d7e2b1ecd646af5bb94391405f3b5.html">modules</a></li><li class="navelem"><a class="el" href="dir_fd421517ec8f709274e931dda731313f.html">juce_core</a></li><li class="navelem"><a class="el" href="dir_0d31e411142695dc4add552e9ff0c68a.html">streams</a></li> </ul>
crumbs = html.iterfind("//li[@class='navelem']/a[@class='el']")
currMod = modList
# add the crumbs to the module list, adding each level if not
# there already
for c in crumbs:
name = c.text
if name not in currMod:
currMod[name] = {'name':name, 'href': c.attrib['href'], "sub": {}}
currMod = currMod[name]["sub"]
class JuceDoc(object):
def __init__(self, doc_src, doc_root):
self.dsrc = doc_src
self.db = devhelp.DevhelpBook(title = "JUCE 4.3.0 Reference Manual",
name = "juce-doc",
base = doc_root,
link = "index.html",
language = "c++")
mods = self.getModules()
self.db.addChapter(mods)
tf = pydevhelp.dox_tagfile.DoxygenTagFile(os.path.join(self.dsrc, 'juce.tag'), filterFunc=self.filterTags)
tf.extractKeywords( lambda kw: self.db.addKeyword(kw))
nsChap = devhelp.Subsection('Namespaces', 'index.html')
tf.extractNamespaces(lambda ns: nsChap.addSub(ns))
self.db.addChapter(nsChap)
classChap = devhelp.Subsection('Classes', 'classes.html')
tf.extractClasses(None, lambda s: classChap.addSub(s))
self.db.addChapter(classChap)
def filterTags(self, tree):
# the juce namespace is a bit wierd and just duplicates a subset of the classes
for remove in tree.xpath("/tagfile/compound[@kind='namespace' and name = 'juce']"):
remove.getparent().remove(remove)
# get rid of one of the duplicate littlfoot namespaces
for remove in tree.xpath("/tagfile/compound[@kind='namespace' and name = 'juce::littlefoot']"):
remove.getparent().remove(remove)
def getModules(self):
"""
These aren't in the tag file, and they don't even come out properly
        in Doxygen XML, so just manually mess with the files
"""
fileList = [os.path.join(self.dsrc, d) for d in os.listdir(self.dsrc) if d.startswith('dir_')];
mod = getModules(fileList)
return mod
def output(self, fn):
data = self.db.write(fn)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Convert JUCE Doxygen documentation into devhelp documentation')
parser.add_argument('-d', '--doc-src', metavar='DOCSRC',
type=str, required=True,
help='the root of existing generated JUCE Doxygen documentation (e.g. "doxygen/doc" under your JUCE path, or an installed location in /usr/share/doc)')
parser.add_argument('-r', '--doc-root', metavar='DOCROOT',
type=str, required=True,
help='the root of the documentation when installed (probably an installed location in /usr/share/doc)')
parser.add_argument('-o', '--output', metavar='OUTFILE',
type=str, default='juce-doc.devhelp2',
help='output devhelp2 file, default is current dir file called juce-doc.devhelp2')
args = parser.parse_args()
jd = JuceDoc(args.doc_src, args.doc_root)
jd.output(args.output)
``` |
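A small sketch of the breadcrumb extraction that `readModuleCrumbs` performs, run against a made-up HTML fragment instead of a generated Doxygen page:
```python
import lxml.html as HTML

# Made-up breadcrumb markup in the shape readModuleCrumbs() walks.
page = """
<ul>
  <li class="navelem"><a class="el" href="dir_aaa.html">modules</a></li>
  <li class="navelem"><a class="el" href="dir_bbb.html">juce_core</a></li>
  <li class="navelem"><a class="el" href="dir_ccc.html">streams</a></li>
</ul>
"""

html = HTML.fromstring(page)
for a in html.iterfind(".//li[@class='navelem']/a[@class='el']"):
    print(a.text, a.attrib['href'])
```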
{
"source": "johnbeckettn2e/sysrepo",
"score": 2
} |
#### File: tests/ConcurrentHelpers/TestManager.py
```python
from __future__ import print_function
# -*- coding: utf-8 -*-
__author__ = "<NAME> <<EMAIL>>, <NAME> <<EMAIL>>"
__copyright__ = "Copyright 2016, Cisco Systems, Inc."
__license__ = "Apache 2.0"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Process, Manager
import os
import sys
import signal
class TestManager:
"""
Class manages the testers and helps them to execute steps in synchronized order.
Each tester is executed in a separeted process.
"""
def __init__(self):
self.manager = Manager()
self.lock = self.manager.Lock()
self.process_done = self.manager.Semaphore(0)
self.queue = self.manager.Queue()
self.sub_proc = self.manager.Queue()
self._setup()
def _setup(self):
self.testers = []
self.next_steps = []
self.proc_ids = []
self.subprocToKill = []
def add_tester(self, tester):
self.testers.append(tester)
def start_processes(self, rand_sleep):
"""create process for each tester"""
self.pids = self.manager.Array('l', range(len(self.testers)))
for id in range(len(self.testers)):
self.process_done.release()
next_s = self.manager.Semaphore(0)
p = Process(target=self.testers[id].run, args=(self.process_done, next_s, rand_sleep, self.lock, self.sub_proc, self.pids, id, self.queue))
self.proc_ids.append(p)
self.next_steps.append(next_s)
p.start()
self.pids[id] = p.pid
def wait_for_processes(self):
"""wait for all process to finish"""
for p in self.proc_ids:
p.join()
p.terminate()
self.lock.acquire()
self.lock.release()
def run(self, rand_sleep=True):
"""Execute tester steps"""
self.start_processes(rand_sleep)
step = -1
        will_continue = list(range(len(self.next_steps)))
        wait_for = list(range(len(self.next_steps)))
while True:
if step >= 0:
print("\n\n=================== TestManager step", step, "testers:", wait_for, file=sys.stderr)
for _ in wait_for:
self.process_done.acquire()
if step >= 0:
proc, name, status = self.queue.get()
print(("Received ", proc, name, status), file=sys.stderr)
if status == True:
will_continue.append(proc)
elif isinstance(status, BaseException):
print("Error in tester", proc, name, "step", step)
for p in self.proc_ids:
p.terminate()
while not self.sub_proc.empty():
pid = self.sub_proc.get()
try:
os.kill(pid, signal.SIGKILL)
except:
pass
raise status
if len(will_continue) == 0:
break
for id in will_continue:
self.next_steps[id].release()
wait_for = will_continue[:]
will_continue = []
step += 1
self.wait_for_processes()
``` |
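A minimal standalone sketch of the semaphore handshake `TestManager` relies on (the worker signals a finished step, the manager releases the next one); the worker function and step names are made up and this is not the real `Tester` API:
```python
from multiprocessing import Process, Semaphore

def worker(step_done, next_step, steps):
    for name in steps:
        print('worker did', name)
        step_done.release()   # tell the manager this step is finished
        next_step.acquire()   # wait until the manager says "go on"

if __name__ == '__main__':
    step_done = Semaphore(0)
    next_step = Semaphore(0)
    p = Process(target=worker, args=(step_done, next_step, ['step-0', 'step-1', 'step-2']))
    p.start()
    for _ in range(3):
        step_done.acquire()   # wait for the worker to finish its step
        next_step.release()   # let it start the next one
    p.join()
```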
{
"source": "JohnBee/SnakeAi",
"score": 4
} |
#### File: SnakeAi/snake_engine/engine.py
```python
from random import randint
class Engine:
def __init__(self, world_width=10, world_height=10):
'''
Initialise the snake engine where all game operations will take place.
:param world_width: Width of the game world the snake should roam
:param world_height: Height of the game world the snake should roam
'''
self.world_width = world_width
self.world_height = world_height
self.food = []
## Initialise the snake
self.snake = Snake(world_width // 2, world_height // 2, 4)
self.score = 0
self.game_end = False
# place the first piece of food
self.add_food()
def reset(self):
self.food = []
## Initialise the snake
self.snake = Snake(self.world_width // 2, self.world_height // 2, 4)
self.score = 0
self.game_end = False
# place the first piece of food
self.add_food()
def make_move(self, input_move):
old_head = (self.snake.head[0], self.snake.head[1])
if input_move == 0:
self.snake.move_forward(self)
elif input_move == 1:
self.snake.turn_left(self)
elif input_move == 2:
self.snake.turn_right(self)
# add food if it's been eaten
reward = 0
if not self.food:
self.score += 1
reward += 10
if not self.add_food():
                self.game_end = True
# return reward for making this move
# if closer to food, increase reward, else decrease
new_head = (self.snake.head[0], self.snake.head[1])
food = (self.food[0][0], self.food[0][1])
# taxicab geometry
old_dist = abs(food[0] - old_head[0]) + abs(food[1] - old_head[1])
new_dist = abs(food[0] - new_head[0]) + abs(food[1] - new_head[1])
if new_dist < old_dist:
reward += 1
else:
reward -= 1
return reward
def export_game_state(self):
'''
Exports the game state
:return: a dictionary with set values representing the game state
'''
return {"score": self.score,
"world_width": self.world_width,
"world_height": self.world_height,
"food": self.food,
"snake_direction": self.snake.direction,
"snake_body": self.snake.body,
"snake_head": self.snake.head,
"snake_size": self.snake.length,
"game_end": self.game_end}
def import_game_state(self, game_state):
'''
Import a game state to load
:param game_state: a dictionary with the defined
:return: True or false depending on if it was successful in loading the game state
'''
try:
self.score = game_state["score"]
self.world_width = game_state["world_width"]
self.world_height = game_state["world_height"]
self.food = game_state["food"]
self.snake.body = game_state["snake_body"]
self.snake.head = game_state["snake_head"]
self.snake.direction = game_state["snake_direction"]
            self.snake.length = game_state["snake_size"]
self.game_end = game_state["game_end"]
except KeyError as error:
print("Missing game state argument!")
print(error)
return False
return True
def add_food(self):
'''
Add food to the game world, possible locations are only where the snake isn't
:return: True or False depending if food was able to be added, if it false then the game must be complete.
'''
possible_locations = [(x, y) for x in range(self.world_width) for y in range(self.world_height)]
for s_not_possible in self.snake.body + self.food:
if s_not_possible in possible_locations:
possible_locations.remove(s_not_possible)
if not possible_locations:
return False
else:
# select a possible location
self.food.append(possible_locations[randint(0, len(possible_locations) - 1)])
return True
def output_world(self):
'''
Output the game world as a list of list of characters to the parsed by AI or printed
:return:
'''
out = []
for y in range(self.world_height):
out.append([])
for x in range(self.world_width):
if (x, y) not in self.food and (x, y) not in self.snake.body:
out[-1].append(".")
elif (x, y) in self.food:
out[-1].append("o")
elif (x, y) in self.snake.body:
if (x, y) == self.snake.body[0]:
if self.snake.direction == 0:
out[-1].append(">")
if self.snake.direction == 1:
out[-1].append("^")
if self.snake.direction == 2:
out[-1].append("<")
if self.snake.direction == 3:
out[-1].append("v")
else:
out[-1].append("#")
return out
def pretty_print_world(self):
for y in self.output_world():
print(" ".join(y))
class Snake:
def __init__(self, pos_x=0, pos_y=0, length=3):
# init tail of given length
self.head = (pos_x, pos_y)
self.direction = 0 # 0 = right, 1 = up, 2 = left, 3 = down
self.length = length
self.body = self.gen_tail(pos_x, pos_y, self.length)
def turn_left(self, engine):
self.direction = (self.direction + 1) % 4
self.move_forward(engine)
def turn_right(self, engine):
self.direction = (self.direction - 1) % 4
self.move_forward(engine)
def move_forward(self, engine):
if self.direction == 0:
self.body = [(self.head[0] + 1, self.head[1])] + self.body
elif self.direction == 1:
self.body = [(self.head[0], self.head[1] - 1)] + self.body
elif self.direction == 2:
self.body = [(self.head[0] - 1, self.head[1])] + self.body
elif self.direction == 3:
self.body = [(self.head[0], self.head[1] + 1)] + self.body
# check if head not on food then don't increase snake length
if self.body[0] not in engine.food:
self.body.pop()
else:
# eat the food
engine.food.remove(self.body[0])
engine.score += 1
self.head = self.body[0]
# check if dead
if len([a for a in self.body if a == self.head]) > 1:
engine.game_end = True
if self.head[1] < 0 or self.head[0] < 0 or self.head[0] >= engine.world_width or self.head[1] >= engine.world_height:
engine.game_end = True
@staticmethod
def gen_tail(head_x, head_y, length=3):
return [(x, head_y) for x in range(head_x, head_x - length, -1)]
def play_game():
e = Engine()
# draw game state
while not e.game_end:
e.pretty_print_world()
move = None
while move is None:
move = int(input("Enter 0 or 1 or 2 for no change, turn left or turn right: "))
if move in [0,1,2]:
e.make_move(move)
else:
print(f"Invalid move: {move}")
move = None
if __name__ == "__main__":
play_game()
``` |
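A possible driver for the engine above: a bot that plays random moves until the game ends. It assumes `engine.py` is importable from the `snake_engine` directory:
```python
import random
from engine import Engine  # assumes snake_engine/engine.py is on the path

e = Engine(world_width=10, world_height=10)
total_reward = 0
while not e.game_end:
    move = random.choice([0, 1, 2])  # 0 = forward, 1 = turn left, 2 = turn right
    total_reward += e.make_move(move)
print('score:', e.score, 'total reward:', total_reward)
```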