metadata (dict) | text (stringlengths 60–3.49M)
---|---
{
"source": "jgliss/pydoas",
"score": 2
}
|
#### File: pydoas/pydoas/inout.py
```python
from __future__ import unicode_literals
from os.path import join
from os import listdir
from collections import OrderedDict as od
def get_data_dirs():
"""Get directories containing example package data
:returns: list of package subfolders containing data files
"""
from pydoas import _LIBDIR
return listdir(join(_LIBDIR, "data"))
def get_data_files(which = "doasis"):
"""Get all example result files from package data"""
from pydoas import _LIBDIR
if which == "doasis":
p = join(_LIBDIR, join("data", "doasis_resultfiles"))
elif which == "fake":
p = join(_LIBDIR, join("data", "fake_resultfiles"))
else:
raise ValueError("No example resultfiles available for ID %s, "
"choose from *fake* or *doasis*")
return listdir(p), p
def get_result_type_ids():
"""Read file import_info.txt and find all valid import types"""
from pydoas import _LIBDIR
with open(join(_LIBDIR, join("data", "import_info.txt"))) as f:
types = []
for line in f:
spl = line.split(":", 1)
if spl[0] == "type":
tp = spl[1].split("#")[0].strip()
if len(tp) > 0:
types.append(tp)
return types
def import_type_exists(type_id):
"""Checks if data import type exists in import_info.txt
:param str type_id: string ID to be searched in import_info.txt
"""
return type_id in get_result_type_ids()
def get_import_info(resulttype="doasis"):
"""Try to load DOAS result import specification for default type
Import specifications for a specified data type (see package data
file "import_info.txt" for available types, use the instructions in
this file to create your own import setup if necessary)
:param str resulttype: name of result type (field "type" in
"import_info.txt" file)
"""
from pydoas import _LIBDIR
from codecs import decode
dat = od()
with open(join(_LIBDIR, join("data", "import_info.txt"))) as f:
found = 0
for line in f:
if "ENDTYPE" in line and found:
print(dat)
return dat
spl = line.split(":", 1)
if spl[0] == "type" and spl[1].split("#")[0].strip() ==\
resulttype:
found = 1
if found:
if not any([line[0] == x for x in["#","\n"]]):
k = spl[0].strip()
d = [x.strip() for x in spl[1].split("#")[0].split(',')]
if k == "time_str_formats":
dat[k] = d
elif k == "delim":
print(decode(d[0],"unicode-escape"))
#dat[k] = str(d[0].decode("unicode_escape"))
dat[k] = decode(d[0], "unicode-escape")
else:
try:
val = int(d[0])
except ValueError:
val = str(d[0])
dat[k] = val
raise IOError("Info for type %s could not be found" %resulttype)
def import_info_file():
"""Return path to supplementary file import_info.txt"""
from pydoas import _LIBDIR
return join(_LIBDIR, join("data", "import_info.txt"))
def _fake_import_specs():
"""Returns dictionary for adding a new fake import type"""
return od([("type", "fake"),
("access_type", "col_index"),
("file_type", "csv"),
("time_str_formats", "%Y%m%d%H%M"),
("delim", ";"),
("start", 0), #col num
("stop", 1), #col num
("bla" , "Blub"), #invalid (for test purpose)
("num_scans", 4)]) #colnum
def write_import_info_to_default_file(import_dict):
try:
if import_type_exists(import_dict["type"]):
raise TypeError("Import specifications for ID %s already exists in "
"file import_info.txt, please change ID and try again"
%import_dict["type"])
except KeyError:
raise KeyError("Please specify type in dictionary")
keys = list(get_import_info().keys())
p = import_info_file()
print(("Writing to %s" %p))
with open(p, "a") as myfile:
myfile.write("\n\nNEWTYPE\n")
for k, v in list(import_dict.items()):
if k in keys:
print(("ADDING %s: %s" %(k, v)))
if isinstance(v, list):
s = str(v[0])
for i in range(1, len(v)):
s += ",%s" %v[i]
myfile.write("%s:%s\n" %(k, s))
else:
myfile.write("%s:%s\n" %(k, v))
else:
print(("INVALID KEY (not added) %s: %s" %(k,v)))
myfile.write("ENDTYPE")
myfile.close()
```
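A minimal usage sketch for the helpers above, assuming pydoas is installed with its bundled "fake" example data; `"my_type"` and its values are invented for illustration, and `write_import_info_to_default_file` appends them to the packaged import_info.txt:
```python
from collections import OrderedDict as od

from pydoas import inout

# Inspect the bundled example data and the import specification for the
# "fake" result type that ships with the package.
files, folder = inout.get_data_files("fake")
specs = inout.get_import_info("fake")
print(folder, files, specs)

# Register a new (hypothetical) import type; only keys that also appear in
# the default specification are written, everything else is reported as
# an invalid key by write_import_info_to_default_file().
new_type = od([("type", "my_type"),
               ("access_type", "col_index"),
               ("file_type", "csv"),
               ("delim", ";"),
               ("time_str_formats", ["%Y%m%d%H%M"])])
if not inout.import_type_exists("my_type"):
    inout.write_import_info_to_default_file(new_type)
```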
|
{
"source": "jg-LitFrame/Lit_Client",
"score": 2
}
|
#### File: tools/xresconv/xresconv-cli.py
```python
import glob
import io
import locale
import os
import platform
import re
import shutil
import string
import sys
import tempfile
# ==================================================================================
import threading
import xml.etree.ElementTree as ET
from multiprocessing import cpu_count
from optparse import OptionParser
from subprocess import PIPE, STDOUT, Popen
from print_color import cprintf_stderr, cprintf_stdout, print_style
console_encoding = sys.getfilesystemencoding()
if 'utf-8' != sys.getdefaultencoding().lower():
try:
sys.setdefaultencoding('utf-8')
except Exception:
reload(sys)
sys.setdefaultencoding('utf-8')
xconv_options = {
'version': '1.1.0.1',
'conv_list': None,
'real_run': True,
'args': {},
'ext_args_l1': [],
'ext_args_l2': [],
'work_dir': '.',
'xresloader_path': 'xresloader.jar',
'item': [],
'parallelism': int((cpu_count() - 1) / 2) + 1,
'java_options': [],
'default_scheme': {}
}
# Default to two worker threads; in practice the JVM's runtime optimizations gain more than extra threads would
if xconv_options['parallelism'] > 2:
xconv_options['parallelism'] = 2
xconv_xml_global_nodes = []
xconv_xml_list_item_nodes = []
usage = "usage: %prog [options...] <convert list file> [xresloader options...]"
parser = OptionParser(usage)
parser.disable_interspersed_args()
parser.add_option(
"-v",
"--version",
action="store_true",
help="show version and exit",
dest="version",
default=False)
parser.add_option(
"-s",
"--scheme-name",
action="append",
help="only convert schemes with name <scheme name>",
metavar="<scheme>",
dest="rule_schemes",
default=[])
parser.add_option(
"-t",
"--test",
action="store_true",
help="test run and show cmds",
dest="test",
default=False)
parser.add_option(
"-p",
"--parallelism",
action="store",
help="set parallelism task number(default:" +
str(xconv_options['parallelism']) + ')',
metavar="<number>",
dest="parallelism",
type="int",
default=xconv_options['parallelism'])
parser.add_option(
"-j",
"--java-option",
action="append",
help="add java options to command(example: Xmx=2048m)",
metavar="<java option>",
dest="java_options",
default=[])
(options, left_args) = parser.parse_args()
if options.version:
print(xconv_options['version'])
exit(0)
def print_help_msg(err_code):
parser.print_help()
exit(err_code)
if 0 == len(left_args):
print_help_msg(-1)
xconv_options['conv_list'] = left_args.pop(0)
xconv_options['ext_args_l2'] = left_args
# ========================================= Global configuration parsing =========================================
''' Read an XML file '''
def load_xml_file(file_path):
try:
xml_doc = ET.parse(file_path)
except Exception as e:
print(e)
exit(-2)
root_node = xml_doc.getroot()
if root_node is None:
print('[ERROR] root node not found in xml')
print_help_msg(-3)
# Enumerate <include> files
include_nodes = root_node.findall("./include")
if include_nodes and len(include_nodes) > 0:
dir_prefix = os.path.dirname(file_path)
for include_node in include_nodes:
include_file_path = include_node.text
if include_file_path and len(include_file_path) > 1:
if include_file_path[0] != '/' and include_file_path[1] != ':':
include_file_path = os.path.join(dir_prefix,
include_file_path)
load_xml_file(include_file_path)
global_nodes = root_node.findall("./global")
if global_nodes and len(global_nodes) > 0:
xconv_xml_global_nodes.extend(global_nodes)
list_item_nodes = root_node.findall("./list/item")
if list_item_nodes and len(list_item_nodes) > 0:
xconv_xml_list_item_nodes.extend(list_item_nodes)
load_xml_file(xconv_options['conv_list'])
# Parse/merge <global> configuration
def load_global_options(gns):
for global_node in gns:
for global_option in global_node:
tag_name = global_option.tag.lower()
text_value = global_option.text
if text_value:
trip_value = text_value.strip()
else:
trip_value = None
if not trip_value:
continue
if tag_name == 'work_dir':
xconv_options['work_dir'] = text_value
elif tag_name == 'xresloader_path':
xconv_options['xresloader_path'] = text_value
elif tag_name == 'proto':
xconv_options['args']['-p'] = trip_value
elif tag_name == 'output_type':
xconv_options['args']['-t'] = trip_value
elif tag_name == 'proto_file':
xconv_options['args']['-f'] = '"' + text_value + '"'
elif tag_name == 'output_dir':
xconv_options['args']['-o'] = '"' + text_value + '"'
elif tag_name == 'data_src_dir':
xconv_options['args']['-d'] = '"' + text_value + '"'
elif tag_name == 'rename':
xconv_options['args']['-n'] = '"' + trip_value + '"'
elif tag_name == 'option':
xconv_options['ext_args_l1'].append(trip_value)
elif tag_name == 'java_option':
xconv_options['java_options'].append(trip_value)
elif tag_name == 'default_scheme':
if 'name' in global_option.attrib:
scheme_key = global_option.attrib['name']
if scheme_key in xconv_options['default_scheme']:
xconv_options['default_scheme'][scheme_key].append(
trip_value)
else:
xconv_options['default_scheme'][
scheme_key] = [text_value]
else:
print('[ERROR] unknown global configure ' + tag_name)
if xconv_xml_global_nodes and len(xconv_xml_global_nodes) > 0:
load_global_options(xconv_xml_global_nodes)
# ----------------------------------------- Global configuration parsing -----------------------------------------
conv_list_dir = os.path.dirname(xconv_options['conv_list'])
if conv_list_dir:
os.chdir(conv_list_dir)
os.chdir(xconv_options['work_dir'])
cprintf_stdout([print_style.FC_YELLOW],
'[NOTICE] start to run conv cmds on dir: {0}' + os.linesep,
os.getcwd())
if not os.path.exists(xconv_options['xresloader_path']):
print(os.getcwd())
cprintf_stderr([print_style.FC_RED],
'[ERROR] xresloader not found.({0})' + os.linesep,
xconv_options['xresloader_path'])
exit(-4)
# ========================================= Conversion list parsing =========================================
# Parse/merge conversion item configuration
def load_list_item_nodes(lis):
for item in lis:
conv_item_obj = {
'file': False,
'scheme': False,
'options': [],
'enable': False,
'scheme_data': {}
}
if 'file' in item.attrib:
conv_item_obj['file'] = item.attrib['file']
if 'scheme' in item.attrib:
conv_item_obj['scheme'] = item.attrib['scheme']
# Item-local options
for local_option in item.findall('./option'):
text_value = local_option.text
if text_value:
trip_value = text_value.strip()
else:
trip_value = None
if not trip_value:
continue
conv_item_obj['options'].append(trip_value)
# Item-local scheme settings
for local_option in item.findall('./scheme'):
text_value = local_option.text
if text_value:
trip_value = text_value.strip()
else:
trip_value = None
if not trip_value:
continue
if 'name' in local_option.attrib:
scheme_key = local_option.attrib['name']
if scheme_key and scheme_key in conv_item_obj['scheme_data']:
conv_item_obj['scheme_data'][scheme_key].append(text_value)
else:
conv_item_obj['scheme_data'][scheme_key] = [text_value]
for key in xconv_options['default_scheme']:
if key not in conv_item_obj['scheme_data']:
conv_item_obj['scheme_data'][key] = xconv_options[
'default_scheme'][key]
# Conversion rules: only enable items matching the requested schemes
if not options.rule_schemes or 0 == len(
options.rule_schemes) or conv_item_obj[
'scheme'] in options.rule_schemes:
conv_item_obj['enable'] = True
xconv_options['item'].append(conv_item_obj)
if xconv_xml_list_item_nodes and len(xconv_xml_list_item_nodes) > 0:
load_list_item_nodes(xconv_xml_list_item_nodes)
# ----------------------------------------- Conversion configuration parsing -----------------------------------------
# ========================================= Generate conversion commands =========================================
##### Global command and configuration
global_cmd_prefix = ''
for global_optk in xconv_options['args']:
global_optv = xconv_options['args'][global_optk]
global_cmd_prefix += ' ' + global_optk + ' ' + global_optv
if len(xconv_options['ext_args_l1']) > 0:
global_cmd_prefix += ' ' + ' '.join(xconv_options['ext_args_l1'])
##### Command line arguments
global_cmd_suffix = ''
if len(xconv_options['ext_args_l2']) > 0:
global_cmd_suffix += ' ' + ' '.join(xconv_options['ext_args_l2'])
cmd_list = []
for conv_item in xconv_options['item']:
if not conv_item['enable']:
continue
item_cmd_options = ''
if len(conv_item['options']) > 0:
item_cmd_options += ' ' + ' '.join(conv_item['options'])
if conv_item['file'] and conv_item['scheme']:
cmd_scheme_info = ' -s "{:s}" -m "{:s}"'.format(conv_item['file'],
conv_item['scheme'])
else:
cmd_scheme_info = ''
for key in conv_item['scheme_data']:
for opt_val in conv_item['scheme_data'][key]:
cmd_scheme_info += ' -m "{:s}={:s}"'.format(key, opt_val)
run_cmd = global_cmd_prefix + item_cmd_options + cmd_scheme_info + global_cmd_suffix
cmd_list.append(run_cmd)
cmd_list.reverse()
# ----------------------------------------- Generate conversion commands -----------------------------------------
exit_code = 0
all_worker_thread = []
cmd_picker_lock = threading.Lock()
def worker_func(idx):
global exit_code
java_options = ""
if len(options.java_options) > 0:
java_options += ' "-{0}"'.format('" "-'.join(options.java_options))
if len(xconv_options['java_options']) > 0:
java_options += ' "{0}"'.format('" "'.join(xconv_options[
'java_options']))
pexec = None
if not options.test:
pexec = Popen(
'java {0} -jar "{1}" --stdin'.format(
java_options, xconv_options['xresloader_path']),
stdin=PIPE,
stdout=None,
stderr=None,
shell=True)
while True:
cmd_picker_lock.acquire()
if len(cmd_list) <= 0:
cmd_picker_lock.release()
break
pexec.stdin.write(cmd_list.pop().encode(console_encoding))
cmd_picker_lock.release()
pexec.stdin.write(os.linesep.encode(console_encoding))
pexec.stdin.flush()
pexec.stdin.close()
cmd_exit_code = pexec.wait()
exit_code = exit_code + cmd_exit_code
else:
this_thd_cmds = []
while True:
cmd_picker_lock.acquire()
if len(cmd_list) <= 0:
cmd_picker_lock.release()
break
# Python 2 must encode strings to bytes here or the output is garbled
# Python 3 must not call encode() because that would turn the string into bytes
if sys.version_info.major < 3:
this_thd_cmds.append(cmd_list.pop().encode(console_encoding))
else:
this_thd_cmds.append(cmd_list.pop())
cmd_picker_lock.release()
cprintf_stdout([print_style.FC_GREEN], (
'java {0} -jar "{1}" --stdin' + os.linesep + '\t>{2}' + os.linesep
).format(java_options, xconv_options['xresloader_path'],
(os.linesep + '\t>').join(this_thd_cmds)))
for i in range(0, options.parallelism):
this_worker_thd = threading.Thread(target=worker_func, args=[i])
this_worker_thd.start()
all_worker_thread.append(this_worker_thd)
# Wait for all workers to finish
for thd in all_worker_thread:
thd.join()
# ----------------------------------------- Run the actual conversion -----------------------------------------
cprintf_stdout([print_style.FC_MAGENTA],
'[INFO] all jobs done. {0} job(s) failed.{1}'.format(
exit_code, os.linesep))
exit(exit_code)
```
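For reference, a sketch of the conversion-list layout that `load_xml_file` and the parsers above appear to expect, reconstructed from the `findall("./include")`, `findall("./global")` and `findall("./list/item")` calls; the tag values and the file/scheme names below are placeholders, not real project settings:
```python
import xml.etree.ElementTree as ET

SAMPLE_CONV_LIST = """
<root>
    <global>
        <work_dir>.</work_dir>
        <xresloader_path>xresloader.jar</xresloader_path>
        <proto>protobuf</proto>
        <output_type>bin</output_type>
        <data_src_dir>xls</data_src_dir>
        <output_dir>build</output_dir>
    </global>
    <list>
        <item file="sample.xlsx" scheme="scheme_sheet" />
    </list>
</root>
"""

root = ET.fromstring(SAMPLE_CONV_LIST)
print(len(root.findall("./global")))      # 1 global block
print(len(root.findall("./list/item")))   # 1 conversion item
```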
|
{
"source": "JGLTechnologies/aiohttp-ratelimiter",
"score": 2
}
|
#### File: aiohttp-ratelimiter/aiohttplimiter/limiter.py
```python
from functools import wraps
import json
from typing import Callable, Awaitable, Union, Optional
import asyncio
from aiohttp.web import Request, Response
from limits.aio.storage import Storage, MemoryStorage
from limits.aio.strategies import MovingWindowRateLimiter
from limits import RateLimitItemPerYear, RateLimitItemPerMonth, RateLimitItemPerDay, RateLimitItemPerHour, \
RateLimitItemPerMinute, RateLimitItemPerSecond
def default_keyfunc(request: Request) -> str:
"""
Returns the user's IP
"""
ip = request.headers.get(
"X-Forwarded-For") or request.remote or "127.0.0.1"
ip = ip.split(",")[0]
return ip
class Allow:
def __init__(self) -> None:
pass
class RateLimitExceeded:
def __init__(self, detail: str) -> None:
self._detail = detail
@property
def detail(self):
return self._detail
class BaseRateLimitDecorator:
def __init__(self, db: Storage, path_id: str, keyfunc: Callable,
moving_window: MovingWindowRateLimiter, ratelimit: str,
exempt_ips: Optional[set] = None, error_handler: Optional[Union[Callable, Awaitable]] = None) -> None:
self.exempt_ips = exempt_ips or set()
calls, period = ratelimit.split("/")
self._calls = calls
calls = int(calls)
period = int(period)
assert period > 0
assert calls > 0
self.period = period
self.keyfunc = keyfunc
self.calls = calls
self.db = db
self.error_handler = error_handler
self.path_id = path_id
self.moving_window = moving_window
if self.period >= 31_536_000:
self.item = RateLimitItemPerYear(self.calls, self.period / 31_536_000)
elif self.period >= 2_628_000:
self.item = RateLimitItemPerMonth(self.calls, self.period / 2_628_000)
elif self.period >= 86400:
self.item = RateLimitItemPerDay(self.calls, self.period / 86400)
elif self.period >= 3600:
self.item = RateLimitItemPerHour(self.calls, self.period / 3600)
elif self.period >= 60:
self.item = RateLimitItemPerMinute(self.calls, self.period / 60)
else:
self.item = RateLimitItemPerSecond(self.calls, self.period)
def __call__(self, func: Union[Callable, Awaitable]) -> Awaitable:
@wraps(func)
async def wrapper(request: Request) -> Response:
key = self.keyfunc(request)
db_key = f"{key}:{self.path_id or request.path}"
if isinstance(self.db, MemoryStorage):
if not await self.db.check():
await self.db.reset()
if asyncio.iscoroutinefunction(func):
# Checks if the user's IP is in the set of exempt IPs
if default_keyfunc(request) in self.exempt_ips:
return await func(request)
# Returns a response if the number of calls exceeds the max amount of calls
if not await self.moving_window.test(self.item, db_key):
if self.error_handler is not None:
if asyncio.iscoroutinefunction(self.error_handler):
r = await self.error_handler(request, RateLimitExceeded(
**{"detail": f"{self._calls} request(s) per {self.period} second(s)"}))
if isinstance(r, Allow):
return await func(request)
return r
else:
r = self.error_handler(request, RateLimitExceeded(
**{"detail": f"{self._calls} request(s) per {self.period} second(s)"}))
if isinstance(r, Allow):
return await func(request)
return r
data = json.dumps(
{"error": f"Rate limit exceeded: {self._calls} request(s) per {self.period} second(s)"})
response = Response(
text=data, content_type="application/json", status=429)
response.headers.add(
"error", f"Rate limit exceeded: {self._calls} request(s) per {self.period} second(s)")
return response
# Increments the number of calls by 1
await self.moving_window.hit(self.item, db_key)
# Returns normal response if the user did not go over the rate limit
return await func(request)
else:
# Checks if the user's IP is in the set of exempt IPs
if default_keyfunc(request) in self.exempt_ips:
return func(request)
# Returns a response if the number of calls exceeds the max amount of calls
if not await self.moving_window.test(self.item, db_key):
if self.error_handler is not None:
if asyncio.iscoroutinefunction(self.error_handler):
r = await self.error_handler(request, RateLimitExceeded(
**{"detail": f"{self._calls} request(s) per {self.period} second(s)"}))
if isinstance(r, Allow):
return func(request)
return r
else:
r = self.error_handler(request, RateLimitExceeded(
**{"detail": f"{self._calls} request(s) per {self.period} second(s)"}))
if isinstance(r, Allow):
return func(request)
return r
data = json.dumps(
{"error": f"Rate limit exceeded: {self._calls} request(s) per {self.period} second(s)"})
response = Response(
text=data, content_type="application/json", status=429)
response.headers.add(
"error", f"Rate limit exceeded: {self._calls} request(s) per {self.period} second(s)")
return response
# Increments the number of calls by 1
await self.moving_window.hit(self.item, db_key)
# Returns normal response if the user did not go over the rate limit
return func(request)
return wrapper
```
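A sketch of how the decorator above might be wired into an aiohttp app, using the `limits` async backends from the imports; the project presumably ships a higher-level wrapper around this class, so treat the route, limit string, and handler below as illustrative only:
```python
from aiohttp import web
from limits.aio.storage import MemoryStorage
from limits.aio.strategies import MovingWindowRateLimiter

from aiohttplimiter.limiter import BaseRateLimitDecorator, default_keyfunc

storage = MemoryStorage()
window = MovingWindowRateLimiter(storage)

# Allow at most 3 requests per 10 seconds per client IP on this route.
limit = BaseRateLimitDecorator(
    db=storage,
    path_id="/",
    keyfunc=default_keyfunc,
    moving_window=window,
    ratelimit="3/10",
)

@limit
async def handler(request: web.Request) -> web.Response:
    return web.json_response({"ok": True})

app = web.Application()
app.router.add_get("/", handler)

if __name__ == "__main__":
    web.run_app(app)
```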
|
{
"source": "jgm48/ia-flood-risk-project",
"score": 4
}
|
#### File: ia-flood-risk-project/floodsystem/geo.py
```python
from floodsystem.stationdata import build_station_list
from .utils import sorted_by_key # noqa
from haversine import haversine
# for task 1B
def stations_by_distance(stations, p):
"Takes a list of stations and a coordinate p. returns a list of (station, distance) tuples in order of distance"
# Make an empty list for the (station, distance) tuples to go into
list_of_tuples = []
for i in stations:
distance = haversine(i.coord, p)
station_distance_tuple = (i, distance)
list_of_tuples.append(station_distance_tuple)
return sorted_by_key(list_of_tuples, 1)
# for task 1C
def stations_within_radius(stations, centre, r):
"""Returns a list of stations within a radius of a given coordinate"""
# Create an empty list for the stations within the radius to go in to
list_of_stations_in_radius = []
# check if the distance between the station and the centre is < r
for i in stations:
if haversine(i.coord, centre) < r:
list_of_stations_in_radius.append(i.name)
return list_of_stations_in_radius
# for task 1D
def rivers_with_station(stations):
"""Return a set with the names of the rivers with a monitoring station"""
# make an empty list for the items to go into
river_names = []
for i in stations:
river_names.append(i.river)
# convert to a set to remove duplicates
river_names_set = set(river_names)
return river_names_set
# for task 1D
def stations_by_river(stations):
"""Return a dictionary that maps river names to a list of station objects on a given river"""
# create an empty dictionary
river_dict = {}
# add river names as keys with empty values
for i in rivers_with_station(stations):
river_dict.update({i: []})
# add station names as values to corresponding keys in dictionary
for i in stations:
for j in rivers_with_station(stations):
if i.river == j:
river_dict[j].append(i.name)
return river_dict
'''TASK 1E'''
def rivers_by_station_number(stations, N):
'''Returns a list of tuples containing river name and number of stations, sorted by number of stations'''
# Get a list of rivers with at least one station
valid_rivers = rivers_with_station(stations)
# Get the dictionary from above
river_dict = stations_by_river(stations)
# List of tuples (river name, number of stations)
river_number_list = []
# Iterate through the list of all rivers, and find how many stations they each have
for name, stations in river_dict.items():
river_station_tuple = (name, len(stations))
# I changed this from extend to append, and it worked instantly
river_number_list.append(river_station_tuple)
# Check N is a valid number
if type(N) != int:
raise TypeError("N must be an integer")
elif N > len(river_number_list):
raise ValueError("N must be less than the total number of rivers with at least one monitoring station")
else:
pass
# Sort list by number of stations descending
river_number_list = sorted_by_key(river_number_list, 1, reverse=True)
# Final list for holding number of stations for each river
river_output_list = river_number_list[:N]
# Check to see if next key has same value
if river_number_list[N][1] != river_number_list[N-1][1]:
return river_output_list
else:
for i in range(N, len(river_number_list)):
if river_number_list[i][1] == river_number_list[i-1][1]:
river_output_list.append(river_number_list[i])
else:
break
return river_output_list
```
#### File: jgm48/ia-flood-risk-project/Task1C.py
```python
from floodsystem.stationdata import build_station_list
from floodsystem.geo import stations_within_radius
def run():
# Build list of stations
stations = build_station_list()
# Cambridge town centre
centre = (52.2053, 0.1218)
# Radius = 10km
r = 10.0
cambridge_stations = stations_within_radius(stations, centre, r)
print("Stations within 10km of Cambridge town centre: ")
print(sorted(cambridge_stations))
if __name__ == "__main__":
print("*** Task 1C: CUED Part IA Flood Warning System ***")
run()
```
#### File: jgm48/ia-flood-risk-project/Task1D.py
```python
from floodsystem.stationdata import build_station_list
from floodsystem.geo import rivers_with_station, stations_by_distance, stations_by_river
def run():
# build a list of stations
stations = build_station_list()
# build list of rivers
rivers = rivers_with_station(stations)
# print no. of rivers
print(str(len(rivers)) + " rivers have at least one monitoring station")
# sort rivers alphabetically, print the first 10
sorted_rivers = sorted(rivers)
print("The first 10 of these rivers in alphabetical order are:",
sorted_rivers[0:10])
# Sort stations for 3 rivers alphabetically, print them
print("The stations on the River Aire are: ",
(sorted((stations_by_river(stations))["River Aire"])))
print("The stations on the River Cam are: ",
(sorted((stations_by_river(stations))["River Cam"])))
print("The stations on the River Thames are: ",
(sorted((stations_by_river(stations))["River Thames"])))
if __name__ == "__main__":
print("*** Task 1D: CUED Part IA Flood Warning System ***")
run()
```
#### File: jgm48/ia-flood-risk-project/Task2C.py
```python
from floodsystem.flood import stations_highest_rel_level
from floodsystem.stationdata import build_station_list, update_water_levels
def run():
"""Requirements for Task 2C"""
# Build station objects and update their levels
stations = build_station_list()
update_water_levels(stations)
# Print in descending order 10 rivers with highest levels
N = 10
highest_n = stations_highest_rel_level(stations, N)
for i in highest_n:
print(i[0].name, i[1])
if __name__ == "__main__":
print("*** Task 2C: CUED Part IA Flood Warning System ***")
run()
```
#### File: jgm48/ia-flood-risk-project/Task2F.py
```python
from floodsystem.stationdata import update_water_levels, build_station_list
from floodsystem.flood import stations_highest_rel_level
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.plot import plot_water_levels, plot_water_level_with_fit
import datetime
def run():
"""Requirements for Task 2F"""
#build and update list of station objects
stations = build_station_list()
update_water_levels(stations)
# get the 5 stations where the water level is highest
highest = stations_highest_rel_level(stations, 5)
dt = 2 # number of days
#cannot get station names directly from list of tuples
# make list of station ids
high_station_id = []
for i in range(5):
high_station_tuple = (highest[i][0]).measure_id
high_station_id.append(high_station_tuple)
# make list of station objects
high_objects = []
for i in range(5):
high_objects.append(highest[i][0])
# actually plot the rivers over time on same graph
for i in range(5):
dates, levels = fetch_measure_levels(high_objects[i].measure_id, dt=datetime.timedelta(days=dt)) # for 2 days
plot_water_level_with_fit(high_objects[i], dates, levels, 4)
plot_water_levels(high_objects[i], dates, levels)
#NB when I ran this, hayes basin gave very dodgy readings (completely out of max/min levels)
if __name__ == "__main__":
print("*** Task 2F: CUED Part IA Flood Warning System ***")
run()
```
|
{
"source": "jgmakin/ecogVIS",
"score": 3
}
|
#### File: ecogvis/signal_processing/detect_events.py
```python
import numpy as np
import scipy.signal as sgn
from ecogvis.signal_processing.resample import resample
def detect_events(speaker_data, mic_data=None, interval=None, dfact=30,
smooth_width=0.4, speaker_threshold=0.05, mic_threshold=0.05,
direction='both'):
"""
Automatically detects events in audio signals.
Parameters
----------
speaker_data : 'pynwb.base.TimeSeries' object
Object containing speaker data.
mic_data : 'pynwb.base.TimeSeries' object
Object containing microphone data.
interval : list of floats
Interval to be used [Start_bin, End_bin]. If 'None', the whole
signal is used.
dfact : float
Downsampling factor. Default 30.
smooth_width: float
Width scale for median smoothing filter (default = .4, decent for CVs).
speaker_threshold : float
Sets threshold level for speaker.
mic_threshold : float
Sets threshold level for mic.
direction : str
'up' detects event start times, 'down' detects event stop times, and
'both' detects both start and stop times.
Returns
-------
speakerDS : 1D array of floats
Downsampled speaker signal.
speakerEventDS : 1D array of floats
Event times for speaker signal.
speakerFilt : 1D array of floats
Filtered speaker signal.
micDS : 1D array of floats
Downsampled microphone signal.
micEventDS : 1D array of floats
Event times for microphone signal.
micFilt : 1D array of floats
Filtered microphone signal.
"""
# Downsampling Speaker ---------------------------------------------------
speakerDS, speakerEventDS, speakerFilt = None, None, None
if speaker_data is not None:
if interval is None:
X = speaker_data.data[:]
else:
X = speaker_data.data[interval[0]:interval[1]]
fs = speaker_data.rate # sampling rate
ds = fs / dfact
# Pad zeros to make signal length a power of 2, improves performance
nBins = X.shape[0]
extraBins = 2 ** (np.ceil(np.log2(nBins)).astype('int')) - nBins
extraZeros = np.zeros(extraBins)
X = np.append(X, extraZeros)
speakerDS = resample(X, ds, fs)
# Remove excess bins (because of zero padding on previous step)
excessBins = int(np.ceil(extraBins * ds / fs))
speakerDS = speakerDS[0:-excessBins]
# Kernel size must be an odd number
speakerFilt = sgn.medfilt(
volume=np.diff(np.append(speakerDS, speakerDS[-1])) ** 2,
kernel_size=int((smooth_width * ds // 2) * 2 + 1))
# Normalize the filtered signal.
speakerFilt /= np.max(np.abs(speakerFilt))
# Find threshold crossing times
stimBinsDS = threshcross(speakerFilt, speaker_threshold, direction)
# Remove events that have a duration less than 0.1 s.
speaker_events = stimBinsDS.reshape((-1, 2))
rem_ind = np.where((speaker_events[:, 1] - speaker_events[:, 0]) < ds * 0.1)[0]
speaker_events = np.delete(speaker_events, rem_ind, axis=0)
stimBinsDS = speaker_events.reshape((-1))
# Transform bins to time
speakerEventDS = (stimBinsDS / ds) + ((interval[0] if interval is not None else 0) / fs)
# Downsampling Mic -------------------------------------------------------
micDS, micEventDS, micFilt = None, None, None
if mic_data is not None:
if interval is None:
X = mic_data.data[:]
else:
X = mic_data.data[interval[0]:interval[1]]
fs = mic_data.rate # sampling rate
ds = fs / dfact
# Pad zeros to make signal length a power of 2, improves performance
nBins = X.shape[0]
extraBins = 2 ** (np.ceil(np.log2(nBins)).astype('int')) - nBins
extraZeros = np.zeros(extraBins)
X = np.append(X, extraZeros)
micDS = resample(X, ds, fs)
# Remove excess bins (because of zero padding on previous step)
excessBins = int(np.ceil(extraBins * ds / fs))
micDS = micDS[0:-excessBins]
# Remove mic response to speaker
micDS[np.where(speakerFilt > speaker_threshold)[0]] = 0
micFilt = sgn.medfilt(volume=np.diff(np.append(micDS, micDS[-1])) ** 2,
kernel_size=int(
(smooth_width * ds // 2) * 2 + 1))
# Normalize the filtered signal.
micFilt /= np.max(np.abs(micFilt))
# Find threshold crossing times
micBinsDS = threshcross(micFilt, mic_threshold, direction)
# Remove events that have a duration less than 0.1 s.
mic_events = micBinsDS.reshape((-1, 2))
rem_ind = np.where((mic_events[:, 1] - mic_events[:, 0]) < ds * 0.1)[0]
mic_events = np.delete(mic_events, rem_ind, axis=0)
micBinsDS = mic_events.reshape((-1))
# Transform bins to time
micEventDS = (micBinsDS / ds) + ((interval[0] if interval is not None else 0) / fs)
return speakerDS, speakerEventDS, speakerFilt, micDS, micEventDS, micFilt
def threshcross(data, threshold=0, direction='up'):
"""
Outputs the indices where the signal crossed the threshold.
Parameters
----------
data : array of floats
Numpy array of floats, containing signal.
threshold : float
Value of threshold.
direction : str
Defines the direction of cross detected: 'up', 'down', or 'both'.
With 'both', it will check to make sure that up and down crosses are
detected; in other words, crossings are returned as matched start/stop pairs.
Returns
-------
out : array
Array with indices where data crossed threshold.
"""
# Find crosses
over = (data >= threshold).astype('int')
cross = np.append(False, np.diff(over))
if direction == 'up':
out = np.where(cross == 1)[0]
elif direction == 'down':
out = np.where(cross == -1)[0]
elif direction == 'both':
cross_nonzero = np.where(cross != 0)[0]
events = []
for i in range(len(cross_nonzero) - 1):
# Skip to the next ind if this one was already recorded.
if cross_nonzero[i] in events:
continue
if (cross[cross_nonzero[i]] == 1) and (
cross[cross_nonzero[i + 1]] == -1):
events.append(cross_nonzero[i])
events.append(cross_nonzero[i + 1])
out = np.array(events)
return out
```
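A quick self-contained check of `threshcross` on a synthetic square pulse (NumPy only; the import path follows the module location shown above):
```python
import numpy as np

from ecogvis.signal_processing.detect_events import threshcross

# Two synthetic "events": samples 10-19 and 40-54 sit above the threshold.
sig = np.zeros(80)
sig[10:20] = 1.0
sig[40:55] = 1.0

print(threshcross(sig, threshold=0.5, direction='up'))    # [10 40]
print(threshcross(sig, threshold=0.5, direction='down'))  # [20 55]
print(threshcross(sig, threshold=0.5, direction='both'))  # [10 20 40 55]
```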
#### File: ecogvis/signal_processing/linenoise_notch.py
```python
from __future__ import division
import numpy as np
from scipy.signal import firwin2, filtfilt
from .fft import rfftfreq, rfft, irfft
__all__ = ['linenoise_notch']
__authors__ = "<NAME>"
def apply_notches(X, notches, rate, fft=True):
if fft:
fs = rfftfreq(X.shape[-1], 1./rate)
delta = 1.
fd = rfft(X)
else:
nyquist = rate/2.
n_taps = 1001
gain = [1, 1, 0, 0, 1, 1]
for notch in notches:
if fft:
window_mask = np.logical_and(fs > notch-delta, fs < notch+delta)
window_size = window_mask.sum()
window = np.hamming(window_size)
fd[:, window_mask] = (fd[:, window_mask] *
(1.-window)[np.newaxis, :])
else:
freq = np.array([0, notch-1, notch-.5,
notch+.5, notch+1, nyquist]) / nyquist
filt = firwin2(n_taps, freq, gain)
X = filtfilt(filt, np.array([1]), X)
if fft:
X = irfft(fd)
return X
def linenoise_notch(X, rate, notch_freq=None):
"""
Apply Notch filter at 60 Hz (or user chosen notch_freq) and its harmonics
Parameters
----------
X : array
Input data, dimensions (n_channels, n_timePoints)
rate : float
Number of samples per second
notch_freq : float
Main frequency of notch filter
Returns
-------
X : array
Denoised data, dimensions (n_channels, n_timePoints)
"""
nyquist = rate / 2
if notch_freq is None:
noise_hz = 60.
else: noise_hz = notch_freq
notches = np.arange(noise_hz, nyquist, noise_hz)
return apply_notches(X, notches, rate)
```
#### File: signal_processing/tests/test_linenoise_notch.py
```python
import numpy as np
from ecogvis.signal_processing.linenoise_notch import linenoise_notch
def test_linenoise_notch_return():
"""
Test the return shape.
"""
X = np.random.randn(32, 1000)
rate = 200
Xh = linenoise_notch(X, rate)
assert Xh.shape == X.shape
```
|
{
"source": "jgmartinss/bookstore",
"score": 2
}
|
#### File: apps/catalog/admin.py
```python
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from mptt.admin import DraggableMPTTAdmin
from image_cropping import ImageCroppingMixin
from bookstore.apps.catalog.models import (
BookImages,
AuthorImages,
Category,
PublishingCompany,
Author,
Book,
BookReview,
)
from bookstore.apps.catalog.forms import (
BookImagesForm,
AuthorImagesForm,
AuthorForm,
BookForm,
)
class BookImagesAdmin(ImageCroppingMixin, admin.ModelAdmin):
model = BookImages
form = BookImagesForm
fieldsets = (
(
None,
{"fields": ("book", "image", "list_page_cropping", "detail_page_cropping")},
),
)
class AuthorImagesAdmin(ImageCroppingMixin, admin.ModelAdmin):
model = AuthorImages
form = AuthorImagesForm
fieldsets = (
(
None,
{
"fields": (
"author",
"image",
"list_page_cropping",
"detail_page_cropping",
)
},
),
)
class BookImagesInline(ImageCroppingMixin, admin.TabularInline):
model = BookImages
class CategoryAdmin(DraggableMPTTAdmin, admin.ModelAdmin):
model = Category
ordering = ("name", "-created")
search_fields = ("name",)
list_display = ("name",)
list_display_links = ["name"]
prepopulated_fields = {"slug": ("name",)}
fieldsets = ((None, {"fields": (("name", "slug"), "parent")}),)
class PublishingCompanyAdmin(admin.ModelAdmin):
model = PublishingCompany
ordering = ("name", "-created")
search_fields = ("name",)
list_display = ("name",)
list_display_links = ["name"]
prepopulated_fields = {"slug": ("name",)}
fieldsets = ((None, {"fields": (("name", "slug"),)}),)
class AuthorAdmin(admin.ModelAdmin):
model = Author
form = AuthorForm
list_per_page = 20
ordering = ("name", "-created")
search_fields = ("name",)
list_display = ("name",)
list_display_links = ["name"]
prepopulated_fields = {"slug": ("name",)}
fieldsets = ((None, {"fields": (("name", "slug"), "about_of")}),)
class BookAdmin(admin.ModelAdmin):
model = Book
form = BookForm
# inlines = [BookImagesInline]
list_per_page = 15
save_on_top = True
save_as = True
ordering = ("title", "-created")
search_fields = ("title", "original_title", "isbn")
list_display = (
"title",
"isbn",
"get_authors",
"get_categories",
"dimensions_of_the_book",
"price",
"quantity",
)
list_display_links = ["title"]
list_filter = (
"is_active",
"is_featured",
"availability_of_stock",
"hardback",
"author",
)
date_hierarchy = "created"
radio_fields = {
"availability_of_stock": admin.HORIZONTAL,
"show_real_price": admin.HORIZONTAL,
}
filter_horizontal = ["author", "category"]
prepopulated_fields = {"slug": ("title",)}
fieldsets = (
("Product info", {"fields": (("visible_where", "is_active", "is_featured"),)}),
(
"Book info",
{
"fields": (
("title", "slug"),
("isbn", "publishing_company"),
"synopsis",
("language", "num_of_pages", "hardback"),
("author", "category"),
)
},
),
(
"Dimensions of the book",
{"fields": (("length", "height", "width"), "weight")},
),
(
"Inventory info",
{
"fields": (
"availability_of_stock",
("quantity", "notify_when_stock_is_exhausted"),
(
"inventory_maintenance_unit",
"quantity_out_of_stock",
"maximum_quantity_in_the_shopping_cart",
),
)
},
),
(
"Prices",
{
"fields": (
"show_real_price",
"price",
"cost_price",
"special_price",
("special_price_from_date", "special_price_to_date"),
)
},
),
)
def get_authors(self, obj):
return ",\n".join([a.name for a in obj.author.all()])
get_authors.short_description = _("Author(s)")
def get_categories(self, obj):
return ",\n".join([c.name for c in obj.category.all()])
get_categories.short_description = _("Categories")
class BookReviewAdmin(admin.ModelAdmin):
model = BookReview
ordering = ("-created",)
search_fields = ("user", "book__title")
list_display = ("book", "get_short_comment", "user", "number_of_stars")
list_display_links = ["book"]
list_filter = ("number_of_stars",)
date_hierarchy = "created"
list_per_page = 15
admin.site.register(Author, AuthorAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(PublishingCompany, PublishingCompanyAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(BookImages, BookImagesAdmin)
admin.site.register(AuthorImages, AuthorImagesAdmin)
admin.site.register(BookReview, BookReviewAdmin)
```
#### File: apps/newsletter/views.py
```python
from django.contrib import messages
from django.shortcuts import render, get_object_or_404
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
from django.views.generic import ListView, DeleteView, CreateView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from bookstore.apps.newsletter.forms import SubscribeForm
from bookstore.apps.newsletter.models import Subscribe
class SubscribeCreateView(LoginRequiredMixin, SuccessMessageMixin, CreateView):
model = Subscribe
form_class = SubscribeForm
template_name = "newsletter/new_subscribe.html"
success_message = _("Newsletter created successfully!")
success_url = reverse_lazy("newsletter:list-newsletter")
def form_valid(self, form):
form.instance.created_by = self.request.user
return super(SubscribeCreateView, self).form_valid(form)
class MyNewsletterListView(LoginRequiredMixin, ListView):
model = Subscribe
context_object_name = "my_newsletter"
paginate_by = 5
template_name = "newsletter/list_newsllater.html"
def get_queryset(self, **kwargs):
return Subscribe.objects.filter(created_by=self.request.user).order_by(
"-created"
)
class NewsletterDeleteView(LoginRequiredMixin, SuccessMessageMixin, DeleteView):
model = Subscribe
template_name = "partials/delete_object.html"
success_message = _("Newsletter #%s deleted successfully!")
success_url = reverse_lazy("newsletter:list-newsletter")
def get_object(self):
_id = self.kwargs.get("id")
return get_object_or_404(Subscribe, id=_id)
def delete(self, request, *args, **kwargs):
obj = self.get_object()
messages.success(self.request, self.success_message % obj.id)
return super(NewsletterDeleteView, self).delete(request, *args, **kwargs)
```
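A hypothetical `urls.py` consistent with the `reverse_lazy("newsletter:list-newsletter")` calls and the `id` kwarg read by `NewsletterDeleteView.get_object`; the paths and the other route names are made up for illustration:
```python
from django.urls import path

from bookstore.apps.newsletter import views

app_name = "newsletter"

urlpatterns = [
    path("", views.MyNewsletterListView.as_view(), name="list-newsletter"),
    path("new/", views.SubscribeCreateView.as_view(), name="new-subscribe"),
    path("<int:id>/delete/", views.NewsletterDeleteView.as_view(), name="delete-newsletter"),
]
```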
|
{
"source": "jgmartinss/flask-simple-boilerplate",
"score": 3
}
|
#### File: flask-simple-boilerplate/tests/test_user_models.py
```python
from unittest import TestCase
from app import create_app
from app.database import db
from app.auth import models
class UserModelTest(TestCase):
user = models.User(
id=1,
first_name='Test',
last_name='Tests',
username='tests',
email="<EMAIL>",
_password=<PASSWORD>,
created_on='Mon, 01 Oct 2018 17:36:42 GMT',
updated_on='Mon, 01 Oct 2018 17:36:42 GMT',
)
def setUp(self):
self.app = create_app(__name__, 'testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_table_name_should_return_correct_name(self):
expected = 'tb_users'
self.assertEqual(self.user.__tablename__, expected)
def test_table_repr_should_return_correct_user(self):
expected = 'User(first_name="Test", last_name="Tests", username="tests", email="<EMAIL>", _password=<PASSWORD>)'
self.assertEqual(self.user.__repr__(), expected)
def test_table_str_should_return_correct_user(self):
expected = '(tests) - <EMAIL>'
self.assertEqual(self.user.__str__(), expected)
def test_table_as_dict_should_return_corret_user(self):
expected = {
"_password": <PASSWORD>,
"created_on": "Mon, 01 Oct 2018 17:36:42 GMT",
"email": "<EMAIL>",
"first_name": "Test",
"id": 1,
"last_name": "Tests",
"username": "tests",
"updated_on": "Mon, 01 Oct 2018 17:36:42 GMT",
}
self.assertEqual(self.user.as_dict(), expected)
```
|
{
"source": "jgmartinss/flask_start",
"score": 2
}
|
#### File: flask_start/app/__init__.py
```python
from flask import Flask
from config import get_config
from app.database import db
from app.auth import views, admin
def create_app(name, config_name):
app = Flask(name, template_folder='templates')
app.config.from_object(get_config())
db.init_app(app)
views.configure(app)
admin.configure(app)
return app
```
#### File: jgmartinss/flask_start/config.py
```python
from decouple import config
class Config(object):
SECRET_KEY = config('SECRET_KEY')
#CSRF_SESSION_KEY = SESSION_KEY
SQLALCHEMY_TRACK_MODIFICATIONS = True
migration_directory = 'migrations'
class Development(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = config('DATABASE_PATH')
class Testing(Config):
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = config('DATABASE_PATH')
class Production(Config):
TESTING = False
DEBUG = False
app_config = {
'development': Development,
'testing': Testing,
'production': Production
}
def get_config():
"""Return env class."""
if config('ENV') == 'dev':
return app_config['development']
elif config('ENV') == 'test':
return app_config['testing']
elif config('ENV') == 'prod':
return app_config['production']
```
#### File: jgmartinss/flask_start/manage.py
```python
import click
from decouple import config
from unittest import TestLoader, runner
from flask import current_app
from flask_script import Manager, Server, Shell
from flask_migrate import Migrate, MigrateCommand
from app import create_app, db
from app.auth import models
from config import get_config
app = create_app(__name__, get_config())
manager = Manager(app)
runserver = Server(host=config('HOST'), port=config('PORT'))
manager.add_command('runserver', runserver)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.shell
def make_shell_context():
return dict(app=current_app, db=db, models=models)
@manager.command
def create_user():
db.drop_all(bind=None)
db.create_all(bind=None)
user = models.User(
first_name=u'Test',
last_name=u'Tests',
username=u'tests',
email=u'<EMAIL>',
_password=u'<PASSWORD>',
)
db.session.add(user)
db.session.commit()
@manager.command
def tests():
loader = TestLoader()
test = loader.discover('tests/')
testrunner = runner.TextTestRunner(verbosity=2)
testrunner.run(test)
if __name__ == '__main__':
manager.run()
```
|
{
"source": "jgmchan/ecs-deploy",
"score": 2
}
|
#### File: ecs-deploy/tests/test_newrelic.py
```python
from pytest import fixture, raises
from mock import patch
from ecs_deploy.newrelic import Deployment, NewRelicDeploymentException
class DeploymentResponseSuccessfulMock(object):
status_code = 201
content = {
"deployment": {
"id": 1234567890,
"revision": "1.2.3",
"changelog": "Lorem Ipsum",
"description": "Lorem ipsum usu amet dicat nullam ea. Nec detracto lucilius democritum in.",
"user": "username", "timestamp": "2016-06-21T09:45:08+00:00",
"links": {"application": 12345}
},
"links": {"deployment.agent": "/v2/applications/{application_id}"}
}
class DeploymentResponseUnsuccessfulMock(object):
status_code = 400
content = {"message": "Something went wrong"}
@fixture
def api_key():
return 'APIKEY'
@fixture
def app_id():
return '12345'
@fixture
def user():
return 'username'
@fixture
def revision():
return '1.2.3'
@fixture
def changelog():
return 'Lorem Ipsum'
@fixture
def description():
return 'Lorem ipsum usu amet dicat nullam ea. Nec detracto lucilius democritum in.'
def test_get_endpoint(api_key, app_id, user):
endpoint = 'https://api.newrelic.com/v2/applications/%(app_id)s/deployments.json' % dict(app_id=app_id)
deployment = Deployment(api_key, app_id, user)
assert deployment.endpoint == endpoint
def test_get_headers(api_key, app_id, user):
headers = {
'X-Api-Key': api_key,
'Content-Type': 'application/json',
}
deployment = Deployment(api_key, app_id, user)
assert deployment.headers == headers
def test_get_payload(api_key, app_id, user, revision, changelog, description):
payload = {
'deployment': {
'revision': revision,
'changelog': changelog,
'description': description,
'user': user,
}
}
deployment = Deployment(api_key, app_id, user)
assert deployment.get_payload(revision, changelog, description) == payload
@patch('requests.post')
def test_deploy_sucessful(post, api_key, app_id, user, revision, changelog, description):
post.return_value = DeploymentResponseSuccessfulMock()
deployment = Deployment(api_key, app_id, user)
response = deployment.deploy(revision, changelog, description)
payload = deployment.get_payload(revision, changelog, description)
post.assert_called_with(deployment.endpoint, headers=deployment.headers, json=payload)
assert response.status_code == 201
@patch('requests.post')
def test_deploy_unsucessful(post, api_key, app_id, user, revision, changelog, description):
with raises(NewRelicDeploymentException):
post.return_value = DeploymentResponseUnsuccessfulMock()
deployment = Deployment(api_key, app_id, user)
deployment.deploy(revision, changelog, description)
```
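The fixtures and assertions above pin down the `Deployment` interface; a minimal implementation consistent with these tests (not necessarily the project's actual code) could look like this:
```python
import requests


class NewRelicDeploymentException(Exception):
    """Raised when the New Relic deployment API does not return 201."""


class Deployment(object):
    def __init__(self, api_key, app_id, user):
        self.api_key = api_key
        self.app_id = app_id
        self.user = user

    @property
    def endpoint(self):
        return ('https://api.newrelic.com/v2/applications/'
                '%(app_id)s/deployments.json' % dict(app_id=self.app_id))

    @property
    def headers(self):
        return {
            'X-Api-Key': self.api_key,
            'Content-Type': 'application/json',
        }

    def get_payload(self, revision, changelog, description):
        return {
            'deployment': {
                'revision': revision,
                'changelog': changelog,
                'description': description,
                'user': self.user,
            }
        }

    def deploy(self, revision, changelog, description):
        payload = self.get_payload(revision, changelog, description)
        response = requests.post(self.endpoint, headers=self.headers, json=payload)
        if response.status_code != 201:
            raise NewRelicDeploymentException(response.content)
        return response
```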
|
{
"source": "JG-Mike/pollapp",
"score": 2
}
|
#### File: pollapp/polls/views.py
```python
from django.shortcuts import get_object_or_404, render
from django.http import Http404
from .models import Choice, Question
#Get question and display them
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {'latest_question_list': latest_question_list}
return render(request, 'polls/index.html', context)
# Show specific question and choices
def details(request, question_id):
try:
question = Question.objects.get(pk=question_id)
except Question.DoesNotExist:
raise Http404("Question does not exist")
return render(request, 'polls/results.html', {'question': question})
# Get question and display results
#def results (request, question_id):
# question = get_object_or_404(Question, pk=question_id)
#return render(request, 'polls/results.html',{'question': question })
```
|
{
"source": "jgmitchell257/mitm-cs",
"score": 4
}
|
#### File: python/res500/res500.py
```python
def z_value(x: float, mean: float, sigma: float) -> float:
z = (x - mean) / sigma
return z
def compound_interest(principal: int, interest: float, periods: int) -> float:
"""
Calculates the total return on a standard deposit and interest rate every period.
Args
principal: amount to be deposited every period
interest: expressed in decimal 3% = .03
periods: the number of periods to calculate for
"""
value = 0
total_value = 0
periods = periods
while periods > 0:
value = principal * ((1 + interest) ** periods)
total_value = total_value + value
periods -= 1
return total_value
def simple_compound_interest(principal: int, interest: float, periods: int) -> float:
"""
Calculates the total return on a single deposit with compounding interest
Args
principal: amount to be deposited
interest: expressed in decimal 3% = .03
periods: the number of periods to calculate for
"""
total_value = principal * ((1 + interest) ** periods)
return total_value
def quarterly_compound_interest(p: int, i: float, n: int) -> float:
"""
Calculates the total return on a standard deposit and interest rate every period.
Args
principal: amount to be deposited every period
interest: expressed in decimal 3% = .03
periods: the number of periods to calculate for
"""
value = 0
total_value = 0
periods = n
while periods > 0:
value = p * (1 + i) ** periods
total_value = total_value + value
periods -= 4
print(value, total_value)
return total_value
def bank_like_interest(p: int, r: float, n: int, t: int) -> float:
future_value = p * (1 + (r / n)) ** (n * t)
return future_value
```
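A short worked example of the helpers above (import path assumed from the file layout); the figures are straightforward to verify by hand:
```python
from res500 import z_value, compound_interest, simple_compound_interest

# Single deposit of 1000 at 3% for 10 periods: 1000 * 1.03**10
print(round(simple_compound_interest(1000, 0.03, 10), 2))  # 1343.92

# 1000 deposited every period for 3 periods at 3%:
# 1000*1.03**3 + 1000*1.03**2 + 1000*1.03 = 1092.73 + 1060.90 + 1030.00
print(round(compound_interest(1000, 0.03, 3), 2))          # 3183.63

# z-score of an observation of 85 given mean 70 and sigma 10
print(z_value(85, 70, 10))                                 # 1.5
```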
|
{
"source": "jgmitchell257/thistle",
"score": 3
}
|
#### File: thistle/cisco/get_exp_ver.py
```python
from getpass import getpass
import expressway_v1 as exp
def load_file_to_list(filename: str) -> list:
clean_data = []
with open(filename, "r") as f:
raw_data = f.readlines()
for i in raw_data:
clean_file_input = i.strip("\n")
clean_data.append(clean_file_input)
f.close()
return clean_data
username = input("Expressway admin user: ")
password = getpass("<PASSWORD>: ")
exp_list = load_file_to_list("xway_int_hs.txt")
for i in exp_list:
server = i
print(server)
try:
exp.Expressway(server, username, password).getSysInfo()
except Exception:
print("Connection error")
```
#### File: thistle/thistle/security.py
```python
import random
def load_word_list() -> list:
"""Load /usr/share/dict/words file
Returns:
list: cleaned contents of /usr/share/dict/words
"""
with open("/usr/share/dict/words", "r") as words:
word_list = words.readlines()
cleaned_list = []
for word in word_list:
w = word.strip("\n")
cleaned_list.append(w)
return cleaned_list
def create_passphrase(x: int) -> str:
"""Create a passphrase that is x words long
Args:
x (int): Number of words to sample from the word list
Returns:
str: passphrase
"""
with open("/usr/share/dict/words", "r") as words:
word_list = words.readlines()
passphrase_list = random.sample(word_list, k=x)
random.shuffle(passphrase_list)
cleaned_list = []
for word in passphrase_list:
w = word.strip("\n")
cleaned_list.append(w)
return "-".join(cleaned_list)
from cryptography.fernet import Fernet
def create_key() -> bytes:
"""Generate key"""
key = Fernet.generate_key()
return key
def load_key(keyfile: str, *args) -> bytes:
"""Loads key from keyfile
Arguments:
keyfile (str): name of the file to load
Returns:
key (bytes): bytes encoded key
"""
with open(keyfile, "r") as kf:
key = kf.read()
return key.encode()
def save_key(key: bytes, keyfile: str, *args):
"""Saves key to keyfile
Arguments:
key (bytes): key in bytes format
keyfile (str): name of the file to save to
Returns:
nothing
"""
with open(keyfile, "w") as kf:
kf.write(key.decode())
return "Success"
def encrypt_string(clr_str: str, key: bytes, *args) -> bytes:
"""Symmetric string encryption
Arguments:
clr_str (str): string to be encrypted
key (bytes): encryption key
Returns:
token (bytes): encrypted string
"""
f = Fernet(key)
token = f.encrypt(clr_str.encode())
return token
def decrypt_string(enc_str: bytes, key: bytes, *args) -> str:
"""Symmetric string decryption
Arguments:
enc_str (str): string to be decrypted
key (bytes): encryption key
Returns:
token (bytes): decrypted string
"""
f = Fernet(key)
token = f.decrypt(enc_str)
return token.decode()
from cryptography import x509
from cryptography.hazmat.backends import default_backend
import datetime
import ssl
def download_cert(servername, port=None):
if port:
cert = ssl.get_server_certificate((servername, port))
else:
cert = ssl.get_server_certificate((servername, 443))
return cert
def cert_expiration(cert):
c = x509.load_pem_x509_certificate(cert.encode(), default_backend())
expires = c.not_valid_after.isoformat()
return expires
```
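A usage sketch for the security helpers, assuming the package layout `thistle/thistle/security.py` shown above; `demo.key` is just an example filename:
```python
from thistle.security import (create_key, save_key, load_key,
                              encrypt_string, decrypt_string,
                              create_passphrase)

# Round-trip a Fernet key through a file, then encrypt and decrypt a string.
key = create_key()
save_key(key, "demo.key")
key = load_key("demo.key")

token = encrypt_string("hello thistle", key)
assert decrypt_string(token, key) == "hello thistle"

# Four random dictionary words joined with dashes
# (relies on /usr/share/dict/words being present).
print(create_passphrase(4))
```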
|
{
"source": "jgmize/kitsune",
"score": 2
}
|
#### File: customercare/tests/test_templates.py
```python
import json
from django.conf import settings
from django.core.cache import cache
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.customercare.replies import REPLIES_DOCUMENT_SLUG
from kitsune.sumo.tests import TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.wiki.tests import document, revision
CANNED_RESPONSES_WIKI = """
Any initial text above the first H1 should be ignored.
=Category 1=
==Reply 1==
Reply goes here http://example.com/kb-article
==Reply 2==
Another reply here
=Category 2=
==Reply 3==
And another reply
"""
MESSED_UP_CANNED_RESPONSES_WIKI = """
Lal al ala la alaa lala la
==Bogus Reply will be ignored==
==Another bogus one==
Any initial text above the first H1 should be ignored.
=Category 1=
==Reply 1==
Reply goes here http://example.com/kb-article
==Reply 2==
Another reply here [[Bad link]]
==A reply without text==
=Category 2=
==Another reply without text==
==Reply 3==
And another reply
==Another Reply without text==
"""
class CannedResponsesTestCase(TestCase):
"""Canned responses tests."""
def _create_doc(self, content):
# Create the canned responses article.
doc = document(slug=REPLIES_DOCUMENT_SLUG, save=True)
rev = revision(
document=doc,
content=content,
is_approved=True,
save=True)
doc.current_revision = rev
doc.save()
def test_list_canned_responses(self):
"""Listing canned responses works as expected."""
# Create the canned responses article.
self._create_doc(CANNED_RESPONSES_WIKI)
r = self.client.get(reverse('customercare.landing'), follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
responses_plain = doc('#accordion').text()
# Verify categories and replies
assert 'Category 1' in responses_plain
assert 'Reply 1' in responses_plain
assert 'Reply goes here' in responses_plain
assert 'Category 2' in responses_plain
assert 'Reply 3' in responses_plain
assert 'And another reply' in responses_plain
# Listing all responses
eq_(3, len(doc('#accordion a.reply-topic')))
def test_list_canned_responses_nondefault_locale(self):
"""Listing canned responses gives all snippets regardless of locale.
"""
# Create the canned responses article.
self._create_doc(CANNED_RESPONSES_WIKI)
r = self.client.get(reverse('customercare.landing', locale='es'),
follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
# Listing all responses, l10n-agnostic (English if not in Verbatim).
eq_(3, len(doc('#accordion a.reply-topic')))
def test_messed_up_canned_responses(self):
"""Make sure we don't blow up if the article is malformed."""
# Create the canned responses article.
self._create_doc(MESSED_UP_CANNED_RESPONSES_WIKI)
r = self.client.get(reverse('customercare.landing'), follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
responses_plain = doc('#accordion').text()
assert 'Category 1' in responses_plain
assert 'Category 2' in responses_plain
class TweetListTestCase(TestCase):
"""Tests for the list of tweets."""
def test_fallback_message(self):
"""Fallback message when there are no tweets."""
r = self.client.get(reverse('customercare.landing'), follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
assert doc('#tweets-wrap .warning-box'), (
'Fallback message is not showing up.')
class StatsTests(TestCase):
"""Tests for the activity and contributors stats."""
def test_contributors(self):
"""Only contributors stats are set."""
with open('kitsune/customercare/tests/stats.json') as f:
json_data = json.load(f)
cache.set(settings.CC_TOP_CONTRIB_CACHE_KEY,
json_data['contributors'],
settings.CC_STATS_CACHE_TIMEOUT)
r = self.client.get(reverse('customercare.landing'), follow=True)
eq_(200, r.status_code)
cache.delete(settings.CC_TOP_CONTRIB_CACHE_KEY)
```
#### File: forums/migrations/0001_initial.py
```python
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Forum'
db.create_table('forums_forum', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)),
('description', self.gf('django.db.models.fields.TextField')(null=True)),
('last_post', self.gf('django.db.models.fields.related.ForeignKey')(related_name='last_post_in_forum', null=True, on_delete=models.SET_NULL, to=orm['forums.Post'])),
('display_order', self.gf('django.db.models.fields.IntegerField')(default=1, db_index=True)),
('is_listed', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
))
db.send_create_signal('forums', ['Forum'])
# Adding model 'Thread'
db.create_table('forums_thread', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('forum', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Forum'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('last_post', self.gf('django.db.models.fields.related.ForeignKey')(related_name='last_post_in', null=True, on_delete=models.SET_NULL, to=orm['forums.Post'])),
('replies', self.gf('django.db.models.fields.IntegerField')(default=0)),
('is_locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_sticky', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
))
db.send_create_signal('forums', ['Thread'])
# Adding model 'Post'
db.create_table('forums_post', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('thread', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Thread'])),
('content', self.gf('django.db.models.fields.TextField')()),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
('updated_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='post_last_updated_by', null=True, to=orm['auth.User'])),
))
db.send_create_signal('forums', ['Post'])
def backwards(self, orm):
# Deleting model 'Forum'
db.delete_table('forums_forum')
# Deleting model 'Thread'
db.delete_table('forums_thread')
# Deleting model 'Post'
db.delete_table('forums_post')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('<PASSWORD>.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'forums.forum': {
'Meta': {'ordering': "['display_order', 'id']", 'object_name': 'Forum'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'display_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_listed': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'last_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_post_in_forum'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['forums.Post']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'forums.post': {
'Meta': {'ordering': "['created']", 'object_name': 'Post'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Thread']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'post_last_updated_by'", 'null': 'True', 'to': "orm['auth.User']"})
},
'forums.thread': {
'Meta': {'ordering': "['-is_sticky', '-last_post__created']", 'object_name': 'Thread'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Forum']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'last_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_post_in'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['forums.Post']"}),
'replies': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'tidings.watch': {
'Meta': {'object_name': 'Watch'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['forums']
```
#### File: kitsune/gallery/models.py
```python
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from kitsune.sumo.models import ModelBase, LocaleField
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import auto_delete_files
class Media(ModelBase):
"""Generic model for media"""
title = models.CharField(max_length=255, db_index=True)
created = models.DateTimeField(default=datetime.now, db_index=True)
updated = models.DateTimeField(default=datetime.now, db_index=True)
updated_by = models.ForeignKey(User, null=True)
description = models.TextField(max_length=10000)
locale = LocaleField(default=settings.GALLERY_DEFAULT_LANGUAGE,
db_index=True)
is_draft = models.NullBooleanField(default=None, null=True, editable=False)
class Meta(object):
abstract = True
ordering = ['-created']
unique_together = (('locale', 'title'), ('is_draft', 'creator'))
def __unicode__(self):
return '[%s] %s' % (self.locale, self.title)
@auto_delete_files
class Image(Media):
creator = models.ForeignKey(User, related_name='gallery_images')
file = models.ImageField(upload_to=settings.GALLERY_IMAGE_PATH,
max_length=settings.MAX_FILEPATH_LENGTH)
thumbnail = models.ImageField(
upload_to=settings.GALLERY_IMAGE_THUMBNAIL_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
def get_absolute_url(self):
return reverse('gallery.media', args=['image', self.id])
def thumbnail_url_if_set(self):
"""Returns self.thumbnail, if set, else self.file"""
return self.thumbnail.url if self.thumbnail else self.file.url
@auto_delete_files
class Video(Media):
creator = models.ForeignKey(User, related_name='gallery_videos')
webm = models.FileField(upload_to=settings.GALLERY_VIDEO_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
ogv = models.FileField(upload_to=settings.GALLERY_VIDEO_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
flv = models.FileField(upload_to=settings.GALLERY_VIDEO_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
poster = models.ImageField(upload_to=settings.GALLERY_VIDEO_THUMBNAIL_PATH,
max_length=settings.MAX_FILEPATH_LENGTH,
null=True)
thumbnail = models.ImageField(
upload_to=settings.GALLERY_VIDEO_THUMBNAIL_PATH, null=True,
max_length=settings.MAX_FILEPATH_LENGTH)
def get_absolute_url(self):
return reverse('gallery.media', args=['video', self.id])
def thumbnail_url_if_set(self):
"""Returns self.thumbnail.url, if set, else default thumbnail URL"""
progress_url = settings.GALLERY_VIDEO_THUMBNAIL_PROGRESS_URL
return self.thumbnail.url if self.thumbnail else progress_url
```
#### File: karma/tests/test_api.py
```python
from datetime import datetime, timedelta
import json
import mock
from nose import SkipTest
from nose.tools import eq_
import waffle
from kitsune.karma import models
from kitsune.karma.manager import KarmaManager
from kitsune.karma.tests import TestAction1, TestAction2
from kitsune.questions.tests import answer
from kitsune.sumo.helpers import urlparams
from kitsune.sumo.redis_utils import redis_client, RedisError
from kitsune.sumo.tests import TestCase, LocalizingClient
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import user, add_permission
class KarmaAPITests(TestCase):
client_class = LocalizingClient
@mock.patch.object(waffle, 'switch_is_active')
def setUp(self, switch_is_active):
switch_is_active.return_value = True
super(KarmaAPITests, self).setUp()
try:
self.mgr = KarmaManager()
redis_client('karma').flushdb()
except RedisError:
raise SkipTest
self.user1 = user(save=True)
self.user2 = user(save=True)
self.user3 = user(save=True)
TestAction1(user=self.user1).save()
TestAction2(user=self.user2).save()
TestAction2(user=self.user2).save()
TestAction1(user=self.user3).save()
TestAction1(user=self.user3).save()
TestAction1(user=self.user3).save()
self.mgr.update_top()
self.client.login(username=self.user1.username, password='<PASSWORD>')
add_permission(self.user1, models.Title, 'view_dashboard')
@mock.patch.object(waffle, 'switch_is_active')
def test_user_api_no_permission(self, switch_is_active):
"""No view_dashboard permission? No API for you."""
switch_is_active.return_value = True
self.client.login(username=self.user2.username, password='<PASSWORD>')
url = reverse('karma.api.users')
response = self.client.get(url)
eq_(403, response.status_code)
@mock.patch.object(waffle, 'switch_is_active')
def test_user_api_default(self, switch_is_active):
"""Test user API with all defaults."""
switch_is_active.return_value = True
url = reverse('karma.api.users')
response = self.client.get(url)
eq_(200, response.status_code)
r = json.loads(response.content)
user_ids = [u[0] for u in r['results']]
eq_([self.user2.id, self.user3.id, self.user1.id], user_ids)
@mock.patch.object(waffle, 'switch_is_active')
def test_user_api_sort_testaction1(self, switch_is_active):
"""Test user API with sort = TestAction1."""
switch_is_active.return_value = True
url = reverse('karma.api.users')
url = urlparams(url, sort=TestAction1.action_type)
response = self.client.get(url)
eq_(200, response.status_code)
r = json.loads(response.content)
user_ids = [u[0] for u in r['results']]
eq_([self.user3.id, self.user1.id], user_ids)
@mock.patch.object(waffle, 'switch_is_active')
def test_user_api_sort_testaction2(self, switch_is_active):
"""Test user API with sort = TestAction2."""
switch_is_active.return_value = True
url = reverse('karma.api.users')
url = urlparams(url, sort=TestAction2.action_type)
response = self.client.get(url)
eq_(200, response.status_code)
r = json.loads(response.content)
user_ids = [u[0] for u in r['results']]
eq_([self.user2.id], user_ids)
@mock.patch.object(waffle, 'switch_is_active')
def test_user_api_last_activity(self, switch_is_active):
"""Verify the last activity field."""
switch_is_active.return_value = True
now = datetime.now()
one_day = now - timedelta(days=1)
two_days = now - timedelta(days=2)
answer(creator=self.user1, created=now, save=True)
answer(creator=self.user2, created=one_day, save=True)
answer(creator=self.user3, created=two_days, save=True)
url = reverse('karma.api.users')
response = self.client.get(url)
eq_(200, response.status_code)
r = json.loads(response.content)
days_since_last_activity = [u[2] for u in r['results']]
eq_([1, 2, 0], days_since_last_activity)
@mock.patch.object(waffle, 'switch_is_active')
def test_overview_api(self, switch_is_active):
"""Test overview API."""
switch_is_active.return_value = True
url = reverse('karma.api.overview')
url = urlparams(url, daterange='6m')
response = self.client.get(url)
eq_(200, response.status_code)
r = json.loads(response.content)
overview = r['overview']
eq_(4, overview['test-action-1'])
eq_(2, overview['test-action-2'])
```
#### File: kbadge/tests/test_awards.py
```python
from django.core import mail
from nose.tools import eq_
from kitsune.kbadge.tests import award, badge
from kitsune.sumo.tests import TestCase
class AwardNotificationTests(TestCase):
def test_notification(self):
# Note: Need to do this import here so the
# notify_award_recipient function handles the
# badge_was_awarded signal. This works fine in production
# because badges gets loaded by django-badger in startup.
from kitsune.kbadge import badges
new_badge = badge(save=True)
# Check the mail queue first.
eq_(0, len(mail.outbox))
# Create an award and save it. This triggers the notification.
award(description=u'yay!', badge=new_badge, save=True)
eq_(1, len(mail.outbox))
# TODO: test contents--not doing that now because it's a
# mockup.
```
#### File: landings/tests/test_templates.py
```python
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.products.models import HOT_TOPIC_SLUG
from kitsune.products.tests import product, topic
from kitsune.search.tests.test_es import ElasticTestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.wiki.tests import document, revision
class HomeTestCase(ElasticTestCase):
def test_home(self):
"""Verify that home page renders products."""
# Create some products
for i in range(4):
product(save=True)
# GET the home page and verify the content
r = self.client.get(reverse('home'), follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(5, len(doc('#products-and-services li')))
def test_mozilla_news(self):
"""Verifies the Mozilla News section."""
# If "Mozilla News" article doesn't exist, home page
# should still work and omit the section.
r = self.client.get(reverse('home'), follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(len(doc('#mozilla-news')), 0)
# Create the "Mozilla News" article and verify it on home page.
d = document(title='Mozilla News', slug='mozilla-news', save=True)
rev = revision(
document=d, content='splendid', is_approved=True, save=True)
d.current_revision = rev
d.save()
r = self.client.get(reverse('home'), follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
moz_news = doc('#mozilla-news')
eq_(len(moz_news), 1)
assert 'splendid' in moz_news.text()
```
#### File: landings/tests/test_views.py
```python
from nose.tools import eq_
from kitsune.sumo.tests import MobileTestCase, TestCase
from kitsune.sumo.urlresolvers import reverse
class RootRedirectTests(TestCase):
def test_default_redirect(self):
"""/ redirects to /home"""
response = self.client.get(reverse('home.default', locale='en-US'),
follow=False)
eq_(302, response.status_code)
eq_('http://testserver/en-US/home', response['location'])
class RootRedirectForMobileTests(MobileTestCase):
def test_default_redirect(self):
"""/ redirects to /mobile"""
response = self.client.get(reverse('home.default', locale='en-US'),
follow=False)
eq_(302, response.status_code)
eq_('http://testserver/en-US/products', response['location'])
```
#### File: kitsune/landings/views.py
```python
from django.shortcuts import get_list_or_404, render
from django.views.decorators.cache import never_cache
from mobility.decorators import mobile_template
from kitsune.products.models import Product, Topic, HOT_TOPIC_SLUG
from kitsune.sumo.parser import get_object_fallback
from kitsune.sumo.views import redirect_to
from kitsune.wiki.facets import documents_for
from kitsune.wiki.models import Document
# Docs for the new IA:
MOZILLA_NEWS_DOC = 'Mozilla News'
@never_cache
def desktop_or_mobile(request):
"""Redirect mobile browsers to /mobile and others to /home."""
mobile = 'products'
url_name = mobile if request.MOBILE else 'home'
return redirect_to(request, url_name, permanent=False)
def home(request):
"""The home page."""
if request.MOBILE:
return redirect_to(request, 'products', permanent=False)
products = Product.objects.filter(visible=True)
moz_news = get_object_fallback(
Document, MOZILLA_NEWS_DOC, request.LANGUAGE_CODE)
return render(request, 'landings/home.html', {
'products': products,
'moz_news': moz_news})
@mobile_template('landings/{mobile/}get-involved.html')
def get_involved(request, template):
return render(request, template)
@mobile_template('landings/{mobile/}get-involved-aoa.html')
def get_involved_aoa(request, template):
return render(request, template)
@mobile_template('landings/{mobile/}get-involved-questions.html')
def get_involved_questions(request, template):
return render(request, template)
@mobile_template('landings/{mobile/}get-involved-kb.html')
def get_involved_kb(request, template):
return render(request, template)
@mobile_template('landings/{mobile/}get-involved-l10n.html')
def get_involved_l10n(request, template):
return render(request, template)
def integrity_check(request):
return render(request, 'landings/integrity-check.html')
```
#### File: products/tests/test_templates.py
```python
from datetime import datetime, timedelta
from django.core.cache import cache
from nose.tools import eq_
from pyquery import PyQuery as pq
from waffle.models import Flag
from kitsune.products.models import HOT_TOPIC_SLUG
from kitsune.products.tests import product, topic
from kitsune.search.tests.test_es import ElasticTestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.tags.tests import tag
from kitsune.wiki.tests import revision, helpful_vote
from kitsune.questions.tests import question
class ProductViewsTestCase(ElasticTestCase):
def test_products(self):
"""Verify that /products page renders products."""
# Create some products.
for i in range(3):
product(save=True)
# GET the products page and verify the content.
r = self.client.get(reverse('products'), follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(4, len(doc('#products-and-services li')))
def test_product_landing(self):
"""Verify that /products/<slug> page renders topics."""
# Create a product.
p = product(save=True)
# Create some topics.
topic(slug=HOT_TOPIC_SLUG, product=p, save=True)
topics = []
for i in range(11):
topics.append(topic(product=p, save=True))
# Create a document and assign the product and 10 topics.
doc = revision(is_approved=True, save=True).document
doc.products.add(p)
for i in range(10):
doc.topics.add(topics[i])
self.refresh()
# GET the product landing page and verify the content.
url = reverse('products.product', args=[p.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(11, len(doc('#help-topics li')))
eq_(p.slug, doc('#support-search input[name=product]').attr['value'])
def test_document_listing(self):
"""Verify /products/<product slug>/<topic slug> renders articles."""
# Create a topic and product.
p = product(save=True)
t1 = topic(product=p, save=True)
# Create 3 documents with the topic and product and one without.
for i in range(3):
doc = revision(is_approved=True, save=True).document
doc.topics.add(t1)
doc.products.add(p)
doc = revision(is_approved=True, save=True).document
self.refresh()
# GET the page and verify the content.
url = reverse('products.documents', args=[p.slug, t1.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(3, len(doc('#document-list > ul > li')))
eq_(p.slug, doc('#support-search input[name=product]').attr['value'])
def test_document_listing_order(self):
"""Verify documents are listed in order of helpful votes."""
# Create topic, product and documents.
p = product(save=True)
t = topic(product=p, save=True)
docs = []
for i in range(3):
doc = revision(is_approved=True, save=True).document
doc.topics.add(t)
doc.products.add(p)
docs.append(doc)
# Add a helpful vote to the second document. It should be first now.
rev = docs[1].current_revision
helpful_vote(revision=rev, helpful=True, save=True)
docs[1].save() # Votes don't trigger a reindex.
self.refresh()
url = reverse('products.documents', args=[p.slug, t.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(doc('#document-list > ul > li:first-child > a').text(), docs[1].title)
# Add 2 helpful votes to the third document. It should be first now.
rev = docs[2].current_revision
helpful_vote(revision=rev, helpful=True, save=True)
helpful_vote(revision=rev, helpful=True, save=True)
docs[2].save() # Votes don't trigger a reindex.
self.refresh()
cache.clear() # documents_for() is cached
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(doc('#document-list > ul > li:first-child > a').text(), docs[2].title)
def test_hot_topics(self):
"""Verifies the hot topics section."""
# Create a product and the hot topics topic.
p = product(save=True)
hot = topic(slug=HOT_TOPIC_SLUG, product=p, save=True)
# Create 7 hot documents.
for i in range(7):
doc = revision(is_approved=True, save=True).document
doc.products.add(p)
doc.topics.add(hot)
# Create a not hot document.
doc = revision(is_approved=True, save=True).document
doc.products.add(p)
self.refresh()
# GET the product landing page and verify the content.
url = reverse('products.product', args=[p.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(7, len(doc('#hot-topics li')))
def test_hot_questions(self):
"""Verifies that hot questions show up in the hot topics section."""
# Create a product and the hot topics topic.
p = product(save=True)
hot_tag = tag(name='hot', slug=HOT_TOPIC_SLUG, save=True)
# Create a flag, since this code is flagged off by default.
Flag.objects.create(name='hot_questions', everyone=True)
# Create 4 hot questions.
titles = ['apple', 'banana', 'cherry', 'date']
timestamp = datetime.now() - timedelta(days=7)
for i in range(4):
q = question(title=titles[i], created=timestamp, save=True)
q.products.add(p)
q.tags.add(hot_tag)
timestamp += timedelta(days=1)
# Create a non-hot question.
q = question(title='elderberry', save=True)
q.products.add(p)
# GET the product landing page and verify the content.
url = reverse('products.product', args=[p.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(3, len(doc('#hot-topics li.question')))
# Only the 3 newest hot topics should show up.
assert 'apple' not in r.content
assert 'banana' in r.content
assert 'cherry' in r.content
assert 'date' in r.content
# Non-hot topics should not show up.
assert 'elderberry' not in r.content
def test_subtopics(self):
"""Verifies subtopics appear on document listing page."""
# Create a topic and product.
p = product(save=True)
t = topic(product=p, save=True)
# Create a documents with the topic and product
doc = revision(is_approved=True, save=True).document
doc.topics.add(t)
doc.products.add(p)
self.refresh()
# GET the page and verify no subtopics yet.
url = reverse('products.documents', args=[p.slug, t.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
pqdoc = pq(r.content)
eq_(0, len(pqdoc('li.subtopic')))
# Create a subtopic, it still shouldn't show up because no
# articles are assigned.
subtopic = topic(parent=t, product=p, save=True)
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
pqdoc = pq(r.content)
eq_(0, len(pqdoc('li.subtopic')))
# Add a document to the subtopic, now it should appear.
doc.topics.add(subtopic)
self.refresh()
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
pqdoc = pq(r.content)
eq_(1, len(pqdoc('li.subtopic')))
```
#### File: questions/tests/test_feeds.py
```python
from datetime import datetime, timedelta
from django.core.cache import cache
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.helpers import urlparams
from kitsune.products.tests import product, topic
from kitsune.questions.feeds import QuestionsFeed, TaggedQuestionsFeed
from kitsune.questions.models import Question
from kitsune.questions.tests import TestCaseBase, question
from kitsune.tags.tests import tag
from kitsune.users.tests import user
class ForumTestFeeds(TestCaseBase):
def test_tagged_feed(self):
"""Test the tagged feed."""
t = tag(name='green', slug='green', save=True)
q = question(save=True)
q.tags.add('green')
items = TaggedQuestionsFeed().items(t)
eq_(1, len(items))
eq_(q.id, items[0].id)
cache.clear()
q = question(save=True)
q.tags.add('green')
q.updated = datetime.now() + timedelta(days=1)
q.save()
items = TaggedQuestionsFeed().items(t)
eq_(2, len(items))
eq_(q.id, items[0].id)
def test_tagged_feed_link(self):
"""Make sure the tagged feed is discoverable on the questions page."""
tag(name='green', slug='green', save=True)
url = urlparams(reverse('questions.questions'), tagged='green')
response = self.client.get(url)
doc = pq(response.content)
feed_links = doc('link[type="application/atom+xml"]')
eq_(2, len(feed_links))
eq_('Recently updated questions', feed_links[0].attrib['title'])
eq_('/en-US/questions/feed', feed_links[0].attrib['href'])
eq_('Recently updated questions tagged green',
feed_links[1].attrib['title'])
eq_('/en-US/questions/tagged/green/feed',
feed_links[1].attrib['href'])
def test_no_inactive_users(self):
"""Ensure that inactive users' questions don't appear in the feed."""
u = user(is_active=False, save=True)
q = Question(title='Test Question', content='Lorem Ipsum Dolor',
creator_id=u.id)
q.save()
assert q.id not in [x.id for x in QuestionsFeed().items({})]
def test_question_feed_with_product(self):
"""Test that questions feeds with products work."""
p = product(save=True)
url = urlparams(reverse('questions.questions'), product=p.slug)
res = self.client.get(url)
doc = pq(res.content)
feed_links = doc('link[type="application/atom+xml"]')
feed = feed_links[0]
eq_(1, len(feed_links))
eq_('Recently updated questions', feed.attrib['title'])
eq_('/en-US/questions/feed?product=' + p.slug, feed.attrib['href'])
def test_question_feed_with_product_and_topic(self):
"""Test that questions feeds with products and topics work."""
p = product(save=True)
t = topic(product=p, save=True)
url = urlparams(reverse('questions.questions'),
product=p.slug, topic=t.slug)
res = self.client.get(url)
doc = pq(res.content)
feed_links = doc('link[type="application/atom+xml"]')
feed = feed_links[0]
eq_(1, len(feed_links))
eq_('Recently updated questions', feed.attrib['title'])
eq_(urlparams('/en-US/questions/feed', product=p.slug, topic=t.slug),
feed.attrib['href'])
```
#### File: kitsune/sumo/static_finders.py
```python
from django.contrib.staticfiles.finders import BaseStorageFinder
from django.contrib.staticfiles.storage import StaticFilesStorage
class WTFinder(BaseStorageFinder):
"""A staticfiles finder that looks in STATIC_ROOT.
This is super lame!
It is specifically for when DEBUG = True because jingo-minify puts
compiled files in STATIC_ROOT. gah!
"""
storage = StaticFilesStorage
def list(self, ignore_patterns):
return []
```
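A sketch of how a finder like `WTFinder` is typically registered in Django settings. The two default finder paths are Django's own; the dotted path to `WTFinder` is inferred from the file location above, and whether kitsune's settings actually list the finders this way is an assumption.

```python
# settings.py (sketch) -- register WTFinder alongside Django's default finders
# so that files jingo-minify writes into STATIC_ROOT are found when DEBUG=True.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'kitsune.sumo.static_finders.WTFinder',  # the finder defined above
)
```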
#### File: kitsune/migrations/198-set-unusable-password.py
```python
from django.contrib.auth.models import User
from django.contrib.auth.hashers import UNUSABLE_PASSWORD
def run():
users = User.objects.filter(password='<PASSWORD>')
num = users.update(password=<PASSWORD>)
if not num:
print 'There is nothing to update.'
return
print 'Done! Updated %d passwords.' % num
```
#### File: kitsune/migrations/226-retopic-questions.py
```python
from kitsune.products.models import Product, Topic
from kitsune.questions import question_config
from kitsune.questions.models import Question
def run():
# Make sure all topics listed in kitsune.questions.question_config exist.
for prod_desc in question_config.products.values():
for product_slug in prod_desc.get('products', []):
# Note: If this fails, add the missing product to
# migration 156.
product = Product.objects.get(slug=product_slug)
for topic_desc in prod_desc['categories'].values():
_, created = Topic.objects.get_or_create(
slug=topic_desc['topic'],
product=product,
defaults={
'title': topic_desc['name'],
'display_order': 1000,
}
)
if created:
print ('Created missing topic %s/%s'
% (product_slug, topic_desc['topic']))
# Assign all the right new topics to the right old topics.
for product in Product.objects.all():
topics = Topic.objects.filter(product=product)
for topic in topics:
questions = Question.objects.filter(products=product,
old_topics__slug=topic.slug)
print '%s / %s (%d questions)' % (product.title, topic.title, len(questions))
for q in questions:
q.topics.add(topic)
```
#### File: kitsune/scripts/localelinter.py
```python
import itertools
import optparse
import os
import re
import sys
try:
import polib # from http://bitbucket.org/izi/polib
except ImportError:
print 'You need to install polib. Do:'
print ''
print ' pip install polib'
sys.exit()
USAGE = 'usage: %prog [FILE|DIR]'
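# INTERP_RE captures the interpolation placeholders that must match between a
# msgid and its msgstr: printf-style tokens (%s, %d, %(name)s, %0.2f, ...) and
# str.format-style tokens ({0}, {name}, ...).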
INTERP_RE = re.compile(
r'('
r'(?:%(?:[(]\S+?[)])?[#0+-]?[\.\d\*]*[hlL]?[diouxXeEfFgGcrs%])'
r'|'
r'(?:\{\S+?\})'
r')')
def asciify(thing):
if isinstance(thing, basestring):
return thing.encode('ascii', 'replace')
elif isinstance(thing, (list, tuple)):
return [asciify(s) for s in thing]
return repr(thing)
def extract_tokens(msg):
try:
tokens = [token for token in INTERP_RE.findall(msg)]
tokens.sort()
return tuple(tokens)
except TypeError:
print 'TYPEERROR', repr(msg)
def equal(id_tokens, str_tokens):
if str_tokens is None:
# This means they haven't translated the msgid, so there's
# no entry. I'm pretty sure this only applies to plurals.
return True
id_tokens = list(id_tokens)
str_tokens = list(str_tokens)
for id_token, str_token in itertools.izip_longest(
id_tokens, str_tokens, fillvalue=None):
if id_token is None or str_token is None:
return False
if id_token != str_token:
return False
return True
def verify(msgid, id_text, id_tokens, str_text, str_tokens, index):
# If the token lists aren't equal and there's a msgstr, then
# that's a problem. If there's no msgstr, it means it hasn't been
# translated.
if not equal(id_tokens, str_tokens) and str_text.strip():
print ('\nError for msgid: {msgid}\n'
'tokens: {id_tokens} VS. {str_tokens}\n'
'{key}: {id_text}\n'
'msgstr{index}: {str_text}'.format(
index='[{index}]'.format(index=index) if index is not None else '',
key='id' if index in (None, '0') else 'plural',
msgid=asciify(msgid),
id_text=asciify(id_text),
id_tokens=', '.join(asciify(id_tokens)),
str_text=asciify(str_text),
str_tokens=', '.join(asciify(str_tokens))))
return False
return True
def verify_file(fname):
"""Verifies file fname
This prints to stdout errors it found in fname. It returns the
number of errors.
"""
if not fname.endswith('.po'):
print '{fname} is not a .po file.'.format(fname=fname)
return 1
print 'Working on {fname}'.format(fname=fname)
po = polib.pofile(fname)
count = 0
bad_count = 0
for entry in po:
if not entry.msgid_plural:
if not entry.msgid and entry.msgstr:
continue
id_tokens = extract_tokens(entry.msgid)
str_tokens = extract_tokens(entry.msgstr)
if not verify(entry.msgid, entry.msgid, id_tokens, entry.msgstr,
str_tokens, None):
bad_count += 1
else:
for key in sorted(entry.msgstr_plural.keys()):
if key == '0':
# This is the 1 case.
text = entry.msgid
else:
text = entry.msgid_plural
id_tokens = extract_tokens(text)
str_tokens = extract_tokens(entry.msgstr_plural[key])
if not verify(entry.msgid, text, id_tokens,
entry.msgstr_plural[key], str_tokens, key):
bad_count += 1
count += 1
print ('\nVerified {count} messages in {fname}. '
'{badcount} possible errors.'.format(
count=count, fname=fname, badcount=bad_count))
return bad_count
def verify_directory(dir):
po_files = {}
for root, dirs, files in os.walk(dir):
for fn in files:
if not fn.endswith('.po'):
continue
fn = os.path.join(root, fn)
po_files[fn] = verify_file(fn)
print '---'
total_errors = sum(val for key, val in po_files.items())
if total_errors == 0:
return 0
print 'Problem locale files:'
po_files = sorted([(val, key) for key, val in po_files.items()],
reverse=True)
for val, key in po_files:
if val:
print '{val:>5} {key}'.format(key=key, val=val)
return 1
if __name__ == '__main__':
parser = optparse.OptionParser(usage=USAGE)
(options, args) = parser.parse_args()
if not args:
parser.print_help()
sys.exit(1)
if os.path.isdir(args[0]):
sys.exit(verify_directory(args[0]))
# Return 0 if everything was fine or 1 if there were errors.
sys.exit(verify_file(args[0]) != 0)
```
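A small, self-contained illustration of the token comparison the linter performs. The strings are made up for the example; only the regex and the sort-then-compare idea are taken from the script above.

```python
# Minimal sketch of localelinter's placeholder comparison on hypothetical strings.
import re

INTERP_RE = re.compile(
    r'('
    r'(?:%(?:[(]\S+?[)])?[#0+-]?[\.\d\*]*[hlL]?[diouxXeEfFgGcrs%])'
    r'|'
    r'(?:\{\S+?\})'
    r')')

def extract_tokens(msg):
    # Same idea as the script: collect placeholders and sort them.
    return tuple(sorted(INTERP_RE.findall(msg)))

# A faithful translation keeps the same placeholder set:
assert extract_tokens('Saved %(count)d files') == extract_tokens('%(count)d fichiers enregistres')
# A broken one does not, and would be reported by verify():
assert extract_tokens('Saved %(count)d files') != extract_tokens('{count} fichiers enregistres')
```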
#### File: tests/client_0_8/fake_redirect.py
```python
import socket
import sys
from optparse import OptionParser
from Queue import Queue
import amqplib.client_0_8 as amqp
from amqplib.client_0_8.connection import AMQP_PROTOCOL_HEADER, _MethodReader
from amqplib.client_0_8.serialization import AMQPReader, AMQPWriter
class FakeRedirectConnection(amqp.Connection):
def __init__(self, sock):
self.channels = {}
super(amqp.Connection, self).__init__(self, 0)
self.out = AMQPWriter(sock.makefile('w'))
self.input = AMQPReader(sock.makefile('r'))
self.method_reader = _MethodReader(self.input)
def do_redirect(self, dest):
if self.input.read(8) != AMQP_PROTOCOL_HEADER:
print "Didn't receive AMQP 0-8 header"
return
# major, minor seems backwards, but that's what RabbitMQ sends
self.start(8, 0,
{'product': 'fake_redirect_0_8.py'},
['AMQPLAIN'],
['en_US'])
self.wait(allowed_methods=[
(10, 11), # start_ok
])
self.tune(0, 0, 0)
self.wait(allowed_methods=[
(10, 31), # tune_ok
])
self.wait(allowed_methods=[
(10, 40), # open
])
if self.insist:
self.close(reply_text="Can't redirect, insist was set to True")
else:
self.redirect(dest, '')
try:
self.wait(allowed_methods=[
(10, 60), # close
])
except amqp.AMQPConnectionException:
pass
print 'Redirect finished'
def fake_op(self, args):
"""
We're not really much interested in what the client sends for
start_ok, tune_ok
"""
pass
##############
def _open(self, args):
virtual_host = args.read_shortstr()
capabilities = args.read_shortstr()
self.insist = args.read_bit()
def redirect(self, host, known_hosts):
args = AMQPWriter()
args.write_shortstr(host)
args.write_shortstr(known_hosts)
self._send_channel_method_frame(0, (10, 50), args)
def start(self, version_major, version_minor, server_properties,
mechanisms, locales):
args = AMQPWriter()
args.write_octet(version_major)
args.write_octet(version_minor)
args.write_table(server_properties)
args.write_longstr(' '.join(mechanisms))
args.write_longstr(' '.join(locales))
self._send_channel_method_frame(0, (10, 10), args)
def tune(self, channel_max, frame_max, heartbeat):
args = AMQPWriter()
args.write_short(channel_max)
args.write_long(frame_max)
args.write_short(heartbeat)
self._send_channel_method_frame(0, (10, 30), args)
#
# Monkeypatch the amqplib.client_0_8.Connection _METHOD_MAP dict to
# work with our FakeRedirectConnection
#
amqp.Connection._METHOD_MAP[(10, 11)] = FakeRedirectConnection.fake_op
amqp.Connection._METHOD_MAP[(10, 31)] = FakeRedirectConnection.fake_op
amqp.Connection._METHOD_MAP[(10, 40)] = FakeRedirectConnection._open
def main():
parser = OptionParser(usage='usage: %prog [options]\nexample: %prog --listen=127.0.0.1:5000 --redirect=127.0.0.1:5672')
parser.add_option('--listen', dest='listen',
help='ip:port to listen for an AMQP connection on',
default=None)
parser.add_option('--redirect', dest='redirect',
help='ip:port to redirect AMQP connection to',
default=None)
options, args = parser.parse_args()
if not options.listen or not options.redirect:
parser.print_help()
sys.exit(1)
listen_ip, listen_port = options.listen.split(':', 1)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((listen_ip, int(listen_port)))
print 'listening for connection...'
s.listen(1)
while True:
sock, addr = s.accept()
print 'Accepted connection from', addr
conn = FakeRedirectConnection(sock)
conn.do_redirect(options.redirect)
if __name__ == '__main__':
main()
```
#### File: tests/client_0_8/settings.py
```python
import logging
from optparse import OptionParser
connect_args = {}
test_args = {'verbosity': 1}
def parse_args():
parser = OptionParser(usage='usage: %prog [options]')
parser.add_option('--host', dest='host',
help='AMQP server to connect to (default: %default)',
default='localhost')
parser.add_option('-u', '--userid', dest='userid',
help='userid to authenticate as (default: %default)',
default='guest')
parser.add_option('-p', '--password', dest='password',
help='password to authenticate with (default: %default)',
default='guest')
parser.add_option('--ssl', dest='ssl', action='store_true',
help='Enable SSL (default: not enabled)',
default=False)
parser.add_option('--debug', dest='debug', action='store_true',
help='Display debugging output',
default=False)
parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
help='Run unittests with increased verbosity',
default=False)
options, args = parser.parse_args()
if options.debug:
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
amqplib_logger = logging.getLogger('amqplib')
amqplib_logger.addHandler(console)
amqplib_logger.setLevel(logging.DEBUG)
connect_args['host'] = options.host
connect_args['userid'] = options.userid
connect_args['password'] = <PASSWORD>
connect_args['ssl'] = options.ssl
if options.verbose:
test_args['verbosity'] = 2
parse_args()
```
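The file above parses the command line at import time and exposes `connect_args` and `test_args`; how the test modules consume them is not shown here. A plausible sketch, assuming it is run from the same directory (the `Connection` keyword arguments are part of amqplib's client_0_8 API, the rest is an assumption):

```python
# Sketch: open a connection using the CLI-derived settings parsed above.
import amqplib.client_0_8 as amqp
from settings import connect_args, test_args

conn = amqp.Connection(**connect_args)  # host, userid, password, ssl
channel = conn.channel()
# ... exercise the channel, with unittest verbosity test_args['verbosity'] ...
channel.close()
conn.close()
```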
#### File: coverage/coverage/misc.py
```python
def nice_pair(pair):
"""Make a nice string representation of a pair of numbers.
If the numbers are equal, just return the number, otherwise return the pair
with a dash between them, indicating the range.
"""
start, end = pair
if start == end:
return "%d" % start
else:
return "%d-%d" % (start, end)
def format_lines(statements, lines):
"""Nicely format a list of line numbers.
Format a list of line numbers for printing by coalescing groups of lines as
long as the lines represent consecutive statements. This will coalesce
even if there are gaps between statements.
For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
`lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
"""
pairs = []
i = 0
j = 0
start = None
while i < len(statements) and j < len(lines):
if statements[i] == lines[j]:
if start == None:
start = lines[j]
end = lines[j]
j += 1
elif start:
pairs.append((start, end))
start = None
i += 1
if start:
pairs.append((start, end))
ret = ', '.join(map(nice_pair, pairs))
return ret
def expensive(fn):
"""A decorator to cache the result of an expensive operation.
Only applies to methods with no arguments.
"""
attr = "_cache_" + fn.__name__
def _wrapped(self):
"""Inner fn that checks the cache."""
if not hasattr(self, attr):
setattr(self, attr, fn(self))
return getattr(self, attr)
return _wrapped
class CoverageException(Exception):
"""An exception specific to Coverage."""
pass
class NoSource(CoverageException):
"""Used to indicate we couldn't find the source for a module."""
pass
```
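The example in the `format_lines()` docstring can be checked directly; a minimal sketch, assuming the `coverage` package in this tree is importable:

```python
# Reproduces the example from format_lines()' docstring.
from coverage.misc import format_lines, nice_pair

statements = [1, 2, 3, 4, 5, 10, 11, 12, 13, 14]
lines = [1, 2, 5, 10, 11, 13, 14]
print(format_lines(statements, lines))  # -> 1-2, 5-11, 13-14
print(nice_pair((7, 7)))                # -> 7
print(nice_pair((3, 9)))                # -> 3-9
```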
#### File: coverage/coverage/parser.py
```python
import glob, opcode, os, re, sys, token, tokenize
from coverage.backward import set, sorted, StringIO # pylint: disable-msg=W0622
from coverage.bytecode import ByteCodes, CodeObjects
from coverage.misc import nice_pair, CoverageException, NoSource, expensive
class CodeParser(object):
"""Parse code to find executable lines, excluded lines, etc."""
def __init__(self, text=None, filename=None, exclude=None):
"""
Source can be provided as `text`, the text itself, or `filename`, from
which text will be read. Excluded lines are those that match `exclude`,
a regex.
"""
assert text or filename, "CodeParser needs either text or filename"
self.filename = filename or "<code>"
self.text = text
if not self.text:
try:
sourcef = open(self.filename, 'rU')
self.text = sourcef.read()
sourcef.close()
except IOError:
_, err, _ = sys.exc_info()
raise NoSource(
"No source for code: %r: %s" % (self.filename, err)
)
self.text = self.text.replace('\r\n', '\n')
self.exclude = exclude
self.show_tokens = False
# The text lines of the parsed code.
self.lines = self.text.split('\n')
# The line numbers of excluded lines of code.
self.excluded = set()
# The line numbers of docstring lines.
self.docstrings = set()
# The line numbers of class definitions.
self.classdefs = set()
# A dict mapping line numbers to (lo,hi) for multi-line statements.
self.multiline = {}
# The line numbers that start statements.
self.statement_starts = set()
# Lazily-created ByteParser
self._byte_parser = None
def _get_byte_parser(self):
"""Create a ByteParser on demand."""
if not self._byte_parser:
self._byte_parser = \
ByteParser(text=self.text, filename=self.filename)
return self._byte_parser
byte_parser = property(_get_byte_parser)
def _raw_parse(self):
"""Parse the source to find the interesting facts about its lines.
A handful of member fields are updated.
"""
# Find lines which match an exclusion pattern.
if self.exclude:
re_exclude = re.compile(self.exclude)
for i, ltext in enumerate(self.lines):
if re_exclude.search(ltext):
self.excluded.add(i+1)
# Tokenize, to find excluded suites, to find docstrings, and to find
# multi-line statements.
indent = 0
exclude_indent = 0
excluding = False
prev_toktype = token.INDENT
first_line = None
tokgen = tokenize.generate_tokens(StringIO(self.text).readline)
for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
if self.show_tokens: # pragma: no cover
print("%10s %5s %-20r %r" % (
tokenize.tok_name.get(toktype, toktype),
nice_pair((slineno, elineno)), ttext, ltext
))
if toktype == token.INDENT:
indent += 1
elif toktype == token.DEDENT:
indent -= 1
elif toktype == token.NAME and ttext == 'class':
# Class definitions look like branches in the byte code, so
# we need to exclude them. The simplest way is to note the
# lines with the 'class' keyword.
self.classdefs.add(slineno)
elif toktype == token.OP and ttext == ':':
if not excluding and elineno in self.excluded:
# Start excluding a suite. We trigger off of the colon
# token so that the #pragma comment will be recognized on
# the same line as the colon.
exclude_indent = indent
excluding = True
elif toktype == token.STRING and prev_toktype == token.INDENT:
# Strings that are first on an indented line are docstrings.
# (a trick from trace.py in the stdlib.) This works for
# 99.9999% of cases. For the rest (!) see:
# http://stackoverflow.com/questions/1769332/x/1769794#1769794
for i in range(slineno, elineno+1):
self.docstrings.add(i)
elif toktype == token.NEWLINE:
if first_line is not None and elineno != first_line:
# We're at the end of a line, and we've ended on a
# different line than the first line of the statement,
# so record a multi-line range.
rng = (first_line, elineno)
for l in range(first_line, elineno+1):
self.multiline[l] = rng
first_line = None
if ttext.strip() and toktype != tokenize.COMMENT:
# A non-whitespace token.
if first_line is None:
# The token is not whitespace, and is the first in a
# statement.
first_line = slineno
# Check whether to end an excluded suite.
if excluding and indent <= exclude_indent:
excluding = False
if excluding:
self.excluded.add(elineno)
prev_toktype = toktype
# Find the starts of the executable statements.
self.statement_starts.update(self.byte_parser._find_statements())
def first_line(self, line):
"""Return the first line number of the statement including `line`."""
rng = self.multiline.get(line)
if rng:
first_line = rng[0]
else:
first_line = line
return first_line
def first_lines(self, lines, ignore=None):
"""Map the line numbers in `lines` to the correct first line of the
statement.
Skip any line mentioned in `ignore`.
Returns a sorted list of the first lines.
"""
ignore = ignore or []
lset = set()
for l in lines:
if l in ignore:
continue
new_l = self.first_line(l)
if new_l not in ignore:
lset.add(new_l)
return sorted(lset)
def parse_source(self):
"""Parse source text to find executable lines, excluded lines, etc.
Return values are 1) a sorted list of executable line numbers, and
2) a sorted list of excluded line numbers.
Reported line numbers are normalized to the first line of multi-line
statements.
"""
self._raw_parse()
excluded_lines = self.first_lines(self.excluded)
ignore = excluded_lines + list(self.docstrings)
lines = self.first_lines(self.statement_starts, ignore)
return lines, excluded_lines
def arcs(self):
"""Get information about the arcs available in the code.
Returns a sorted list of line number pairs. Line numbers have been
normalized to the first line of multiline statements.
"""
all_arcs = []
for l1, l2 in self.byte_parser._all_arcs():
fl1 = self.first_line(l1)
fl2 = self.first_line(l2)
if fl1 != fl2:
all_arcs.append((fl1, fl2))
return sorted(all_arcs)
arcs = expensive(arcs)
def exit_counts(self):
"""Get a mapping from line numbers to count of exits from that line.
Excluded lines are excluded.
"""
excluded_lines = self.first_lines(self.excluded)
exit_counts = {}
for l1, l2 in self.arcs():
if l1 == -1:
# Don't ever report -1 as a line number
continue
if l1 in excluded_lines:
# Don't report excluded lines as line numbers.
continue
if l2 in excluded_lines:
# Arcs to excluded lines shouldn't count.
continue
if l1 not in exit_counts:
exit_counts[l1] = 0
exit_counts[l1] += 1
# Class definitions have one extra exit, so remove one for each:
for l in self.classdefs:
# Ensure key is there: classdefs can include excluded lines.
if l in exit_counts:
exit_counts[l] -= 1
return exit_counts
exit_counts = expensive(exit_counts)
## Opcodes that guide the ByteParser.
def _opcode(name):
"""Return the opcode by name from the opcode module."""
return opcode.opmap[name]
def _opcode_set(*names):
"""Return a set of opcodes by the names in `names`."""
return set([_opcode(name) for name in names])
# Opcodes that leave the code object.
OPS_CODE_END = _opcode_set('RETURN_VALUE')
# Opcodes that unconditionally end the code chunk.
OPS_CHUNK_END = _opcode_set(
'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'RETURN_VALUE', 'RAISE_VARARGS',
'BREAK_LOOP', 'CONTINUE_LOOP',
)
# Opcodes that push a block on the block stack.
OPS_PUSH_BLOCK = _opcode_set('SETUP_LOOP', 'SETUP_EXCEPT', 'SETUP_FINALLY')
# Block types for exception handling.
OPS_EXCEPT_BLOCKS = _opcode_set('SETUP_EXCEPT', 'SETUP_FINALLY')
# Opcodes that pop a block from the block stack.
OPS_POP_BLOCK = _opcode_set('POP_BLOCK')
# Opcodes that have a jump destination, but aren't really a jump.
OPS_NO_JUMP = _opcode_set('SETUP_EXCEPT', 'SETUP_FINALLY')
# Individual opcodes we need below.
OP_BREAK_LOOP = _opcode('BREAK_LOOP')
OP_END_FINALLY = _opcode('END_FINALLY')
OP_COMPARE_OP = _opcode('COMPARE_OP')
COMPARE_EXCEPTION = 10 # just have to get this const from the code.
OP_LOAD_CONST = _opcode('LOAD_CONST')
OP_RETURN_VALUE = _opcode('RETURN_VALUE')
class ByteParser(object):
"""Parse byte codes to understand the structure of code."""
def __init__(self, code=None, text=None, filename=None):
if code:
self.code = code
else:
if not text:
assert filename, "If no code or text, need a filename"
sourcef = open(filename, 'rU')
text = sourcef.read()
sourcef.close()
try:
# Python 2.3 and 2.4 don't like partial last lines, so be sure
# the text ends nicely for them.
self.code = compile(text + '\n', filename, "exec")
except SyntaxError:
_, synerr, _ = sys.exc_info()
raise CoverageException(
"Couldn't parse '%s' as Python source: '%s' at line %d" %
(filename, synerr.msg, synerr.lineno)
)
def child_parsers(self):
"""Iterate over all the code objects nested within this one.
The iteration includes `self` as its first value.
"""
return map(lambda c: ByteParser(code=c), CodeObjects(self.code))
# Getting numbers from the lnotab value changed in Py3.0.
if sys.hexversion >= 0x03000000:
def _lnotab_increments(self, lnotab):
"""Return a list of ints from the lnotab bytes in 3.x"""
return list(lnotab)
else:
def _lnotab_increments(self, lnotab):
"""Return a list of ints from the lnotab string in 2.x"""
return [ord(c) for c in lnotab]
def _bytes_lines(self):
"""Map byte offsets to line numbers in `code`.
Uses co_lnotab described in Python/compile.c to map byte offsets to
line numbers. Returns a list: [(b0, l0), (b1, l1), ...]
"""
# Adapted from dis.py in the standard library.
byte_increments = self._lnotab_increments(self.code.co_lnotab[0::2])
line_increments = self._lnotab_increments(self.code.co_lnotab[1::2])
bytes_lines = []
last_line_num = None
line_num = self.code.co_firstlineno
byte_num = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if line_num != last_line_num:
bytes_lines.append((byte_num, line_num))
last_line_num = line_num
byte_num += byte_incr
line_num += line_incr
if line_num != last_line_num:
bytes_lines.append((byte_num, line_num))
return bytes_lines
def _find_statements(self):
"""Find the statements in `self.code`.
Return a set of line numbers that start statements. Recurses into all
code objects reachable from `self.code`.
"""
stmts = set()
for bp in self.child_parsers():
# Get all of the lineno information from this code.
for _, l in bp._bytes_lines():
stmts.add(l)
return stmts
def _disassemble(self): # pragma: no cover
"""Disassemble code, for ad-hoc experimenting."""
import dis
for bp in self.child_parsers():
print("\n%s: " % bp.code)
dis.dis(bp.code)
print("Bytes lines: %r" % bp._bytes_lines())
print("")
def _split_into_chunks(self):
"""Split the code object into a list of `Chunk` objects.
Each chunk is only entered at its first instruction, though there can
be many exits from a chunk.
Returns a list of `Chunk` objects.
"""
# The list of chunks so far, and the one we're working on.
chunks = []
chunk = None
bytes_lines_map = dict(self._bytes_lines())
# The block stack: loops and try blocks get pushed here for the
# implicit jumps that can occur.
# Each entry is a tuple: (block type, destination)
block_stack = []
# Some op codes are followed by branches that should be ignored. This
# is a count of how many ignores are left.
ignore_branch = 0
# We have to handle the last two bytecodes specially.
ult = penult = None
for bc in ByteCodes(self.code.co_code):
# Maybe have to start a new block
if bc.offset in bytes_lines_map:
if chunk:
chunk.exits.add(bc.offset)
chunk = Chunk(bc.offset, bytes_lines_map[bc.offset])
chunks.append(chunk)
if not chunk:
chunk = Chunk(bc.offset)
chunks.append(chunk)
# Look at the opcode
if bc.jump_to >= 0 and bc.op not in OPS_NO_JUMP:
if ignore_branch:
# Someone earlier wanted us to ignore this branch.
ignore_branch -= 1
else:
# The opcode has a jump, it's an exit for this chunk.
chunk.exits.add(bc.jump_to)
if bc.op in OPS_CODE_END:
# The opcode can exit the code object.
chunk.exits.add(-1)
if bc.op in OPS_PUSH_BLOCK:
# The opcode adds a block to the block_stack.
block_stack.append((bc.op, bc.jump_to))
if bc.op in OPS_POP_BLOCK:
# The opcode pops a block from the block stack.
block_stack.pop()
if bc.op in OPS_CHUNK_END:
# This opcode forces the end of the chunk.
if bc.op == OP_BREAK_LOOP:
# A break is implicit: jump where the top of the
# block_stack points.
chunk.exits.add(block_stack[-1][1])
chunk = None
if bc.op == OP_END_FINALLY:
if block_stack:
# A break that goes through a finally will jump to whatever
# block is on top of the stack.
chunk.exits.add(block_stack[-1][1])
# For the finally clause we need to find the closest exception
# block, and use its jump target as an exit.
for iblock in range(len(block_stack)-1, -1, -1):
if block_stack[iblock][0] in OPS_EXCEPT_BLOCKS:
chunk.exits.add(block_stack[iblock][1])
break
if bc.op == OP_COMPARE_OP and bc.arg == COMPARE_EXCEPTION:
# This is an except clause. We want to overlook the next
# branch, so that except's don't count as branches.
ignore_branch += 1
penult = ult
ult = bc
if chunks:
# The last two bytecodes could be a dummy "return None" that
# shouldn't be counted as real code. Every Python code object seems
# to end with a return, and a "return None" is inserted if there
# isn't an explicit return in the source.
if ult and penult:
if penult.op == OP_LOAD_CONST and ult.op == OP_RETURN_VALUE:
if self.code.co_consts[penult.arg] is None:
# This is "return None", but is it dummy? A real line
# would be a last chunk all by itself.
if chunks[-1].byte != penult.offset:
# Split the last chunk
last_chunk = chunks[-1]
last_chunk.exits.remove(-1)
last_chunk.exits.add(penult.offset)
chunk = Chunk(penult.offset)
chunk.exits.add(-1)
chunks.append(chunk)
# Give all the chunks a length.
chunks[-1].length = bc.next_offset - chunks[-1].byte
for i in range(len(chunks)-1):
chunks[i].length = chunks[i+1].byte - chunks[i].byte
return chunks
def _arcs(self):
"""Find the executable arcs in the code.
Returns a set of pairs, (from,to). From and to are integer line
numbers. If from is -1, then the arc is an entrance into the code
object. If to is -1, the arc is an exit from the code object.
"""
chunks = self._split_into_chunks()
# A map from byte offsets to chunks jumped into.
byte_chunks = dict([(c.byte, c) for c in chunks])
# Build a map from byte offsets to actual lines reached.
byte_lines = {-1:[-1]}
bytes_to_add = set([c.byte for c in chunks])
while bytes_to_add:
byte_to_add = bytes_to_add.pop()
if byte_to_add in byte_lines or byte_to_add == -1:
continue
# Which lines does this chunk lead to?
bytes_considered = set()
bytes_to_consider = [byte_to_add]
lines = set()
while bytes_to_consider:
byte = bytes_to_consider.pop()
bytes_considered.add(byte)
# Find chunk for byte
try:
ch = byte_chunks[byte]
except KeyError:
for ch in chunks:
if ch.byte <= byte < ch.byte+ch.length:
break
else:
# No chunk for this byte!
raise Exception("Couldn't find chunk @ %d" % byte)
byte_chunks[byte] = ch
if ch.line:
lines.add(ch.line)
else:
for ex in ch.exits:
if ex == -1:
lines.add(-1)
elif ex not in bytes_considered:
bytes_to_consider.append(ex)
bytes_to_add.update(ch.exits)
byte_lines[byte_to_add] = lines
# Figure out for each chunk where the exits go.
arcs = set()
for chunk in chunks:
if chunk.line:
for ex in chunk.exits:
for exit_line in byte_lines[ex]:
if chunk.line != exit_line:
arcs.add((chunk.line, exit_line))
for line in byte_lines[0]:
arcs.add((-1, line))
return arcs
def _all_chunks(self):
"""Returns a list of `Chunk` objects for this code and its children.
See `_split_into_chunks` for details.
"""
chunks = []
for bp in self.child_parsers():
chunks.extend(bp._split_into_chunks())
return chunks
def _all_arcs(self):
"""Get the set of all arcs in this code object and its children.
See `_arcs` for details.
"""
arcs = set()
for bp in self.child_parsers():
arcs.update(bp._arcs())
return arcs
class Chunk(object):
"""A sequence of bytecodes with a single entrance.
To analyze byte code, we have to divide it into chunks, sequences of byte
codes such that each basic block has only one entrance, the first
instruction in the block.
This is almost the CS concept of `basic block`_, except that we're willing
to have many exits from a chunk, and "basic block" is a more cumbersome
term.
.. _basic block: http://en.wikipedia.org/wiki/Basic_block
An exit of -1 means the chunk can leave the code (return).
"""
def __init__(self, byte, line=0):
self.byte = byte
self.line = line
self.length = 0
self.exits = set()
def __repr__(self):
return "<%d+%d @%d %r>" % (
self.byte, self.length, self.line, list(self.exits)
)
class AdHocMain(object): # pragma: no cover
"""An ad-hoc main for code parsing experiments."""
def main(self, args):
"""A main function for trying the code from the command line."""
from optparse import OptionParser
parser = OptionParser()
parser.add_option(
"-c", action="store_true", dest="chunks",
help="Show basic block chunks"
)
parser.add_option(
"-d", action="store_true", dest="dis",
help="Disassemble"
)
parser.add_option(
"-R", action="store_true", dest="recursive",
help="Recurse to find source files"
)
parser.add_option(
"-s", action="store_true", dest="source",
help="Show analyzed source"
)
parser.add_option(
"-t", action="store_true", dest="tokens",
help="Show tokens"
)
options, args = parser.parse_args()
if options.recursive:
if args:
root = args[0]
else:
root = "."
for root, _, _ in os.walk(root):
for f in glob.glob(root + "/*.py"):
self.adhoc_one_file(options, f)
else:
self.adhoc_one_file(options, args[0])
def adhoc_one_file(self, options, filename):
"""Process just one file."""
if options.dis or options.chunks:
try:
bp = ByteParser(filename=filename)
except CoverageException:
_, err, _ = sys.exc_info()
print("%s" % (err,))
return
if options.dis:
print("Main code:")
bp._disassemble()
if options.chunks:
chunks = bp._all_chunks()
if options.recursive:
print("%6d: %s" % (len(chunks), filename))
else:
print("Chunks: %r" % chunks)
arcs = bp._all_arcs()
print("Arcs: %r" % sorted(arcs))
if options.source or options.tokens:
cp = CodeParser(filename=filename, exclude=r"no\s*cover")
cp.show_tokens = options.tokens
cp._raw_parse()
if options.source:
if options.chunks:
arc_width, arc_chars = self.arc_ascii_art(arcs)
else:
arc_width, arc_chars = 0, {}
exit_counts = cp.exit_counts()
for i, ltext in enumerate(cp.lines):
lineno = i+1
m0 = m1 = m2 = m3 = a = ' '
if lineno in cp.statement_starts:
m0 = '-'
exits = exit_counts.get(lineno, 0)
if exits > 1:
m1 = str(exits)
if lineno in cp.docstrings:
m2 = '"'
if lineno in cp.classdefs:
m2 = 'C'
if lineno in cp.excluded:
m3 = 'x'
a = arc_chars.get(lineno, '').ljust(arc_width)
print("%4d %s%s%s%s%s %s" %
(lineno, m0, m1, m2, m3, a, ltext)
)
def arc_ascii_art(self, arcs):
"""Draw arcs as ascii art.
Returns a width of characters needed to draw all the arcs, and a
dictionary mapping line numbers to ascii strings to draw for that line.
"""
arc_chars = {}
for lfrom, lto in sorted(arcs):
if lfrom == -1:
arc_chars[lto] = arc_chars.get(lto, '') + 'v'
elif lto == -1:
arc_chars[lfrom] = arc_chars.get(lfrom, '') + '^'
else:
if lfrom == lto-1:
# Don't show obvious arcs.
continue
if lfrom < lto:
l1, l2 = lfrom, lto
else:
l1, l2 = lto, lfrom
w = max([len(arc_chars.get(l, '')) for l in range(l1, l2+1)])
for l in range(l1, l2+1):
if l == lfrom:
ch = '<'
elif l == lto:
ch = '>'
else:
ch = '|'
arc_chars[l] = arc_chars.get(l, '').ljust(w) + ch
if arc_chars:
arc_width = max([len(a) for a in arc_chars.values()])
else:
arc_width = 0
return arc_width, arc_chars
if __name__ == '__main__':
AdHocMain().main(sys.argv[1:])
```
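A minimal sketch of how the byte-code parser above might be exercised directly. The module name `parser` and the file `example.py` are assumptions; the calls simply mirror what `adhoc_one_file` does and are not a documented public API.
```python
# Hedged sketch, not part of coverage.py's public API: module/file names are assumptions.
from parser import ByteParser  # assumed module name for the code above

bp = ByteParser(filename="example.py")   # same constructor call as in adhoc_one_file()
for chunk in bp._all_chunks():
    print(chunk)                         # repr is "<byte+length @line [exits]>", see Chunk.__repr__
arcs = sorted(bp._all_arcs())            # set of (from_line, to_line); -1 marks code entry/exit
print(arcs)
```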
#### File: packages/logilab-astng/builder.py
```python
__docformat__ = "restructuredtext en"
import sys
from os.path import splitext, basename, dirname, exists, abspath
from inspect import isfunction, ismethod, ismethoddescriptor, isclass, \
isbuiltin
from inspect import isdatadescriptor
from logilab.common.fileutils import norm_read
from logilab.common.modutils import modpath_from_file
from logilab.astng._exceptions import ASTNGBuildingException
from logilab.astng.raw_building import *
try:
from _ast import PyCF_ONLY_AST
def parse(string):
return compile(string, "<string>", 'exec', PyCF_ONLY_AST)
from logilab.astng._nodes_ast import TreeRebuilder
except ImportError, exc:
from compiler import parse
from logilab.astng import patchcomptransformer
from logilab.astng._nodes_compiler import TreeRebuilder
# ast NG builder ##############################################################
class ASTNGBuilder:
"""provide astng building methods
"""
def __init__(self, manager=None):
if manager is None:
from logilab.astng import MANAGER as manager
self._manager = manager
self._module = None
self._file = None
self._done = None
self.rebuilder = TreeRebuilder(manager)
self._dyn_modname_map = {'gtk': 'gtk._gtk'}
def module_build(self, module, modname=None):
"""build an astng from a living module instance
"""
node = None
self._module = module
path = getattr(module, '__file__', None)
if path is not None:
path_, ext = splitext(module.__file__)
if ext in ('.py', '.pyc', '.pyo') and exists(path_ + '.py'):
node = self.file_build(path_ + '.py', modname)
if node is None:
# this is a built-in module
# get a partial representation by introspection
node = self.inspect_build(module, modname=modname, path=path)
return node
def inspect_build(self, module, modname=None, path=None):
"""build astng from a living module (i.e. using inspect)
this is used when there is no python source code available (either
because it's a built-in module or because the .py is not available)
"""
self._module = module
if modname is None:
modname = module.__name__
node = build_module(modname, module.__doc__)
node.file = node.path = path and abspath(path) or path
if self._manager is not None:
self._manager._cache[modname] = node
node.package = hasattr(module, '__path__')
self._done = {}
self.object_build(node, module)
return node
def file_build(self, path, modname=None):
"""build astng from a source code file (i.e. from an ast)
path is expected to be a python source file
"""
try:
data = norm_read(path)
except IOError, ex:
msg = 'Unable to load file %r (%s)' % (path, ex)
raise ASTNGBuildingException(msg)
self._file = path
# get module name if necessary, *before modifying sys.path*
if modname is None:
try:
modname = '.'.join(modpath_from_file(path))
except ImportError:
modname = splitext(basename(path))[0]
# build astng representation
try:
sys.path.insert(0, dirname(path))
node = self.string_build(data, modname, path)
node.file = abspath(path)
finally:
self._file = None
sys.path.pop(0)
return node
def string_build(self, data, modname='', path=None):
"""build astng from a source code stream (i.e. from an ast)"""
return self.ast_build(parse(data + '\n'), modname, path)
def ast_build(self, node, modname='', path=None):
"""build the astng from AST, return the new tree"""
if path is not None:
node_file = abspath(path)
else:
node_file = '<?>'
if modname.endswith('.__init__'):
modname = modname[:-9]
package = True
else:
package = path and path.find('__init__.py') > -1 or False
newnode = self.rebuilder.build(node, modname, node_file)
newnode.package = package
return newnode
# astng from living objects ###############################################
#
# this is actually a really minimal representation, including only Module,
# Function and Class nodes and some others as guessed
def object_build(self, node, obj):
"""recursive method which create a partial ast from real objects
(only function, class, and method are handled)
"""
if self._done.has_key(obj):
return self._done[obj]
self._done[obj] = node
for name in dir(obj):
try:
member = getattr(obj, name)
except AttributeError:
# damned ExtensionClass.Base, I know you're there !
attach_dummy_node(node, name)
continue
if ismethod(member):
member = member.im_func
if isfunction(member):
# verify this is not an imported function
if member.func_code.co_filename != getattr(self._module, '__file__', None):
attach_dummy_node(node, name, member)
continue
object_build_function(node, member, name)
elif isbuiltin(member):
# verify this is not an imported member
if self._member_module(member) != self._module.__name__:
imported_member(node, member, name)
continue
object_build_methoddescriptor(node, member, name)
elif isclass(member):
# verify this is not an imported class
if self._member_module(member) != self._module.__name__:
imported_member(node, member, name)
continue
if member in self._done:
class_node = self._done[member]
if not class_node in node.locals.get(name, ()):
node.add_local_node(class_node, name)
else:
class_node = object_build_class(node, member, name)
# recursion
self.object_build(class_node, member)
elif ismethoddescriptor(member):
assert isinstance(member, object)
object_build_methoddescriptor(node, member, name)
elif isdatadescriptor(member):
assert isinstance(member, object)
object_build_datadescriptor(node, member, name)
elif isinstance(member, (int, long, float, str, unicode)) or member is None:
attach_const_node(node, name, member)
else:
# create an empty node so that the name is actually defined
attach_dummy_node(node, name, member)
def _member_module(self, member):
modname = getattr(member, '__module__', None)
return self._dyn_modname_map.get(modname, modname)
def imported_member(node, member, name):
"""consider a class/builtin member where __module__ != current module name
check if it's sound valid and then add an import node, else use a dummy node
"""
# /!\ some classes like ExtensionClass don't have a
# __module__ attribute!
member_module = getattr(member, '__module__', '__builtin__')
try:
getattr(sys.modules[member_module], name)
except (KeyError, AttributeError):
attach_dummy_node(node, name, member)
else:
attach_import_node(node, member_module, name)
```
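A small usage sketch for `ASTNGBuilder` above; the import path is an assumption, and the snippet only exercises `string_build`, which parses a source string and returns the rebuilt module node.
```python
# Hedged sketch: builds an astng module node from a source string (Python 2 era code).
from logilab.astng.builder import ASTNGBuilder  # import path is an assumption

builder = ASTNGBuilder()
module_node = builder.string_build("def answer():\n    return 42\n", modname="example")
print(module_node)  # the rebuilt Module node for the parsed source
```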
#### File: test/data/noendingnewline.py
```python
import unittest
class TestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
def tearDown(self):
unittest.TestCase.tearDown(self)
def testIt(self):
self.a = 10
self.xxx()
def xxx(self):
if False:
pass
print 'a'
if False:
pass
pass
if False:
pass
print 'rara'
if __name__ == '__main__':
print 'test2'
unittest.main()
```
#### File: test/data/nonregr.py
```python
from __future__ import generators
try:
enumerate = enumerate
except NameError:
def enumerate(iterable):
"""emulates the python2.3 enumerate() function"""
i = 0
for val in iterable:
yield i, val
i += 1
def toto(value):
for k, v in value:
print v.get('yo')
import imp
fp, mpath, desc = imp.find_module('optparse',a)
s_opt = imp.load_module('std_optparse', fp, mpath, desc)
class OptionParser(s_opt.OptionParser):
def parse_args(self, args=None, values=None, real_optparse=False):
if real_optparse:
pass
## return super(OptionParser, self).parse_args()
else:
import optcomp
optcomp.completion(self)
class Aaa(object):
"""docstring"""
def __init__(self):
self.__setattr__('a','b')
pass
def one_public(self):
"""docstring"""
pass
def another_public(self):
"""docstring"""
pass
class Ccc(Aaa):
"""docstring"""
class Ddd(Aaa):
"""docstring"""
pass
class Eee(Ddd):
"""docstring"""
pass
```
#### File: packages/logilab-common/decorators.py
```python
__docformat__ = "restructuredtext en"
from types import MethodType
from time import clock, time
import sys, re
# XXX rewrite so we can use the decorator syntax when keyarg has to be specified
def cached(callableobj, keyarg=None):
"""Simple decorator to cache result of method call."""
if callableobj.func_code.co_argcount == 1 or keyarg == 0:
def cache_wrapper1(self, *args):
cache = '_%s_cache_' % callableobj.__name__
#print 'cache1?', cache
try:
return self.__dict__[cache]
except KeyError:
#print 'miss'
value = callableobj(self, *args)
setattr(self, cache, value)
return value
cache_wrapper1.__doc__ = callableobj.__doc__
return cache_wrapper1
elif keyarg:
def cache_wrapper2(self, *args, **kwargs):
cache = '_%s_cache_' % callableobj.__name__
key = args[keyarg-1]
#print 'cache2?', cache, self, key
try:
_cache = self.__dict__[cache]
except KeyError:
#print 'init'
_cache = {}
setattr(self, cache, _cache)
try:
return _cache[key]
except KeyError:
#print 'miss', self, cache, key
_cache[key] = callableobj(self, *args, **kwargs)
return _cache[key]
cache_wrapper2.__doc__ = callableobj.__doc__
return cache_wrapper2
def cache_wrapper3(self, *args):
cache = '_%s_cache_' % callableobj.__name__
#print 'cache3?', cache, self, args
try:
_cache = self.__dict__[cache]
except KeyError:
#print 'init'
_cache = {}
setattr(self, cache, _cache)
try:
return _cache[args]
except KeyError:
#print 'miss'
_cache[args] = callableobj(self, *args)
return _cache[args]
cache_wrapper3.__doc__ = callableobj.__doc__
return cache_wrapper3
def clear_cache(obj, funcname):
"""Function to clear a cache handled by the cached decorator."""
try:
del obj.__dict__['_%s_cache_' % funcname]
except KeyError:
pass
def copy_cache(obj, funcname, cacheobj):
"""Copy cache for <funcname> from cacheobj to obj."""
cache = '_%s_cache_' % funcname
try:
setattr(obj, cache, cacheobj.__dict__[cache])
except KeyError:
pass
class wproperty(object):
"""Simple descriptor expecting to take a modifier function as first argument
and looking for a _<function name> to retrieve the attribute.
"""
def __init__(self, setfunc):
self.setfunc = setfunc
self.attrname = '_%s' % setfunc.__name__
def __set__(self, obj, value):
self.setfunc(obj, value)
def __get__(self, obj, cls):
assert obj is not None
return getattr(obj, self.attrname)
class classproperty(object):
"""this is a simple property-like class but for class attributes.
"""
def __init__(self, get):
self.get = get
def __get__(self, inst, cls):
return self.get(cls)
class iclassmethod(object):
'''Descriptor for a method which should be available as a class method if
called on the class, or as an instance method if called on an instance.
'''
def __init__(self, func):
self.func = func
def __get__(self, instance, objtype):
if instance is None:
return MethodType(self.func, objtype, objtype.__class__)
return MethodType(self.func, instance, objtype)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def timed(f):
def wrap(*args, **kwargs):
t = time()
c = clock()
res = f(*args, **kwargs)
print '%s clock: %.9f / time: %.9f' % (f.__name__,
clock() - c, time() - t)
return res
return wrap
def locked(acquire, release):
"""Decorator taking two methods to acquire/release a lock as argument,
returning a decorator function which will call the inner method after
having called acquire(self) et will call release(self) afterwards.
"""
def decorator(f):
def wrapper(self, *args, **kwargs):
acquire(self)
try:
return f(self, *args, **kwargs)
finally:
release(self)
return wrapper
return decorator
def monkeypatch(klass, methodname=None):
"""Decorator extending class with the decorated function
>>> class A:
... pass
>>> @monkeypatch(A)
... def meth(self):
... return 12
...
>>> a = A()
>>> a.meth()
12
>>> @monkeypatch(A, 'foo')
... def meth(self):
... return 12
...
>>> a.foo()
12
"""
def decorator(func):
setattr(klass, methodname or func.__name__, func)
return func
return decorator
```
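A brief usage sketch for the `cached` and `clear_cache` helpers above, assuming they are imported from `logilab.common.decorators`; since the wrapped method takes only `self`, the single-argument `cache_wrapper1` path applies.
```python
# Hedged sketch of @cached / clear_cache (Python 2 syntax, matching the module above).
from logilab.common.decorators import cached, clear_cache  # assumed import path

class Config(object):
    @cached
    def load(self):
        print 'loading...'          # printed only on the first call
        return {'answer': 42}

cfg = Config()
cfg.load()                          # computes and stores the result as cfg._load_cache_
cfg.load()                          # served from the cache, nothing is printed
clear_cache(cfg, 'load')            # drops the cached value so the next call recomputes
```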
#### File: logilab-common/test/unittest_date.py
```python
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.date import date_range, endOfMonth
from logilab.common.date import add_days_worked, nb_open_days, \
get_national_holidays
from datetime import date, datetime, timedelta
try:
from mx.DateTime import Date as mxDate, DateTime as mxDateTime, \
now as mxNow, RelativeDateTime, RelativeDate
except ImportError:
mxDate = mxDateTime = RelativeDateTime = mxNow = None
class DateTC(TestCase):
datecls = date
datetimecls = datetime
timedeltacls = timedelta
now = datetime.now
def test_day(self):
"""enumerate days"""
r = list(date_range(self.datecls(2000,1,1), self.datecls(2000,1,4)))
expected = [self.datecls(2000,1,1), self.datecls(2000,1,2), self.datecls(2000,1,3)]
self.assertListEquals(r, expected)
r = list(date_range(self.datecls(2000,1,31), self.datecls(2000,2,3)))
expected = [self.datecls(2000,1,31), self.datecls(2000,2,1), self.datecls(2000,2,2)]
self.assertListEquals(r, expected)
r = list(date_range(self.datecls(2000,1,1), self.datecls(2000,1,6), 2))
expected = [self.datecls(2000,1,1), self.datecls(2000,1,3), self.datecls(2000,1,5)]
self.assertListEquals(r, expected)
def test_add_days_worked(self):
add = add_days_worked
# normal
self.assertEquals(add(self.datecls(2008, 1, 3), 1), self.datecls(2008, 1, 4))
# skip week-end
self.assertEquals(add(self.datecls(2008, 1, 3), 2), self.datecls(2008, 1, 7))
# skip 2 week-ends
self.assertEquals(add(self.datecls(2008, 1, 3), 8), self.datecls(2008, 1, 15))
# skip holiday + week-end
self.assertEquals(add(self.datecls(2008, 4, 30), 2), self.datecls(2008, 5, 5))
def test_get_national_holidays(self):
holidays = get_national_holidays
yield self.assertEquals, holidays(self.datecls(2008, 4, 29), self.datecls(2008, 5, 2)), \
[self.datecls(2008, 5, 1)]
yield self.assertEquals, holidays(self.datecls(2008, 5, 7), self.datecls(2008, 5, 8)), []
x = self.datetimecls(2008, 5, 7, 12, 12, 12)
yield self.assertEquals, holidays(x, x + self.timedeltacls(days=1)), []
def test_open_days_now_and_before(self):
nb = nb_open_days
x = self.now()
y = x - self.timedeltacls(seconds=1)
self.assertRaises(AssertionError, nb, x, y)
def assertOpenDays(self, start, stop, expected):
got = nb_open_days(start, stop)
self.assertEquals(got, expected)
def test_open_days_tuesday_friday(self):
self.assertOpenDays(self.datecls(2008, 3, 4), self.datecls(2008, 3, 7), 3)
def test_open_days_day_nextday(self):
self.assertOpenDays(self.datecls(2008, 3, 4), self.datecls(2008, 3, 5), 1)
def test_open_days_friday_monday(self):
self.assertOpenDays(self.datecls(2008, 3, 7), self.datecls(2008, 3, 10), 1)
def test_open_days_friday_monday_with_two_weekends(self):
self.assertOpenDays(self.datecls(2008, 3, 7), self.datecls(2008, 3, 17), 6)
def test_open_days_tuesday_wednesday(self):
"""week-end + easter monday"""
self.assertOpenDays(self.datecls(2008, 3, 18), self.datecls(2008, 3, 26), 5)
def test_open_days_friday_saturday(self):
self.assertOpenDays(self.datecls(2008, 3, 7), self.datecls(2008, 3, 8), 1)
def test_open_days_friday_sunday(self):
self.assertOpenDays(self.datecls(2008, 3, 7), self.datecls(2008, 3, 9), 1)
def test_open_days_saturday_sunday(self):
self.assertOpenDays(self.datecls(2008, 3, 8), self.datecls(2008, 3, 9), 0)
def test_open_days_saturday_monday(self):
self.assertOpenDays(self.datecls(2008, 3, 8), self.datecls(2008, 3, 10), 0)
def test_open_days_saturday_tuesday(self):
self.assertOpenDays(self.datecls(2008, 3, 8), self.datecls(2008, 3, 11), 1)
def test_open_days_now_now(self):
x = self.now()
self.assertOpenDays(x, x, 0)
def test_open_days_now_now2(self):
x = self.datetimecls(2010, 5, 24)
self.assertOpenDays(x, x, 0)
def test_open_days_afternoon_before_holiday(self):
self.assertOpenDays(self.datetimecls(2008, 5, 7, 14), self.datetimecls(2008, 5, 8, 0), 1)
def test_open_days_afternoon_before_saturday(self):
self.assertOpenDays(self.datetimecls(2008, 5, 9, 14), self.datetimecls(2008, 5, 10, 14), 1)
def test_open_days_afternoon(self):
self.assertOpenDays(self.datetimecls(2008, 5, 6, 14), self.datetimecls(2008, 5, 7, 14), 1)
class MxDateTC(DateTC):
datecls = mxDate
datetimecls = mxDateTime
timedeltacls = RelativeDateTime
now = mxNow
def check_mx(self):
if mxDate is None:
self.skip('mx.DateTime is not installed')
def setUp(self):
self.check_mx()
def test_month(self):
"""enumerate months"""
r = list(date_range(self.datecls(2000,1,2), self.datecls(2000,4,4), endOfMonth))
expected = [self.datecls(2000,1,2), self.datecls(2000,2,29), self.datecls(2000,3,31)]
self.assertListEquals(r, expected)
r = list(date_range(self.datecls(2000,11,30), self.datecls(2001,2,3), endOfMonth))
expected = [self.datecls(2000,11,30), self.datecls(2000,12,31), self.datecls(2001,1,31)]
self.assertListEquals(r, expected)
if __name__ == '__main__':
unittest_main()
```
#### File: logilab-common/test/unittest_deprecation.py
```python
import warnings
from logilab.common.testlib import TestCase, unittest_main
from logilab.common import deprecation
def moving_target():
pass
class RawInputTC(TestCase):
# XXX with 2.6 we could test warnings
# http://docs.python.org/library/warnings.html#testing-warnings
# instead we just make sure it does not crash
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def mk_func(self):
def any_func():
pass
return any_func
def test_class_deprecated(self):
class AnyClass:
__metaclass__ = deprecation.class_deprecated
def test_deprecated_func(self):
any_func = deprecation.deprecated()(self.mk_func())
any_func()
any_func = deprecation.deprecated('message')(self.mk_func())
any_func()
def test_deprecated_decorator(self):
@deprecation.deprecated_function
def any_func():
pass
any_func()
@deprecation.deprecated()
def any_func():
pass
any_func()
@deprecation.deprecated('message')
def any_func():
pass
any_func()
def test_moved(self):
# this test needs l.c.test.__init__
module = 'logilab.common.test.unittest_deprecation'
any_func = deprecation.moved(module, 'moving_target')
any_func()
if __name__ == '__main__':
unittest_main()
```
#### File: mock/tests/support.py
```python
import sys
info = sys.version_info
if info[:3] >= (3, 2, 0):
# for Python 3.2 ordinary unittest is fine
import unittest as unittest2
else:
import unittest2
try:
# need to turn it into a local variable or we can't
# import it from here under Python 2
apply = apply
except NameError:
# no apply in Python 3
def apply(f, *args, **kw):
return f(*args, **kw)
inPy3k = sys.version_info[0] == 3
with_available = sys.version_info[:2] >= (2, 5)
class SomeClass(object):
class_attribute = None
def wibble(self):
pass
```
#### File: mock/tests/support_with.py
```python
from __future__ import with_statement
import sys
__all__ = ['nested', 'catch_warnings', 'examine_warnings']
try:
from contextlib import nested
except ImportError:
from contextlib import contextmanager
@contextmanager
def nested(*managers):
exits = []
vars = []
exc = (None, None, None)
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
raise exc[1]
# copied from Python 2.6
try:
from warnings import catch_warnings
except ImportError:
class catch_warnings(object):
def __init__(self, record=False, module=None):
self._record = record
self._module = sys.modules['warnings']
self._entered = False
def __repr__(self):
args = []
if self._record:
args.append("record=True")
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
log = []
def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
class WarningMessage(object):
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
self._category_name = None
if category.__name__:
self._category_name = category.__name__
def examine_warnings(func):
def wrapper():
with catch_warnings(record=True) as ws:
func(ws)
return wrapper
```
#### File: mock/tests/testmagicmethods.py
```python
from tests.support import unittest2, inPy3k
try:
unicode
except NameError:
# Python 3
unicode = str
long = int
import inspect
from mock import Mock, MagicMock, _magics
class TestMockingMagicMethods(unittest2.TestCase):
def testDeletingMagicMethods(self):
mock = Mock()
self.assertFalse(hasattr(mock, '__getitem__'))
mock.__getitem__ = Mock()
self.assertTrue(hasattr(mock, '__getitem__'))
del mock.__getitem__
self.assertFalse(hasattr(mock, '__getitem__'))
def testMagicMethodWrapping(self):
mock = Mock()
def f(self, name):
return self, 'fish'
mock.__getitem__ = f
self.assertFalse(mock.__getitem__ is f)
self.assertEqual(mock['foo'], (mock, 'fish'))
# When you pull the function back off the *instance*,
# the first argument (self) is removed
def instance_f(name):
pass
self.assertEqual(inspect.getargspec(mock.__getitem__), inspect.getargspec(instance_f))
mock.__getitem__ = mock
self.assertTrue(mock.__getitem__ is mock)
def testMagicMethodsIsolatedBetweenMocks(self):
mock1 = Mock()
mock2 = Mock()
mock1.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock1), [])
self.assertRaises(TypeError, lambda: list(mock2))
def testRepr(self):
mock = Mock()
self.assertEqual(repr(mock), object.__repr__(mock))
mock.__repr__ = lambda s: 'foo'
self.assertEqual(repr(mock), 'foo')
def testStr(self):
mock = Mock()
self.assertEqual(str(mock), object.__str__(mock))
mock.__str__ = lambda s: 'foo'
self.assertEqual(str(mock), 'foo')
@unittest2.skipIf(inPy3k, "no unicode in Python 3")
def testUnicode(self):
mock = Mock()
self.assertEqual(unicode(mock), unicode(str(mock)))
mock.__unicode__ = lambda s: unicode('foo')
self.assertEqual(unicode(mock), unicode('foo'))
def testDictMethods(self):
mock = Mock()
self.assertRaises(TypeError, lambda: mock['foo'])
def _del():
del mock['foo']
def _set():
mock['foo'] = 3
self.assertRaises(TypeError, _del)
self.assertRaises(TypeError, _set)
_dict = {}
def getitem(s, name):
return _dict[name]
def setitem(s, name, value):
_dict[name] = value
def delitem(s, name):
del _dict[name]
mock.__setitem__ = setitem
mock.__getitem__ = getitem
mock.__delitem__ = delitem
self.assertRaises(KeyError, lambda: mock['foo'])
mock['foo'] = 'bar'
self.assertEqual(_dict, {'foo': 'bar'})
self.assertEqual(mock['foo'], 'bar')
del mock['foo']
self.assertEqual(_dict, {})
def testNumeric(self):
original = mock = Mock()
mock.value = 0
self.assertRaises(TypeError, lambda: mock + 3)
def add(self, other):
mock.value += other
return self
mock.__add__ = add
self.assertEqual(mock + 3, mock)
self.assertEqual(mock.value, 3)
del mock.__add__
def iadd(mock):
mock += 3
self.assertRaises(TypeError, iadd, mock)
mock.__iadd__ = add
mock += 6
self.assertEqual(mock, original)
self.assertEqual(mock.value, 9)
self.assertRaises(TypeError, lambda: 3 + mock)
mock.__radd__ = add
self.assertEqual(7 + mock, mock)
self.assertEqual(mock.value, 16)
def testHash(self):
mock = Mock()
# test delegation
self.assertEqual(hash(mock), Mock.__hash__(mock))
def _hash(s):
return 3
mock.__hash__ = _hash
self.assertEqual(hash(mock), 3)
def testNonZero(self):
m = Mock()
self.assertTrue(bool(m))
nonzero = lambda s: False
if not inPy3k:
m.__nonzero__ = nonzero
else:
m.__bool__ = nonzero
self.assertFalse(bool(m))
def testComparison(self):
if not inPy3k:
# incomparable in Python 3
self. assertEqual(Mock() < 3, object() < 3)
self. assertEqual(Mock() > 3, object() > 3)
self. assertEqual(Mock() <= 3, object() <= 3)
self. assertEqual(Mock() >= 3, object() >= 3)
mock = Mock()
def comp(s, o):
return True
mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp
self. assertTrue(mock < 3)
self. assertTrue(mock > 3)
self. assertTrue(mock <= 3)
self. assertTrue(mock >= 3)
def testEquality(self):
mock = Mock()
self.assertEqual(mock, mock)
self.assertNotEqual(mock, Mock())
self.assertNotEqual(mock, 3)
def eq(self, other):
return other == 3
mock.__eq__ = eq
self.assertTrue(mock == 3)
self.assertFalse(mock == 4)
def ne(self, other):
return other == 3
mock.__ne__ = ne
self.assertTrue(mock != 3)
self.assertFalse(mock != 4)
def testLenContainsIter(self):
mock = Mock()
self.assertRaises(TypeError, len, mock)
self.assertRaises(TypeError, iter, mock)
self.assertRaises(TypeError, lambda: 'foo' in mock)
mock.__len__ = lambda s: 6
self.assertEqual(len(mock), 6)
mock.__contains__ = lambda s, o: o == 3
self.assertTrue(3 in mock)
self.assertFalse(6 in mock)
mock.__iter__ = lambda s: iter('foobarbaz')
self.assertEqual(list(mock), list('foobarbaz'))
def testMagicMock(self):
mock = MagicMock()
mock.__iter__.return_value = iter([1, 2, 3])
self.assertEqual(list(mock), [1, 2, 3])
if inPy3k:
mock.__bool__.return_value = False
self.assertFalse(hasattr(mock, '__nonzero__'))
else:
mock.__nonzero__.return_value = False
self.assertFalse(hasattr(mock, '__bool__'))
self.assertFalse(bool(mock))
for entry in _magics:
self.assertTrue(hasattr(mock, entry))
self.assertFalse(hasattr(mock, '__imaginery__'))
def testMagicMockDefaults(self):
mock = MagicMock()
self.assertEqual(int(mock), 1)
self.assertEqual(complex(mock), 1j)
self.assertEqual(float(mock), 1.0)
self.assertEqual(long(mock), long(1))
self.assertNotIn(object(), mock)
self.assertEqual(len(mock), 0)
self.assertEqual(list(mock), [])
self.assertEqual(hash(mock), object.__hash__(mock))
self.assertEqual(str(mock), object.__str__(mock))
self.assertEqual(unicode(mock), object.__str__(mock))
self.assertIsInstance(unicode(mock), unicode)
self.assertTrue(bool(mock))
if not inPy3k:
self.assertEqual(oct(mock), '1')
else:
# in Python 3 oct and hex use __index__
# so these tests are for __index__ in py3k
self.assertEqual(oct(mock), '0o1')
self.assertEqual(hex(mock), '0x1')
# how to test __sizeof__ ?
@unittest2.skipIf(inPy3k, "no __cmp__ in Python 3")
def testNonDefaultMagicMethods(self):
mock = MagicMock()
self.assertRaises(AttributeError, lambda: mock.__cmp__)
mock = Mock()
mock.__cmp__ = lambda s, o: 0
self.assertEqual(mock, object())
def testMagicMethodsAndSpec(self):
class Iterable(object):
def __iter__(self):
pass
mock = Mock(spec=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def testMagicMethodsAndSpecSet(self):
class Iterable(object):
def __iter__(self):
pass
mock = Mock(spec_set=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec_set=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec_set=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def testSettingUnsupportedMagicMethod(self):
mock = MagicMock()
def set_setattr():
mock.__setattr__ = lambda self, name: None
self.assertRaisesRegexp(AttributeError,
"Attempting to set unsupported magic method '__setattr__'.",
set_setattr
)
def testAttributesAndReturnValue(self):
mock = MagicMock()
attr = mock.foo
def _get_type(obj):
# the type of every mock (or magicmock) is a custom subclass
# so the real type is the second in the mro
return type(obj).__mro__[1]
self.assertEqual(_get_type(attr), MagicMock)
returned = mock()
self.assertEqual(_get_type(returned), MagicMock)
if __name__ == '__main__':
unittest2.main()
```
#### File: mock/tests/testmock.py
```python
from tests.support import unittest2, inPy3k
import copy
import sys
from mock import Mock, sentinel, DEFAULT
try:
unicode
except NameError:
unicode = str
class MockTest(unittest2.TestCase):
def testAll(self):
# if __all__ is badly defined then import * will raise an error
# We have to exec it because you can't import * inside a method
# in Python 3
exec("from mock import *")
def testConstructor(self):
mock = Mock()
self.assertFalse(mock.called, "called not initialised correctly")
self.assertEqual(mock.call_count, 0,
"call_count not initialised correctly")
self.assertTrue(isinstance(mock.return_value, Mock),
"return_value not initialised correctly")
self.assertEqual(mock.call_args, None,
"call_args not initialised correctly")
self.assertEqual(mock.call_args_list, [],
"call_args_list not initialised correctly")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly")
# Can't use hasattr for this test as it always returns True on a mock...
self.assertFalse('_items' in mock.__dict__,
"default mock should not have '_items' attribute")
self.assertIsNone(mock._parent, "parent not initialised correctly")
self.assertIsNone(mock._methods, "methods not initialised correctly")
self.assertEqual(mock._children, {},
"children not initialised incorrectly")
def testUnicodeNotBroken(self):
# This used to raise an exception with Python 2.5 and Mock 0.4
unicode(Mock())
def testReturnValueInConstructor(self):
mock = Mock(return_value=None)
self.assertIsNone(mock.return_value,
"return value in constructor not honoured")
def testRepr(self):
mock = Mock(name='foo')
self.assertIn('foo', repr(mock))
self.assertIn("'%s'" % id(mock), repr(mock))
self.assertIn('foo.bar', repr(mock.bar))
self.assertIn('mock.baz', repr(mock().baz))
def testReprWithSpec(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec=X())
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec_set=X)
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec_set=X())
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec=X, name='foo')
self.assertIn(" spec='X' ", repr(mock))
self.assertIn(" name='foo' ", repr(mock))
mock = Mock(name='foo')
self.assertNotIn("spec", repr(mock))
mock = Mock()
self.assertNotIn("spec", repr(mock))
mock = Mock(spec=['foo'])
self.assertNotIn("spec", repr(mock))
def testSideEffect(self):
mock = Mock()
def effect(*args, **kwargs):
raise SystemError('kablooie')
mock.side_effect = effect
self.assertRaises(SystemError, mock, 1, 2, fish=3)
mock.assert_called_with(1, 2, fish=3)
results = [1, 2, 3]
def effect():
return results.pop()
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"side effect not used correctly")
mock = Mock(side_effect=sentinel.SideEffect)
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side effect in constructor not used")
def side_effect():
return DEFAULT
mock = Mock(side_effect=side_effect, return_value=sentinel.RETURN)
self.assertEqual(mock(), sentinel.RETURN)
def testReset(self):
parent = Mock()
spec = ["something"]
mock = Mock(name="child", parent=parent, spec=spec)
mock(sentinel.Something, something=sentinel.SomethingElse)
something = mock.something
mock.something()
mock.side_effect = sentinel.SideEffect
return_value = mock.return_value
return_value()
mock.reset_mock()
self.assertEqual(mock._name, "child", "name incorrectly reset")
self.assertEqual(mock._parent, parent, "parent incorrectly reset")
self.assertEqual(mock._methods, spec, "methods incorrectly reset")
self.assertFalse(mock.called, "called not reset")
self.assertEqual(mock.call_count, 0, "call_count not reset")
self.assertEqual(mock.call_args, None, "call_args not reset")
self.assertEqual(mock.call_args_list, [], "call_args_list not reset")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly: %r != %r" %
(mock.method_calls, []))
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side_effect incorrectly reset")
self.assertEqual(mock.return_value, return_value,
"return_value incorrectly reset")
self.assertFalse(return_value.called, "return value mock not reset")
self.assertEqual(mock._children, {'something': something},
"children reset incorrectly")
self.assertEqual(mock.something, something,
"children incorrectly cleared")
self.assertFalse(mock.something.called, "child not reset")
def test_reset_mock_recursion(self):
mock = Mock()
mock.return_value = mock
# used to cause recursion
mock.reset_mock()
def testCall(self):
mock = Mock()
self.assertTrue(isinstance(mock.return_value, Mock),
"Default return_value should be a Mock")
result = mock()
self.assertEqual(mock(), result,
"different result from consecutive calls")
mock.reset_mock()
ret_val = mock(sentinel.Arg)
self.assertTrue(mock.called, "called not set")
self.assertEqual(mock.call_count, 1, "call_count incorrect")
self.assertEqual(mock.call_args, ((sentinel.Arg,), {}),
"call_args not set")
self.assertEqual(mock.call_args_list, [((sentinel.Arg,), {})],
"call_args_list not initialised correctly")
mock.return_value = sentinel.ReturnValue
ret_val = mock(sentinel.Arg, key=sentinel.KeyArg)
self.assertEqual(ret_val, sentinel.ReturnValue,
"incorrect return value")
self.assertEqual(mock.call_count, 2, "call_count incorrect")
self.assertEqual(mock.call_args,
((sentinel.Arg,), {'key': sentinel.KeyArg}),
"call_args not set")
self.assertEqual(mock.call_args_list, [
((sentinel.Arg,), {}),
((sentinel.Arg,), {'key': sentinel.KeyArg})
],
"call_args_list not set")
def testCallArgsComparison(self):
mock = Mock()
mock()
mock(sentinel.Arg)
mock(kw=sentinel.Kwarg)
mock(sentinel.Arg, kw=sentinel.Kwarg)
self.assertEqual(mock.call_args_list, [
(),
((sentinel.Arg,),),
({"kw": sentinel.Kwarg},),
((sentinel.Arg,), {"kw": sentinel.Kwarg})
])
self.assertEqual(mock.call_args,
((sentinel.Arg,), {"kw": sentinel.Kwarg}))
def testAssertCalledWith(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_with()
self.assertRaises(AssertionError, mock.assert_called_with, 1)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_with)
mock(1, 2, 3, a='fish', b='nothing')
mock.assert_called_with(1, 2, 3, a='fish', b='nothing')
def testAssertCalledOnceWith(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_once_with()
mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock('foo', 'bar', baz=2)
mock.assert_called_once_with('foo', 'bar', baz=2)
mock.reset_mock()
mock('foo', 'bar', baz=2)
self.assertRaises(AssertionError, lambda:
mock.assert_called_once_with('bob', 'bar', baz=2)
)
def testAttributeAccessReturnsMocks(self):
mock = Mock()
something = mock.something
self.assertTrue(isinstance(something, Mock), "attribute isn't a mock")
self.assertEqual(mock.something, something,
"different attributes returned for same name")
# Usage example
mock = Mock()
mock.something.return_value = 3
self.assertEqual(mock.something(), 3, "method returned wrong value")
self.assertTrue(mock.something.called,
"method didn't record being called")
def testAttributesHaveNameAndParentSet(self):
mock = Mock()
something = mock.something
self.assertEqual(something._name, "something",
"attribute name not set correctly")
self.assertEqual(something._parent, mock,
"attribute parent not set correctly")
def testMethodCallsRecorded(self):
mock = Mock()
mock.something(3, fish=None)
mock.something_else.something(6, cake=sentinel.Cake)
self.assertEqual(mock.something_else.method_calls,
[("something", (6,), {'cake': sentinel.Cake})],
"method calls not recorded correctly")
self.assertEqual(mock.method_calls, [
("something", (3,), {'fish': None}),
("something_else.something", (6,), {'cake': sentinel.Cake})
],
"method calls not recorded correctly")
def testMethodCallsCompareEasily(self):
mock = Mock()
mock.something()
self.assertEqual(mock.method_calls, [('something',)])
self.assertEqual(mock.method_calls, [('something', (), {})])
mock = Mock()
mock.something('different')
self.assertEqual(mock.method_calls, [('something', ('different',))])
self.assertEqual(mock.method_calls, [('something', ('different',), {})])
mock = Mock()
mock.something(x=1)
self.assertEqual(mock.method_calls, [('something', {'x': 1})])
self.assertEqual(mock.method_calls, [('something', (), {'x': 1})])
mock = Mock()
mock.something('different', some='more')
self.assertEqual(mock.method_calls, [
('something', ('different',), {'some': 'more'})
])
def testOnlyAllowedMethodsExist(self):
spec = ["something"]
mock = Mock(spec=spec)
# this should be allowed
mock.something
self.assertRaisesRegexp(AttributeError,
"Mock object has no attribute 'something_else'",
lambda: mock.something_else)
def testFromSpec(self):
class Something(object):
x = 3
__something__ = None
def y(self):
pass
def testAttributes(mock):
# should work
mock.x
mock.y
mock.__something__
self.assertRaisesRegexp(AttributeError,
"Mock object has no attribute 'z'",
lambda: mock.z)
self.assertRaisesRegexp(AttributeError,
"Mock object has no attribute '__foobar__'",
lambda: mock.__foobar__)
testAttributes(Mock(spec=Something))
testAttributes(Mock(spec=Something()))
def testWrapsCalls(self):
real = Mock()
mock = Mock(wraps=real)
self.assertEqual(mock(), real())
real.reset_mock()
mock(1, 2, fish=3)
real.assert_called_with(1, 2, fish=3)
def testWrapsCallWithNonDefaultReturnValue(self):
real = Mock()
mock = Mock(wraps=real)
mock.return_value = 3
self.assertEqual(mock(), 3)
self.assertFalse(real.called)
def testWrapsAttributes(self):
class Real(object):
attribute = Mock()
real = Real()
mock = Mock(wraps=real)
self.assertEqual(mock.attribute(), real.attribute())
self.assertRaises(AttributeError, lambda: mock.fish)
self.assertNotEqual(mock.attribute, real.attribute)
result = mock.attribute.frog(1, 2, fish=3)
Real.attribute.frog.assert_called_with(1, 2, fish=3)
self.assertEqual(result, Real.attribute.frog())
def testExceptionalSideEffect(self):
mock = Mock(side_effect=AttributeError)
self.assertRaises(AttributeError, mock)
mock = Mock(side_effect=AttributeError('foo'))
self.assertRaises(AttributeError, mock)
def testBaseExceptionalSideEffect(self):
mock = Mock(side_effect=KeyboardInterrupt)
self.assertRaises(KeyboardInterrupt, mock)
mock = Mock(side_effect=KeyboardInterrupt('foo'))
self.assertRaises(KeyboardInterrupt, mock)
def testAssertCalledWithMessage(self):
mock = Mock()
self.assertRaisesRegexp(AssertionError, 'Not called',
mock.assert_called_with)
def test__name__(self):
mock = Mock()
self.assertRaises(AttributeError, lambda: mock.__name__)
mock.__name__ = 'foo'
self.assertEqual(mock.__name__, 'foo')
def testSpecClass(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertTrue(isinstance(mock, X))
mock = Mock(spec=X())
self.assertTrue(isinstance(mock, X))
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertTrue(isinstance(mock, X))
mock = Mock(spec_set=X())
self.assertTrue(isinstance(mock, X))
def testSettingAttributeWithSpec(self):
class X(object):
y = 3
mock = Mock(spec=X)
mock.x = 'foo'
mock = Mock(spec_set=X)
def set_attr():
mock.x = 'foo'
mock.y = 'foo'
self.assertRaises(AttributeError, set_attr)
def testCopy(self):
current = sys.getrecursionlimit()
self.addCleanup(sys.setrecursionlimit, current)
# can't use sys.maxint as this doesn't exist in Python 3
sys.setrecursionlimit(int(10e8))
# this segfaults without the fix in place
copy.copy(Mock())
@unittest2.skipIf(inPy3k, "no old style classes in Python 3")
def testSpecOldStyleClasses(self):
class Foo:
bar = 7
mock = Mock(spec=Foo)
mock.bar = 6
self.assertRaises(AttributeError, lambda: mock.foo)
mock = Mock(spec=Foo())
mock.bar = 6
self.assertRaises(AttributeError, lambda: mock.foo)
@unittest2.skipIf(inPy3k, "no old style classes in Python 3")
def testSpecSetOldStyleClasses(self):
class Foo:
bar = 7
mock = Mock(spec_set=Foo)
mock.bar = 6
self.assertRaises(AttributeError, lambda: mock.foo)
def _set():
mock.foo = 3
self.assertRaises(AttributeError, _set)
mock = Mock(spec_set=Foo())
mock.bar = 6
self.assertRaises(AttributeError, lambda: mock.foo)
def _set():
mock.foo = 3
self.assertRaises(AttributeError, _set)
def testSubclassWithProperties(self):
class SubClass(Mock):
def _get(self):
return 3
def _set(self, value):
raise NameError('strange error')
some_attribute = property(_get, _set)
s = SubClass(spec_set=SubClass)
self.assertEqual(s.some_attribute, 3)
def test():
s.some_attribute = 3
self.assertRaises(NameError, test)
def test():
s.foo = 'bar'
self.assertRaises(AttributeError, test)
if __name__ == '__main__':
unittest2.main()
```
#### File: mock/tests/testwith.py
```python
import sys
if sys.version_info[:2] >= (2, 5):
from tests._testwith import *
else:
from tests.support import unittest2
class TestWith(unittest2.TestCase):
@unittest2.skip('tests using with statement skipped on Python 2.4')
def testWith(self):
pass
if __name__ == '__main__':
unittest2.main()
```
#### File: nose/functional_tests/test_importer.py
```python
import os
import sys
import unittest
from nose.importer import Importer
class TestImporter(unittest.TestCase):
def setUp(self):
self.dir = os.path.normpath(os.path.join(os.path.dirname(__file__),
'support'))
self.imp = Importer()
self._mods = sys.modules.copy()
self._path = sys.path[:]
sys.modules.pop('mod', None)
sys.modules.pop('pak', None)
sys.modules.pop('pak.mod', None)
sys.modules.pop('pak.sub', None)
def tearDown(self):
to_del = [ m for m in sys.modules.keys() if
m not in self._mods ]
if to_del:
for mod in to_del:
del sys.modules[mod]
sys.modules.update(self._mods)
sys.path = self._path[:]
def test_import_from_dir(self):
imp = self.imp
d1 = os.path.join(self.dir, 'dir1')
d2 = os.path.join(self.dir, 'dir2')
# simple name
m1 = imp.importFromDir(d1, 'mod')
m2 = imp.importFromDir(d2, 'mod')
self.assertNotEqual(m1, m2)
self.assertNotEqual(m1.__file__, m2.__file__)
# dotted name
p1 = imp.importFromDir(d1, 'pak.mod')
p2 = imp.importFromDir(d2, 'pak.mod')
self.assertNotEqual(p1, p2)
self.assertNotEqual(p1.__file__, p2.__file__)
def test_import_from_path(self):
imp = self.imp
jn = os.path.join
d1 = jn(self.dir, 'dir1')
d2 = jn(self.dir, 'dir2')
# simple name
m1 = imp.importFromPath(jn(d1, 'mod.py'), 'mod')
m2 = imp.importFromPath(jn(d2, 'mod.py'), 'mod')
self.assertNotEqual(m1, m2)
self.assertNotEqual(m1.__file__, m2.__file__)
# dotted name
p1 = imp.importFromPath(jn(d1, 'pak', 'mod.py'), 'pak.mod')
p2 = imp.importFromPath(jn(d2, 'pak', 'mod.py'), 'pak.mod')
self.assertNotEqual(p1, p2)
self.assertNotEqual(p1.__file__, p2.__file__)
# simple name -- package
sp1 = imp.importFromPath(jn(d1, 'pak'), 'pak')
sp2 = imp.importFromPath(jn(d2, 'pak'), 'pak')
self.assertNotEqual(sp1, sp2)
assert sp1.__path__
assert sp2.__path__
self.assertNotEqual(sp1.__path__, sp2.__path__)
# dotted name -- package
dp1 = imp.importFromPath(jn(d1, 'pak', 'sub'), 'pak.sub')
dp2 = imp.importFromPath(jn(d2, 'pak', 'sub'), 'pak.sub')
self.assertNotEqual(dp1, dp2)
assert dp1.__path__
assert dp2.__path__
self.assertNotEqual(dp1.__path__, dp2.__path__)
def test_import_sets_intermediate_modules(self):
imp = self.imp
path = os.path.join(self.dir,
'package2', 'test_pak', 'test_sub', 'test_mod.py')
mod = imp.importFromPath(path, 'test_pak.test_sub.test_mod')
print mod, dir(mod)
assert 'test_pak' in sys.modules, 'test_pak was not imported?'
test_pak = sys.modules['test_pak']
assert hasattr(test_pak, 'test_sub'), "test_pak.test_sub was not set"
def test_cached_no_reload(self):
imp = self.imp
d1 = os.path.join(self.dir, 'dir1')
m1 = imp.importFromDir(d1, 'mod')
m2 = imp.importFromDir(d1, 'mod')
assert m1 is m2, "%s is not %s" % (m1, m2)
def test_cached_no_reload_dotted(self):
imp = self.imp
d1 = os.path.join(self.dir, 'dir1')
p1 = imp.importFromDir(d1, 'pak.mod')
p2 = imp.importFromDir(d1, 'pak.mod')
assert p1 is p2, "%s is not %s" % (p1, p2)
def test_import_sets_sys_modules(self):
imp = self.imp
d1 = os.path.join(self.dir, 'dir1')
p1 = imp.importFromDir(d1, 'pak.mod')
assert sys.modules['pak.mod'] is p1, "pak.mod not in sys.modules"
assert sys.modules['pak'], "pak not in sys.modules"
assert sys.modules['pak'].mod is p1, \
"sys.modules['pak'].mod is not the module we loaded"
def test_failed_import_raises_import_error(self):
imp = self.imp
def bad_import():
imp.importFromPath(self.dir, 'no.such.module')
self.assertRaises(ImportError, bad_import)
def test_sys_modules_same_path_no_reload(self):
imp = self.imp
d1 = os.path.join(self.dir, 'dir1')
d2 = os.path.join(self.dir, 'dir2')
sys.path.insert(0, d1)
mod_sys_imported = __import__('mod')
mod_nose_imported = imp.importFromDir(d1, 'mod')
assert mod_nose_imported is mod_sys_imported, \
"nose reimported a module in sys.modules from the same path"
mod_nose_imported2 = imp.importFromDir(d2, 'mod')
assert mod_nose_imported2 != mod_sys_imported, \
"nose failed to reimport same name, different dir"
def test_import_pkg_from_path_fpw(self):
imp = self.imp
imp.config.firstPackageWins = True
jn = os.path.join
d1 = jn(self.dir, 'dir1')
d2 = jn(self.dir, 'dir2')
# dotted name
p1 = imp.importFromPath(jn(d1, 'pak', 'mod.py'), 'pak.mod')
p2 = imp.importFromPath(jn(d2, 'pak', 'mod.py'), 'pak.mod')
self.assertEqual(p1, p2)
self.assertEqual(p1.__file__, p2.__file__)
# simple name -- package
sp1 = imp.importFromPath(jn(d1, 'pak'), 'pak')
sp2 = imp.importFromPath(jn(d2, 'pak'), 'pak')
self.assertEqual(sp1, sp2)
assert sp1.__path__
assert sp2.__path__
self.assertEqual(sp1.__path__, sp2.__path__)
# dotted name -- package
dp1 = imp.importFromPath(jn(d1, 'pak', 'sub'), 'pak.sub')
dp2 = imp.importFromPath(jn(d2, 'pak', 'sub'), 'pak.sub')
self.assertEqual(dp1, dp2)
assert dp1.__path__
assert dp2.__path__
self.assertEqual(dp1.__path__, dp2.__path__)
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.DEBUG)
unittest.main()
```
#### File: test_multiprocessing/support/timeout.py
```python
def test_timeout():
"this test *should* fail when process-timeout=1"
from time import sleep
sleep(2)
```
#### File: pyes/pyes/fakettypes.py
```python
__author__ = '<NAME>'
#
# Fake ttypes to use in http protocol to simulate thrift ones
#
class Method:
GET = 0
PUT = 1
POST = 2
DELETE = 3
HEAD = 4
OPTIONS = 5
_VALUES_TO_NAMES = {
0: "GET",
1: "PUT",
2: "POST",
3: "DELETE",
4: "HEAD",
5: "OPTIONS",
}
_NAMES_TO_VALUES = {
"GET": 0,
"PUT": 1,
"POST": 2,
"DELETE": 3,
"HEAD": 4,
"OPTIONS": 5,
}
class Status:
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIED = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
INSUFFICIENT_STORAGE = 506
_VALUES_TO_NAMES = {
100: "CONTINUE",
101: "SWITCHING_PROTOCOLS",
200: "OK",
201: "CREATED",
202: "ACCEPTED",
203: "NON_AUTHORITATIVE_INFORMATION",
204: "NO_CONTENT",
205: "RESET_CONTENT",
206: "PARTIAL_CONTENT",
207: "MULTI_STATUS",
300: "MULTIPLE_CHOICES",
301: "MOVED_PERMANENTLY",
302: "FOUND",
303: "SEE_OTHER",
304: "NOT_MODIFIED",
305: "USE_PROXY",
307: "TEMPORARY_REDIRECT",
400: "BAD_REQUEST",
401: "UNAUTHORIZED",
402: "PAYMENT_REQUIRED",
403: "FORBIDDEN",
404: "NOT_FOUND",
405: "METHOD_NOT_ALLOWED",
406: "NOT_ACCEPTABLE",
407: "PROXY_AUTHENTICATION",
408: "REQUEST_TIMEOUT",
409: "CONFLICT",
410: "GONE",
411: "LENGTH_REQUIRED",
412: "PRECONDITION_FAILED",
413: "REQUEST_ENTITY_TOO_LARGE",
414: "REQUEST_URI_TOO_LONG",
415: "UNSUPPORTED_MEDIA_TYPE",
416: "REQUESTED_RANGE_NOT_SATISFIED",
417: "EXPECTATION_FAILED",
422: "UNPROCESSABLE_ENTITY",
423: "LOCKED",
424: "FAILED_DEPENDENCY",
500: "INTERNAL_SERVER_ERROR",
501: "NOT_IMPLEMENTED",
502: "BAD_GATEWAY",
503: "SERVICE_UNAVAILABLE",
504: "GATEWAY_TIMEOUT",
506: "INSUFFICIENT_STORAGE",
}
_NAMES_TO_VALUES = {
"CONTINUE": 100,
"SWITCHING_PROTOCOLS": 101,
"OK": 200,
"CREATED": 201,
"ACCEPTED": 202,
"NON_AUTHORITATIVE_INFORMATION": 203,
"NO_CONTENT": 204,
"RESET_CONTENT": 205,
"PARTIAL_CONTENT": 206,
"MULTI_STATUS": 207,
"MULTIPLE_CHOICES": 300,
"MOVED_PERMANENTLY": 301,
"FOUND": 302,
"SEE_OTHER": 303,
"NOT_MODIFIED": 304,
"USE_PROXY": 305,
"TEMPORARY_REDIRECT": 307,
"BAD_REQUEST": 400,
"UNAUTHORIZED": 401,
"PAYMENT_REQUIRED": 402,
"FORBIDDEN": 403,
"NOT_FOUND": 404,
"METHOD_NOT_ALLOWED": 405,
"NOT_ACCEPTABLE": 406,
"PROXY_AUTHENTICATION": 407,
"REQUEST_TIMEOUT": 408,
"CONFLICT": 409,
"GONE": 410,
"LENGTH_REQUIRED": 411,
"PRECONDITION_FAILED": 412,
"REQUEST_ENTITY_TOO_LARGE": 413,
"REQUEST_URI_TOO_LONG": 414,
"UNSUPPORTED_MEDIA_TYPE": 415,
"REQUESTED_RANGE_NOT_SATISFIED": 416,
"EXPECTATION_FAILED": 417,
"UNPROCESSABLE_ENTITY": 422,
"LOCKED": 423,
"FAILED_DEPENDENCY": 424,
"INTERNAL_SERVER_ERROR": 500,
"NOT_IMPLEMENTED": 501,
"BAD_GATEWAY": 502,
"SERVICE_UNAVAILABLE": 503,
"GATEWAY_TIMEOUT": 504,
"INSUFFICIENT_STORAGE": 506,
}
class RestRequest:
"""
Attributes:
- method
- uri
- parameters
- headers
- body
"""
def __init__(self, method=None, uri=None, parameters=None, headers=None, body=None,):
self.method = method
self.uri = uri
self.parameters = parameters
self.headers = headers
self.body = body
class RestResponse:
"""
Attributes:
- status
- headers
- body
"""
def __init__(self, status=None, headers=None, body=None,):
self.status = status
self.headers = headers
self.body = body
```
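The fake ttypes above are plain value holders; a short, purely illustrative sketch of how they fit together:
```python
# Hedged sketch using the fake thrift types defined above.
request = RestRequest(method=Method.GET, uri="/index/_search", parameters={}, headers={}, body="")
response = RestResponse(status=Status.OK, headers={}, body='{"ok": true}')
print(Method._VALUES_TO_NAMES[request.method])    # "GET"
print(Status._VALUES_TO_NAMES[response.status])   # "OK"
```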
#### File: pyes/pyes/highlight.py
```python
__author__ = '<NAME>'
class HighLighter:
"""
This object manage the highlighting
"""
def __init__(self, pre_tags = None, post_tags = None, fields = None, fragment_size = None, number_of_fragments = None):
self.pre_tags = pre_tags
self.post_tags = post_tags
self.fields = fields or {}
self.fragment_size = fragment_size
self.number_of_fragments = number_of_fragments
def add_field(self, name, fragment_size=150, number_of_fragments=3):
"""
Add a field to the Highlighter
"""
data = {}
if fragment_size:
data['fragment_size'] = fragment_size
if number_of_fragments is not None:
data['number_of_fragments'] = number_of_fragments
self.fields[name] = data
def serialize(self):
res = {}
if self.pre_tags:
res["pre_tags"] = self.pre_tags
if self.post_tags:
res["post_tags"] = self.post_tags
if self.fragment_size:
res["fragment_size"] = self.fragment_size
if self.number_of_fragments:
res["number_of_fragments"] = self.number_of_fragments
if self.fields:
res["fields"] = self.fields
else:
res["fields"] = {"_all" : {}}
return res
```
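A small sketch of the `HighLighter` above: fields added via `add_field` end up under `"fields"` in the serialized dict, and the `{"_all": {}}` fallback only applies when no field was added.
```python
# Hedged usage sketch of HighLighter.serialize().
hl = HighLighter(pre_tags=['<em>'], post_tags=['</em>'])
hl.add_field('title', fragment_size=100, number_of_fragments=2)
print(hl.serialize())
# -> {'pre_tags': ['<em>'], 'post_tags': ['</em>'],
#     'fields': {'title': {'fragment_size': 100, 'number_of_fragments': 2}}}
```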
#### File: pyes/pyes/utils.py
```python
__author__ = '<NAME>'
__all__ = ['clean_string', 'ResultSet', "ESRange", "ESRangeOp", "string_b64encode", "string_b64decode"]
import base64
def string_b64encode(s):
"""
This function is useful to convert a string to a valid id to be used in ES.
You can use it to generate an ID for URLs or other text.
"""
return base64.urlsafe_b64encode(s).strip('=')
def string_b64decode(s):
return base64.urlsafe_b64decode(s + '=' * (len(s) % 4))
# Characters that are part of Lucene query syntax must be stripped
# from user input: + - && || ! ( ) { } [ ] ^ " ~ * ? : \
# See: http://lucene.apache.org/java/3_0_2/queryparsersyntax.html#Escaping
SPECIAL_CHARS = [33, 34, 38, 40, 41, 42, 45, 58, 63, 91, 92, 93, 94, 123, 124, 125, 126]
UNI_SPECIAL_CHARS = dict((c, None) for c in SPECIAL_CHARS)
STR_SPECIAL_CHARS = ''.join([chr(c) for c in SPECIAL_CHARS])
class ESRange(object):
def __init__(self, field, from_value=None, to_value=None, include_lower=None,
include_upper=None, boost=None, **kwargs):
"""
type can be "gt", "gte", "lt", "lte"
"""
self.field = field
self.from_value = from_value
self.to_value = to_value
self.type = type
self.include_lower = include_lower
self.include_upper = include_upper
self.boost = boost
def serialize(self):
filters = {}
if self.from_value is not None:
filters['from'] = self.from_value
if self.to_value is not None:
filters['to'] = self.to_value
if self.include_lower is not None:
filters['include_lower'] = self.include_lower
if self.include_upper is not None:
filters['include_upper'] = self.include_upper
if self.boost is not None:
filters['boost'] = self.boost
return self.field, filters
class ESRangeOp(ESRange):
def __init__(self, field, op, value, boost=None):
from_value = to_value = include_lower = include_upper = None
if op == "gt":
from_value = value
include_lower = False
elif op == "gte":
from_value = value
include_lower = True
if op == "lt":
to_value = value
include_upper = False
elif op == "lte":
to_value = value
include_upper = True
super(ESRangeOp, self).__init__(field, from_value, to_value, \
include_lower, include_upper, boost)
def clean_string(text):
"""
Remove Lucene reserved characters from query string
"""
if isinstance(text, unicode):
return text.translate(UNI_SPECIAL_CHARS).strip()
return text.translate(None, STR_SPECIAL_CHARS).strip()
class ResultSet(object):
def __init__(self, results, fix_keys=True, clean_highlight=True):
"""
results: an es query results dict
fix_keys: remove the "_" from every key, useful for django views
clean_highlight: remove empty highlight entries
"""
self._results = results
self._total = None
self.valid = False
self.facets = results.get('facets', {})
if 'hits' in results:
self.valid = True
self.results = results['hits']['hits']
if fix_keys:
self.fix_keys()
if clean_highlight:
self.clean_highlight()
@property
def total(self):
if self._total is None:
self._total = 0
if self.valid:
self._total = self._results.get("hits", {}).get('total', 0)
return self._total
def fix_keys(self):
"""
Remove the _ from the keys of the results
"""
if not self.valid:
return
for hit in self._results['hits']['hits']:
for key, item in hit.items():
if key.startswith("_"):
hit[key[1:]] = item
del hit[key]
def clean_highlight(self):
"""
Remove the empty highlight
"""
if not self.valid:
return
for hit in self._results['hits']['hits']:
if 'highlight' in hit:
hl = hit['highlight']
for key, item in hl.items():
if not item:
del hl[key]
def __getattr__(self, name):
return self._results['hits'][name]
def keys_to_string(data):
"""
Convert all unicode keys in the dict into string keys
"""
if isinstance(data, dict):
for key in list(data.keys()):
if isinstance(key, unicode):
value = data[key]
val = keys_to_string(value)
del data[key]
data[key.encode("utf8", "ignore")] = val
return data
```
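A short sketch of the helpers above, assuming the same Python 2 era `pyes` install; the `serialize()` output and the cleaned string follow directly from the code shown:

```python
# Illustrative sketch only: exercises ESRangeOp, clean_string and
# string_b64encode as defined above (Python 2 era pyes).
from pyes.utils import ESRangeOp, clean_string, string_b64encode

# "age >= 21" expressed as a range; serialize() returns (field, filter_dict)
print(ESRangeOp("age", "gte", 21).serialize())
# ('age', {'from': 21, 'include_lower': True})

# strip Lucene query syntax from raw user input
print(clean_string("title:foo AND (bar)"))
# titlefoo AND bar

# derive a URL-safe id from an arbitrary string
print(string_b64encode("http://example.com/doc/1"))
```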
#### File: test/input/func_bad_assigment_to_exception_var.py
```python
__revision__ = 'toto'
import sys
e = 1
e2 = 'yo'
e3 = None
try:
raise e, 'toto'
except Exception, ex:
print ex
_, _, tb = sys.exc_info()
raise e2
def func():
"""bla bla bla"""
raise e3
def reraise():
"""reraise a catched exception instance"""
try:
raise Exception()
except Exception, exc:
print exc
raise exc
raise e3
```
#### File: test/input/func_class_access_protected_members.py
```python
__revision__ = ''
class MyClass:
"""class docstring"""
_cls_protected = 5
def __init__(self, other):
"""init"""
self._protected = 1
self.public = other
def test(self):
"""test"""
self._protected += self._cls_protected
print self.public._haha
def clsmeth(cls):
"""this is ok"""
print cls._cls_protected
clsmeth = classmethod(clsmeth)
INST = MyClass()
print INST.public
print INST._protected
print INST._cls_protected
```
#### File: test/input/func_e0101.py
```python
__revision__ = 'yo'
class MyClass:
"""dummy class"""
def __init__(self):
return 1
class MyClass2:
"""dummy class"""
def __init__(self):
return
class MyClass3:
"""dummy class"""
def __init__(self):
return None
class MyClass4:
"""dummy class"""
def __init__(self):
yield None
```
#### File: test/input/func_e0206.py
```python
__revision__ = None
class Abcd:
"""dummy"""
__implements__ = __revision__
def __init__(self):
self.attr = None
class Cdef:
"""dummy"""
__implements__ = (__revision__, Abcd)
def __init__(self):
pass
```
#### File: test/input/func_e99xx.py
```python
__revision__ = 1
PARG_1 = PARG_2 = PARG_3 = 1
def pprint():
"""Test string format
"""
print "%s %s" % {'PARG_1': 1, 'PARG_2': 2} # E9906
print "%s" % (PARG_1, PARG_2) # E9905
print "%(PARG_1)d %d" % {'PARG_1': 1, 'PARG_2': 2} # E9902
print "%(PARG_1)d %(PARG_2)d" % {'PARG_1': 1} # E9904
print "%(PARG_1)d %(PARG_2)d" % {'PARG_1': 1, 'PARG_2':2, 'PARG_3':3} # W9901
print "%(PARG_1)d %(PARG_2)d" % {'PARG_1': 1, 2:3} # W9900 E9904
print "%(PARG_1)d %(PARG_2)d" % (2, 3) # 9903
print "%(PARG_1)d %(PARG_2)d" % [2, 3] # 9903
print "%2z" % PARG_1
print "strange format %2" % PARG_2
```
#### File: test/input/func___name___access.py
```python
__revision__ = 1
class Aaaa:
"""old class"""
def __init__(self):
print self.__name__
print self.__class__.__name__
class NewClass(object):
"""new class"""
def __new__(cls, *args, **kwargs):
print 'new', cls.__name__
return object.__new__(cls, *args, **kwargs)
def __init__(self):
print 'init', self.__name__
```
#### File: test/input/func_newstyle_property.py
```python
__revision__ = 1
def getter(self):
"""interesting"""
return self
class OkOk(object):
"""correct usage"""
method = property(getter, doc='hop')
class HaNonNonNon:
"""bad usage"""
method = property(getter, doc='hop')
def __init__(self):
pass
```
#### File: test/input/func_noerror_nested_classes.py
```python
__revision__ = 1
class Temelekefe:
"""gloubliboulga"""
def __init__(self):
"""nested class with function raise error"""
class Toto:
"""toto nested class"""
def __init__(self):
self.attr = 2
def toto_method(self):
"""toto nested class method"""
print self
print 'error ?', self, Toto
```
#### File: test/input/func_noerror_object_as_class_attribute.py
```python
__revision__ = None
class Statement(object):
""" ... """
def __init__(self):
pass
object = None
```
#### File: test/input/func_unused_overridden_argument.py
```python
__revision__ = 'thx to <NAME>'
class Base(object):
"parent"
def inherited(self, aaa, aab, aac):
"abstract method"
raise NotImplementedError
class Sub(Base):
"child 1"
def inherited(self, aaa, aab, aac):
"overridden method, though don't use every argument"
return aaa
def newmethod(self, aax, aay):
"another method, warning for aay desired"
print self, aax
class Sub2(Base):
"child 1"
def inherited(self, aaa, aab, aac):
"overridden method, use every argument"
return aaa + aab + aac
```
#### File: test/input/func_w0105.py
```python
__revision__ = ''
def stupid_function(arg):
"""reallly stupid function"""
if arg == 1:
print 1
elif arg == 2:
print 2
elif arg == 3:
print 3
elif arg == 4:
print 4
elif arg == 5:
print 5
elif arg == 6:
print 6
elif arg == 7:
print 7
elif arg == 8:
print 8
elif arg == 9:
print 9
elif arg == 10:
print 10
elif arg < 1:
print 0
print 100
arg = 0
for val in range(arg):
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
print val
```
#### File: test/input/func_w0133.py
```python
__revision__ = 1
def Run():
"""method without any good name"""
class B:
"""nested class should not be tested has a variable"""
def __init__(self):
pass
bBb = 1
return A, bBb
def run():
"""anothrer method without only good name"""
class Aaa:
"""nested class should not be tested has a variable"""
def __init__(self):
pass
bbb = 1
return Aaa(bbb)
A = None
def HOHOHOHO():
"""yo"""
HIHIHI = 1
print HIHIHI
class xyz:
"""yo"""
def __init__(self):
pass
def Youplapoum(self):
"""bad method name"""
def nested_args(arg1, (arg21, arg22)):
"""function with nested arguments"""
print arg1, arg21, arg22
GOOD_CONST_NAME = ''
benpasceluila = 0
class Correct:
"""yo"""
def __init__(self):
self.cava = 12
self._Ca_va_Pas = None
V = [WHAT_Ever_inListComp for WHAT_Ever_inListComp in GOOD_CONST_NAME]
```
#### File: test/input/func_w0152.py
```python
from operator import add
__revision__ = reduce(*(add, (1, 2, 3)))
def func(arg1, arg2):
"""magic function
"""
return arg2, arg1
MYDICT = {'arg1':2, 'arg2': 4}
func(**MYDICT)
def coolfunc(*args, **kwargs):
"""magic function"""
return func(*args, **kwargs)
```
#### File: pylint/test/test_similar.py
```python
import sys
import unittest
from cStringIO import StringIO
from pylint.checkers import similar
class SimilarTC(unittest.TestCase):
"""test the similar command line utility"""
def test(self):
sys.stdout = StringIO()
try:
similar.run(['--ignore-comments', 'input/similar1', 'input/similar2'])
output = sys.stdout.getvalue()
finally:
sys.stdout = sys.__stdout__
self.assertEquals(output.strip(), """
7 similar lines in 2 files
==input/similar1:5
==input/similar2:5
same file as this one.
more than 4
identical lines should
be
detected
TOTAL lines=38 duplicates=7 percent=0.184210526316
""".strip())
def test_help(self):
sys.stdout = StringIO()
try:
try:
similar.run(['--help'])
except SystemExit, ex:
self.assertEquals(ex.code, 0)
else:
self.fail()
finally:
sys.stdout = sys.__stdout__
def test_no_args(self):
sys.stdout = StringIO()
try:
try:
similar.run([])
except SystemExit, ex:
self.assertEquals(ex.code, 1)
else:
self.fail()
finally:
sys.stdout = sys.__stdout__
if __name__ == '__main__':
unittest.main()
```
#### File: setuptools/setuptools/sandbox.py
```python
import os, sys, __builtin__, tempfile, operator, pkg_resources
_os = sys.modules[os.name]
_open = open
_file = file
from distutils.errors import DistutilsError
from pkg_resources import working_set
__all__ = [
"AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
old_dir = os.getcwd()
save_argv = sys.argv[:]
save_path = sys.path[:]
setup_dir = os.path.abspath(os.path.dirname(setup_script))
temp_dir = os.path.join(setup_dir,'temp')
if not os.path.isdir(temp_dir): os.makedirs(temp_dir)
save_tmp = tempfile.tempdir
save_modules = sys.modules.copy()
pr_state = pkg_resources.__getstate__()
try:
tempfile.tempdir = temp_dir; os.chdir(setup_dir)
try:
sys.argv[:] = [setup_script]+list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist:dist.activate())
DirectorySandbox(setup_dir).run(
lambda: execfile(
"setup.py",
{'__file__':setup_script, '__name__':'__main__'}
)
)
except SystemExit, v:
if v.args and v.args[0]:
raise
# Normal exit, just return
finally:
pkg_resources.__setstate__(pr_state)
sys.modules.update(save_modules)
for key in list(sys.modules):
if key not in save_modules: del sys.modules[key]
os.chdir(old_dir)
sys.path[:] = save_path
sys.argv[:] = save_argv
tempfile.tempdir = save_tmp
class AbstractSandbox:
"""Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
_active = False
def __init__(self):
self._attrs = [
name for name in dir(_os)
if not name.startswith('_') and hasattr(self,name)
]
def _copy(self, source):
for name in self._attrs:
setattr(os, name, getattr(source,name))
def run(self, func):
"""Run 'func' under os sandboxing"""
try:
self._copy(self)
__builtin__.file = self._file
__builtin__.open = self._open
self._active = True
return func()
finally:
self._active = False
__builtin__.open = _open
__builtin__.file = _file
self._copy(_os)
def _mk_dual_path_wrapper(name):
original = getattr(_os,name)
def wrap(self,src,dst,*args,**kw):
if self._active:
src,dst = self._remap_pair(name,src,dst,*args,**kw)
return original(src,dst,*args,**kw)
return wrap
for name in ["rename", "link", "symlink"]:
if hasattr(_os,name): locals()[name] = _mk_dual_path_wrapper(name)
def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os,name)
def wrap(self,path,*args,**kw):
if self._active:
path = self._remap_input(name,path,*args,**kw)
return original(path,*args,**kw)
return wrap
_open = _mk_single_path_wrapper('open', _open)
_file = _mk_single_path_wrapper('file', _file)
for name in [
"stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
"remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
"startfile", "mkfifo", "mknod", "pathconf", "access"
]:
if hasattr(_os,name): locals()[name] = _mk_single_path_wrapper(name)
def _mk_single_with_return(name):
original = getattr(_os,name)
def wrap(self,path,*args,**kw):
if self._active:
path = self._remap_input(name,path,*args,**kw)
return self._remap_output(name, original(path,*args,**kw))
return original(path,*args,**kw)
return wrap
for name in ['readlink', 'tempnam']:
if hasattr(_os,name): locals()[name] = _mk_single_with_return(name)
def _mk_query(name):
original = getattr(_os,name)
def wrap(self,*args,**kw):
retval = original(*args,**kw)
if self._active:
return self._remap_output(name, retval)
return retval
return wrap
for name in ['getcwd', 'tmpnam']:
if hasattr(_os,name): locals()[name] = _mk_query(name)
def _validate_path(self,path):
"""Called to remap or validate any path, whether input or output"""
return path
def _remap_input(self,operation,path,*args,**kw):
"""Called for path inputs"""
return self._validate_path(path)
def _remap_output(self,operation,path):
"""Called for path outputs"""
return self._validate_path(path)
def _remap_pair(self,operation,src,dst,*args,**kw):
"""Called for path pairs like rename, link, and symlink operations"""
return (
self._remap_input(operation+'-from',src,*args,**kw),
self._remap_input(operation+'-to',dst,*args,**kw)
)
class DirectorySandbox(AbstractSandbox):
"""Restrict operations to a single subdirectory - pseudo-chroot"""
write_ops = dict.fromkeys([
"open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
"utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
])
def __init__(self,sandbox):
self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox,'')
AbstractSandbox.__init__(self)
def _violation(self, operation, *args, **kw):
raise SandboxViolation(operation, args, kw)
def _open(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("open", path, mode, *args, **kw)
return _open(path,mode,*args,**kw)
def tmpnam(self): self._violation("tmpnam")
def _ok(self,path):
if hasattr(_os,'devnull') and path==_os.devnull: return True
active = self._active
try:
self._active = False
realpath = os.path.normcase(os.path.realpath(path))
if realpath==self._sandbox or realpath.startswith(self._prefix):
return True
finally:
self._active = active
def _remap_input(self,operation,path,*args,**kw):
"""Called for path inputs"""
if operation in self.write_ops and not self._ok(path):
self._violation(operation, os.path.realpath(path), *args, **kw)
return path
def _remap_pair(self,operation,src,dst,*args,**kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src,dst)
def _file(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("file", path, mode, *args, **kw)
return _file(path,mode,*args,**kw)
def open(self, file, flags, mode=0777):
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode)
return _os.open(file,flags,mode)
WRITE_FLAGS = reduce(
operator.or_, [getattr(_os, a, 0) for a in
"O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
class SandboxViolation(DistutilsError):
"""A setup script attempted to modify the filesystem outside the sandbox"""
def __str__(self):
return """SandboxViolation: %s%r %s
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.""" % self.args
#
```
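A sketch of how `DirectorySandbox` behaves on its own, assuming a period setuptools install; the directory and file names are hypothetical. Writes under the sandboxed directory succeed, while writes outside it raise `SandboxViolation`:

```python
# Illustrative sketch only: directory and file names are hypothetical.
import os
from setuptools.sandbox import DirectorySandbox, SandboxViolation

box_dir = os.path.abspath("build_area")
if not os.path.isdir(box_dir):
    os.makedirs(box_dir)

def touch_inside():
    # allowed: the target lives under the sandboxed directory
    open(os.path.join(box_dir, "ok.txt"), "w").close()

def touch_outside():
    # blocked: the wrapped open() refuses writes outside the sandbox
    open(os.path.join(os.path.dirname(box_dir), "not_allowed.txt"), "w").close()

DirectorySandbox(box_dir).run(touch_inside)

try:
    DirectorySandbox(box_dir).run(touch_outside)
except SandboxViolation as err:
    print(err)
```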
#### File: examples/derived_attributes/attributes.py
```python
from functools import update_wrapper
import new
class method(object):
def __init__(self, func, expr=None):
self.func = func
self.expr = expr or func
def __get__(self, instance, owner):
if instance is None:
return new.instancemethod(self.expr, owner, owner.__class__)
else:
return new.instancemethod(self.func, instance, owner)
def expression(self, expr):
self.expr = expr
return self
class property_(object):
def __init__(self, fget, fset=None, fdel=None, expr=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
self.expr = expr or fget
update_wrapper(self, fget)
def __get__(self, instance, owner):
if instance is None:
return self.expr(owner)
else:
return self.fget(instance)
def __set__(self, instance, value):
self.fset(instance, value)
def __delete__(self, instance):
self.fdel(instance)
def setter(self, fset):
self.fset = fset
return self
def deleter(self, fdel):
self.fdel = fdel
return self
def expression(self, expr):
self.expr = expr
return self
### Example code
from sqlalchemy import Table, Column, Integer, create_engine, func
from sqlalchemy.orm import sessionmaker, aliased
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class BaseInterval(object):
@method
def contains(self,point):
"""Return true if the interval contains the given interval."""
return (self.start <= point) & (point < self.end)
@method
def intersects(self, other):
"""Return true if the interval intersects the given interval."""
return (self.start < other.end) & (self.end > other.start)
@method
def _max(self, x, y):
"""Return the max of two values."""
return max(x, y)
@_max.expression
def _max(cls, x, y):
"""Return the SQL max of two values."""
return func.max(x, y)
@method
def max_length(self, other):
"""Return the longer length of this interval and another."""
return self._max(self.length, other.length)
def __repr__(self):
return "%s(%s..%s)" % (self.__class__.__name__, self.start, self.end)
class Interval1(BaseInterval, Base):
"""Interval stored as endpoints"""
__table__ = Table('interval1', Base.metadata,
Column('id', Integer, primary_key=True),
Column('start', Integer, nullable=False),
Column('end', Integer, nullable=False)
)
def __init__(self, start, end):
self.start = start
self.end = end
@property_
def length(self):
return self.end - self.start
class Interval2(BaseInterval, Base):
"""Interval stored as start and length"""
__table__ = Table('interval2', Base.metadata,
Column('id', Integer, primary_key=True),
Column('start', Integer, nullable=False),
Column('length', Integer, nullable=False)
)
def __init__(self, start, length):
self.start = start
self.length = length
@property_
def end(self):
return self.start + self.length
engine = create_engine('sqlite://', echo=True)
Base.metadata.create_all(engine)
session = sessionmaker(engine)()
intervals = [Interval1(1,4), Interval1(3,15), Interval1(11,16)]
for interval in intervals:
session.add(interval)
session.add(Interval2(interval.start, interval.length))
session.commit()
for Interval in (Interval1, Interval2):
print "Querying using interval class %s" % Interval.__name__
print
print '-- length less than 10'
print [(i, i.length) for i in
session.query(Interval).filter(Interval.length < 10).all()]
print
print '-- contains 12'
print session.query(Interval).filter(Interval.contains(12)).all()
print
print '-- intersects 2..10'
other = Interval1(2,10)
result = session.query(Interval).\
filter(Interval.intersects(other)).\
order_by(Interval.length).all()
print [(interval, interval.intersects(other)) for interval in result]
print
print '-- longer length'
interval_alias = aliased(Interval)
print session.query(Interval.length,
interval_alias.length,
Interval.max_length(interval_alias)).all()
```
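What the `method` and `property_` descriptors above buy is the dual dispatch used in the queries at the end: on an instance they evaluate plain Python, on the class they produce SQL expressions. A small sketch, assuming the example module above has just been executed in the same interpreter session:

```python
# Illustrative sketch only: relies on Interval1 and the descriptors
# defined in the example above.
iv = Interval1(1, 4)

print(iv.contains(2))          # instance level -> plain Python: True
print(iv.length)               # instance level -> plain arithmetic: 3

print(Interval1.contains(12))  # class level -> a SQL boolean expression
print(Interval1.length)        # class level -> a column expression (end - start)
```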
#### File: dialects/mssql/zxjdbc.py
```python
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.mssql.base import MSDialect, MSExecutionContext
from sqlalchemy.engine import base
class MSExecutionContext_zxjdbc(MSExecutionContext):
_embedded_scope_identity = False
def pre_exec(self):
super(MSExecutionContext_zxjdbc, self).pre_exec()
# scope_identity after the fact returns null in jTDS so we must
# embed it
if self._select_lastrowid and self.dialect.use_scope_identity:
self._embedded_scope_identity = True
self.statement += "; SELECT scope_identity()"
def post_exec(self):
if self._embedded_scope_identity:
while True:
try:
row = self.cursor.fetchall()[0]
break
except self.dialect.dbapi.Error, e:
self.cursor.nextset()
self._lastrowid = int(row[0])
if (self.isinsert or self.isupdate or self.isdelete) and \
self.compiled.returning:
self._result_proxy = base.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
table = self.dialect.identifier_preparer.format_table(
self.compiled.statement.table)
self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
jdbc_db_name = 'jtds:sqlserver'
jdbc_driver_name = 'net.sourceforge.jtds.jdbc.Driver'
execution_ctx_cls = MSExecutionContext_zxjdbc
def _get_server_version_info(self, connection):
return tuple(
int(x)
for x in connection.connection.dbversion.split('.')
)
dialect = MSDialect_zxjdbc
```
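For completeness, a connection sketch. The URL scheme and connection details below are assumptions (they follow SQLAlchemy's usual `dialect+driver` naming) and are not taken from the file above; the dialect itself only works under Jython with the jTDS JDBC driver on the classpath:

```python
# Illustrative sketch only: requires Jython plus the jTDS driver; the URL
# scheme, credentials and host are assumptions, not taken from the module above.
from sqlalchemy import create_engine

engine = create_engine("mssql+zxjdbc://scott:tiger@dbhost:1433/mydatabase")
```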
#### File: dialects/oracle/base.py
```python
import random, re
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, log
from sqlalchemy.engine import default, base, reflection
from sqlalchemy.sql import compiler, visitors, expression
from sqlalchemy.sql import operators as sql_operators, functions as sql_functions
from sqlalchemy import types as sqltypes
from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, DATE, DATETIME, \
BLOB, CLOB, TIMESTAMP, FLOAT
RESERVED_WORDS = set('SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN '
'DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED '
'ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE ANY '
'TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE BY ASC '
'REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES AS IN VIEW '
'EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS NOT TRIGGER '
'ELSE CREATE INTERSECT PCTFREE DISTINCT USER CONNECT SET MODE '
'OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR DECIMAL UNION PUBLIC '
'AND START UID COMMENT'.split())
class RAW(sqltypes.LargeBinary):
pass
OracleRaw = RAW
class NCLOB(sqltypes.Text):
__visit_name__ = 'NCLOB'
VARCHAR2 = VARCHAR
NVARCHAR2 = NVARCHAR
class NUMBER(sqltypes.Numeric, sqltypes.Integer):
__visit_name__ = 'NUMBER'
def __init__(self, precision=None, scale=None, asdecimal=None):
if asdecimal is None:
asdecimal = bool(scale and scale > 0)
super(NUMBER, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal)
def adapt(self, impltype):
ret = super(NUMBER, self).adapt(impltype)
# leave a hint for the DBAPI handler
ret._is_oracle_number = True
return ret
@property
def _type_affinity(self):
if bool(self.scale and self.scale > 0):
return sqltypes.Numeric
else:
return sqltypes.Integer
class DOUBLE_PRECISION(sqltypes.Numeric):
__visit_name__ = 'DOUBLE_PRECISION'
def __init__(self, precision=None, scale=None, asdecimal=None):
if asdecimal is None:
asdecimal = False
super(DOUBLE_PRECISION, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal)
class BFILE(sqltypes.LargeBinary):
__visit_name__ = 'BFILE'
class LONG(sqltypes.Text):
__visit_name__ = 'LONG'
class INTERVAL(sqltypes.TypeEngine):
__visit_name__ = 'INTERVAL'
def __init__(self,
day_precision=None,
second_precision=None):
"""Construct an INTERVAL.
Note that only DAY TO SECOND intervals are currently supported.
This is due to a lack of support for YEAR TO MONTH intervals
within available DBAPIs (cx_oracle and zxjdbc).
:param day_precision: the day precision value. this is the number of digits
to store for the day field. Defaults to "2"
:param second_precision: the second precision value. this is the number of digits
to store for the fractional seconds field. Defaults to "6".
"""
self.day_precision = day_precision
self.second_precision = second_precision
@classmethod
def _adapt_from_generic_interval(cls, interval):
return INTERVAL(day_precision=interval.day_precision,
second_precision=interval.second_precision)
def adapt(self, impltype):
return impltype(day_precision=self.day_precision,
second_precision=self.second_precision)
@property
def _type_affinity(self):
return sqltypes.Interval
class ROWID(sqltypes.TypeEngine):
"""Oracle ROWID type.
When used in a cast() or similar, generates ROWID.
"""
__visit_name__ = 'ROWID'
class _OracleBoolean(sqltypes.Boolean):
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
colspecs = {
sqltypes.Boolean : _OracleBoolean,
sqltypes.Interval : INTERVAL,
}
ischema_names = {
'VARCHAR2' : VARCHAR,
'NVARCHAR2' : NVARCHAR,
'CHAR' : CHAR,
'DATE' : DATE,
'NUMBER' : NUMBER,
'BLOB' : BLOB,
'BFILE' : BFILE,
'CLOB' : CLOB,
'NCLOB' : NCLOB,
'TIMESTAMP' : TIMESTAMP,
'TIMESTAMP WITH TIME ZONE' : TIMESTAMP,
'INTERVAL DAY TO SECOND' : INTERVAL,
'RAW' : RAW,
'FLOAT' : FLOAT,
'DOUBLE PRECISION' : DOUBLE_PRECISION,
'LONG' : LONG,
}
class OracleTypeCompiler(compiler.GenericTypeCompiler):
# Note:
# Oracle DATE == DATETIME
# Oracle does not allow milliseconds in DATE
# Oracle does not support TIME columns
def visit_datetime(self, type_):
return self.visit_DATE(type_)
def visit_float(self, type_):
return self.visit_FLOAT(type_)
def visit_unicode(self, type_):
if self.dialect._supports_nchar:
return self.visit_NVARCHAR(type_)
else:
return self.visit_VARCHAR(type_)
def visit_INTERVAL(self, type_):
return "INTERVAL DAY%s TO SECOND%s" % (
type_.day_precision is not None and
"(%d)" % type_.day_precision or
"",
type_.second_precision is not None and
"(%d)" % type_.second_precision or
"",
)
def visit_TIMESTAMP(self, type_):
if type_.timezone:
return "TIMESTAMP WITH TIME ZONE"
else:
return "TIMESTAMP"
def visit_DOUBLE_PRECISION(self, type_):
return self._generate_numeric(type_, "DOUBLE PRECISION")
def visit_NUMBER(self, type_, **kw):
return self._generate_numeric(type_, "NUMBER", **kw)
def _generate_numeric(self, type_, name, precision=None, scale=None):
if precision is None:
precision = type_.precision
if scale is None:
scale = getattr(type_, 'scale', None)
if precision is None:
return name
elif scale is None:
return "%(name)s(%(precision)s)" % {'name':name,'precision': precision}
else:
return "%(name)s(%(precision)s, %(scale)s)" % {'name':name,'precision': precision, 'scale' : scale}
def visit_VARCHAR(self, type_):
if self.dialect._supports_char_length:
return "VARCHAR(%(length)s CHAR)" % {'length' : type_.length}
else:
return "VARCHAR(%(length)s)" % {'length' : type_.length}
def visit_NVARCHAR(self, type_):
return "NVARCHAR2(%(length)s)" % {'length' : type_.length}
def visit_text(self, type_):
return self.visit_CLOB(type_)
def visit_unicode_text(self, type_):
if self.dialect._supports_nchar:
return self.visit_NCLOB(type_)
else:
return self.visit_CLOB(type_)
def visit_large_binary(self, type_):
return self.visit_BLOB(type_)
def visit_big_integer(self, type_):
return self.visit_NUMBER(type_, precision=19)
def visit_boolean(self, type_):
return self.visit_SMALLINT(type_)
def visit_RAW(self, type_):
return "RAW(%(length)s)" % {'length' : type_.length}
def visit_ROWID(self, type_):
return "ROWID"
class OracleCompiler(compiler.SQLCompiler):
"""Oracle compiler modifies the lexical structure of Select
statements to work under non-ANSI configured Oracle databases, if
the use_ansi flag is False.
"""
compound_keywords = util.update_copy(
compiler.SQLCompiler.compound_keywords,
{
expression.CompoundSelect.EXCEPT : 'MINUS'
}
)
def __init__(self, *args, **kwargs):
super(OracleCompiler, self).__init__(*args, **kwargs)
self.__wheres = {}
self._quoted_bind_names = {}
def visit_mod(self, binary, **kw):
return "mod(%s, %s)" % (self.process(binary.left), self.process(binary.right))
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_char_length_func(self, fn, **kw):
return "LENGTH" + self.function_argspec(fn, **kw)
def visit_match_op(self, binary, **kw):
return "CONTAINS (%s, %s)" % (self.process(binary.left), self.process(binary.right))
def get_select_hint_text(self, byfroms):
return " ".join(
"/*+ %s */" % text for table, text in byfroms.items()
)
def function_argspec(self, fn, **kw):
if len(fn.clauses) > 0:
return compiler.SQLCompiler.function_argspec(self, fn, **kw)
else:
return ""
def default_from(self):
"""Called when a ``SELECT`` statement has no froms, and no ``FROM`` clause is to be appended.
The Oracle compiler tacks a "FROM DUAL" to the statement.
"""
return " FROM DUAL"
def visit_join(self, join, **kwargs):
if self.dialect.use_ansi:
return compiler.SQLCompiler.visit_join(self, join, **kwargs)
else:
kwargs['asfrom'] = True
return self.process(join.left, **kwargs) + \
", " + self.process(join.right, **kwargs)
def _get_nonansi_join_whereclause(self, froms):
clauses = []
def visit_join(join):
if join.isouter:
def visit_binary(binary):
if binary.operator == sql_operators.eq:
if binary.left.table is join.right:
binary.left = _OuterJoinColumn(binary.left)
elif binary.right.table is join.right:
binary.right = _OuterJoinColumn(binary.right)
clauses.append(visitors.cloned_traverse(join.onclause, {},
{'binary':visit_binary}))
else:
clauses.append(join.onclause)
for j in join.left, join.right:
if isinstance(j, expression.Join):
visit_join(j)
for f in froms:
if isinstance(f, expression.Join):
visit_join(f)
if not clauses:
return None
else:
return sql.and_(*clauses)
def visit_outer_join_column(self, vc):
return self.process(vc.column) + "(+)"
def visit_sequence(self, seq):
return self.dialect.identifier_preparer.format_sequence(seq) + ".nextval"
def visit_alias(self, alias, asfrom=False, ashint=False, **kwargs):
"""Oracle doesn't like ``FROM table AS alias``. Is the AS standard SQL??"""
if asfrom or ashint:
alias_name = isinstance(alias.name, expression._generated_label) and \
self._truncated_identifier("alias", alias.name) or alias.name
if ashint:
return alias_name
elif asfrom:
return self.process(alias.original, asfrom=asfrom, **kwargs) + \
" " + self.preparer.format_alias(alias, alias_name)
else:
return self.process(alias.original, **kwargs)
def returning_clause(self, stmt, returning_cols):
def create_out_param(col, i):
bindparam = sql.outparam("ret_%d" % i, type_=col.type)
self.binds[bindparam.key] = bindparam
return self.bindparam_string(self._truncate_bindparam(bindparam))
columnlist = list(expression._select_iterables(returning_cols))
# within_columns_clause =False so that labels (foo AS bar) don't render
columns = [self.process(c, within_columns_clause=False, result_map=self.result_map) for c in columnlist]
binds = [create_out_param(c, i) for i, c in enumerate(columnlist)]
return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
def _TODO_visit_compound_select(self, select):
"""Need to determine how to get ``LIMIT``/``OFFSET`` into a ``UNION`` for Oracle."""
pass
def visit_select(self, select, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``rownum`` criterion.
"""
if not getattr(select, '_oracle_visit', None):
if not self.dialect.use_ansi:
if self.stack and 'from' in self.stack[-1]:
existingfroms = self.stack[-1]['from']
else:
existingfroms = None
froms = select._get_display_froms(existingfroms)
whereclause = self._get_nonansi_join_whereclause(froms)
if whereclause is not None:
select = select.where(whereclause)
select._oracle_visit = True
if select._limit is not None or select._offset is not None:
# See http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html
#
# Generalized form of an Oracle pagination query:
# select ... from (
# select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from (
# select distinct ... where ... order by ...
# ) where ROWNUM <= :limit+:offset
# ) where ora_rn > :offset
# Outer select and "ROWNUM as ora_rn" can be dropped if limit=0
# TODO: use annotations instead of clone + attr set ?
select = select._generate()
select._oracle_visit = True
# Wrap the middle select and add the hint
limitselect = sql.select([c for c in select.c])
if select._limit and self.dialect.optimize_limits:
limitselect = limitselect.prefix_with("/*+ FIRST_ROWS(%d) */" % select._limit)
limitselect._oracle_visit = True
limitselect._is_wrapper = True
# If needed, add the limiting clause
if select._limit is not None:
max_row = select._limit
if select._offset is not None:
max_row += select._offset
limitselect.append_whereclause(
sql.literal_column("ROWNUM")<=max_row)
# If needed, add the ora_rn, and wrap again with offset.
if select._offset is None:
limitselect.for_update = select.for_update
select = limitselect
else:
limitselect = limitselect.column(
sql.literal_column("ROWNUM").label("ora_rn"))
limitselect._oracle_visit = True
limitselect._is_wrapper = True
offsetselect = sql.select(
[c for c in limitselect.c if c.key!='ora_rn'])
offsetselect._oracle_visit = True
offsetselect._is_wrapper = True
offsetselect.append_whereclause(
sql.literal_column("ora_rn")>select._offset)
offsetselect.for_update = select.for_update
select = offsetselect
kwargs['iswrapper'] = getattr(select, '_is_wrapper', False)
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
def limit_clause(self, select):
return ""
def for_update_clause(self, select):
if self.is_subquery():
return ""
elif select.for_update == "nowait":
return " FOR UPDATE NOWAIT"
else:
return super(OracleCompiler, self).for_update_clause(select)
class OracleDDLCompiler(compiler.DDLCompiler):
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % constraint.ondelete
# oracle has no ON UPDATE CASCADE -
# it's only available via triggers http://asktom.oracle.com/tkyte/update_cascade/index.html
if constraint.onupdate is not None:
util.warn(
"Oracle does not contain native UPDATE CASCADE "
"functionality - onupdates will not be rendered for foreign keys. "
"Consider using deferrable=True, initially='deferred' or triggers.")
return text
class OracleIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set([x.lower() for x in RESERVED_WORDS])
illegal_initial_characters = set(xrange(0, 10)).union(["_", "$"])
def _bindparam_requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(unicode(value))
)
def format_savepoint(self, savepoint):
name = re.sub(r'^_+', '', savepoint.ident)
return super(OracleIdentifierPreparer, self).format_savepoint(savepoint, name)
class OracleExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq):
return int(self._execute_scalar("SELECT " +
self.dialect.identifier_preparer.format_sequence(seq) +
".nextval FROM DUAL"))
class OracleDialect(default.DefaultDialect):
name = 'oracle'
supports_alter = True
supports_unicode_statements = False
supports_unicode_binds = False
max_identifier_length = 30
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
supports_sequences = True
sequences_optional = False
postfetch_lastrowid = False
default_paramstyle = 'named'
colspecs = colspecs
ischema_names = ischema_names
requires_name_normalize = True
supports_default_values = False
supports_empty_insert = False
statement_compiler = OracleCompiler
ddl_compiler = OracleDDLCompiler
type_compiler = OracleTypeCompiler
preparer = OracleIdentifierPreparer
execution_ctx_cls = OracleExecutionContext
reflection_options = ('oracle_resolve_synonyms', )
def __init__(self,
use_ansi=True,
optimize_limits=False,
**kwargs):
default.DefaultDialect.__init__(self, **kwargs)
self.use_ansi = use_ansi
self.optimize_limits = optimize_limits
def initialize(self, connection):
super(OracleDialect, self).initialize(connection)
self.implicit_returning = self.server_version_info > (10, ) and \
self.__dict__.get('implicit_returning', True)
if self._is_oracle_8:
self.colspecs = self.colspecs.copy()
self.colspecs.pop(sqltypes.Interval)
self.use_ansi = False
@property
def _is_oracle_8(self):
return self.server_version_info and \
self.server_version_info < (9, )
@property
def _supports_char_length(self):
return not self._is_oracle_8
@property
def _supports_nchar(self):
return not self._is_oracle_8
def do_release_savepoint(self, connection, name):
# Oracle does not support RELEASE SAVEPOINT
pass
def has_table(self, connection, table_name, schema=None):
if not schema:
schema = self.default_schema_name
cursor = connection.execute(
sql.text("SELECT table_name FROM all_tables "
"WHERE table_name = :name AND owner = :schema_name"),
name=self.denormalize_name(table_name), schema_name=self.denormalize_name(schema))
return cursor.first() is not None
def has_sequence(self, connection, sequence_name, schema=None):
if not schema:
schema = self.default_schema_name
cursor = connection.execute(
sql.text("SELECT sequence_name FROM all_sequences "
"WHERE sequence_name = :name AND sequence_owner = :schema_name"),
name=self.denormalize_name(sequence_name), schema_name=self.denormalize_name(schema))
return cursor.first() is not None
def normalize_name(self, name):
if name is None:
return None
# Py2K
if isinstance(name, str):
name = name.decode(self.encoding)
# end Py2K
if name.upper() == name and \
not self.identifier_preparer._requires_quotes(name.lower()):
return name.lower()
else:
return name
def denormalize_name(self, name):
if name is None:
return None
elif name.lower() == name and not self.identifier_preparer._requires_quotes(name.lower()):
name = name.upper()
# Py2K
if not self.supports_unicode_binds:
name = name.encode(self.encoding)
else:
name = unicode(name)
# end Py2K
return name
def _get_default_schema_name(self, connection):
return self.normalize_name(connection.execute(u'SELECT USER FROM DUAL').scalar())
def _resolve_synonym(self, connection, desired_owner=None, desired_synonym=None, desired_table=None):
"""search for a local synonym matching the given desired owner/name.
if desired_owner is None, attempts to locate a distinct owner.
returns the actual name, owner, dblink name, and synonym name if found.
"""
q = "SELECT owner, table_owner, table_name, db_link, synonym_name FROM all_synonyms WHERE "
clauses = []
params = {}
if desired_synonym:
clauses.append("synonym_name = :synonym_name")
params['synonym_name'] = desired_synonym
if desired_owner:
clauses.append("table_owner = :desired_owner")
params['desired_owner'] = desired_owner
if desired_table:
clauses.append("table_name = :tname")
params['tname'] = desired_table
q += " AND ".join(clauses)
result = connection.execute(sql.text(q), **params)
if desired_owner:
row = result.first()
if row:
return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name']
else:
return None, None, None, None
else:
rows = result.fetchall()
if len(rows) > 1:
raise AssertionError("There are multiple tables visible to the schema, you must specify owner")
elif len(rows) == 1:
row = rows[0]
return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name']
else:
return None, None, None, None
@reflection.cache
def _prepare_reflection_args(self, connection, table_name, schema=None,
resolve_synonyms=False, dblink='', **kw):
if resolve_synonyms:
actual_name, owner, dblink, synonym = self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(schema),
desired_synonym=self.denormalize_name(table_name)
)
else:
actual_name, owner, dblink, synonym = None, None, None, None
if not actual_name:
actual_name = self.denormalize_name(table_name)
if not dblink:
dblink = ''
if not owner:
owner = self.denormalize_name(schema or self.default_schema_name)
return (actual_name, owner, dblink, synonym)
@reflection.cache
def get_schema_names(self, connection, **kw):
s = "SELECT username FROM all_users ORDER BY username"
cursor = connection.execute(s,)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
schema = self.denormalize_name(schema or self.default_schema_name)
# note that table_names() isn't loading DBLINKed or synonym'ed tables
if schema is None:
schema = self.default_schema_name
s = sql.text(
"SELECT table_name FROM all_tables "
"WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') "
"AND OWNER = :owner "
"AND IOT_NAME IS NULL")
cursor = connection.execute(s, owner=schema)
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
schema = self.denormalize_name(schema or self.default_schema_name)
s = sql.text("SELECT view_name FROM all_views WHERE owner = :owner")
cursor = connection.execute(s, owner=self.denormalize_name(schema))
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
dblink = kw.get('dblink', '')
info_cache = kw.get('info_cache')
(table_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, table_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
columns = []
if self._supports_char_length:
char_length_col = 'char_length'
else:
char_length_col = 'data_length'
c = connection.execute(sql.text(
"SELECT column_name, data_type, %(char_length_col)s, data_precision, data_scale, "
"nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "
"WHERE table_name = :table_name AND owner = :owner "
"ORDER BY column_id" % {'dblink': dblink, 'char_length_col':char_length_col}),
table_name=table_name, owner=schema)
for row in c:
(colname, orig_colname, coltype, length, precision, scale, nullable, default) = \
(self.normalize_name(row[0]), row[0], row[1], row[2], row[3], row[4], row[5]=='Y', row[6])
if coltype == 'NUMBER' :
coltype = NUMBER(precision, scale)
elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
coltype = self.ischema_names.get(coltype)(length)
elif 'WITH TIME ZONE' in coltype:
coltype = TIMESTAMP(timezone=True)
else:
coltype = re.sub(r'\(\d+\)', '', coltype)
try:
coltype = self.ischema_names[coltype]
except KeyError:
util.warn("Did not recognize type '%s' of column '%s'" %
(coltype, colname))
coltype = sqltypes.NULLTYPE
cdict = {
'name': colname,
'type': coltype,
'nullable': nullable,
'default': default,
}
if orig_colname.lower() == orig_colname:
cdict['quote'] = True
columns.append(cdict)
return columns
@reflection.cache
def get_indexes(self, connection, table_name, schema=None,
resolve_synonyms=False, dblink='', **kw):
info_cache = kw.get('info_cache')
(table_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, table_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
indexes = []
q = sql.text("""
SELECT a.index_name, a.column_name, b.uniqueness
FROM ALL_IND_COLUMNS%(dblink)s a,
ALL_INDEXES%(dblink)s b
WHERE
a.index_name = b.index_name
AND a.table_owner = b.table_owner
AND a.table_name = b.table_name
AND a.table_name = :table_name
AND a.table_owner = :schema
ORDER BY a.index_name, a.column_position""" % {'dblink': dblink})
rp = connection.execute(q, table_name=self.denormalize_name(table_name),
schema=self.denormalize_name(schema))
indexes = []
last_index_name = None
pkeys = self.get_primary_keys(connection, table_name, schema,
resolve_synonyms=resolve_synonyms,
dblink=dblink,
info_cache=kw.get('info_cache'))
uniqueness = dict(NONUNIQUE=False, UNIQUE=True)
oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE)
def upper_name_set(names):
return set([i.upper() for i in names])
pk_names = upper_name_set(pkeys)
def remove_if_primary_key(index):
# don't include the primary key index
if index is not None and \
upper_name_set(index['column_names']) == pk_names:
indexes.pop()
index = None
for rset in rp:
if rset.index_name != last_index_name:
remove_if_primary_key(index)
index = dict(name=self.normalize_name(rset.index_name), column_names=[])
indexes.append(index)
index['unique'] = uniqueness.get(rset.uniqueness, False)
# filter out Oracle SYS_NC names. could also do an outer join
# to the all_tab_columns table and check for real col names there.
if not oracle_sys_col.match(rset.column_name):
index['column_names'].append(self.normalize_name(rset.column_name))
last_index_name = rset.index_name
remove_if_primary_key(index)
return indexes
@reflection.cache
def _get_constraint_data(self, connection, table_name, schema=None,
dblink='', **kw):
rp = connection.execute(
sql.text("""SELECT
ac.constraint_name,
ac.constraint_type,
loc.column_name AS local_column,
rem.table_name AS remote_table,
rem.column_name AS remote_column,
rem.owner AS remote_owner,
loc.position as loc_pos,
rem.position as rem_pos
FROM all_constraints%(dblink)s ac,
all_cons_columns%(dblink)s loc,
all_cons_columns%(dblink)s rem
WHERE ac.table_name = :table_name
AND ac.constraint_type IN ('R','P')
AND ac.owner = :owner
AND ac.owner = loc.owner
AND ac.constraint_name = loc.constraint_name
AND ac.r_owner = rem.owner(+)
AND ac.r_constraint_name = rem.constraint_name(+)
AND (rem.position IS NULL or loc.position=rem.position)
ORDER BY ac.constraint_name, loc.position""" % {'dblink': dblink}),
table_name=table_name, owner=schema)
constraint_data = rp.fetchall()
return constraint_data
def get_primary_keys(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
return self._get_primary_keys(connection, table_name, schema, **kw)[0]
@reflection.cache
def _get_primary_keys(self, connection, table_name, schema=None, **kw):
resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
dblink = kw.get('dblink', '')
info_cache = kw.get('info_cache')
(table_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, table_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
pkeys = []
constraint_name = None
constraint_data = self._get_constraint_data(connection, table_name,
schema, dblink,
info_cache=kw.get('info_cache'))
for row in constraint_data:
#print "ROW:" , row
(cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
if cons_type == 'P':
if constraint_name is None:
constraint_name = self.normalize_name(cons_name)
pkeys.append(local_column)
return pkeys, constraint_name
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
cols, name = self._get_primary_keys(connection, table_name, schema=schema, **kw)
return {
'constrained_columns':cols,
'name':name
}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
"""
kw arguments can be:
oracle_resolve_synonyms
dblink
"""
requested_schema = schema # to check later on
resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
dblink = kw.get('dblink', '')
info_cache = kw.get('info_cache')
(table_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, table_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
constraint_data = self._get_constraint_data(connection, table_name,
schema, dblink,
info_cache=kw.get('info_cache'))
def fkey_rec():
return {
'name' : None,
'constrained_columns' : [],
'referred_schema' : None,
'referred_table' : None,
'referred_columns' : []
}
fkeys = util.defaultdict(fkey_rec)
for row in constraint_data:
(cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
if cons_type == 'R':
if remote_table is None:
# ticket 363
util.warn(
("Got 'None' querying 'table_name' from "
"all_cons_columns%(dblink)s - does the user have "
"proper rights to the table?") % {'dblink':dblink})
continue
rec = fkeys[cons_name]
rec['name'] = cons_name
local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns']
if not rec['referred_table']:
if resolve_synonyms:
ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \
self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(remote_owner),
desired_table=self.denormalize_name(remote_table)
)
if ref_synonym:
remote_table = self.normalize_name(ref_synonym)
remote_owner = self.normalize_name(ref_remote_owner)
rec['referred_table'] = remote_table
if requested_schema is not None or self.denormalize_name(remote_owner) != schema:
rec['referred_schema'] = remote_owner
local_cols.append(local_column)
remote_cols.append(remote_column)
return fkeys.values()
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None,
resolve_synonyms=False, dblink='', **kw):
info_cache = kw.get('info_cache')
(view_name, schema, dblink, synonym) = \
self._prepare_reflection_args(connection, view_name, schema,
resolve_synonyms, dblink,
info_cache=info_cache)
s = sql.text("""
SELECT text FROM all_views
WHERE owner = :schema
AND view_name = :view_name
""")
rp = connection.execute(s,
view_name=view_name, schema=schema).scalar()
if rp:
return rp.decode(self.encoding)
else:
return None
class _OuterJoinColumn(sql.ClauseElement):
__visit_name__ = 'outer_join_column'
def __init__(self, column):
self.column = column
```
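The pagination comment inside `visit_select` above can be seen directly by compiling a limited SELECT against this dialect. The following is a sketch using the 0.6-era expression API that the module itself targets; the exact bind-parameter names and quoting will differ:

```python
# Illustrative sketch only: compiles (does not execute) a statement with the
# OracleDialect defined above to show the ROWNUM-based pagination rewrite.
from sqlalchemy.sql import column, select, table
from sqlalchemy.dialects.oracle.base import OracleDialect

users = table("users", column("id"), column("name"))
stmt = select([users.c.id, users.c.name]).limit(10).offset(20)

print(stmt.compile(dialect=OracleDialect()))
# Roughly:
#   SELECT id, name FROM
#     (SELECT ..., ROWNUM AS ora_rn FROM
#        (SELECT users.id, users.name FROM users)
#      WHERE ROWNUM <= :limit + :offset)
#   WHERE ora_rn > :offset
```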
#### File: dialects/sqlite/pysqlite.py
```python
from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE
from sqlalchemy import schema, exc, pool
from sqlalchemy.engine import default
from sqlalchemy import types as sqltypes
from sqlalchemy import util
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATETIME.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATETIME.result_processor(self, dialect, coltype)
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = 'qmark'
poolclass = pool.SingletonThreadPool
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
sqltypes.Date:_SQLite_pysqliteDate,
sqltypes.TIMESTAMP:_SQLite_pysqliteTimeStamp,
}
)
# Py3K
#description_encoding = None
driver = 'pysqlite'
def __init__(self, **kwargs):
SQLiteDialect.__init__(self, **kwargs)
if self.dbapi is not None:
sqlite_ver = self.dbapi.version_info
if sqlite_ver < (2, 1, 3):
util.warn(
("The installed version of pysqlite2 (%s) is out-dated "
"and will cause errors in some cases. Version 2.1.3 "
"or greater is recommended.") %
'.'.join([str(subver) for subver in sqlite_ver]))
@classmethod
def dbapi(cls):
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError, e:
try:
from sqlite3 import dbapi2 as sqlite #try the 2.5+ stdlib name.
except ImportError:
raise e
return sqlite
def _get_server_version_info(self, connection):
return self.dbapi.sqlite_version_info
def create_connect_args(self, url):
if url.username or url.password or url.host or url.port:
raise exc.ArgumentError(
"Invalid SQLite URL: %s\n"
"Valid SQLite URL forms are:\n"
" sqlite:///:memory: (or, sqlite://)\n"
" sqlite:///relative/path/to/file.db\n"
" sqlite:////absolute/path/to/file.db" % (url,))
filename = url.database or ':memory:'
opts = url.query.copy()
util.coerce_kw_type(opts, 'timeout', float)
util.coerce_kw_type(opts, 'isolation_level', str)
util.coerce_kw_type(opts, 'detect_types', int)
util.coerce_kw_type(opts, 'check_same_thread', bool)
util.coerce_kw_type(opts, 'cached_statements', int)
return ([filename], opts)
def is_disconnect(self, e):
return isinstance(e, self.dbapi.ProgrammingError) and "Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
```
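The accepted URL forms are spelled out in the `ArgumentError` message above, and the query-string options map onto the `coerce_kw_type` calls in `create_connect_args`. A short sketch follows; file names are hypothetical, and engines are created lazily, so nothing is opened here:

```python
# Illustrative sketch only: file names are hypothetical placeholders.
from sqlalchemy import create_engine

create_engine("sqlite://")                             # in-memory database
create_engine("sqlite:///relative/path/to/file.db")    # relative path
create_engine("sqlite:////absolute/path/to/file.db")   # absolute path

# query-string options are coerced and handed to pysqlite's connect()
create_engine("sqlite:///app.db?timeout=30")
```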
#### File: sqlalchemy/ext/sqlsoup.py
```python
from sqlalchemy import Table, MetaData, join
from sqlalchemy import schema, sql
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm import scoped_session, sessionmaker, mapper, \
class_mapper, relationship, session,\
object_session
from sqlalchemy.orm.interfaces import MapperExtension, EXT_CONTINUE
from sqlalchemy.exceptions import SQLAlchemyError, InvalidRequestError, ArgumentError
from sqlalchemy.sql import expression
__all__ = ['PKNotFoundError', 'SqlSoup']
Session = scoped_session(sessionmaker(autoflush=True, autocommit=False))
class AutoAdd(MapperExtension):
def __init__(self, scoped_session):
self.scoped_session = scoped_session
def instrument_class(self, mapper, class_):
class_.__init__ = self._default__init__(mapper)
def _default__init__(ext, mapper):
def __init__(self, **kwargs):
for key, value in kwargs.iteritems():
setattr(self, key, value)
return __init__
def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
session = self.scoped_session()
session._save_without_cascade(instance)
return EXT_CONTINUE
def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
sess = object_session(instance)
if sess:
sess.expunge(instance)
return EXT_CONTINUE
class PKNotFoundError(SQLAlchemyError):
pass
def _ddl_error(cls):
msg = 'SQLSoup can only modify mapped Tables (found: %s)' \
% cls._table.__class__.__name__
raise InvalidRequestError(msg)
# metaclass is necessary to expose class methods with getattr, e.g.
# we want to pass db.users.select through to users._mapper.select
class SelectableClassType(type):
def insert(cls, **kwargs):
_ddl_error(cls)
def __clause_element__(cls):
return cls._table
def __getattr__(cls, attr):
if attr == '_query':
# called during mapper init
raise AttributeError()
return getattr(cls._query, attr)
class TableClassType(SelectableClassType):
def insert(cls, **kwargs):
o = cls()
o.__dict__.update(kwargs)
return o
def relate(cls, propname, *args, **kwargs):
class_mapper(cls)._configure_property(propname, relationship(*args, **kwargs))
def _is_outer_join(selectable):
if not isinstance(selectable, sql.Join):
return False
if selectable.isouter:
return True
return _is_outer_join(selectable.left) or _is_outer_join(selectable.right)
def _selectable_name(selectable):
if isinstance(selectable, sql.Alias):
return _selectable_name(selectable.element)
elif isinstance(selectable, sql.Select):
return ''.join(_selectable_name(s) for s in selectable.froms)
elif isinstance(selectable, schema.Table):
return selectable.name.capitalize()
else:
x = selectable.__class__.__name__
if x[0] == '_':
x = x[1:]
return x
def _class_for_table(session, engine, selectable, base_cls=object, **mapper_kwargs):
selectable = expression._clause_element_as_expr(selectable)
mapname = 'Mapped' + _selectable_name(selectable)
# Py2K
if isinstance(mapname, unicode):
engine_encoding = engine.dialect.encoding
mapname = mapname.encode(engine_encoding)
# end Py2K
if isinstance(selectable, Table):
klass = TableClassType(mapname, (base_cls,), {})
else:
klass = SelectableClassType(mapname, (base_cls,), {})
def _compare(self, o):
L = list(self.__class__.c.keys())
L.sort()
t1 = [getattr(self, k) for k in L]
try:
t2 = [getattr(o, k) for k in L]
except AttributeError:
raise TypeError('unable to compare with %s' % o.__class__)
return t1, t2
# python2/python3 compatible system of
# __cmp__ - __lt__ + __eq__
def __lt__(self, o):
t1, t2 = _compare(self, o)
return t1 < t2
def __eq__(self, o):
t1, t2 = _compare(self, o)
return t1 == t2
def __repr__(self):
L = ["%s=%r" % (key, getattr(self, key, ''))
for key in self.__class__.c.keys()]
return '%s(%s)' % (self.__class__.__name__, ','.join(L))
for m in ['__eq__', '__repr__', '__lt__']:
setattr(klass, m, eval(m))
klass._table = selectable
klass.c = expression.ColumnCollection()
mappr = mapper(klass,
selectable,
extension=AutoAdd(session),
**mapper_kwargs)
for k in mappr.iterate_properties:
klass.c[k.key] = k.columns[0]
klass._query = session.query_property()
return klass
class SqlSoup(object):
def __init__(self, engine_or_metadata, base=object, **kw):
"""Initialize a new ``SqlSoup``.
`base` is the class that all created entity classes should subclass.
`engine_or_metadata` may be an ``Engine``, a ``MetaData``, or a database
URL string suitable for passing to ``create_engine``.
"""
self.session = kw.pop('session', Session)
self.base=base
if isinstance(engine_or_metadata, MetaData):
self._metadata = engine_or_metadata
elif isinstance(engine_or_metadata, (basestring, Engine)):
self._metadata = MetaData(engine_or_metadata)
else:
raise ArgumentError("invalid engine or metadata argument %r" % engine_or_metadata)
self._cache = {}
self.schema = None
@property
def engine(self):
return self._metadata.bind
bind = engine
def delete(self, *args, **kwargs):
self.session.delete(*args, **kwargs)
def execute(self, stmt, **params):
return self.session.execute(sql.text(stmt, bind=self.bind), **params)
@property
def _underlying_session(self):
if isinstance(self.session, session.Session):
return self.session
else:
return self.session()
def connection(self):
return self._underlying_session._connection_for_bind(self.bind)
def flush(self):
self.session.flush()
def rollback(self):
self.session.rollback()
def commit(self):
self.session.commit()
def clear(self):
self.session.expunge_all()
def expunge(self, *args, **kw):
self.session.expunge(*args, **kw)
def expunge_all(self):
self.session.expunge_all()
def map(self, selectable, **kwargs):
try:
t = self._cache[selectable]
except KeyError:
t = _class_for_table(self.session, self.engine, selectable, **kwargs)
self._cache[selectable] = t
return t
def with_labels(self, item):
# TODO give meaningful aliases
return self.map(
expression._clause_element_as_expr(item).
select(use_labels=True).
alias('foo'))
def join(self, *args, **kwargs):
j = join(*args, **kwargs)
return self.map(j)
def entity(self, attr, schema=None):
try:
t = self._cache[attr]
except KeyError, ke:
table = Table(attr, self._metadata, autoload=True, autoload_with=self.bind, schema=schema or self.schema)
if not table.primary_key.columns:
raise PKNotFoundError('table %r does not have a primary key defined [columns: %s]' % (attr, ','.join(table.c.keys())))
if table.columns:
t = _class_for_table(self.session, self.engine, table, self.base)
else:
t = None
self._cache[attr] = t
return t
def __getattr__(self, attr):
return self.entity(attr)
def __repr__(self):
return 'SqlSoup(%r)' % self._metadata
```
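For orientation, a minimal usage sketch of the ``SqlSoup`` class above. The database URL and table name are invented for illustration; this is not part of the module.
```python
# Hypothetical usage (assumes a reachable database whose 'users' table has a
# primary key; otherwise entity() raises PKNotFoundError):
db = SqlSoup('sqlite:///example.db')   # a URL string, an Engine or a MetaData
users = db.users                       # __getattr__ -> entity(): reflects and maps 'users'
print users._table.name                # the reflected Table is stored on the class
db.commit()                            # transaction control is delegated to the session
```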
#### File: lib/sqlalchemy/log.py
```python
import logging
import sys
from sqlalchemy import util
rootlogger = logging.getLogger('sqlalchemy')
if rootlogger.level == logging.NOTSET:
rootlogger.setLevel(logging.WARN)
default_enabled = False
def default_logging(name):
global default_enabled
if logging.getLogger(name).getEffectiveLevel() < logging.WARN:
default_enabled = True
if not default_enabled:
default_enabled = True
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s %(name)s %(message)s'))
rootlogger.addHandler(handler)
_logged_classes = set()
def class_logger(cls, enable=False):
logger = logging.getLogger(cls.__module__ + "." + cls.__name__)
if enable == 'debug':
logger.setLevel(logging.DEBUG)
elif enable == 'info':
logger.setLevel(logging.INFO)
cls._should_log_debug = lambda self: logger.isEnabledFor(logging.DEBUG)
cls._should_log_info = lambda self: logger.isEnabledFor(logging.INFO)
cls.logger = logger
_logged_classes.add(cls)
class Identified(object):
@util.memoized_property
def logging_name(self):
# limit the number of loggers by chopping off the hex(id).
# some novice users unfortunately create an unlimited number
# of Engines in their applications which would otherwise
# cause the app to run out of memory.
return "0x...%s" % hex(id(self))[-4:]
def instance_logger(instance, echoflag=None):
"""create a logger for an instance that implements :class:`Identified`.
Warning: this is an expensive call which also results in a permanent
increase in memory overhead for each call. Use only for
low-volume, long-time-spanning objects.
"""
name = "%s.%s.%s" % (instance.__class__.__module__,
instance.__class__.__name__, instance.logging_name)
if echoflag is not None:
l = logging.getLogger(name)
if echoflag == 'debug':
default_logging(name)
l.setLevel(logging.DEBUG)
elif echoflag is True:
default_logging(name)
l.setLevel(logging.INFO)
elif echoflag is False:
l.setLevel(logging.WARN)
else:
l = logging.getLogger(name)
instance._should_log_debug = lambda: l.isEnabledFor(logging.DEBUG)
instance._should_log_info = lambda: l.isEnabledFor(logging.INFO)
return l
class echo_property(object):
__doc__ = """\
When ``True``, enable log output for this element.
This has the effect of setting the Python logging level for the namespace
of this element's class and object reference. A value of boolean ``True``
indicates that the loglevel ``logging.INFO`` will be set for the logger,
whereas the string value ``debug`` will set the loglevel to
``logging.DEBUG``.
"""
def __get__(self, instance, owner):
if instance is None:
return self
else:
return instance._should_log_debug() and 'debug' or \
(instance._should_log_info() and True or False)
def __set__(self, instance, value):
instance_logger(instance, echoflag=value)
```
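A short sketch of how the pieces above are typically wired together. It reuses the names defined in the file and is illustrative only, not an excerpt from SQLAlchemy.
```python
# Assumed typical usage of the helpers defined above:
class MyComponent(Identified):
    pass
class_logger(MyComponent, enable='debug')   # per-class logger at DEBUG
comp = MyComponent()
instance_logger(comp, echoflag=True)        # per-instance logger at INFO, echoed to stdout
if comp._should_log_info():
    comp.logger.info("configured component %s", comp.logging_name)
```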
#### File: sqlalchemy/sql/functions.py
```python
from sqlalchemy import types as sqltypes
from sqlalchemy.sql.expression import (
ClauseList, Function, _literal_as_binds, text, _type_from_args
)
from sqlalchemy.sql import operators
from sqlalchemy.sql.visitors import VisitableType
class _GenericMeta(VisitableType):
def __call__(self, *args, **kwargs):
args = [_literal_as_binds(c) for c in args]
return type.__call__(self, *args, **kwargs)
class GenericFunction(Function):
__metaclass__ = _GenericMeta
def __init__(self, type_=None, args=(), **kwargs):
self.packagenames = []
self.name = self.__class__.__name__
self._bind = kwargs.get('bind', None)
self.clause_expr = ClauseList(
operator=operators.comma_op,
group_contents=True, *args).self_group()
self.type = sqltypes.to_instance(
type_ or getattr(self, '__return_type__', None))
class AnsiFunction(GenericFunction):
def __init__(self, **kwargs):
GenericFunction.__init__(self, **kwargs)
class ReturnTypeFromArgs(GenericFunction):
"""Define a function whose return type is the same as its arguments."""
def __init__(self, *args, **kwargs):
kwargs.setdefault('type_', _type_from_args(args))
GenericFunction.__init__(self, args=args, **kwargs)
class coalesce(ReturnTypeFromArgs):
pass
class max(ReturnTypeFromArgs):
pass
class min(ReturnTypeFromArgs):
pass
class sum(ReturnTypeFromArgs):
pass
class now(GenericFunction):
__return_type__ = sqltypes.DateTime
class concat(GenericFunction):
__return_type__ = sqltypes.String
def __init__(self, *args, **kwargs):
GenericFunction.__init__(self, args=args, **kwargs)
class char_length(GenericFunction):
__return_type__ = sqltypes.Integer
def __init__(self, arg, **kwargs):
GenericFunction.__init__(self, args=[arg], **kwargs)
class random(GenericFunction):
def __init__(self, *args, **kwargs):
kwargs.setdefault('type_', None)
GenericFunction.__init__(self, args=args, **kwargs)
class count(GenericFunction):
"""The ANSI COUNT aggregate function. With no arguments, emits COUNT \*."""
__return_type__ = sqltypes.Integer
def __init__(self, expression=None, **kwargs):
if expression is None:
expression = text('*')
GenericFunction.__init__(self, args=(expression,), **kwargs)
class current_date(AnsiFunction):
__return_type__ = sqltypes.Date
class current_time(AnsiFunction):
__return_type__ = sqltypes.Time
class current_timestamp(AnsiFunction):
__return_type__ = sqltypes.DateTime
class current_user(AnsiFunction):
__return_type__ = sqltypes.String
class localtime(AnsiFunction):
__return_type__ = sqltypes.DateTime
class localtimestamp(AnsiFunction):
__return_type__ = sqltypes.DateTime
class session_user(AnsiFunction):
__return_type__ = sqltypes.String
class sysdate(AnsiFunction):
__return_type__ = sqltypes.DateTime
class user(AnsiFunction):
__return_type__ = sqltypes.String
```
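A hedged illustration of how these classes surface through the public ``func`` accessor, assuming (as in mainline SQLAlchemy) that ``func.<name>`` resolves lower-cased names against this module; this snippet is not part of the file.
```python
from sqlalchemy.sql import func
from sqlalchemy import literal
expr = func.coalesce(literal(5), None)   # ReturnTypeFromArgs: type inferred from args
stamp = func.now()                       # __return_type__ -> DateTime
print expr.type, stamp.type              # expected: Integer(), DateTime()
```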
#### File: sqlalchemy/test/config.py
```python
import optparse, os, sys, re, ConfigParser, time, warnings
# 2to3
import StringIO
logging = None
__all__ = 'parser', 'configure', 'options',
db = None
db_label, db_url, db_opts = None, None, {}
options = None
file_config = None
base_config = """
[db]
sqlite=sqlite:///:memory:
sqlite_file=sqlite:///querytest.db
postgresql=postgresql://scott:[email protected]:5432/test
postgres=postgresql://scott:[email protected]:5432/test
pg8000=postgresql+pg8000://scott:[email protected]:5432/test
postgresql_jython=postgresql+zxjdbc://scott:[email protected]:5432/test
mysql_jython=mysql+zxjdbc://scott:[email protected]:5432/test
mysql=mysql://scott:[email protected]:3306/test
oracle=oracle://scott:[email protected]:1521
oracle8=oracle://scott:[email protected]:1521/?use_ansi=0
mssql=mssql://scott:tiger@SQUAWK\\SQLEXPRESS/test
firebird=firebird://sysdba:masterkey@localhost//tmp/test.fdb
maxdb=maxdb://MONA:RED@/maxdb1
"""
def _log(option, opt_str, value, parser):
global logging
if not logging:
import logging
logging.basicConfig()
if opt_str.endswith('-info'):
logging.getLogger(value).setLevel(logging.INFO)
elif opt_str.endswith('-debug'):
logging.getLogger(value).setLevel(logging.DEBUG)
def _list_dbs(*args):
print "Available --db options (use --dburi to override)"
for macro in sorted(file_config.options('db')):
print "%20s\t%s" % (macro, file_config.get('db', macro))
sys.exit(0)
def _server_side_cursors(options, opt_str, value, parser):
db_opts['server_side_cursors'] = True
def _engine_strategy(options, opt_str, value, parser):
if value:
db_opts['strategy'] = value
class _ordered_map(object):
def __init__(self):
self._keys = list()
self._data = dict()
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._data[key] = value
def __iter__(self):
for key in self._keys:
yield self._data[key]
# at one point in refactoring, modules were injecting into the config
# process. this could probably just become a list now.
post_configure = _ordered_map()
def _engine_uri(options, file_config):
global db_label, db_url
db_label = 'sqlite'
if options.dburi:
db_url = options.dburi
db_label = db_url[:db_url.index(':')]
elif options.db:
db_label = options.db
db_url = None
if db_url is None:
if db_label not in file_config.options('db'):
raise RuntimeError(
"Unknown engine. Specify --dbs for known engines.")
db_url = file_config.get('db', db_label)
post_configure['engine_uri'] = _engine_uri
def _require(options, file_config):
if not(options.require or
(file_config.has_section('require') and
file_config.items('require'))):
return
try:
import pkg_resources
except ImportError:
raise RuntimeError("setuptools is required for version requirements")
cmdline = []
for requirement in options.require:
pkg_resources.require(requirement)
        cmdline.append(re.split('\s*[<!>=]', requirement, 1)[0])
if file_config.has_section('require'):
for label, requirement in file_config.items('require'):
            if not (label == db_label or label.startswith('%s.' % db_label)):
continue
seen = [c for c in cmdline if requirement.startswith(c)]
if seen:
continue
pkg_resources.require(requirement)
post_configure['require'] = _require
def _engine_pool(options, file_config):
if options.mockpool:
from sqlalchemy import pool
db_opts['poolclass'] = pool.AssertionPool
post_configure['engine_pool'] = _engine_pool
def _create_testing_engine(options, file_config):
from sqlalchemy.test import engines, testing
global db
db = engines.testing_engine(db_url, db_opts)
testing.db = db
post_configure['create_engine'] = _create_testing_engine
def _prep_testing_database(options, file_config):
from sqlalchemy.test import engines
from sqlalchemy import schema
# also create alt schemas etc. here?
if options.dropfirst:
e = engines.utf8_engine()
existing = e.table_names()
if existing:
print "Dropping existing tables in database: " + db_url
try:
print "Tables: %s" % ', '.join(existing)
except:
pass
print "Abort within 5 seconds..."
time.sleep(5)
md = schema.MetaData(e, reflect=True)
md.drop_all()
e.dispose()
post_configure['prep_db'] = _prep_testing_database
def _set_table_options(options, file_config):
from sqlalchemy.test import schema
table_options = schema.table_options
for spec in options.tableopts:
key, value = spec.split('=')
table_options[key] = value
if options.mysql_engine:
table_options['mysql_engine'] = options.mysql_engine
post_configure['table_options'] = _set_table_options
def _reverse_topological(options, file_config):
if options.reversetop:
from sqlalchemy.orm import unitofwork, session, mapper, dependency
from sqlalchemy import topological
from sqlalchemy.test.util import RandomSet
topological.set = unitofwork.set = session.set = mapper.set = dependency.set = RandomSet
post_configure['topological'] = _reverse_topological
```
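For clarity, a sketch of how the ``post_configure`` hooks registered above are expected to be consumed by the test harness. This is assumed driver code; the actual runner lives elsewhere.
```python
# _ordered_map yields the registered callables in insertion order, so the
# configuration steps run in exactly the sequence they were defined above:
def run_post_configure(options, file_config):
    for hook in post_configure:
        hook(options, file_config)
```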
#### File: sqlalchemy/test/orm.py
```python
import inspect, re
import config, testing
from sqlalchemy import orm
__all__ = 'mapper',
_whitespace = re.compile(r'^(\s+)')
def _find_pragma(lines, current):
m = _whitespace.match(lines[current])
basis = m and m.group() or ''
for line in reversed(lines[0:current]):
if 'testlib.pragma' in line:
return line
m = _whitespace.match(line)
indent = m and m.group() or ''
# simplistic detection:
# >> # testlib.pragma foo
# >> center_line()
if indent == basis:
break
# >> # testlib.pragma foo
# >> if fleem:
# >> center_line()
if line.endswith(':'):
break
return None
def _make_blocker(method_name, fallback):
"""Creates tripwired variant of a method, raising when called.
To excempt an invocation from blockage, there are two options.
1) add a pragma in a comment::
# testlib.pragma exempt:methodname
offending_line()
2) add a magic cookie to the function's namespace::
__sa_baremethodname_exempt__ = True
...
offending_line()
another_offending_lines()
The second is useful for testing and development.
"""
if method_name.startswith('__') and method_name.endswith('__'):
frame_marker = '__sa_%s_exempt__' % method_name[2:-2]
else:
frame_marker = '__sa_%s_exempt__' % method_name
pragma_marker = 'exempt:' + method_name
def method(self, *args, **kw):
frame_r = None
try:
frame = inspect.stack()[1][0]
frame_r = inspect.getframeinfo(frame, 9)
module = frame.f_globals.get('__name__', '')
type_ = type(self)
pragma = _find_pragma(*frame_r[3:5])
exempt = (
(not module.startswith('sqlalchemy')) or
(pragma and pragma_marker in pragma) or
(frame_marker in frame.f_locals) or
('self' in frame.f_locals and
getattr(frame.f_locals['self'], frame_marker, False)))
if exempt:
supermeth = getattr(super(type_, self), method_name, None)
if (supermeth is None or
getattr(supermeth, 'im_func', None) is method):
return fallback(self, *args, **kw)
else:
return supermeth(*args, **kw)
else:
raise AssertionError(
"%s.%s called in %s, line %s in %s" % (
type_.__name__, method_name, module, frame_r[1], frame_r[2]))
finally:
del frame
method.__name__ = method_name
return method
def mapper(type_, *args, **kw):
forbidden = [
('__hash__', 'unhashable', lambda s: id(s)),
('__eq__', 'noncomparable', lambda s, o: s is o),
('__ne__', 'noncomparable', lambda s, o: s is not o),
('__cmp__', 'noncomparable', lambda s, o: object.__cmp__(s, o)),
('__le__', 'noncomparable', lambda s, o: object.__le__(s, o)),
('__lt__', 'noncomparable', lambda s, o: object.__lt__(s, o)),
('__ge__', 'noncomparable', lambda s, o: object.__ge__(s, o)),
('__gt__', 'noncomparable', lambda s, o: object.__gt__(s, o)),
('__nonzero__', 'truthless', lambda s: 1), ]
if isinstance(type_, type) and type_.__bases__ == (object,):
for method_name, option, fallback in forbidden:
if (getattr(config.options, option, False) and
method_name not in type_.__dict__):
setattr(type_, method_name, _make_blocker(method_name, fallback))
return orm.mapper(type_, *args, **kw)
```
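As a concrete, hypothetical illustration of the second exemption option described in the ``_make_blocker`` docstring, a caller can place the magic cookie in its local namespace:
```python
# Hypothetical test helper, not part of the suite: the local variable
# '__sa_hash_exempt__' exempts this frame from the __hash__ blocker installed
# by mapper() when the 'unhashable' option is enabled.
def hash_for_test(obj):
    __sa_hash_exempt__ = True
    return hash(obj)
```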
#### File: test/dialect/test_access.py
```python
from sqlalchemy import *
from sqlalchemy import sql
from sqlalchemy.databases import access
from sqlalchemy.test import *
class CompileTest(TestBase, AssertsCompiledSQL):
__dialect__ = access.dialect()
def test_extract(self):
t = sql.table('t', sql.column('col1'))
mapping = {
'month': 'm',
'day': 'd',
'year': 'yyyy',
'second': 's',
'hour': 'h',
'doy': 'y',
'minute': 'n',
'quarter': 'q',
'dow': 'w',
'week': 'ww'
}
for field, subst in mapping.items():
self.assert_compile(
select([extract(field, t.c.col1)]),
'SELECT DATEPART("%s", t.col1) AS anon_1 FROM t' % subst)
```
#### File: test/engine/_base.py
```python
import sys
import sqlalchemy as sa
from sqlalchemy.test import testing
from sqlalchemy.test.testing import adict
class TablesTest(testing.TestBase):
"""An integration test that creates and uses tables."""
# 'once', 'each', None
run_setup_bind = 'once'
# 'once', 'each', None
run_define_tables = 'once'
# 'once', 'each', None
run_inserts = 'each'
# 'foreach', None
run_deletes = 'each'
# 'once', 'each', None
run_dispose_bind = None
_artifact_registries = ('tables', 'other_artifacts')
bind = None
metadata = None
tables = None
other_artifacts = None
@classmethod
def setup_class(cls):
if cls.run_setup_bind is None:
assert cls.bind is not None
assert cls.run_deletes in (None, 'each')
if cls.run_inserts == 'once':
assert cls.run_deletes is None
if cls.tables is None:
cls.tables = adict()
if cls.other_artifacts is None:
cls.other_artifacts = adict()
if cls.bind is None:
setattr(cls, 'bind', cls.setup_bind())
if cls.metadata is None:
setattr(cls, 'metadata', sa.MetaData())
if cls.metadata.bind is None:
cls.metadata.bind = cls.bind
if cls.run_define_tables == 'once':
cls.define_tables(cls.metadata)
cls.metadata.create_all()
cls.tables.update(cls.metadata.tables)
if cls.run_inserts == 'once':
cls._load_fixtures()
cls.insert_data()
def setup(self):
cls = self.__class__
        if self.run_setup_bind == 'each':
setattr(cls, 'bind', self.setup_bind())
if self.run_define_tables == 'each':
self.tables.clear()
self.metadata.drop_all()
self.metadata.clear()
self.define_tables(self.metadata)
self.metadata.create_all()
self.tables.update(self.metadata.tables)
if self.run_inserts == 'each':
self._load_fixtures()
self.insert_data()
def teardown(self):
# no need to run deletes if tables are recreated on setup
if self.run_define_tables != 'each' and self.run_deletes:
for table in reversed(self.metadata.sorted_tables):
try:
table.delete().execute().close()
except sa.exc.DBAPIError, ex:
print >> sys.stderr, "Error emptying table %s: %r" % (
table, ex)
if self.run_dispose_bind == 'each':
self.dispose_bind(self.bind)
@classmethod
def teardown_class(cls):
cls.metadata.drop_all()
if cls.dispose_bind:
cls.dispose_bind(cls.bind)
cls.metadata.bind = None
if cls.run_setup_bind is not None:
cls.bind = None
@classmethod
def setup_bind(cls):
return testing.db
@classmethod
def dispose_bind(cls, bind):
if hasattr(bind, 'dispose'):
bind.dispose()
elif hasattr(bind, 'close'):
bind.close()
@classmethod
def define_tables(cls, metadata):
raise NotImplementedError()
@classmethod
def fixtures(cls):
return {}
@classmethod
def insert_data(cls):
pass
def sql_count_(self, count, fn):
self.assert_sql_count(self.bind, fn, count)
def sql_eq_(self, callable_, statements, with_sequences=None):
self.assert_sql(self.bind,
callable_, statements, with_sequences)
def _load_fixtures(self):
headers, rows = {}, {}
for table, data in self.fixtures().iteritems():
if isinstance(table, basestring):
table = self.tables[table]
headers[table] = data[0]
rows[table] = data[1:]
for table in self.metadata.sorted_tables:
if table not in headers:
continue
table.bind.execute(
table.insert(),
[dict(zip(headers[table], column_values))
for column_values in rows[table]])
class AltEngineTest(testing.TestBase):
engine = None
@classmethod
def setup_class(cls):
cls.engine = cls.create_engine()
super(AltEngineTest, cls).setup_class()
@classmethod
def teardown_class(cls):
cls.engine.dispose()
cls.engine = None
super(AltEngineTest, cls).teardown_class()
@classmethod
def create_engine(cls):
raise NotImplementedError
```
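A hypothetical subclass sketch (table and fixture names invented) showing how the ``run_*`` flags, ``define_tables`` and ``fixtures`` hooks above fit together:
```python
class WidgetTest(TablesTest):
    # insert fixture rows once per class; setup_class() then requires that
    # per-test deletes are disabled
    run_inserts = 'once'
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        sa.Table('widgets', metadata,
                 sa.Column('id', sa.Integer, primary_key=True),
                 sa.Column('name', sa.String(30)))
    @classmethod
    def fixtures(cls):
        # the first element of each value is the header row, the rest are data rows
        return {'widgets': (('id', 'name'),
                            (1, 'spanner'),
                            (2, 'wrench'))}
```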
#### File: orm/inheritance/test_selects.py
```python
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.test import testing
from test.orm._fixtures import Base
from test.orm._base import MappedTest
class InheritingSelectablesTest(MappedTest):
@classmethod
def define_tables(cls, metadata):
global foo, bar, baz
foo = Table('foo', metadata,
Column('a', String(30), primary_key=1),
Column('b', String(30), nullable=0))
bar = foo.select(foo.c.b == 'bar').alias('bar')
baz = foo.select(foo.c.b == 'baz').alias('baz')
def test_load(self):
# TODO: add persistence test also
testing.db.execute(foo.insert(), a='not bar', b='baz')
testing.db.execute(foo.insert(), a='also not bar', b='baz')
testing.db.execute(foo.insert(), a='i am bar', b='bar')
testing.db.execute(foo.insert(), a='also bar', b='bar')
class Foo(Base): pass
class Bar(Foo): pass
class Baz(Foo): pass
mapper(Foo, foo, polymorphic_on=foo.c.b)
mapper(Baz, baz,
with_polymorphic=('*', foo.join(baz, foo.c.b=='baz').alias('baz')),
inherits=Foo,
inherit_condition=(foo.c.a==baz.c.a),
inherit_foreign_keys=[baz.c.a],
polymorphic_identity='baz')
mapper(Bar, bar,
with_polymorphic=('*', foo.join(bar, foo.c.b=='bar').alias('bar')),
inherits=Foo,
inherit_condition=(foo.c.a==bar.c.a),
inherit_foreign_keys=[bar.c.a],
polymorphic_identity='bar')
s = sessionmaker(bind=testing.db)()
assert [Baz(), Baz(), Bar(), Bar()] == s.query(Foo).order_by(Foo.b.desc()).all()
assert [Bar(), Bar()] == s.query(Bar).all()
```
#### File: test/orm/test_backref_mutations.py
```python
from sqlalchemy.test.testing import assert_raises, assert_raises_message
from sqlalchemy import Integer, String, ForeignKey, Sequence, exc as sa_exc
from sqlalchemy.test.schema import Table
from sqlalchemy.test.schema import Column
from sqlalchemy.orm import mapper, relationship, create_session, class_mapper, backref, sessionmaker
from sqlalchemy.orm import attributes, exc as orm_exc
from sqlalchemy.test import testing
from sqlalchemy.test.testing import eq_
from test.orm import _base, _fixtures
class O2MCollectionTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
@testing.resolve_artifact_names
def setup_mappers(cls):
mapper(Address, addresses)
mapper(User, users, properties = dict(
addresses = relationship(Address, backref="user"),
))
@testing.resolve_artifact_names
def test_collection_move_hitslazy(self):
sess = sessionmaker()()
a1 = Address(email_address="address1")
a2 = Address(email_address="address2")
a3 = Address(email_address="address3")
u1= User(name='jack', addresses=[a1, a2, a3])
u2= User(name='ed')
sess.add_all([u1, a1, a2, a3])
sess.commit()
#u1.addresses
def go():
u2.addresses.append(a1)
u2.addresses.append(a2)
u2.addresses.append(a3)
self.assert_sql_count(testing.db, go, 0)
@testing.resolve_artifact_names
def test_collection_move_preloaded(self):
sess = sessionmaker()()
a1 = Address(email_address="address1")
u1 = User(name='jack', addresses=[a1])
u2 = User(name='ed')
sess.add_all([u1, u2])
sess.commit() # everything is expired
# load u1.addresses collection
u1.addresses
u2.addresses.append(a1)
# backref fires
assert a1.user is u2
        # doesn't extend to the previous collection though,
        # which was already loaded.
        # flushing at this point means it's anyone's guess.
assert a1 in u1.addresses
assert a1 in u2.addresses
@testing.resolve_artifact_names
def test_collection_move_notloaded(self):
sess = sessionmaker()()
a1 = Address(email_address="address1")
u1 = User(name='jack', addresses=[a1])
u2 = User(name='ed')
sess.add_all([u1, u2])
sess.commit() # everything is expired
u2.addresses.append(a1)
# backref fires
assert a1.user is u2
        # u1.addresses wasn't loaded,
        # so when it loads it's correct
assert a1 not in u1.addresses
assert a1 in u2.addresses
@testing.resolve_artifact_names
def test_collection_move_commitfirst(self):
sess = sessionmaker()()
a1 = Address(email_address="address1")
u1 = User(name='jack', addresses=[a1])
u2 = User(name='ed')
sess.add_all([u1, u2])
sess.commit() # everything is expired
# load u1.addresses collection
u1.addresses
u2.addresses.append(a1)
# backref fires
assert a1.user is u2
# everything expires, no changes in
# u1.addresses, so all is fine
sess.commit()
assert a1 not in u1.addresses
assert a1 in u2.addresses
@testing.resolve_artifact_names
def test_scalar_move_preloaded(self):
sess = sessionmaker()()
u1 = User(name='jack')
u2 = User(name='ed')
a1 = Address(email_address='a1')
a1.user = u1
sess.add_all([u1, u2, a1])
sess.commit()
# u1.addresses is loaded
u1.addresses
# direct set - the "old" is "fetched",
# but only from the local session - not the
# database, due to the PASSIVE_NO_FETCH flag.
# this is a more fine grained behavior introduced
# in 0.6
a1.user = u2
assert a1 not in u1.addresses
assert a1 in u2.addresses
@testing.resolve_artifact_names
def test_plain_load_passive(self):
"""test that many-to-one set doesn't load the old value."""
sess = sessionmaker()()
u1 = User(name='jack')
u2 = User(name='ed')
a1 = Address(email_address='a1')
a1.user = u1
sess.add_all([u1, u2, a1])
sess.commit()
# in this case, a lazyload would
# ordinarily occur except for the
# PASSIVE_NO_FETCH flag.
def go():
a1.user = u2
self.assert_sql_count(testing.db, go, 0)
assert a1 not in u1.addresses
assert a1 in u2.addresses
@testing.resolve_artifact_names
def test_set_none(self):
sess = sessionmaker()()
u1 = User(name='jack')
a1 = Address(email_address='a1')
a1.user = u1
sess.add_all([u1, a1])
sess.commit()
# works for None too
def go():
a1.user = None
self.assert_sql_count(testing.db, go, 0)
assert a1 not in u1.addresses
@testing.resolve_artifact_names
def test_scalar_move_notloaded(self):
sess = sessionmaker()()
u1 = User(name='jack')
u2 = User(name='ed')
a1 = Address(email_address='a1')
a1.user = u1
sess.add_all([u1, u2, a1])
sess.commit()
# direct set - the fetching of the
# "old" u1 here allows the backref
# to remove it from the addresses collection
a1.user = u2
assert a1 not in u1.addresses
assert a1 in u2.addresses
@testing.resolve_artifact_names
def test_scalar_move_commitfirst(self):
sess = sessionmaker()()
u1 = User(name='jack')
u2 = User(name='ed')
a1 = Address(email_address='a1')
a1.user = u1
sess.add_all([u1, u2, a1])
sess.commit()
# u1.addresses is loaded
u1.addresses
# direct set - the fetching of the
# "old" u1 here allows the backref
# to remove it from the addresses collection
a1.user = u2
sess.commit()
assert a1 not in u1.addresses
assert a1 in u2.addresses
class O2OScalarBackrefMoveTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
@testing.resolve_artifact_names
def setup_mappers(cls):
mapper(Address, addresses)
mapper(User, users, properties = {
'address':relationship(Address, backref=backref("user"), uselist=False)
})
@testing.resolve_artifact_names
def test_collection_move_preloaded(self):
sess = sessionmaker()()
a1 = Address(email_address="address1")
u1 = User(name='jack', address=a1)
u2 = User(name='ed')
sess.add_all([u1, u2])
sess.commit() # everything is expired
# load u1.address
u1.address
# reassign
u2.address = a1
assert u2.address is a1
# backref fires
assert a1.user is u2
        # doesn't extend to the previous attribute though.
        # flushing at this point means it's anyone's guess.
assert u1.address is a1
assert u2.address is a1
@testing.resolve_artifact_names
def test_scalar_move_preloaded(self):
sess = sessionmaker()()
a1 = Address(email_address="address1")
a2 = Address(email_address="address1")
u1 = User(name='jack', address=a1)
sess.add_all([u1, a1, a2])
sess.commit() # everything is expired
# load a1.user
a1.user
# reassign
a2.user = u1
# backref fires
assert u1.address is a2
# stays on both sides
assert a1.user is u1
assert a2.user is u1
@testing.resolve_artifact_names
def test_collection_move_notloaded(self):
sess = sessionmaker()()
a1 = Address(email_address="address1")
u1 = User(name='jack', address=a1)
u2 = User(name='ed')
sess.add_all([u1, u2])
sess.commit() # everything is expired
# reassign
u2.address = a1
assert u2.address is a1
# backref fires
assert a1.user is u2
# u1.address loads now after a flush
assert u1.address is None
assert u2.address is a1
@testing.resolve_artifact_names
def test_scalar_move_notloaded(self):
sess = sessionmaker()()
a1 = Address(email_address="address1")
a2 = Address(email_address="address1")
u1 = User(name='jack', address=a1)
sess.add_all([u1, a1, a2])
sess.commit() # everything is expired
# reassign
a2.user = u1
# backref fires
assert u1.address is a2
# stays on both sides
assert a1.user is u1
assert a2.user is u1
@testing.resolve_artifact_names
def test_collection_move_commitfirst(self):
sess = sessionmaker()()
a1 = Address(email_address="address1")
u1 = User(name='jack', address=a1)
u2 = User(name='ed')
sess.add_all([u1, u2])
sess.commit() # everything is expired
# load u1.address
u1.address
# reassign
u2.address = a1
assert u2.address is a1
# backref fires
assert a1.user is u2
        # the commit cancels out u1.address
        # being loaded; on next access it's fine.
sess.commit()
assert u1.address is None
assert u2.address is a1
@testing.resolve_artifact_names
def test_scalar_move_commitfirst(self):
sess = sessionmaker()()
a1 = Address(email_address="address1")
a2 = Address(email_address="address2")
u1 = User(name='jack', address=a1)
sess.add_all([u1, a1, a2])
sess.commit() # everything is expired
# load
assert a1.user is u1
# reassign
a2.user = u1
# backref fires
assert u1.address is a2
        # didn't work this way though
assert a1.user is u1
# moves appropriately after commit
sess.commit()
assert u1.address is a2
assert a1.user is None
assert a2.user is u1
class O2OScalarMoveTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
@testing.resolve_artifact_names
def setup_mappers(cls):
mapper(Address, addresses)
mapper(User, users, properties = {
'address':relationship(Address, uselist=False)
})
@testing.resolve_artifact_names
def test_collection_move_commitfirst(self):
sess = sessionmaker()()
a1 = Address(email_address="address1")
u1 = User(name='jack', address=a1)
u2 = User(name='ed')
sess.add_all([u1, u2])
sess.commit() # everything is expired
# load u1.address
u1.address
# reassign
u2.address = a1
assert u2.address is a1
        # the commit cancels out u1.address
        # being loaded; on next access it's fine.
sess.commit()
assert u1.address is None
assert u2.address is a1
class O2OScalarOrphanTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
@testing.resolve_artifact_names
def setup_mappers(cls):
mapper(Address, addresses)
mapper(User, users, properties = {
'address':relationship(Address, uselist=False,
backref=backref('user', single_parent=True, cascade="all, delete-orphan"))
})
@testing.resolve_artifact_names
def test_m2o_event(self):
sess = sessionmaker()()
a1 = Address(email_address="address1")
u1 = User(name='jack', address=a1)
sess.add(u1)
sess.commit()
sess.expunge(u1)
u2= User(name='ed')
# the _SingleParent extension sets the backref get to "active" !
# u1 gets loaded and deleted
u2.address = a1
sess.commit()
assert sess.query(User).count() == 1
class M2MScalarMoveTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
@testing.resolve_artifact_names
def setup_mappers(cls):
mapper(Item, items, properties={
'keyword':relationship(Keyword, secondary=item_keywords, uselist=False, backref=backref("item", uselist=False))
})
mapper(Keyword, keywords)
@testing.resolve_artifact_names
def test_collection_move_preloaded(self):
sess = sessionmaker()()
k1 = Keyword(name='k1')
i1 = Item(description='i1', keyword=k1)
i2 = Item(description='i2')
sess.add_all([i1, i2, k1])
sess.commit() # everything is expired
# load i1.keyword
assert i1.keyword is k1
i2.keyword = k1
assert k1.item is i2
# nothing happens.
assert i1.keyword is k1
assert i2.keyword is k1
@testing.resolve_artifact_names
def test_collection_move_notloaded(self):
sess = sessionmaker()()
k1 = Keyword(name='k1')
i1 = Item(description='i1', keyword=k1)
i2 = Item(description='i2')
sess.add_all([i1, i2, k1])
sess.commit() # everything is expired
i2.keyword = k1
assert k1.item is i2
assert i1.keyword is None
assert i2.keyword is k1
@testing.resolve_artifact_names
def test_collection_move_commit(self):
sess = sessionmaker()()
k1 = Keyword(name='k1')
i1 = Item(description='i1', keyword=k1)
i2 = Item(description='i2')
sess.add_all([i1, i2, k1])
sess.commit() # everything is expired
# load i1.keyword
assert i1.keyword is k1
i2.keyword = k1
assert k1.item is i2
sess.commit()
assert i1.keyword is None
assert i2.keyword is k1
```
#### File: test/orm/test_bind.py
```python
from sqlalchemy.test.testing import assert_raises, assert_raises_message
from sqlalchemy import MetaData, Integer
from sqlalchemy.test.schema import Table
from sqlalchemy.test.schema import Column
from sqlalchemy.orm import mapper, create_session
import sqlalchemy as sa
from sqlalchemy.test import testing
from test.orm import _base
class BindTest(_base.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('test_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', Integer))
@classmethod
def setup_classes(cls):
class Foo(_base.BasicEntity):
pass
@classmethod
@testing.resolve_artifact_names
def setup_mappers(cls):
meta = MetaData()
test_table.tometadata(meta)
assert meta.tables['test_table'].bind is None
mapper(Foo, meta.tables['test_table'])
@testing.resolve_artifact_names
def test_session_bind(self):
engine = self.metadata.bind
for bind in (engine, engine.connect()):
try:
sess = create_session(bind=bind)
assert sess.bind is bind
f = Foo()
sess.add(f)
sess.flush()
assert sess.query(Foo).get(f.id) is f
finally:
if hasattr(bind, 'close'):
bind.close()
@testing.resolve_artifact_names
def test_session_unbound(self):
sess = create_session()
sess.add(Foo())
assert_raises_message(
sa.exc.UnboundExecutionError,
('Could not locate a bind configured on Mapper|Foo|test_table '
'or this Session'),
sess.flush)
```
#### File: test/orm/test_utils.py
```python
from sqlalchemy.test.testing import assert_raises, assert_raises_message
from sqlalchemy.orm import interfaces, util
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy.orm import aliased
from sqlalchemy.orm import mapper, create_session
from sqlalchemy.test import TestBase, testing
from test.orm import _fixtures
from sqlalchemy.test.testing import eq_
class ExtensionCarrierTest(TestBase):
def test_basic(self):
carrier = util.ExtensionCarrier()
assert 'translate_row' not in carrier
assert carrier.translate_row() is interfaces.EXT_CONTINUE
assert 'translate_row' not in carrier
assert_raises(AttributeError, lambda: carrier.snickysnack)
class Partial(object):
def __init__(self, marker):
self.marker = marker
def translate_row(self, row):
return self.marker
carrier.append(Partial('end'))
assert 'translate_row' in carrier
assert carrier.translate_row(None) == 'end'
carrier.push(Partial('front'))
assert carrier.translate_row(None) == 'front'
assert 'populate_instance' not in carrier
carrier.append(interfaces.MapperExtension)
# Py3K
#assert 'populate_instance' not in carrier
# Py2K
assert 'populate_instance' in carrier
# end Py2K
assert carrier.interface
for m in carrier.interface:
assert getattr(interfaces.MapperExtension, m)
class AliasedClassTest(TestBase):
def point_map(self, cls):
table = Table('point', MetaData(),
Column('id', Integer(), primary_key=True),
Column('x', Integer),
Column('y', Integer))
mapper(cls, table)
return table
def test_simple(self):
class Point(object):
pass
table = self.point_map(Point)
alias = aliased(Point)
assert alias.id
assert alias.x
assert alias.y
assert Point.id.__clause_element__().table is table
assert alias.id.__clause_element__().table is not table
def test_notcallable(self):
class Point(object):
pass
table = self.point_map(Point)
alias = aliased(Point)
assert_raises(TypeError, alias)
def test_instancemethods(self):
class Point(object):
def zero(self):
self.x, self.y = 0, 0
table = self.point_map(Point)
alias = aliased(Point)
assert Point.zero
# Py2K
# TODO: what is this testing ??
assert not getattr(alias, 'zero')
# end Py2K
def test_classmethods(self):
class Point(object):
@classmethod
def max_x(cls):
return 100
table = self.point_map(Point)
alias = aliased(Point)
assert Point.max_x
assert alias.max_x
assert Point.max_x() == alias.max_x()
def test_simpleproperties(self):
class Point(object):
@property
def max_x(self):
return 100
table = self.point_map(Point)
alias = aliased(Point)
assert Point.max_x
assert Point.max_x != 100
assert alias.max_x
assert Point.max_x is alias.max_x
def test_descriptors(self):
class descriptor(object):
"""Tortured..."""
def __init__(self, fn):
self.fn = fn
def __get__(self, obj, owner):
if obj is not None:
return self.fn(obj, obj)
else:
return self
def method(self):
return 'method'
class Point(object):
center = (0, 0)
@descriptor
def thing(self, arg):
return arg.center
table = self.point_map(Point)
alias = aliased(Point)
assert Point.thing != (0, 0)
assert Point().thing == (0, 0)
assert Point.thing.method() == 'method'
assert alias.thing != (0, 0)
assert alias.thing.method() == 'method'
def test_hybrid_descriptors(self):
from sqlalchemy import Column # override testlib's override
import types
class MethodDescriptor(object):
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
# Py3K
#args = (self.func, owner)
# Py2K
args = (self.func, owner, owner.__class__)
# end Py2K
else:
# Py3K
#args = (self.func, instance)
# Py2K
args = (self.func, instance, owner)
# end Py2K
return types.MethodType(*args)
class PropertyDescriptor(object):
def __init__(self, fget, fset, fdel):
self.fget = fget
self.fset = fset
self.fdel = fdel
def __get__(self, instance, owner):
if instance is None:
return self.fget(owner)
else:
return self.fget(instance)
def __set__(self, instance, value):
self.fset(instance, value)
def __delete__(self, instance):
self.fdel(instance)
hybrid = MethodDescriptor
def hybrid_property(fget, fset=None, fdel=None):
return PropertyDescriptor(fget, fset, fdel)
def assert_table(expr, table):
for child in expr.get_children():
if isinstance(child, Column):
assert child.table is table
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
@hybrid
def left_of(self, other):
return self.x < other.x
double_x = hybrid_property(lambda self: self.x * 2)
table = self.point_map(Point)
alias = aliased(Point)
alias_table = alias.x.__clause_element__().table
assert table is not alias_table
p1 = Point(-10, -10)
p2 = Point(20, 20)
assert p1.left_of(p2)
assert p1.double_x == -20
assert_table(Point.double_x, table)
assert_table(alias.double_x, alias_table)
assert_table(Point.left_of(p2), table)
assert_table(alias.left_of(p2), alias_table)
class IdentityKeyTest(_fixtures.FixtureTest):
run_inserts = None
@testing.resolve_artifact_names
def test_identity_key_1(self):
mapper(User, users)
key = util.identity_key(User, 1)
eq_(key, (User, (1,)))
key = util.identity_key(User, ident=1)
eq_(key, (User, (1,)))
@testing.resolve_artifact_names
def test_identity_key_2(self):
mapper(User, users)
s = create_session()
u = User(name='u1')
s.add(u)
s.flush()
key = util.identity_key(instance=u)
eq_(key, (User, (u.id,)))
@testing.resolve_artifact_names
def test_identity_key_3(self):
mapper(User, users)
row = {users.c.id: 1, users.c.name: "Frank"}
key = util.identity_key(User, row=row)
eq_(key, (User, (1,)))
```
#### File: test/sql/test_defaults.py
```python
from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
import datetime
from sqlalchemy import Sequence, Column, func
from sqlalchemy.schema import CreateSequence, DropSequence
from sqlalchemy.sql import select, text
import sqlalchemy as sa
from sqlalchemy.test import testing, engines
from sqlalchemy import MetaData, Integer, String, ForeignKey, Boolean, exc
from sqlalchemy.test.schema import Table
from sqlalchemy.test.testing import eq_
from test.sql import _base
class DefaultTest(testing.TestBase):
@classmethod
def setup_class(cls):
global t, f, f2, ts, currenttime, metadata, default_generator
db = testing.db
metadata = MetaData(db)
default_generator = {'x':50}
def mydefault():
default_generator['x'] += 1
return default_generator['x']
def myupdate_with_ctx(ctx):
conn = ctx.connection
return conn.execute(sa.select([sa.text('13')])).scalar()
def mydefault_using_connection(ctx):
conn = ctx.connection
try:
return conn.execute(sa.select([sa.text('12')])).scalar()
finally:
# ensure a "close()" on this connection does nothing,
# since its a "branched" connection
conn.close()
use_function_defaults = testing.against('postgresql', 'mssql', 'maxdb')
is_oracle = testing.against('oracle')
# select "count(1)" returns different results on different DBs also
# correct for "current_date" compatible as column default, value
# differences
currenttime = func.current_date(type_=sa.Date, bind=db)
if is_oracle:
ts = db.scalar(sa.select([func.trunc(func.sysdate(), sa.literal_column("'DAY'"), type_=sa.Date).label('today')]))
assert isinstance(ts, datetime.date) and not isinstance(ts, datetime.datetime)
f = sa.select([func.length('abcdef')], bind=db).scalar()
f2 = sa.select([func.length('abcdefghijk')], bind=db).scalar()
            # TODO: engine propagation across nested functions not working
currenttime = func.trunc(currenttime, sa.literal_column("'DAY'"), bind=db, type_=sa.Date)
def1 = currenttime
def2 = func.trunc(sa.text("sysdate"), sa.literal_column("'DAY'"), type_=sa.Date)
deftype = sa.Date
elif use_function_defaults:
f = sa.select([func.length('abcdef')], bind=db).scalar()
f2 = sa.select([func.length('abcdefghijk')], bind=db).scalar()
def1 = currenttime
deftype = sa.Date
if testing.against('maxdb'):
def2 = sa.text("curdate")
elif testing.against('mssql'):
def2 = sa.text("getdate()")
else:
def2 = sa.text("current_date")
ts = db.func.current_date().scalar()
else:
f = len('abcdef')
f2 = len('abcdefghijk')
def1 = def2 = "3"
ts = 3
deftype = Integer
t = Table('default_test1', metadata,
# python function
Column('col1', Integer, primary_key=True,
default=mydefault),
# python literal
Column('col2', String(20),
default="imthedefault",
onupdate="im the update"),
# preexecute expression
Column('col3', Integer,
default=func.length('abcdef'),
onupdate=func.length('abcdefghijk')),
# SQL-side default from sql expression
Column('col4', deftype,
server_default=def1),
# SQL-side default from literal expression
Column('col5', deftype,
server_default=def2),
# preexecute + update timestamp
Column('col6', sa.Date,
default=currenttime,
onupdate=currenttime),
Column('boolcol1', sa.Boolean, default=True),
Column('boolcol2', sa.Boolean, default=False),
# python function which uses ExecutionContext
Column('col7', Integer,
default=mydefault_using_connection,
onupdate=myupdate_with_ctx),
# python builtin
Column('col8', sa.Date,
default=datetime.date.today,
onupdate=datetime.date.today),
# combo
Column('col9', String(20),
default='py',
server_default='ddl'))
t.create()
@classmethod
def teardown_class(cls):
t.drop()
def teardown(self):
default_generator['x'] = 50
t.delete().execute()
def test_bad_arg_signature(self):
ex_msg = \
"ColumnDefault Python function takes zero or one positional arguments"
def fn1(x, y): pass
def fn2(x, y, z=3): pass
class fn3(object):
def __init__(self, x, y):
pass
class FN4(object):
def __call__(self, x, y):
pass
fn4 = FN4()
for fn in fn1, fn2, fn3, fn4:
assert_raises_message(sa.exc.ArgumentError,
ex_msg,
sa.ColumnDefault, fn)
def test_arg_signature(self):
def fn1(): pass
def fn2(): pass
def fn3(x=1): pass
def fn4(x=1, y=2, z=3): pass
fn5 = list
class fn6(object):
def __init__(self, x):
pass
class fn6(object):
def __init__(self, x, y=3):
pass
class FN7(object):
def __call__(self, x):
pass
fn7 = FN7()
class FN8(object):
def __call__(self, x, y=3):
pass
fn8 = FN8()
for fn in fn1, fn2, fn3, fn4, fn5, fn6, fn7, fn8:
c = sa.ColumnDefault(fn)
@testing.fails_on('firebird', 'Data type unknown')
def test_standalone(self):
c = testing.db.engine.contextual_connect()
x = c.execute(t.c.col1.default)
y = t.c.col2.default.execute()
z = c.execute(t.c.col3.default)
assert 50 <= x <= 57
eq_(y, 'imthedefault')
eq_(z, f)
eq_(f2, 11)
def test_py_vs_server_default_detection(self):
def has_(name, *wanted):
slots = ['default', 'onupdate', 'server_default', 'server_onupdate']
col = tbl.c[name]
for slot in wanted:
slots.remove(slot)
assert getattr(col, slot) is not None, getattr(col, slot)
for slot in slots:
assert getattr(col, slot) is None, getattr(col, slot)
tbl = t
has_('col1', 'default')
has_('col2', 'default', 'onupdate')
has_('col3', 'default', 'onupdate')
has_('col4', 'server_default')
has_('col5', 'server_default')
has_('col6', 'default', 'onupdate')
has_('boolcol1', 'default')
has_('boolcol2', 'default')
has_('col7', 'default', 'onupdate')
has_('col8', 'default', 'onupdate')
has_('col9', 'default', 'server_default')
ColumnDefault, DefaultClause = sa.ColumnDefault, sa.DefaultClause
t2 = Table('t2', MetaData(),
Column('col1', Integer, Sequence('foo')),
Column('col2', Integer,
default=Sequence('foo'),
server_default='y'),
Column('col3', Integer,
Sequence('foo'),
server_default='x'),
Column('col4', Integer,
ColumnDefault('x'),
DefaultClause('y')),
Column('col4', Integer,
ColumnDefault('x'),
DefaultClause('y'),
DefaultClause('y', for_update=True)),
Column('col5', Integer,
ColumnDefault('x'),
DefaultClause('y'),
onupdate='z'),
Column('col6', Integer,
ColumnDefault('x'),
server_default='y',
onupdate='z'),
Column('col7', Integer,
default='x',
server_default='y',
onupdate='z'),
Column('col8', Integer,
server_onupdate='u',
default='x',
server_default='y',
onupdate='z'))
tbl = t2
has_('col1', 'default')
has_('col2', 'default', 'server_default')
has_('col3', 'default', 'server_default')
has_('col4', 'default', 'server_default', 'server_onupdate')
has_('col5', 'default', 'server_default', 'onupdate')
has_('col6', 'default', 'server_default', 'onupdate')
has_('col7', 'default', 'server_default', 'onupdate')
has_('col8', 'default', 'server_default', 'onupdate', 'server_onupdate')
@testing.fails_on('firebird', 'Data type unknown')
def test_insert(self):
r = t.insert().execute()
assert r.lastrow_has_defaults()
eq_(set(r.context.postfetch_cols),
set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]))
r = t.insert(inline=True).execute()
assert r.lastrow_has_defaults()
eq_(set(r.context.postfetch_cols),
set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]))
t.insert().execute()
ctexec = sa.select([currenttime.label('now')], bind=testing.db).scalar()
l = t.select().order_by(t.c.col1).execute()
today = datetime.date.today()
eq_(l.fetchall(), [
(x, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, 'py')
for x in range(51, 54)])
t.insert().execute(col9=None)
assert r.lastrow_has_defaults()
eq_(set(r.context.postfetch_cols),
set([t.c.col3, t.c.col5, t.c.col4, t.c.col6]))
eq_(t.select(t.c.col1==54).execute().fetchall(),
[(54, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, None)])
@testing.fails_on('firebird', 'Data type unknown')
def test_insertmany(self):
# MySQL-Python 1.2.2 breaks functions in execute_many :(
if (testing.against('mysql+mysqldb') and
testing.db.dialect.dbapi.version_info[:3] == (1, 2, 2)):
return
r = t.insert().execute({}, {}, {})
ctexec = currenttime.scalar()
l = t.select().execute()
today = datetime.date.today()
eq_(l.fetchall(),
[(51, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, 'py'),
(52, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, 'py'),
(53, 'imthedefault', f, ts, ts, ctexec, True, False,
12, today, 'py')])
def test_missing_many_param(self):
assert_raises_message(exc.InvalidRequestError,
"A value is required for bind parameter 'col7', in parameter group 1",
t.insert().execute,
{'col4':7, 'col7':12, 'col8':19},
{'col4':7, 'col8':19},
{'col4':7, 'col7':12, 'col8':19},
)
def test_insert_values(self):
t.insert(values={'col3':50}).execute()
l = t.select().execute()
eq_(50, l.first()['col3'])
@testing.fails_on('firebird', 'Data type unknown')
def test_updatemany(self):
# MySQL-Python 1.2.2 breaks functions in execute_many :(
if (testing.against('mysql+mysqldb') and
testing.db.dialect.dbapi.version_info[:3] == (1, 2, 2)):
return
t.insert().execute({}, {}, {})
t.update(t.c.col1==sa.bindparam('pkval')).execute(
{'pkval':51,'col7':None, 'col8':None, 'boolcol1':False})
t.update(t.c.col1==sa.bindparam('pkval')).execute(
{'pkval':51,},
{'pkval':52,},
{'pkval':53,})
l = t.select().execute()
ctexec = currenttime.scalar()
today = datetime.date.today()
eq_(l.fetchall(),
[(51, 'im the update', f2, ts, ts, ctexec, False, False,
13, today, 'py'),
(52, 'im the update', f2, ts, ts, ctexec, True, False,
13, today, 'py'),
(53, 'im the update', f2, ts, ts, ctexec, True, False,
13, today, 'py')])
@testing.fails_on('firebird', 'Data type unknown')
def test_update(self):
r = t.insert().execute()
pk = r.inserted_primary_key[0]
t.update(t.c.col1==pk).execute(col4=None, col5=None)
ctexec = currenttime.scalar()
l = t.select(t.c.col1==pk).execute()
l = l.first()
eq_(l,
(pk, 'im the update', f2, None, None, ctexec, True, False,
13, datetime.date.today(), 'py'))
eq_(11, f2)
@testing.fails_on('firebird', 'Data type unknown')
def test_update_values(self):
r = t.insert().execute()
pk = r.inserted_primary_key[0]
t.update(t.c.col1==pk, values={'col3': 55}).execute()
l = t.select(t.c.col1==pk).execute()
l = l.first()
eq_(55, l['col3'])
class PKDefaultTest(_base.TablesTest):
__requires__ = ('subqueries',)
@classmethod
def define_tables(cls, metadata):
t2 = Table('t2', metadata,
Column('nextid', Integer))
Table('t1', metadata,
Column('id', Integer, primary_key=True,
default=sa.select([func.max(t2.c.nextid)]).as_scalar()),
Column('data', String(30)))
@testing.requires.returning
def test_with_implicit_returning(self):
self._test(True)
def test_regular(self):
self._test(False)
@testing.resolve_artifact_names
def _test(self, returning):
if not returning and not testing.db.dialect.implicit_returning:
engine = testing.db
else:
engine = engines.testing_engine(options={'implicit_returning':returning})
engine.execute(t2.insert(), nextid=1)
r = engine.execute(t1.insert(), data='hi')
eq_([1], r.inserted_primary_key)
engine.execute(t2.insert(), nextid=2)
r = engine.execute(t1.insert(), data='there')
eq_([2], r.inserted_primary_key)
class PKIncrementTest(_base.TablesTest):
run_define_tables = 'each'
@classmethod
def define_tables(cls, metadata):
Table("aitable", metadata,
Column('id', Integer, Sequence('ai_id_seq', optional=True),
primary_key=True),
Column('int1', Integer),
Column('str1', String(20)))
# TODO: add coverage for increment on a secondary column in a key
@testing.fails_on('firebird', 'Data type unknown')
@testing.resolve_artifact_names
def _test_autoincrement(self, bind):
ids = set()
rs = bind.execute(aitable.insert(), int1=1)
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
rs = bind.execute(aitable.insert(), str1='row 2')
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
rs = bind.execute(aitable.insert(), int1=3, str1='row 3')
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
rs = bind.execute(aitable.insert(values={'int1':func.length('four')}))
last = rs.inserted_primary_key[0]
self.assert_(last)
self.assert_(last not in ids)
ids.add(last)
eq_(ids, set([1,2,3,4]))
eq_(list(bind.execute(aitable.select().order_by(aitable.c.id))),
[(1, 1, None), (2, None, 'row 2'), (3, 3, 'row 3'), (4, 4, None)])
@testing.resolve_artifact_names
def test_autoincrement_autocommit(self):
self._test_autoincrement(testing.db)
@testing.resolve_artifact_names
def test_autoincrement_transaction(self):
con = testing.db.connect()
tx = con.begin()
try:
try:
self._test_autoincrement(con)
except:
try:
tx.rollback()
except:
pass
raise
else:
tx.commit()
finally:
con.close()
class EmptyInsertTest(testing.TestBase):
@testing.exclude('sqlite', '<', (3, 3, 8), 'no empty insert support')
@testing.fails_on('oracle', 'FIXME: unknown')
def test_empty_insert(self):
metadata = MetaData(testing.db)
t1 = Table('t1', metadata,
Column('is_true', Boolean, server_default=('1')))
metadata.create_all()
try:
result = t1.insert().execute()
eq_(1, select([func.count(text('*'))], from_obj=t1).scalar())
eq_(True, t1.select().scalar())
finally:
metadata.drop_all()
class AutoIncrementTest(_base.TablesTest):
__requires__ = ('identity',)
run_define_tables = 'each'
@classmethod
def define_tables(cls, metadata):
"""Each test manipulates self.metadata individually."""
@testing.exclude('sqlite', '<', (3, 4), 'no database support')
def test_autoincrement_single_col(self):
single = Table('single', self.metadata,
Column('id', Integer, primary_key=True))
single.create()
r = single.insert().execute()
id_ = r.inserted_primary_key[0]
eq_(id_, 1)
eq_(1, sa.select([func.count(sa.text('*'))], from_obj=single).scalar())
def test_autoincrement_fk(self):
nodes = Table('nodes', self.metadata,
Column('id', Integer, primary_key=True),
Column('parent_id', Integer, ForeignKey('nodes.id')),
Column('data', String(30)))
nodes.create()
r = nodes.insert().execute(data='foo')
id_ = r.inserted_primary_key[0]
nodes.insert().execute(data='bar', parent_id=id_)
@testing.fails_on('sqlite', 'FIXME: unknown')
def test_non_autoincrement(self):
# sqlite INT primary keys can be non-unique! (only for ints)
nonai = Table("nonaitest", self.metadata,
Column('id', Integer, autoincrement=False, primary_key=True),
Column('data', String(20)))
nonai.create()
try:
# postgresql + mysql strict will fail on first row,
# mysql in legacy mode fails on second row
nonai.insert().execute(data='row 1')
nonai.insert().execute(data='row 2')
assert False
except sa.exc.SQLError, e:
assert True
nonai.insert().execute(id=1, data='row 1')
class SequenceTest(testing.TestBase, testing.AssertsCompiledSQL):
@classmethod
@testing.requires.sequences
def setup_class(cls):
global cartitems, sometable, metadata
metadata = MetaData(testing.db)
cartitems = Table("cartitems", metadata,
Column("cart_id", Integer, Sequence('cart_id_seq'), primary_key=True),
Column("description", String(40)),
Column("createdate", sa.DateTime())
)
sometable = Table( 'Manager', metadata,
Column('obj_id', Integer, Sequence('obj_id_seq'), ),
Column('name', String(128)),
Column('id', Integer, Sequence('Manager_id_seq', optional=True),
primary_key=True),
)
metadata.create_all()
def test_compile(self):
self.assert_compile(
CreateSequence(Sequence('foo_seq')),
"CREATE SEQUENCE foo_seq",
use_default_dialect=True,
)
self.assert_compile(
CreateSequence(Sequence('foo_seq', start=5)),
"CREATE SEQUENCE foo_seq START WITH 5",
use_default_dialect=True,
)
self.assert_compile(
CreateSequence(Sequence('foo_seq', increment=2)),
"CREATE SEQUENCE foo_seq INCREMENT BY 2",
use_default_dialect=True,
)
self.assert_compile(
CreateSequence(Sequence('foo_seq', increment=2, start=5)),
"CREATE SEQUENCE foo_seq INCREMENT BY 2 START WITH 5",
use_default_dialect=True,
)
self.assert_compile(
DropSequence(Sequence('foo_seq')),
"DROP SEQUENCE foo_seq",
use_default_dialect=True,
)
@testing.fails_on('firebird', 'no FB support for start/increment')
@testing.requires.sequences
def test_start_increment(self):
for seq in (
Sequence('foo_seq'),
Sequence('foo_seq', start=8),
Sequence('foo_seq', increment=5)):
seq.create(testing.db)
try:
values = [
testing.db.execute(seq) for i in range(3)
]
start = seq.start or 1
inc = seq.increment or 1
assert values == list(xrange(start, start + inc * 3, inc))
finally:
seq.drop(testing.db)
@testing.requires.sequences
def test_seq_nonpk(self):
"""test sequences fire off as defaults on non-pk columns"""
engine = engines.testing_engine(options={'implicit_returning':False})
result = engine.execute(sometable.insert(), name="somename")
assert set(result.postfetch_cols()) == set([sometable.c.obj_id])
result = engine.execute(sometable.insert(), name="someother")
assert set(result.postfetch_cols()) == set([sometable.c.obj_id])
sometable.insert().execute(
{'name':'name3'},
{'name':'name4'})
eq_(sometable.select().order_by(sometable.c.id).execute().fetchall(),
[(1, "somename", 1),
(2, "someother", 2),
(3, "name3", 3),
(4, "name4", 4)])
@testing.requires.sequences
def test_sequence(self):
cartitems.insert().execute(description='hi')
cartitems.insert().execute(description='there')
r = cartitems.insert().execute(description='lala')
assert r.inserted_primary_key and r.inserted_primary_key[0] is not None
id_ = r.inserted_primary_key[0]
eq_(1,
sa.select([func.count(cartitems.c.cart_id)],
sa.and_(cartitems.c.description == 'lala',
cartitems.c.cart_id == id_)).scalar())
cartitems.select().execute().fetchall()
@testing.fails_on('maxdb', 'FIXME: unknown')
# maxdb db-api seems to double-execute NEXTVAL internally somewhere,
# throwing off the numbers for these tests...
@testing.requires.sequences
def test_implicit_sequence_exec(self):
s = Sequence("my_sequence", metadata=MetaData(testing.db))
s.create()
try:
x = s.execute()
eq_(x, 1)
finally:
s.drop()
@testing.fails_on('maxdb', 'FIXME: unknown')
@testing.requires.sequences
def teststandalone_explicit(self):
s = Sequence("my_sequence")
s.create(bind=testing.db)
try:
x = s.execute(testing.db)
eq_(x, 1)
finally:
s.drop(testing.db)
@testing.requires.sequences
def test_checkfirst(self):
s = Sequence("my_sequence")
s.create(testing.db, checkfirst=False)
s.create(testing.db, checkfirst=True)
s.drop(testing.db, checkfirst=False)
s.drop(testing.db, checkfirst=True)
@testing.fails_on('maxdb', 'FIXME: unknown')
@testing.requires.sequences
def teststandalone2(self):
x = cartitems.c.cart_id.default.execute()
self.assert_(1 <= x <= 4)
@classmethod
@testing.requires.sequences
def teardown_class(cls):
metadata.drop_all()
```
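As a compact reference for the behaviours these tests exercise, a hedged sketch of the column-default slots on an illustrative table (not part of the test suite):
```python
from sqlalchemy import MetaData, Table, Column, Integer, String, func
meta = MetaData()
example = Table('example', meta,
    Column('id', Integer, primary_key=True),
    # Python-side defaults, evaluated by SQLAlchemy at INSERT / UPDATE time
    Column('created_by', String(20), default='py', onupdate='py-update'),
    # SQL-side default, rendered into the DDL and left to the database
    Column('status', String(20), server_default='new'),
    # SQL expression default, pre-executed or inlined at INSERT time
    Column('label_len', Integer, default=func.length('abcdef')))
```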
#### File: test/zblog/blog.py
```python
import datetime
__all__ = ['Blog', 'Post', 'Topic', 'TopicAssociation', 'Comment']
class Blog(object):
def __init__(self, owner=None):
self.owner = owner
class Post(object):
topics = set
def __init__(self, user=None, headline=None, summary=None):
self.user = user
self.datetime = datetime.datetime.today()
self.headline = headline
self.summary = summary
self.comments = []
self.comment_count = 0
class Topic(object):
def __init__(self, keyword=None, description=None):
self.keyword = keyword
self.description = description
class TopicAssociation(object):
def __init__(self, post=None, topic=None, is_primary=False):
self.post = post
self.topic = topic
self.is_primary = is_primary
class Comment(object):
def __init__(self, subject=None, body=None):
self.subject = subject
self.datetime = datetime.datetime.today()
self.body = body
```
#### File: test/zblog/test_zblog.py
```python
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.test import *
from test.zblog import mappers, tables
from test.zblog.user import *
from test.zblog.blog import *
class ZBlogTest(TestBase, AssertsExecutionResults):
@classmethod
def create_tables(cls):
tables.metadata.drop_all(bind=testing.db)
tables.metadata.create_all(bind=testing.db)
@classmethod
def drop_tables(cls):
tables.metadata.drop_all(bind=testing.db)
@classmethod
def setup_class(cls):
cls.create_tables()
@classmethod
def teardown_class(cls):
cls.drop_tables()
def teardown(self):
pass
def setup(self):
pass
class SavePostTest(ZBlogTest):
@classmethod
def setup_class(cls):
super(SavePostTest, cls).setup_class()
mappers.zblog_mappers()
global blog_id, user_id
s = create_session(bind=testing.db)
user = User('zbloguser', "Zblog User", "hello", group=administrator)
blog = Blog(owner=user)
blog.name = "this is a blog"
s.add(user)
s.add(blog)
s.flush()
blog_id = blog.id
user_id = user.id
s.close()
@classmethod
def teardown_class(cls):
clear_mappers()
super(SavePostTest, cls).teardown_class()
def testattach(self):
"""test that a transient/pending instance has proper bi-directional behavior.
this requires that lazy loaders do not fire off for a transient/pending instance."""
s = create_session(bind=testing.db)
s.begin()
try:
blog = s.query(Blog).get(blog_id)
post = Post(headline="asdf asdf", summary="asdfasfd")
s.add(post)
post.blog_id=blog_id
post.blog = blog
assert post in blog.posts
finally:
s.rollback()
def testoptimisticorphans(self):
"""test that instances in the session with un-loaded parents will not
get marked as "orphans" and then deleted """
s = create_session(bind=testing.db)
s.begin()
try:
blog = s.query(Blog).get(blog_id)
post = Post(headline="asdf asdf", summary="asdfasfd")
post.blog = blog
user = s.query(User).get(user_id)
post.user = user
s.add(post)
s.flush()
s.expunge_all()
user = s.query(User).get(user_id)
blog = s.query(Blog).get(blog_id)
post = blog.posts[0]
comment = Comment(subject="some subject", body="some body")
comment.post = post
comment.user = user
s.flush()
s.expunge_all()
assert s.query(Post).get(post.id) is not None
finally:
s.rollback()
```
#### File: translate/convert/accesskey.py
```python
from translate.storage.placeables.general import XMLEntityPlaceable
DEFAULT_ACCESSKEY_MARKER = u"&"
def extract(string, accesskey_marker=DEFAULT_ACCESSKEY_MARKER):
"""Extract the label and accesskey from a label+accesskey string
The function will also try to ignore &entities; which would obviously not
contain accesskeys.
@type string: Unicode
@param string: A string that might contain a label with accesskey marker
@type accesskey_marker: Char
@param accesskey_marker: The character that is used to prefix an access key
"""
assert isinstance(string, unicode)
assert isinstance(accesskey_marker, unicode)
assert len(accesskey_marker) == 1
if string == u"":
return u"", u""
accesskey = u""
label = string
marker_pos = 0
while marker_pos >= 0:
marker_pos = string.find(accesskey_marker, marker_pos)
if marker_pos != -1:
marker_pos += 1
if accesskey_marker == '&' and XMLEntityPlaceable.regex.match(string[marker_pos-1:]):
continue
label = string[:marker_pos-1] + string[marker_pos:]
accesskey = string[marker_pos]
break
return label, accesskey
def combine(label, accesskey,
accesskey_marker=DEFAULT_ACCESSKEY_MARKER):
"""Combine a label and and accesskey to form a label+accesskey string
We place an accesskey marker before the accesskey in the label and this creates a
string with the two combined e.g. "File" + "F" = "&File"
@type label: unicode
@param label: a label
@type accesskey: unicode char
@param accesskey: The accesskey
@rtype: unicode or None
    @return: label+accesskey string or None if uncombinable
"""
assert isinstance(label, unicode)
assert isinstance(accesskey, unicode)
if len(accesskey) == 0:
return None
searchpos = 0
accesskeypos = -1
in_entity = False
accesskeyaltcasepos = -1
while (accesskeypos < 0) and searchpos < len(label):
searchchar = label[searchpos]
if searchchar == '&':
in_entity = True
elif searchchar == ';':
in_entity = False
else:
if not in_entity:
if searchchar == accesskey.upper():
# always prefer uppercase
accesskeypos = searchpos
if searchchar == accesskey.lower():
# take lower case otherwise...
if accesskeyaltcasepos == -1:
# only want to remember first altcasepos
accesskeyaltcasepos = searchpos
# note: we keep on looping through in hope
# of exact match
searchpos += 1
# if we didn't find an exact case match, use an alternate one if available
if accesskeypos == -1:
accesskeypos = accesskeyaltcasepos
# now we want to handle whatever we found...
if accesskeypos >= 0:
string = label[:accesskeypos] + accesskey_marker + label[accesskeypos:]
string = string.encode("UTF-8", "replace")
return string
else:
# can't currently mix accesskey if it's not in label
return None
```
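
As a quick orientation for the two functions in `accesskey.py` above, here is a minimal usage sketch. It is not part of the toolkit itself; it simply assumes the `translate` package shown in this dump is importable under Python 2 (the `u""` literals match the module's `unicode` asserts):

```python
# -*- coding: utf-8 -*-
# Hypothetical usage sketch for translate/convert/accesskey.py (not part of the toolkit).
from translate.convert.accesskey import extract, combine

# "&File" uses the default "&" marker: the label is "File", the accesskey is "F".
label, accesskey = extract(u"&File")
assert (label, accesskey) == (u"File", u"F")

# combine() re-inserts the marker, preferring an upper-case match in the label,
# and returns the result encoded to UTF-8.
print(combine(u"File", u"F"))   # &File

# If the accesskey does not occur anywhere in the label it cannot be combined.
assert combine(u"File", u"z") is None
```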
#### File: translate/convert/convert.py
```python
import os.path
from translate.misc import optrecurse
# don't import optparse ourselves, get the version from optrecurse
optparse = optrecurse.optparse
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class ConvertOptionParser(optrecurse.RecursiveOptionParser, object):
"""a specialized Option Parser for convertor tools..."""
def __init__(self, formats, usetemplates=False, usepots=False, allowmissingtemplate=False, description=None):
"""construct the specialized Option Parser"""
optrecurse.RecursiveOptionParser.__init__(self, formats, usetemplates,
allowmissingtemplate=allowmissingtemplate, description=description)
self.usepots = usepots
self.setpotoption()
self.set_usage()
def add_fuzzy_option(self, default=False):
"""adds an option to include / exclude fuzzy translations"""
fuzzyhelp = "use translations marked fuzzy"
nofuzzyhelp = "don't use translations marked fuzzy"
if default:
fuzzyhelp += " (default)"
else:
nofuzzyhelp += " (default)"
self.add_option("", "--fuzzy", dest="includefuzzy", action="store_true", default=default, help=fuzzyhelp)
self.add_option("", "--nofuzzy", dest="includefuzzy", action="store_false", default=default, help=nofuzzyhelp)
self.passthrough.append("includefuzzy")
def add_duplicates_option(self, default="msgctxt"):
"""adds an option to say what to do with duplicate strings"""
self.add_option("", "--duplicates", dest="duplicatestyle", default=default,
type="choice", choices=["msgctxt", "merge"],
help="what to do with duplicate strings (identical source text): merge, msgctxt (default: '%s')" % default, metavar="DUPLICATESTYLE")
self.passthrough.append("duplicatestyle")
def add_multifile_option(self, default="single"):
"""adds an option to say how to split the po/pot files"""
self.add_option("", "--multifile", dest="multifilestyle", default=default,
type="choice", choices=["single", "toplevel", "onefile"],
help="how to split po/pot files (single, toplevel or onefile)", metavar="MULTIFILESTYLE")
self.passthrough.append("multifilestyle")
def potifyformat(self, fileformat):
"""converts a .po to a .pot where required"""
if fileformat is None:
return fileformat
elif fileformat == "po":
return "pot"
elif fileformat.endswith(os.extsep + "po"):
return fileformat + "t"
else:
return fileformat
def getformathelp(self, formats):
"""make a nice help string for describing formats..."""
# include implicit pot options...
helpformats = []
for fileformat in formats:
helpformats.append(fileformat)
potformat = self.potifyformat(fileformat)
if potformat != fileformat:
helpformats.append(potformat)
return super(ConvertOptionParser, self).getformathelp(helpformats)
def filterinputformats(self, options):
"""filters input formats, processing relevant switches in options"""
if self.usepots and options.pot:
return [self.potifyformat(inputformat) for inputformat in self.inputformats]
else:
return self.inputformats
def filteroutputoptions(self, options):
"""filters output options, processing relevant switches in options"""
if self.usepots and options.pot:
outputoptions = {}
for (inputformat, templateformat), (outputformat, convertor) in self.outputoptions.iteritems():
inputformat = self.potifyformat(inputformat)
templateformat = self.potifyformat(templateformat)
outputformat = self.potifyformat(outputformat)
outputoptions[(inputformat, templateformat)] = (outputformat, convertor)
return outputoptions
else:
return self.outputoptions
def setpotoption(self):
"""sets the -P/--pot option depending on input/output formats etc"""
if self.usepots:
potoption = optparse.Option("-P", "--pot", \
action="store_true", dest="pot", default=False, \
help="output PO Templates (.pot) rather than PO files (.po)")
self.define_option(potoption)
def verifyoptions(self, options):
"""verifies that the options are valid (required options are present, etc)"""
pass
def run(self, argv=None):
"""parses the command line options and runs the conversion"""
(options, args) = self.parse_args(argv)
options.inputformats = self.filterinputformats(options)
options.outputoptions = self.filteroutputoptions(options)
self.usepsyco(options)
self.verifyoptions(options)
self.recursiveprocess(options)
def copyinput(inputfile, outputfile, templatefile, **kwargs):
"""copies the input file to the output file"""
outputfile.write(inputfile.read())
return True
def copytemplate(inputfile, outputfile, templatefile, **kwargs):
"""copies the template file to the output file"""
outputfile.write(templatefile.read())
return True
class Replacer:
"""an object that knows how to replace strings in files"""
def __init__(self, searchstring, replacestring):
self.searchstring = searchstring
self.replacestring = replacestring
def doreplace(self, text):
"""actually replace the text"""
if self.searchstring is not None and self.replacestring is not None:
return text.replace(self.searchstring, self.replacestring)
else:
return text
def searchreplaceinput(self, inputfile, outputfile, templatefile, **kwargs):
"""copies the input file to the output file, searching and replacing"""
outputfile.write(self.doreplace(inputfile.read()))
return True
def searchreplacetemplate(self, inputfile, outputfile, templatefile, **kwargs):
"""copies the template file to the output file, searching and replacing"""
outputfile.write(self.doreplace(templatefile.read()))
return True
# archive files need to know how to:
# - openarchive: creates an archive object for the archivefilename
# * requires a constructor that takes the filename
# - iterarchivefile: iterate through the names in the archivefile
# * requires the default iterator to do this
# - archivefileexists: check if a given pathname exists inside the archivefile
# * uses the in operator - requires __contains__ (or will use __iter__ by default)
# - openarchiveinputfile: returns an open input file from the archive, given the path
# * requires an archivefile.openinputfile method that takes the pathname
# - openarchiveoutputfile: returns an open output file from the archive, given the path
# * requires an archivefile.openoutputfile method that takes the pathname
class ArchiveConvertOptionParser(ConvertOptionParser):
"""ConvertOptionParser that can handle recursing into single archive files.
    archiveformats maps extension to class. If the extension doesn't matter, it can be None.
    If the extension is only valid for input/output/template, it can be given as (extension, filepurpose)."""
def __init__(self, formats, usetemplates=False, usepots=False, description=None, archiveformats=None):
if archiveformats is None:
self.archiveformats = {}
else:
self.archiveformats = archiveformats
self.archiveoptions = {}
ConvertOptionParser.__init__(self, formats, usetemplates, usepots, description=description)
def setarchiveoptions(self, **kwargs):
"""allows setting options that will always be passed to openarchive"""
self.archiveoptions = kwargs
def isrecursive(self, fileoption, filepurpose='input'):
"""checks if fileoption is a recursive file"""
if self.isarchive(fileoption, filepurpose): return True
return super(ArchiveConvertOptionParser, self).isrecursive(fileoption, filepurpose)
def isarchive(self, fileoption, filepurpose='input'):
"""returns whether the file option is an archive file"""
if not isinstance(fileoption, (str, unicode)):
return False
mustexist = (filepurpose != 'output')
if mustexist and not os.path.isfile(fileoption):
return False
fileext = self.splitext(fileoption)[1]
# if None is in the archive formats, then treat all non-directory inputs as archives
return self.getarchiveclass(fileext, filepurpose, os.path.isdir(fileoption)) is not None
def getarchiveclass(self, fileext, filepurpose, isdir=False):
"""returns the archiveclass for the given fileext and filepurpose"""
archiveclass = self.archiveformats.get(fileext, None)
if archiveclass is not None:
return archiveclass
archiveclass = self.archiveformats.get((fileext, filepurpose), None)
if archiveclass is not None:
return archiveclass
if not isdir:
archiveclass = self.archiveformats.get(None, None)
if archiveclass is not None:
return archiveclass
archiveclass = self.archiveformats.get((None, filepurpose), None)
if archiveclass is not None:
return archiveclass
return None
def openarchive(self, archivefilename, filepurpose, **kwargs):
"""creates an archive object for the given file"""
archiveext = self.splitext(archivefilename)[1]
archiveclass = self.getarchiveclass(archiveext, filepurpose, os.path.isdir(archivefilename))
archiveoptions = self.archiveoptions.copy()
archiveoptions.update(kwargs)
return archiveclass(archivefilename, **archiveoptions)
def recurseinputfiles(self, options):
"""recurse through archive file / directories and return files to be converted"""
if self.isarchive(options.input, 'input'):
options.inputarchive = self.openarchive(options.input, 'input')
return self.recursearchivefiles(options)
else:
return super(ArchiveConvertOptionParser, self).recurseinputfiles(options)
def recursearchivefiles(self, options):
"""recurse through archive files and convert files"""
inputfiles = []
for inputpath in options.inputarchive:
if self.isexcluded(options, inputpath):
continue
top, name = os.path.split(inputpath)
if not self.isvalidinputname(options, name):
continue
inputfiles.append(inputpath)
return inputfiles
def openinputfile(self, options, fullinputpath):
"""opens the input file"""
if self.isarchive(options.input, 'input'):
return options.inputarchive.openinputfile(fullinputpath)
else:
return super(ArchiveConvertOptionParser, self).openinputfile(options, fullinputpath)
def getfullinputpath(self, options, inputpath):
"""gets the absolute path to an input file"""
if self.isarchive(options.input, 'input'):
return inputpath
else:
return os.path.join(options.input, inputpath)
def opentemplatefile(self, options, fulltemplatepath):
"""opens the template file (if required)"""
if fulltemplatepath is not None:
if options.recursivetemplate and self.isarchive(options.template, 'template'):
# TODO: deal with different names in input/template archives
if fulltemplatepath in options.templatearchive:
return options.templatearchive.openinputfile(fulltemplatepath)
else:
self.warning("missing template file %s" % fulltemplatepath)
return super(ArchiveConvertOptionParser, self).opentemplatefile(options, fulltemplatepath)
def getfulltemplatepath(self, options, templatepath):
"""gets the absolute path to a template file"""
if templatepath is not None and self.usetemplates and options.template:
if self.isarchive(options.template, 'template'):
return templatepath
elif not options.recursivetemplate:
return templatepath
else:
return os.path.join(options.template, templatepath)
else:
return None
def templateexists(self, options, templatepath):
"""returns whether the given template exists..."""
if templatepath is not None:
if self.isarchive(options.template, 'template'):
# TODO: deal with different names in input/template archives
return templatepath in options.templatearchive
return super(ArchiveConvertOptionParser, self).templateexists(options, templatepath)
def getfulloutputpath(self, options, outputpath):
"""gets the absolute path to an output file"""
if self.isarchive(options.output, 'output'):
return outputpath
elif options.recursiveoutput and options.output:
return os.path.join(options.output, outputpath)
else:
return outputpath
def checkoutputsubdir(self, options, subdir):
"""checks to see if subdir under options.output needs to be created, creates if neccessary"""
if not self.isarchive(options.output, 'output'):
super(ArchiveConvertOptionParser, self).checkoutputsubdir(options, subdir)
def openoutputfile(self, options, fulloutputpath):
"""opens the output file"""
if self.isarchive(options.output, 'output'):
outputstream = options.outputarchive.openoutputfile(fulloutputpath)
if outputstream is None:
self.warning("Could not find where to put %s in output archive; writing to tmp" % fulloutputpath)
return StringIO()
return outputstream
else:
return super(ArchiveConvertOptionParser, self).openoutputfile(options, fulloutputpath)
def inittemplatearchive(self, options):
"""opens the templatearchive if not already open"""
if not self.usetemplates:
return
if options.template and self.isarchive(options.template, 'template') and not hasattr(options, "templatearchive"):
options.templatearchive = self.openarchive(options.template, 'template')
def initoutputarchive(self, options):
"""creates an outputarchive if required"""
if options.output and self.isarchive(options.output, 'output'):
options.outputarchive = self.openarchive(options.output, 'output', mode="w")
def recursiveprocess(self, options):
"""recurse through directories and convert files"""
if hasattr(options, "multifilestyle"):
self.setarchiveoptions(multifilestyle=options.multifilestyle)
for filetype in ("input", "output", "template"):
allowoption = "allowrecursive%s" % filetype
if options.multifilestyle == "onefile" and getattr(options, allowoption, True):
setattr(options, allowoption, False)
self.inittemplatearchive(options)
self.initoutputarchive(options)
return super(ArchiveConvertOptionParser, self).recursiveprocess(options)
def processfile(self, fileprocessor, options, fullinputpath, fulloutputpath, fulltemplatepath):
"""run an invidividual conversion"""
if self.isarchive(options.output, 'output'):
inputfile = self.openinputfile(options, fullinputpath)
# TODO: handle writing back to same archive as input/template
templatefile = self.opentemplatefile(options, fulltemplatepath)
outputfile = self.openoutputfile(options, fulloutputpath)
passthroughoptions = self.getpassthroughoptions(options)
if fileprocessor(inputfile, outputfile, templatefile, **passthroughoptions):
if not outputfile.isatty():
outputfile.close()
return True
else:
if fulloutputpath and os.path.isfile(fulloutputpath):
outputfile.close()
os.unlink(fulloutputpath)
return False
else:
return super(ArchiveConvertOptionParser, self).processfile(fileprocessor, options, fullinputpath, fulloutputpath, fulltemplatepath)
def main(argv=None):
parser = ArchiveConvertOptionParser({}, description=__doc__)
parser.run(argv)
```
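
The comment block in `convert.py` above describes, informally, what an "archive" object registered in `archiveformats` must provide: a constructor taking the filename, iteration over member names, membership testing, and `openinputfile`/`openoutputfile` methods. The class below is a hypothetical, minimal directory-backed implementation written only against that description; the name `DirArchive` and the registration line are invented for illustration and are not the toolkit's own archive support:

```python
import os

class DirArchive(object):
    """Hypothetical minimal 'archive' backed by a plain directory.

    It implements just what the archive-file comments in convert.py ask for:
    construction from a filename, iteration over member names, membership
    testing, and openinputfile/openoutputfile.
    """

    def __init__(self, dirname, **kwargs):
        self.dirname = dirname

    def __iter__(self):
        # iterate through the member names as paths relative to the archive root
        for top, dirs, files in os.walk(self.dirname):
            for name in files:
                yield os.path.relpath(os.path.join(top, name), self.dirname)

    def __contains__(self, pathname):
        return os.path.isfile(os.path.join(self.dirname, pathname))

    def openinputfile(self, pathname):
        return open(os.path.join(self.dirname, pathname), "rb")

    def openoutputfile(self, pathname):
        fullpath = os.path.join(self.dirname, pathname)
        subdir = os.path.dirname(fullpath)
        if subdir and not os.path.isdir(subdir):
            os.makedirs(subdir)
        return open(fullpath, "wb")

# Such a class would be registered per extension, or under None to match any input:
#     parser = ArchiveConvertOptionParser(formats, archiveformats={None: DirArchive})
```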
#### File: translate/convert/po2rc.py
```python
from translate.storage import po
from translate.storage import rc
class rerc:
def __init__(self, templatefile, charset="utf-8", lang=None, sublang=None):
self.templatefile = templatefile
self.templatestore = rc.rcfile(templatefile)
self.inputdict = {}
self.charset = charset
self.lang = lang
self.sublang = sublang
def convertstore(self, inputstore, includefuzzy=False):
self.makestoredict(inputstore, includefuzzy)
outputblocks = []
for block in self.templatestore.blocks:
outputblocks.append(self.convertblock(block))
if self.charset == "utf-8":
outputblocks.insert(0, "#pragma code_page(65001)\n")
outputblocks.append("#pragma code_page(default)")
return outputblocks
def makestoredict(self, store, includefuzzy=False):
""" make a dictionary of the translations"""
for unit in store.units:
if includefuzzy or not unit.isfuzzy():
for location in unit.getlocations():
rcstring = unit.target
if len(rcstring.strip()) == 0:
rcstring = unit.source
self.inputdict[location] = rc.escape_to_rc(rcstring).encode(self.charset)
def convertblock(self, block):
newblock = block
if isinstance(newblock, unicode):
newblock = newblock.encode('utf-8')
if newblock.startswith("LANGUAGE"):
return "LANGUAGE %s, %s" % (self.lang, self.sublang)
for unit in self.templatestore.units:
location = unit.getlocations()[0]
            if location in self.inputdict:
if self.inputdict[location] != unit.match.groupdict()['value']:
newmatch = unit.match.group().replace(unit.match.groupdict()['value'], self.inputdict[location])
newblock = newblock.replace(unit.match.group(), newmatch)
if isinstance(newblock, unicode):
newblock = newblock.encode(self.charset)
return newblock
def convertrc(inputfile, outputfile, templatefile, includefuzzy=False, charset=None, lang=None, sublang=None):
inputstore = po.pofile(inputfile)
if not lang:
raise ValueError("must specify a target language")
if templatefile is None:
raise ValueError("must have template file for rc files")
# convertor = po2rc()
else:
convertor = rerc(templatefile, charset, lang, sublang)
outputrclines = convertor.convertstore(inputstore, includefuzzy)
outputfile.writelines(outputrclines)
return 1
def main(argv=None):
# handle command line options
from translate.convert import convert
formats = {("po", "rc"): ("rc", convertrc)}
parser = convert.ConvertOptionParser(formats, usetemplates=True, description=__doc__)
defaultcharset = "utf-8"
parser.add_option("", "--charset", dest="charset", default=defaultcharset,
help="charset to use to decode the RC files (default: %s)" % defaultcharset, metavar="CHARSET")
parser.add_option("-l", "--lang", dest="lang", default=None,
help="LANG entry", metavar="LANG")
defaultsublang="SUBLANG_DEFAULT"
parser.add_option("", "--sublang", dest="sublang", default=defaultsublang,
help="SUBLANG entry (default: %s)" % defaultsublang, metavar="SUBLANG")
parser.passthrough.append("charset")
parser.passthrough.append("lang")
parser.passthrough.append("sublang")
parser.add_fuzzy_option()
parser.run(argv)
if __name__ == '__main__':
main()
```
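
For reference, this is how `convertrc` above could be called directly rather than through `main`; the file names are hypothetical, and `charset` has to be passed explicitly here because only the command-line entry point defaults it to utf-8:

```python
# Hypothetical direct invocation of po2rc.convertrc (file names invented for illustration).
from translate.convert import po2rc

with open("strings.po") as inputfile, \
        open("template.rc") as templatefile, \
        open("strings_af.rc", "w") as outputfile:
    po2rc.convertrc(inputfile, outputfile, templatefile,
                    charset="utf-8", lang="LANG_AFRIKAANS")
```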
#### File: translate/convert/poreplace.py
```python
from translate.storage import po
class poreplace:
def convertstring(self, postr):
"""does the conversion required on the given string (nothing in this case)"""
return postr
def convertfile(self, thepofile):
"""goes through a po file and converts each element"""
for thepo in thepofile.units:
thepo.msgstr = [self.convertstring(postr) for postr in thepo.msgstr]
return thepofile
def convertpo(self, inputfile, outputfile, templatefile):
"""reads in inputfile using po, converts using poreplace, writes to outputfile"""
# note that templatefile is not used, but it is required by the converter...
inputstore = po.pofile(inputfile)
if inputstore.isempty():
return 0
outputstore = self.convertfile(inputstore)
if outputstore.isempty():
return 0
outputfile.write(str(outputstore))
return 1
def main(converterclass, argv=None):
# handle command line options
from translate.convert import convert
replacer = converterclass()
formats = {"po":("po", replacer.convertpo), "pot":("pot", replacer.convertpo)}
parser = convert.ConvertOptionParser(formats, usepots=True)
parser.run(argv)
if __name__ == '__main__':
main(poreplace)
```
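
`poreplace.convertstring` is intentionally a no-op: the class is meant to be subclassed and the subclass handed to `main`. A hypothetical subclass that actually rewrites translations might look like this (the three-dots-to-ellipsis replacement is invented purely as an example):

```python
# Hypothetical poreplace subclass (illustration only, not shipped with the toolkit).
from translate.convert.poreplace import poreplace, main

class dotreplace(poreplace):
    """Replace three consecutive full stops in translations with a real ellipsis."""

    def convertstring(self, postr):
        return postr.replace("...", u"\u2026")

if __name__ == '__main__':
    main(dotreplace)
```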
#### File: translate/convert/prop2po.py
```python
import sys
from translate.storage import po
from translate.storage import properties
class prop2po:
"""convert a .properties file to a .po file for handling the translation..."""
def convertstore(self, thepropfile, personality="java", duplicatestyle="msgctxt"):
"""converts a .properties file to a .po file..."""
self.personality = personality
thetargetfile = po.pofile()
if self.personality == "mozilla" or self.personality == "skype":
targetheader = thetargetfile.init_headers(charset="UTF-8", encoding="8bit", x_accelerator_marker="&")
else:
targetheader = thetargetfile.init_headers(charset="UTF-8", encoding="8bit")
targetheader.addnote("extracted from %s" % thepropfile.filename, "developer")
# we try and merge the header po with any comments at the start of the properties file
appendedheader = False
waitingcomments = []
for propunit in thepropfile.units:
pounit = self.convertunit(propunit, "developer")
if pounit is None:
waitingcomments.extend(propunit.comments)
# FIXME the storage class should not be creating blank units
            if pounit == "discard":
continue
if not appendedheader:
if propunit.isblank():
targetheader.addnote("\n".join(waitingcomments).rstrip(), "developer", position="prepend")
waitingcomments = []
pounit = None
appendedheader = True
if pounit is not None:
pounit.addnote("\n".join(waitingcomments).rstrip(), "developer", position="prepend")
waitingcomments = []
thetargetfile.addunit(pounit)
thetargetfile.removeduplicates(duplicatestyle)
return thetargetfile
def mergestore(self, origpropfile, translatedpropfile, personality="java", blankmsgstr=False, duplicatestyle="msgctxt"):
"""converts two .properties files to a .po file..."""
self.personality = personality
thetargetfile = po.pofile()
if self.personality == "mozilla" or self.personality == "skype":
targetheader = thetargetfile.init_headers(charset="UTF-8", encoding="8bit", x_accelerator_marker="&")
else:
targetheader = thetargetfile.init_headers(charset="UTF-8", encoding="8bit")
targetheader.addnote("extracted from %s, %s" % (origpropfile.filename, translatedpropfile.filename), "developer")
translatedpropfile.makeindex()
# we try and merge the header po with any comments at the start of the properties file
appendedheader = False
waitingcomments = []
# loop through the original file, looking at units one by one
for origprop in origpropfile.units:
origpo = self.convertunit(origprop, "developer")
if origpo is None:
waitingcomments.extend(origprop.comments)
# FIXME the storage class should not be creating blank units
            if origpo == "discard":
continue
# handle the header case specially...
if not appendedheader:
if origprop.isblank():
targetheader.addnote(u"".join(waitingcomments).rstrip(), "developer", position="prepend")
waitingcomments = []
origpo = None
appendedheader = True
# try and find a translation of the same name...
if origprop.name in translatedpropfile.locationindex:
translatedprop = translatedpropfile.locationindex[origprop.name]
# Need to check that this comment is not a copy of the developer comments
translatedpo = self.convertunit(translatedprop, "translator")
                if translatedpo == "discard":
continue
else:
translatedpo = None
# if we have a valid po unit, get the translation and add it...
if origpo is not None:
if translatedpo is not None and not blankmsgstr:
origpo.target = translatedpo.source
origpo.addnote(u"".join(waitingcomments).rstrip(), "developer", position="prepend")
waitingcomments = []
thetargetfile.addunit(origpo)
elif translatedpo is not None:
print >> sys.stderr, "error converting original properties definition %s" % origprop.name
thetargetfile.removeduplicates(duplicatestyle)
return thetargetfile
def convertunit(self, propunit, commenttype):
"""Converts a .properties unit to a .po unit. Returns None if empty
or not for translation."""
if propunit is None:
return None
# escape unicode
pounit = po.pounit(encoding="UTF-8")
if hasattr(propunit, "comments"):
for comment in propunit.comments:
if "DONT_TRANSLATE" in comment:
return "discard"
pounit.addnote(u"".join(propunit.getnotes()).rstrip(), commenttype)
# TODO: handle multiline msgid
if propunit.isblank():
return None
pounit.addlocation(propunit.name)
pounit.source = propunit.source
pounit.target = u""
return pounit
def convertmozillaprop(inputfile, outputfile, templatefile, pot=False, duplicatestyle="msgctxt"):
"""Mozilla specific convertor function"""
return convertprop(inputfile, outputfile, templatefile, personality="mozilla", pot=pot, duplicatestyle=duplicatestyle)
def convertprop(inputfile, outputfile, templatefile, personality="java", pot=False, duplicatestyle="msgctxt"):
"""reads in inputfile using properties, converts using prop2po, writes to outputfile"""
inputstore = properties.propfile(inputfile, personality)
convertor = prop2po()
if templatefile is None:
outputstore = convertor.convertstore(inputstore, personality, duplicatestyle=duplicatestyle)
else:
templatestore = properties.propfile(templatefile, personality)
outputstore = convertor.mergestore(templatestore, inputstore, personality, blankmsgstr=pot, duplicatestyle=duplicatestyle)
if outputstore.isempty():
return 0
outputfile.write(str(outputstore))
return 1
def main(argv=None):
from translate.convert import convert
formats = {"properties": ("po", convertprop),
("properties", "properties"): ("po", convertprop),
"lang": ("po", convertprop),
("lang", "lang"): ("po", convertprop),}
parser = convert.ConvertOptionParser(formats, usetemplates=True, usepots=True, description=__doc__)
parser.add_option("", "--personality", dest="personality", default="java", type="choice",
choices=["java", "mozilla", "skype"],
help="set the input behaviour: java (default), mozilla, skype", metavar="TYPE")
parser.add_duplicates_option()
parser.passthrough.append("pot")
parser.passthrough.append("personality")
parser.run(argv)
if __name__ == '__main__':
main()
```
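
A small in-memory round trip through `convertprop` above, following the same `wStringIO` pattern the test modules further down use; this is an untested sketch with made-up property content, and it assumes `properties.propfile` accepts a file-like object the way the other storage classes do:

```python
# Hypothetical in-memory use of prop2po.convertprop (illustration only).
from translate.misc import wStringIO
from translate.convert import prop2po

inputfile = wStringIO.StringIO("greeting=Hello\nfarewell=Goodbye\n")
outputfile = wStringIO.StringIO()
prop2po.convertprop(inputfile, outputfile, templatefile=None)

posource = outputfile.getvalue()
assert 'msgid "Hello"' in posource
assert 'msgid "Goodbye"' in posource
```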
#### File: translate/convert/test_html2po.py
```python
from translate.convert import html2po
from translate.convert import po2html
from translate.convert import test_convert
from translate.misc import wStringIO
class TestHTML2PO:
def html2po(self, markup, includeuntagged=False, duplicatestyle="msgctxt", keepcomments=False):
"""Helper to convert html to po without a file."""
inputfile = wStringIO.StringIO(markup)
convertor = html2po.html2po()
outputpo = convertor.convertfile(inputfile, "test", False, includeuntagged, duplicatestyle, keepcomments)
return outputpo
def po2html(self, posource, htmltemplate):
"""Helper to convert po to html without a file."""
inputfile = wStringIO.StringIO(posource)
outputfile = wStringIO.StringIO()
templatefile = wStringIO.StringIO(htmltemplate)
assert po2html.converthtml(inputfile, outputfile, templatefile)
return outputfile.getvalue()
def countunits(self, pofile, expected):
"""helper to check that we got the expected number of messages"""
actual = len(pofile.units)
if actual > 0:
if pofile.units[0].isheader():
actual = actual - 1
print pofile
assert actual == expected
def compareunit(self, pofile, unitnumber, expected):
"""helper to validate a PO message"""
if not pofile.units[0].isheader():
unitnumber = unitnumber - 1
print 'unit source: ' + pofile.units[unitnumber].source.encode('utf-8') + '|'
print 'expected: ' + expected.encode('utf-8') + '|'
assert unicode(pofile.units[unitnumber].source) == unicode(expected)
def check_single(self, markup, itemtext):
"""checks that converting this markup produces a single element with value itemtext"""
pofile = self.html2po(markup)
self.countunits(pofile, 1)
self.compareunit(pofile, 1, itemtext)
def check_null(self, markup):
"""checks that converting this markup produces no elements"""
pofile = self.html2po(markup)
self.countunits(pofile, 0)
def check_phpsnippet(self, php):
"""Given a snippet of php, put it into an HTML shell and see
if the results are as expected"""
self.check_single('<html><head></head><body><p><a href="'+php+'/site.html">Body text</a></p></body></html>', "Body text")
self.check_single('<html><head></head><body><p>More things in <a href="'+php+'/site.html">Body text</a></p></body></html>', 'More things in <a href="'+php+'/site.html">Body text</a>')
self.check_null('<html><head></head><body><p>'+php+'</p></body></html>')
def test_htmllang(self):
"""test to ensure that we no longer use the lang attribure"""
markup = '''<html lang="en"><head><title>My title</title></head><body></body></html>'''
pofile = self.html2po(markup)
self.countunits(pofile, 1)
# Check that the first item is the <title> not <head>
self.compareunit(pofile, 1, "My title")
def test_title(self):
"""test that we can extract the <title> tag"""
self.check_single("<html><head><title>My title</title></head><body></body></html>", "My title")
def test_title_with_linebreak(self):
"""Test a linebreak in the <title> tag"""
htmltext = '''<html>
<head>
<title>My
title</title>
</head>
<body>
</body>
</html>
'''
self.check_single(htmltext, "My title")
def test_meta(self):
"""Test that we can extract certain <meta> info from <head>."""
self.check_single('''<html><head><meta name="keywords" content="these are keywords"></head><body></body></html>''', "these are keywords")
def test_tag_p(self):
"""test that we can extract the <p> tag"""
self.check_single("<html><head></head><body><p>A paragraph.</p></body></html>", "A paragraph.")
markup = "<p>First line.<br>Second line.</p>"
pofile = self.html2po(markup)
self.compareunit(pofile, 1, "First line.<br>Second line.")
def test_tag_p_with_linebreak(self):
"""Test newlines within the <p> tag."""
htmltext = '''<html>
<head>
</head>
<body>
<p>
A paragraph is a section in a piece of writing, usually highlighting a
particular point or topic. It always begins on a new line and usually
with indentation, and it consists of at least one sentence.
</p>
</body>
</html>
'''
self.check_single(htmltext, "A paragraph is a section in a piece of writing, usually highlighting a particular point or topic. It always begins on a new line and usually with indentation, and it consists of at least one sentence.")
markup = "<p>First\nline.<br>Second\nline.</p>"
pofile = self.html2po(markup)
self.compareunit(pofile, 1, "First line.<br>Second line.")
def test_tag_div(self):
"""test that we can extract the <div> tag"""
self.check_single("<html><head></head><body><div>A paragraph.</div></body></html>", "A paragraph.")
markup = "<div>First line.<br>Second line.</div>"
pofile = self.html2po(markup)
self.compareunit(pofile, 1, "First line.<br>Second line.")
def test_tag_div_with_linebreaks(self):
"""Test linebreaks within a <div> tag."""
htmltext = '''<html>
<head>
</head>
<body>
<div>
A paragraph is a section in a piece of writing, usually highlighting a
particular point or topic. It always begins on a new line and usually
with indentation, and it consists of at least one sentence.
</div>
</body>
</html>
'''
self.check_single(htmltext, "A paragraph is a section in a piece of writing, usually highlighting a particular point or topic. It always begins on a new line and usually with indentation, and it consists of at least one sentence.")
markup = "<div>First\nline.<br>Second\nline.</div>"
pofile = self.html2po(markup)
self.compareunit(pofile, 1, "First line.<br>Second line.")
def test_tag_a(self):
"""test that we can extract the <a> tag"""
self.check_single('<html><head></head><body><p>A paragraph with <a href="http://translate.org.za/">hyperlink</a>.</p></body></html>', 'A paragraph with <a href="http://translate.org.za/">hyperlink</a>.')
def test_tag_a_with_linebreak(self):
"""Test that we can extract the <a> tag with newlines in it."""
htmltext = '''<html>
<head>
</head>
<body>
<p>A
paragraph
with <a
href="http://translate.org.za/">hyperlink</a>
and
newlines.</p></body></html>
'''
self.check_single(htmltext, 'A paragraph with <a href="http://translate.org.za/">hyperlink</a> and newlines.')
def test_tag_img(self):
"""Test that we can extract the alt attribute from the <img> tag."""
self.check_single('''<html><head></head><body><img src="picture.png" alt="A picture"></body></html>''', "A picture")
def test_img_empty(self):
"""Test that we can extract the alt attribute from the <img> tag."""
htmlsource = '''<html><head></head><body><img src="images/topbar.jpg" width="750" height="80"></body></html>'''
self.check_null(htmlsource)
def test_tag_table_summary(self):
"""Test that we can extract the summary attribute."""
self.check_single('''<html><head></head><body><table summary="Table summary"></table></body></html>''', "Table summary")
def test_table_simple(self):
"""Test that we can fully extract a simple table."""
markup = '''<html><head></head><body><table><tr><th>Heading One</th><th>Heading Two</th><tr><td>One</td><td>Two</td></tr></table></body></html>'''
pofile = self.html2po(markup)
self.countunits(pofile, 4)
self.compareunit(pofile, 1, "Heading One")
self.compareunit(pofile, 2, "Heading Two")
self.compareunit(pofile, 3, "One")
self.compareunit(pofile, 4, "Two")
def test_table_complex(self):
markup = '''<table summary="This is the summary"><caption>A caption</caption><thead><tr><th abbr="Head 1">Heading One</th><th>Heading Two</th></thead><tfoot><tr><td>Foot One</td><td>Foot Two</td></tr></tfoot><tbody><tr><td>One</td><td>Two</td></tr></tbody></table>'''
pofile = self.html2po(markup)
self.countunits(pofile, 9)
self.compareunit(pofile, 1, "This is the summary")
self.compareunit(pofile, 2, "A caption")
self.compareunit(pofile, 3, "Head 1")
self.compareunit(pofile, 4, "Heading One")
self.compareunit(pofile, 5, "Heading Two")
self.compareunit(pofile, 6, "Foot One")
self.compareunit(pofile, 7, "Foot Two")
self.compareunit(pofile, 8, "One")
self.compareunit(pofile, 9, "Two")
def test_table_empty(self):
"""Test that we ignore tables that are empty.
A table is deemed empty if it has no translatable content.
"""
self.check_null('''<html><head></head><body><table><tr><td><img src="bob.png"></td></tr></table></body></html>''')
self.check_null('''<html><head></head><body><table><tr><td> </td></tr></table></body></html>''')
self.check_null('''<html><head></head><body><table><tr><td><strong></strong></td></tr></table></body></html>''')
def test_address(self):
"""Test to see if the address element is extracted"""
self.check_single("<body><address>My address</address></body>", "My address")
def test_headings(self):
"""Test to see if the h* elements are extracted"""
markup = "<html><head></head><body><h1>Heading One</h1><h2>Heading Two</h2><h3>Heading Three</h3><h4>Heading Four</h4><h5>Heading Five</h5><h6>Heading Six</h6></body></html>"
pofile = self.html2po(markup)
self.countunits(pofile, 6)
self.compareunit(pofile, 1, "Heading One")
self.compareunit(pofile, 2, "Heading Two")
self.compareunit(pofile, 3, "Heading Three")
self.compareunit(pofile, 4, "Heading Four")
self.compareunit(pofile, 5, "Heading Five")
self.compareunit(pofile, 6, "Heading Six")
def test_headings_with_linebreaks(self):
"""Test to see if h* elements with newlines can be extracted"""
markup = "<html><head></head><body><h1>Heading\nOne</h1><h2>Heading\nTwo</h2><h3>Heading\nThree</h3><h4>Heading\nFour</h4><h5>Heading\nFive</h5><h6>Heading\nSix</h6></body></html>"
pofile = self.html2po(markup)
self.countunits(pofile, 6)
self.compareunit(pofile, 1, "Heading One")
self.compareunit(pofile, 2, "Heading Two")
self.compareunit(pofile, 3, "Heading Three")
self.compareunit(pofile, 4, "Heading Four")
self.compareunit(pofile, 5, "Heading Five")
self.compareunit(pofile, 6, "Heading Six")
def test_dt(self):
"""Test to see if the definition list title (dt) element is extracted"""
self.check_single("<html><head></head><body><dl><dt>Definition List Item Title</dt></dl></body></html>", "Definition List Item Title")
def test_dd(self):
"""Test to see if the definition list description (dd) element is extracted"""
self.check_single("<html><head></head><body><dl><dd>Definition List Item Description</dd></dl></body></html>", "Definition List Item Description")
def test_span(self):
"""test to check that we don't double extract a span item"""
self.check_single("<html><head></head><body><p>You are a <span>Spanish</span> sentence.</p></body></html>", "You are a <span>Spanish</span> sentence.")
def test_ul(self):
"""Test to see if the list item <li> is exracted"""
markup = "<html><head></head><body><ul><li>Unordered One</li><li>Unordered Two</li></ul><ol><li>Ordered One</li><li>Ordered Two</li></ol></body></html>"
pofile = self.html2po(markup)
self.countunits(pofile, 4)
self.compareunit(pofile, 1, "Unordered One")
self.compareunit(pofile, 2, "Unordered Two")
self.compareunit(pofile, 3, "Ordered One")
self.compareunit(pofile, 4, "Ordered Two")
def test_duplicates(self):
"""check that we use the default style of msgctxt to disambiguate duplicate messages"""
markup = "<html><head></head><body><p>Duplicate</p><p>Duplicate</p></body></html>"
pofile = self.html2po(markup)
self.countunits(pofile, 2)
# FIXME change this so that we check that the msgctxt is correctly added
self.compareunit(pofile, 1, "Duplicate")
self.compareunit(pofile, 2, "Duplicate")
def wtest_multiline_reflow(self):
"""check that we reflow multiline content to make it more readable for translators"""
self.check_single('''<td valign="middle" width="96%"><font class="headingwhite">South
Africa</font></td>''', '''<font class="headingwhite">South Africa</font>''')
def wtest_nested_tags(self):
"""check that we can extract items within nested tags"""
markup = "<div><p>Extract this</p>And this</div>"
pofile = self.html2po(markup)
self.countunits(pofile, 2)
self.compareunit(pofile, 1, "Extract this")
self.compareunit(pofile, 2, "And this")
def test_carriage_return(self):
"""Remove carriage returns from files in dos format."""
htmlsource = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">\r
<html><!-- InstanceBegin template="/Templates/masterpage.dwt" codeOutsideHTMLIsLocked="false" -->\r
<head>\r
<!-- InstanceBeginEditable name="doctitle" -->\r
<link href="fmfi.css" rel="stylesheet" type="text/css">\r
</head>\r
\r
<body>\r
<p>The rapid expansion of telecommunications infrastructure in recent\r
years has helped to bridge the digital divide to a limited extent.</p> \r
</body>\r
<!-- InstanceEnd --></html>\r
'''
self.check_single(htmlsource, 'The rapid expansion of telecommunications infrastructure in recent years has helped to bridge the digital divide to a limited extent.')
def test_encoding_latin1(self):
"""Convert HTML input in iso-8859-1 correctly to unicode."""
htmlsource = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><!-- InstanceBegin template="/Templates/masterpage.dwt" codeOutsideHTMLIsLocked="false" -->
<head>
<!-- InstanceBeginEditable name="doctitle" -->
<title>FMFI - South Africa - CSIR Openphone - Overview</title>
<!-- InstanceEndEditable -->
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<meta name="keywords" content="fmfi, first mile, first inch, wireless, rural development, access devices, mobile devices, wifi, connectivity, rural connectivty, ict, low cost, cheap, digital divide, csir, idrc, community">
<!-- InstanceBeginEditable name="head" -->
<!-- InstanceEndEditable -->
<link href="../../../fmfi.css" rel="stylesheet" type="text/css">
</head>
<body>
<p>We aim to please \x96 will you aim too, please?</p>
<p>South Africa\x92s language diversity can be challenging.</p>
</body>
</html>
'''
pofile = self.html2po(htmlsource)
self.countunits(pofile, 4)
self.compareunit(pofile, 3, u'We aim to please \x96 will you aim too, please?')
self.compareunit(pofile, 4, u'South Africa\x92s language diversity can be challenging.')
def test_strip_html(self):
"""Ensure that unnecessary html is stripped from the resulting unit."""
htmlsource = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>FMFI - Contact</title>
</head>
<body>
<table width="100%" border="0" cellpadding="0" cellspacing="0">
<tr align="left" valign="top">
<td width="150" height="556">
<table width="157" height="100%" border="0" cellspacing="0" id="leftmenubg-color">
<tr>
<td align="left" valign="top" height="555">
<table width="100%" border="0" cellspacing="0" cellpadding="2">
<tr align="left" valign="top" bgcolor="#660000">
<td width="4%"><strong></strong></td>
<td width="96%"><strong><font class="headingwhite">Projects</font></strong></td>
</tr>
<tr align="left" valign="top">
<td valign="middle" width="4%"><img src="images/arrow.gif" width="8" height="8"></td>
<td width="96%"><a href="index.html">Home Page</a></td>
</tr>
</table>
</td>
</tr>
</table></td>
</table>
</body>
</html>
'''
pofile = self.html2po(htmlsource)
self.countunits(pofile, 3)
self.compareunit(pofile, 2, u'Projects')
self.compareunit(pofile, 3, u'Home Page')
# Translate and convert back:
pofile.units[2].target = 'Projekte'
pofile.units[3].target = 'Tuisblad'
htmlresult = self.po2html(str(pofile), htmlsource).replace('\n', ' ').replace('= "', '="').replace('> <', '><')
snippet = '<td width="96%"><strong><font class="headingwhite">Projekte</font></strong></td>'
assert snippet in htmlresult
snippet = '<td width="96%"><a href="index.html">Tuisblad</a></td>'
assert snippet in htmlresult
def test_php(self):
"""Test that PHP snippets don't interfere"""
# A simple string
self.check_phpsnippet('''<?=$phpvariable?>''')
        # Contains HTML tag characters (< and >)
self.check_phpsnippet('''<?=($a < $b ? $foo : ($b > c ? $bar : $cat))?>''')
# Make sure basically any symbol can be handled
self.check_phpsnippet(''' <? asdfghjkl qwertyuiop 1234567890!@#$%^&*()-=_+[]\{}|;':",./<>? ?> ''')
def test_multiple_php(self):
"""Test multiple PHP snippets in a string to make sure they get restored properly"""
php1 = '''<?=$phpvariable?>'''
php2 = '''<?=($a < $b ? $foo : ($b > c ? $bar : $cat))?>'''
php3 = '''<? asdfghjklqwertyuiop1234567890!@#$%^&*()-=_+[]\{}|;':",./<>? ?>'''
# Put 3 different strings into an html string
innertext = '<a href="'+php1+'/site.html">Body text</a> and some '+php2+' more text '+php2+php3
htmlsource = '<html><head></head><body><p>'+innertext+'</p></body></html>'
self.check_single(htmlsource, innertext)
def test_php_multiline(self):
# A multi-line php string to test
php1 = '''<? abc
def
ghi ?>'''
# Scatter the php strings throughout the file, and show what the translation should be
innertext = '<a href="'+php1+'/site.html">Body text</a> and some '+php1+' more text '+php1+php1
innertrans = '<a href="'+php1+'/site.html">Texte de corps</a> et encore de '+php1+' plus de texte '+php1+php1
htmlsource = '<html><head></head><body><p>'+innertext+'</p></body></html>' # Current html file
transsource = '<html><head></head><body><p>'+innertrans+'</p></body></html>' # Expected translation
pofile = self.html2po(htmlsource)
pofile.units[1].target = innertrans # Register the translation in the PO file
htmlresult = self.po2html(pofile, htmlsource)
assert htmlresult == transsource
def test_comments(self):
"""Test that HTML comments are converted to translator notes in output"""
pofile = self.html2po('<!-- comment outside block --><p><!-- a comment -->A paragraph<!-- with another comment -->.</p>', keepcomments=True)
self.compareunit(pofile, 1, 'A paragraph.')
notes = pofile.getunits()[0].getnotes()
assert unicode(notes) == ' a comment \n with another comment '
class TestHTML2POCommand(test_convert.TestConvertCommand, TestHTML2PO):
"""Tests running actual html2po commands on files"""
convertmodule = html2po
defaultoptions = {"progress": "none"}
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "-P, --pot")
options = self.help_check(options, "--duplicates=DUPLICATESTYLE")
options = self.help_check(options, "--keepcomments")
options = self.help_check(options, "-u, --untagged", last=True)
```
#### File: translate/convert/test_po2prop.py
```python
from translate.convert import po2prop
from translate.convert import test_convert
from translate.misc import wStringIO
from translate.storage import po
class TestPO2Prop:
def po2prop(self, posource):
"""helper that converts po source to .properties source without requiring files"""
inputfile = wStringIO.StringIO(posource)
inputpo = po.pofile(inputfile)
convertor = po2prop.po2prop()
outputprop = convertor.convertstore(inputpo)
return outputprop
def merge2prop(self, propsource, posource, personality="java"):
"""helper that merges po translations to .properties source without requiring files"""
inputfile = wStringIO.StringIO(posource)
inputpo = po.pofile(inputfile)
templatefile = wStringIO.StringIO(propsource)
#templateprop = properties.propfile(templatefile)
convertor = po2prop.reprop(templatefile)
outputprop = convertor.convertstore(inputpo, personality=personality)
print outputprop
return outputprop
def test_merging_simple(self):
"""check the simplest case of merging a translation"""
posource = '''#: prop\nmsgid "value"\nmsgstr "waarde"\n'''
proptemplate = '''prop=value\n'''
propexpected = '''prop=waarde\n'''
propfile = self.merge2prop(proptemplate, posource)
print propfile
assert propfile == [propexpected]
def test_hard_newlines_preserved(self):
"""check that we preserver hard coded newlines at the start and end of sentence"""
posource = '''#: prop\nmsgid "\\nvalue\\n\\n"\nmsgstr "\\nwaarde\\n\\n"\n'''
proptemplate = '''prop=\\nvalue\\n\\n\n'''
propexpected = '''prop=\\nwaarde\\n\\n\n'''
propfile = self.merge2prop(proptemplate, posource)
print propfile
assert propfile == [propexpected]
def test_space_preservation(self):
"""check that we preserve any spacing in properties files when merging"""
posource = '''#: prop\nmsgid "value"\nmsgstr "waarde"\n'''
proptemplate = '''prop = value\n'''
propexpected = '''prop = waarde\n'''
propfile = self.merge2prop(proptemplate, posource)
print propfile
assert propfile == [propexpected]
def test_merging_blank_entries(self):
"""check that we can correctly merge entries that are blank in the template"""
posource = r'''#: accesskey-accept
msgid ""
"_: accesskey-accept\n"
""
msgstr ""'''
proptemplate = 'accesskey-accept=\n'
propexpected = 'accesskey-accept=\n'
propfile = self.merge2prop(proptemplate, posource)
print propfile
assert propfile == [propexpected]
def test_merging_fuzzy(self):
"""check merging a fuzzy translation"""
posource = '''#: prop\n#, fuzzy\nmsgid "value"\nmsgstr "waarde"\n'''
proptemplate = '''prop=value\n'''
propexpected = '''prop=value\n'''
propfile = self.merge2prop(proptemplate, posource)
print propfile
assert propfile == [propexpected]
def test_merging_propertyless_template(self):
"""check that when merging with a template with no property values that we copy the template"""
posource = ""
proptemplate = "# A comment\n"
propexpected = proptemplate
propfile = self.merge2prop(proptemplate, posource)
print propfile
assert propfile == [propexpected]
def test_personalities(self):
"""test that we output correctly for Java and Mozilla style property files. Mozilla uses Unicode, while Java uses escaped Unicode"""
posource = '''#: prop\nmsgid "value"\nmsgstr "ṽḁḽṻḝ"\n'''
proptemplate = '''prop = value\n'''
propexpectedjava = '''prop = \\u1E7D\\u1E01\\u1E3D\\u1E7B\\u1E1D\n'''
propfile = self.merge2prop(proptemplate, posource)
print propfile
assert propfile == [propexpectedjava]
propexpectedmozilla = '''prop = ṽḁḽṻḝ\n'''
propfile = self.merge2prop(proptemplate, posource, personality="mozilla")
print propfile
assert propfile == [propexpectedmozilla]
propexpectedskype = '''prop = ṽḁḽṻḝ\n'''
propfile = self.merge2prop(proptemplate, posource, personality="skype")
print propfile
assert propfile == [propexpectedskype]
class TestPO2PropCommand(test_convert.TestConvertCommand, TestPO2Prop):
"""Tests running actual po2prop commands on files"""
convertmodule = po2prop
defaultoptions = {"progress": "none"}
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "-t TEMPLATE, --template=TEMPLATE")
options = self.help_check(options, "--fuzzy")
options = self.help_check(options, "--personality=TYPE")
options = self.help_check(options, "--nofuzzy", last=True)
```
#### File: translate/convert/test_po2tmx.py
```python
from translate.convert import po2tmx
from translate.convert import test_convert
from translate.misc import wStringIO
from translate.storage import tmx
from translate.storage import lisa
class TestPO2TMX:
def po2tmx(self, posource, sourcelanguage='en', targetlanguage='af'):
"""helper that converts po source to tmx source without requiring files"""
inputfile = wStringIO.StringIO(posource)
outputfile = wStringIO.StringIO()
outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage)
po2tmx.convertpo(inputfile, outputfile, templatefile=None, sourcelanguage=sourcelanguage, targetlanguage=targetlanguage)
return outputfile.tmxfile
def test_basic(self):
minipo = r"""# Afrikaans translation of program ABC
#
msgid ""
msgstr ""
"Project-Id-Version: program 2.1-branch\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2006-01-09 07:15+0100\n"
"PO-Revision-Date: 2004-03-30 17:02+0200\n"
"Last-Translator: Zuza Software Foundation <<EMAIL>>\n"
"Language-Team: Afrikaans <<EMAIL>>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
# Please remember to do something
#: ../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4
msgid "Applications"
msgstr "Toepassings"
"""
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate("Applications") == "Toepassings"
assert tmx.translate("bla") is None
xmltext = str(tmx)
assert xmltext.index('creationtool="Translate Toolkit - po2tmx"')
assert xmltext.index('adminlang')
assert xmltext.index('creationtoolversion')
assert xmltext.index('datatype')
assert xmltext.index('o-tmf')
assert xmltext.index('segtype')
assert xmltext.index('srclang')
def test_sourcelanguage(self):
minipo = 'msgid "String"\nmsgstr "String"\n'
tmx = self.po2tmx(minipo, sourcelanguage="xh")
print "The generated xml:"
print str(tmx)
header = tmx.document.find("header")
assert header.get("srclang") == "xh"
def test_targetlanguage(self):
minipo = 'msgid "String"\nmsgstr "String"\n'
tmx = self.po2tmx(minipo, targetlanguage="xh")
print "The generated xml:"
print str(tmx)
tuv = tmx.document.findall(".//%s" % tmx.namespaced("tuv"))[1]
#tag[0] will be the source, we want the target tuv
assert tuv.get("{%s}lang" % lisa.XML_NS) == "xh"
def test_multiline(self):
"""Test multiline po entry"""
minipo = r'''msgid "First part "
"and extra"
msgstr "Eerste deel "
"en ekstra"'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate('First part and extra') == 'Eerste deel en ekstra'
def test_escapednewlines(self):
"""Test the escaping of newlines"""
minipo = r'''msgid "First line\nSecond line"
msgstr "Eerste lyn\nTweede lyn"
'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate("First line\nSecond line") == "Eerste lyn\nTweede lyn"
def test_escapedtabs(self):
"""Test the escaping of tabs"""
minipo = r'''msgid "First column\tSecond column"
msgstr "Eerste kolom\tTweede kolom"
'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate("First column\tSecond column") == "Eerste kolom\tTweede kolom"
def test_escapedquotes(self):
"""Test the escaping of quotes (and slash)"""
minipo = r'''msgid "Hello \"Everyone\""
msgstr "Good day \"All\""
msgid "Use \\\"."
msgstr "Gebruik \\\"."
'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate('Hello "Everyone"') == 'Good day "All"'
assert tmx.translate(r'Use \".') == r'Gebruik \".'
def test_exclusions(self):
"""Test that empty and fuzzy messages are excluded"""
minipo = r'''#, fuzzy
msgid "One"
msgstr "Een"
msgid "Two"
msgstr ""
msgid ""
msgstr "Drie"
'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert "<tu" not in str(tmx)
assert len(tmx.units) == 0
def test_nonascii(self):
"""Tests that non-ascii conversion works."""
minipo = r'''msgid "Bézier curve"
msgstr "Bézier-kurwe"
'''
tmx = self.po2tmx(minipo)
print str(tmx)
assert tmx.translate(u"Bézier curve") == u"Bézier-kurwe"
class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX):
"""Tests running actual po2tmx commands on files"""
convertmodule = po2tmx
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "-l LANG, --language=LANG")
options = self.help_check(options, "--source-language=LANG", last=True)
```
#### File: translate/convert/test_tiki2po.py
```python
from translate.convert import tiki2po
from translate.storage import tiki
from translate.convert import test_convert
from translate.misc import wStringIO
class TestTiki2Po:
def test_converttiki_defaults(self):
inputfile = """
"zero_source" => "zero_target",
// ### Start of unused words
"one_source" => "one_target",
// ### end of unused words
"""
outputfile = wStringIO.StringIO()
tiki2po.converttiki(inputfile, outputfile)
output = outputfile.getvalue()
assert '#: translated' in output
assert 'msgid "zero_source"' in output
assert "one_source" not in output
def test_converttiki_includeunused(self):
inputfile = """
"zero_source" => "zero_target",
// ### Start of unused words
"one_source" => "one_target",
// ### end of unused words
"""
outputfile = wStringIO.StringIO()
tiki2po.converttiki(inputfile, outputfile, includeunused=True)
output = outputfile.getvalue()
assert '#: translated' in output
assert 'msgid "zero_source"' in output
assert '#: unused' in output
assert 'msgid "one_source"' in output
class TestTiki2PoCommand(test_convert.TestConvertCommand, TestTiki2Po):
"""Tests running actual tiki2po commands on files"""
convertmodule = tiki2po
defaultoptions = {}
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "--include-unused")
```
#### File: translate/convert/txt2po.py
```python
from translate.storage import txt
from translate.storage import po
class txt2po:
def __init__(self, duplicatestyle="msgctxt"):
self.duplicatestyle = duplicatestyle
def convertstore(self, thetxtfile):
"""converts a file to .po format"""
thetargetfile = po.pofile()
targetheader = thetargetfile.init_headers(charset="UTF-8", encoding="8bit")
targetheader.addnote("extracted from %s" % thetxtfile.filename, "developer")
for txtunit in thetxtfile.units:
newunit = thetargetfile.addsourceunit(txtunit.source)
newunit.addlocations(txtunit.getlocations())
thetargetfile.removeduplicates(self.duplicatestyle)
return thetargetfile
def converttxt(inputfile, outputfile, templates, duplicatestyle="msgctxt", encoding="utf-8", flavour=None):
"""reads in stdin using fromfileclass, converts using convertorclass, writes to stdout"""
inputstore = txt.TxtFile(inputfile, encoding=encoding, flavour=flavour)
convertor = txt2po(duplicatestyle=duplicatestyle)
outputstore = convertor.convertstore(inputstore)
if outputstore.isempty():
return 0
outputfile.write(str(outputstore))
return 1
def main(argv=None):
from translate.convert import convert
from translate.misc import stdiotell
import sys
sys.stdout = stdiotell.StdIOWrapper(sys.stdout)
formats = {"txt":("po", converttxt), "*":("po", converttxt)}
parser = convert.ConvertOptionParser(formats, usepots=True, description=__doc__)
parser.add_option("", "--encoding", dest="encoding", default='utf-8', type="string",
help="The encoding of the input file (default: UTF-8)")
parser.passthrough.append("encoding")
parser.add_option("", "--flavour", dest="flavour", default="plain",
type="choice", choices=["plain", "dokuwiki", "mediawiki"],
help="The flavour of text file: plain (default), dokuwiki, mediawiki",
metavar="FLAVOUR")
parser.passthrough.append("flavour")
parser.add_duplicates_option()
parser.run(argv)
```
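For reference, a minimal usage sketch of `converttxt()` above (Python 2, assuming the surrounding translate toolkit modules are importable; the sample text is made up):

```python
from translate.convert import txt2po
from translate.misc import wStringIO

# Two plain-text paragraphs become two PO units.
inputfile = wStringIO.StringIO("First paragraph.\n\nSecond paragraph.\n")
outputfile = wStringIO.StringIO()
txt2po.converttxt(inputfile, outputfile, templates=None)
print outputfile.getvalue()
```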
#### File: translate/lang/af.py
```python
from translate.lang import common
import re
articlere = re.compile(r"'n\b")
class af(common.Common):
"""This class represents Afrikaans."""
punctuation = u"".join([common.Common.commonpunc, common.Common.quotes, common.Common.miscpunc])
sentenceend = u".!?…"
sentencere = re.compile(r"""(?s) #make . also match newlines
.*? #anything, but match non-greedy
[%s] #the punctuation for sentence ending
\s+ #the spacing after the punctuation
(?='n\s[A-Z]|[^'a-z\d]|'[^n])
#lookahead that next part starts with caps or 'n followed by caps
""" % sentenceend, re.VERBOSE)
def capsstart(cls, text):
"""Modify this for the indefinite article ('n)."""
match = articlere.search(text, 0, 20)
if match:
#construct a list of non-apostrophe punctuation:
nonapos = u"".join(cls.punctuation.split(u"'"))
stripped = text.lstrip().lstrip(nonapos)
match = articlere.match(stripped)
if match:
return common.Common.capsstart(stripped[match.end():])
return common.Common.capsstart(text)
capsstart = classmethod(capsstart)
```
#### File: translate/lang/data.py
```python
import unicodedata
from translate.storage.placeables import StringElem
languages = {
'af': (u'Afrikaans', 2, '(n != 1)'),
'ak': (u'Akan', 2, 'n > 1'),
'am': (u'Amharic', 2, 'n > 1'),
'ar': (u'Arabic', 6, 'n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 && n%100<=10 ? 3 : n%100>=11 ? 4 : 5'),
'arn': (u'Mapudungun; Mapuche', 2, 'n > 1'),
'ast': (u'Asturian; Bable; Leonese; Asturleonese', 2, 'n != 1'),
'az': (u'Azerbaijani', 2, '(n != 1)'),
'be': (u'Belarusian', 3, 'n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2'),
'bg': (u'Bulgarian', 2, '(n != 1)'),
'bn': (u'Bengali', 2, '(n != 1)'),
'bn_IN': (u'Bengali (India)', 2, '(n != 1)'),
'bo': (u'Tibetan', 1, '0'),
'br': (u'Breton', 2, 'n > 1'),
'bs': (u'Bosnian', 3, 'n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2'),
'ca': (u'Catalan; Valencian', 2, '(n != 1)'),
'cs': (u'Czech', 3, '(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2'),
'csb': (u'Kashubian', 3, 'n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2'),
'cy': (u'Welsh', 2, '(n==2) ? 1 : 0'),
'da': (u'Danish', 2, '(n != 1)'),
'de': (u'German', 2, '(n != 1)'),
'dz': (u'Dzongkha', 1, '0'),
'el': (u'Greek, Modern (1453-)', 2, '(n != 1)'),
'en': (u'English', 2, '(n != 1)'),
'en_GB': (u'English (United Kingdom)', 2, '(n != 1)'),
'en_ZA': (u'English (South Africa)', 2, '(n != 1)'),
'eo': (u'Esperanto', 2, '(n != 1)'),
'es': (u'Spanish; Castilian', 2, '(n != 1)'),
'et': (u'Estonian', 2, '(n != 1)'),
'eu': (u'Basque', 2, '(n != 1)'),
'fa': (u'Persian', 1, '0'),
'fi': (u'Finnish', 2, '(n != 1)'),
'fil': (u'Filipino; Pilipino', 2, '(n > 1)'),
'fo': (u'Faroese', 2, '(n != 1)'),
'fr': (u'French', 2, '(n > 1)'),
'fur': (u'Friulian', 2, '(n != 1)'),
'fy': (u'Frisian', 2, '(n != 1)'),
'ga': (u'Irish', 3, 'n==1 ? 0 : n==2 ? 1 : 2'),
'gl': (u'Galician', 2, '(n != 1)'),
'gu': (u'Gujarati', 2, '(n != 1)'),
'gun': (u'Gun', 2, '(n > 1)'),
'ha': (u'Hausa', 2, '(n != 1)'),
'he': (u'Hebrew', 2, '(n != 1)'),
'hi': (u'Hindi', 2, '(n != 1)'),
'hy': (u'Armenian', 1, '0'),
'hr': (u'Croatian', 3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'hu': (u'Hungarian', 2, '(n != 1)'),
'id': (u'Indonesian', 1, '0'),
'is': (u'Icelandic', 2, '(n != 1)'),
'it': (u'Italian', 2, '(n != 1)'),
'ja': (u'Japanese', 1, '0'),
'jv': (u'Javanese', 2, '(n != 1)'),
'ka': (u'Georgian', 1, '0'),
'km': (u'Central Khmer', 1, '0'),
'kn': (u'Kannada', 2, '(n != 1)'),
'ko': (u'Korean', 1, '0'),
'ku': (u'Kurdish', 2, '(n != 1)'),
'kw': (u'Cornish', 4, '(n==1) ? 0 : (n==2) ? 1 : (n == 3) ? 2 : 3'),
'ky': (u'Kirghiz; Kyrgyz', 1, '0'),
'lb': (u'Luxembourgish; Letzeburgesch', 2, '(n != 1)'),
'ln': (u'Lingala', 2, '(n > 1)'),
'lo': (u'Lao', 1, '0'),
'lt': (u'Lithuanian', 3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'lv': (u'Latvian', 3, '(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)'),
'mg': (u'Malagasy', 2, '(n > 1)'),
'mi': (u'Maori', 2, '(n > 1)'),
'mk': (u'Macedonian', 2, 'n==1 || n%10==1 ? 0 : 1'),
'ml': (u'Malayalam', 2, '(n != 1)'),
'mn': (u'Mongolian', 2, '(n != 1)'),
'mr': (u'Marathi', 2, '(n != 1)'),
'ms': (u'Malay', 1, '0'),
'mt': (u'Maltese', 4, '(n==1 ? 0 : n==0 || ( n%100>1 && n%100<11) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3)'),
'nah': (u'Nahuatl languages', 2, '(n != 1)'),
'nap': (u'Neapolitan', 2, '(n != 1)'),
'nb': (u'Bokmål, Norwegian; Norwegian Bokmål', 2, '(n != 1)'),
'ne': (u'Nepali', 2, '(n != 1)'),
'nl': (u'Dutch; Flemish', 2, '(n != 1)'),
'nn': (u'Norwegian Nynorsk; Nynorsk, Norwegian', 2, '(n != 1)'),
'nso': (u'Pedi; Sepedi; Northern Sotho', 2, '(n > 1)'),
'oc': (u'Occitan (post 1500)', 2, '(n > 1)'),
'or': (u'Oriya', 2, '(n != 1)'),
'pa': (u'Panjabi; Punjabi', 2, '(n != 1)'),
'pap': (u'Papiamento', 2, '(n != 1)'),
'pl': (u'Polish', 3, '(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'pms': (u'Piemontese', 2, '(n != 1)'),
'ps': (u'Pushto; Pashto', 2, '(n != 1)'),
'pt': (u'Portuguese', 2, '(n != 1)'),
'pt_BR': (u'Portuguese (Brazil)', 2, '(n > 1)'),
'ro': (u'Romanian', 3, '(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2);'),
'ru': (u'Russian', 3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'sco': (u'Scots', 2, '(n != 1)'),
'sk': (u'Slovak', 3, '(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2'),
'sl': (u'Slovenian', 4, '(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'),
'so': (u'Somali', 2, '(n != 1)'),
'sq': (u'Albanian', 2, '(n != 1)'),
'sr': (u'Serbian', 3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'st': (u'Sotho, Southern', 2, '(n != 1)'),
'su': (u'Sundanese', 1, '0'),
'sv': (u'Swedish', 2, '(n != 1)'),
'sw': (u'Swahili', 2, '(n != 1)'),
'ta': (u'Tamil', 2, '(n != 1)'),
'te': (u'Telugu', 2, '(n != 1)'),
'tg': (u'Tajik', 2, '(n != 1)'),
'ti': (u'Tigrinya', 2, '(n > 1)'),
'th': (u'Thai', 1, '0'),
'tk': (u'Turkmen', 2, '(n != 1)'),
'tr': (u'Turkish', 1, '0'),
'uk': (u'Ukrainian', 3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'vi': (u'Vietnamese', 1, '0'),
'wa': (u'Walloon', 2, '(n > 1)'),
# Chinese is difficult because the main divide is on script, not really
# country. Simplified Chinese is used mostly in China, Singapore and Malaysia.
# Traditional Chinese is used mostly in Hong Kong, Taiwan and Macau.
'zh_CN': (u'Chinese (China)', 1, '0'),
'zh_HK': (u'Chinese (Hong Kong)', 1, '0'),
'zh_TW': (u'Chinese (Taiwan)', 1, '0'),
'zu': (u'Zulu', 2, '(n != 1)'),
}
"""Dictionary of language data.
The language code is the dictionary key (which may contain country codes and modifiers).
The value is a tuple: (Full name in English from iso-codes, nplurals, plural equation).
Note that the English names should not be used in user facing places - it
should always be passed through the function returned from tr_lang(), or at
least passed through _fix_language_name()."""
_fixed_names = {
u"Asturian; Bable; Leonese; Asturleonese": u"Asturian",
u"Bokmål, Norwegian; Norwegian Bokmål": u"Norwegian Bokmål",
u"Catalan; Valencian": u"Catalan",
u"Central Khmer": u"Khmer",
u"Chichewa; Chewa; Nyanja": u"Chewa; Nyanja",
u"Divehi; Dhivehi; Maldivian": u"Divehi",
u"Dutch; Flemish": u"Dutch",
u"Filipino; Pilipino": u"Filipino",
u"Greek, Modern (1453-)": u"Greek",
u"Kirghiz; Kyrgyz": u"Kirghiz",
u"Klingon; tlhIngan-Hol": u"Klingon",
u"Limburgan; Limburger; Limburgish": u"Limburgish",
u"Low German; Low Saxon; German, Low; Saxon, Low": u"Low German",
u"Luxembourgish; Letzeburgesch": u"Luxembourgish",
u"Ndebele, South; South Ndebele": u"Southern Ndebele",
u"Norwegian Nynorsk; Nynorsk, Norwegian": u"Norwegian Nynorsk",
u"Occitan (post 1500)": u"Occitan",
u"Panjabi; Punjabi": u"Punjabi",
u"Pedi; Sepedi; Northern Sotho": u"Northern Sotho",
u"Pushto; Pashto": u"Pashto",
u"Sinhala; Sinhalese": u"Sinhala",
u"Sotho, Southern": u"Sotho",
u"Spanish; Castilian": u"Spanish",
u"Uighur; Uyghur": u"Uighur",
}
def simplercode(code):
"""This attempts to simplify the given language code by ignoring country
codes, for example.
@see:
- U{http://www.rfc-editor.org/rfc/bcp/bcp47.txt}
- U{http://www.rfc-editor.org/rfc/rfc4646.txt}
- U{http://www.rfc-editor.org/rfc/rfc4647.txt}
- U{http://www.w3.org/International/articles/language-tags/}
"""
if not code:
return code
normalized = normalize_code(code)
separator = normalized.rfind('-')
if separator >= 0:
return code[:separator]
else:
return ""
expansion_factors = {
'af': 0.1,
'ar': -0.09,
'es': 0.21,
'fr': 0.28,
'it': 0.2,
}
"""Source to target string length expansion factors."""
import gettext
import locale
import re
import os
iso639 = {}
"""ISO 639 language codes"""
iso3166 = {}
"""ISO 3166 country codes"""
langcode_re = re.compile("^[a-z]{2,3}([_-][A-Z]{2,3}|)(@[a-zA-Z0-9]+|)$")
variant_re = re.compile("^[_-][A-Z]{2,3}(@[a-zA-Z0-9]+|)$")
def languagematch(languagecode, otherlanguagecode):
"""matches a languagecode to another, ignoring regions in the second"""
if languagecode is None:
return langcode_re.match(otherlanguagecode)
return languagecode == otherlanguagecode or \
(otherlanguagecode.startswith(languagecode) and variant_re.match(otherlanguagecode[len(languagecode):]))
dialect_name_re = re.compile(r"(.+)\s\(([^)\d]+)\)$")
def tr_lang(langcode=None):
"""Gives a function that can translate a language name, even in the form C{"language (country)"},
into the language with iso code langcode, or the system language if no language is specified."""
langfunc = gettext_lang(langcode)
countryfunc = gettext_country(langcode)
def handlelanguage(name):
match = dialect_name_re.match(name)
if match:
language, country = match.groups()
return u"%s (%s)" % (_fix_language_name(langfunc(language)), countryfunc(country))
else:
return _fix_language_name(langfunc(name))
return handlelanguage
def _fix_language_name(name):
"""Identify and replace some unsightly names present in iso-codes.
If the name is present in _fixed_names we assume it is untranslated and
we replace it with a more usable rendering."""
return _fixed_names.get(name, name)
def gettext_lang(langcode=None):
"""Returns a gettext function to translate language names into the given
language, or the system language if no language is specified."""
if not langcode in iso639:
if not langcode:
langcode = ""
if os.name == "nt":
# On Windows the default locale is not used for some reason
t = gettext.translation('iso_639', languages=[locale.getdefaultlocale()[0]], fallback=True)
else:
t = gettext.translation('iso_639', fallback=True)
else:
t = gettext.translation('iso_639', languages=[langcode], fallback=True)
iso639[langcode] = t.ugettext
return iso639[langcode]
def gettext_country(langcode=None):
"""Returns a gettext function to translate country names into the given
language, or the system language if no language is specified."""
if not langcode in iso3166:
if not langcode:
langcode = ""
if os.name == "nt":
# On Windows the default locale is not used for some reason
t = gettext.translation('iso_3166', languages=[locale.getdefaultlocale()[0]], fallback=True)
else:
t = gettext.translation('iso_3166', fallback=True)
else:
t = gettext.translation('iso_3166', languages=[langcode], fallback=True)
iso3166[langcode] = t.ugettext
return iso3166[langcode]
def normalize(string, normal_form="NFC"):
"""Return a unicode string in its normalized form
@param string: The string to be normalized
@param normal_form: NFC (default), NFD, NFCK, NFDK
@return: Normalized string
"""
if string is None:
return None
else:
return unicodedata.normalize(normal_form, string)
def forceunicode(string):
"""Ensures that the string is in unicode.
@param string: A text string
@type string: Unicode, String
@return: String converted to Unicode and normalized as needed.
@rtype: Unicode
"""
if string is None:
return None
if isinstance(string, str):
encoding = getattr(string, "encoding", "utf-8")
string = string.decode(encoding)
elif isinstance(string, StringElem):
string = unicode(string)
return string
def normalized_unicode(string):
"""Forces the string to unicode and does normalization."""
return normalize(forceunicode(string))
def normalize_code(code):
return code.replace("_", "-").replace("@", "-").lower()
def simplify_to_common(language_code, languages=languages):
"""Simplify language code to the most commonly used form for the
language, stripping country information for languages that tend
not to be localized differently for different countries"""
simpler = simplercode(language_code)
if normalize_code(language_code) in [normalize_code(key) for key in languages.keys()] or simpler == "":
return language_code
else:
return simplify_to_common(simpler)
```
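A few quick calls against the helpers above illustrate the intended behaviour (a sketch; it assumes the module is importable as translate.lang.data):

```python
from translate.lang import data

print data.simplercode("pt_BR")          # 'pt'
print data.simplercode("pt")             # '' (nothing left to strip)
print data.languagematch("de", "de_AT")  # truthy: 'de_AT' is a variant of 'de'
print data.simplify_to_common("de_DE")   # 'de'    (no separate 'de_DE' entry)
print data.simplify_to_common("pt_BR")   # 'pt_BR' (kept: listed explicitly)
print data.languages["af"]               # (u'Afrikaans', 2, '(n != 1)')
```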
#### File: translate/lang/test_af.py
```python
from translate.lang import factory
def test_sentences():
"""Tests basic functionality of sentence segmentation."""
language = factory.getlanguage('af')
sentences = language.sentences(u"Normal case. Nothing interesting.")
assert sentences == [u"Normal case.", "Nothing interesting."]
sentences = language.sentences(u"Wat? 'n Fout?")
assert sentences == [u"Wat?", "'n Fout?"]
sentences = language.sentences(u"Dit sal a.g.v. 'n fout gebeur.")
assert sentences == [u"Dit sal a.g.v. 'n fout gebeur."]
def test_capsstart():
"""Tests that the indefinite article ('n) doesn't confuse startcaps()."""
language = factory.getlanguage('af')
assert not language.capsstart("")
assert language.capsstart("Koeie kraam koeie")
assert language.capsstart("'Koeie' kraam koeie")
assert not language.capsstart("koeie kraam koeie")
assert language.capsstart("\n\nKoeie kraam koeie")
assert language.capsstart("'n Koei kraam koeie")
assert language.capsstart("'n 'Koei' kraam koeie")
assert not language.capsstart("'n koei kraam koeie")
assert language.capsstart("\n\n'n Koei kraam koeie")
```
#### File: translate/lang/test_es.py
```python
from translate.lang import factory
def test_punctranslate():
"""Tests that we can translate punctuation."""
language = factory.getlanguage('es')
assert language.punctranslate(u"") == u""
assert language.punctranslate(u"abc efg") == u"abc efg"
assert language.punctranslate(u"abc efg.") == u"abc efg."
assert language.punctranslate(u"abc efg?") == u"¿abc efg?"
assert language.punctranslate(u"abc efg!") == u"¡abc efg!"
# We have to be a bit more gentle on the code by using capitals correctly.
# Can we be more robust with this without affecting sentence segmentation?
assert language.punctranslate(u"Abc efg? Hij.") == u"¿Abc efg? Hij."
assert language.punctranslate(u"Abc efg! Hij.") == u"¡Abc efg! Hij."
#TODO: we should be doing better, but at the moment we only support the first sentence
def test_sentences():
"""Tests basic functionality of sentence segmentation."""
language = factory.getlanguage('es')
sentences = language.sentences(u"")
assert sentences == []
sentences = language.sentences(u"El archivo <b>%1</b> ha sido modificado. ¿Desea guardarlo?")
print sentences
assert sentences == [u"El archivo <b>%1</b> ha sido modificado.", u"¿Desea guardarlo?"]
```
#### File: translate/misc/autoencode.py
```python
class autoencode(unicode):
def __new__(newtype, string=u"", encoding=None, errors=None):
if isinstance(string, unicode):
if errors is None:
newstring = unicode.__new__(newtype, string)
else:
newstring = unicode.__new__(newtype, string, errors=errors)
if encoding is None and isinstance(string, autoencode):
newstring.encoding = string.encoding
else:
newstring.encoding = encoding
else:
if errors is None and encoding is None:
newstring = unicode.__new__(newtype, string)
elif errors is None:
try:
newstring = unicode.__new__(newtype, string, encoding)
except LookupError, e:
raise ValueError(str(e))
elif encoding is None:
newstring = unicode.__new__(newtype, string, errors)
else:
newstring = unicode.__new__(newtype, string, encoding, errors)
newstring.encoding = encoding
return newstring
def join(self, seq):
return autoencode(super(autoencode, self).join(seq))
def __str__(self):
if self.encoding is None:
return super(autoencode, self).__str__()
else:
return self.encode(self.encoding)
```
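A short sketch of what the class above buys you: the encoding travels with the string and is used again when converting back to bytes (Python 2):

```python
from translate.misc.autoencode import autoencode

s = autoencode(u"B\xe9zier", "utf-8")
print s.encoding       # 'utf-8'
print repr(str(s))     # re-encoded with the stored encoding: 'B\xc3\xa9zier'
plain = autoencode(u"plain text")
print plain.encoding   # None: __str__ falls back to the default behaviour
```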
#### File: translate/misc/context.py
```python
import sys
def with_(mgr, body):
"""A function to mimic the with statement introduced in Python 2.5
The code below was taken from http://www.python.org/dev/peps/pep-0343/
"""
exit = mgr.__exit__ # Not calling it yet
value = mgr.__enter__()
exc = True
try:
try:
if isinstance(value, (tuple, list)):
return body(*value)
else:
return body(value)
except:
# The exceptional case is handled here
exc = False
if not exit(*sys.exc_info()):
raise
# The exception is swallowed if exit() returns true
finally:
# The normal and non-local-goto cases are handled here
if exc:
exit(None, None, None)
```
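A minimal usage sketch for `with_()`, which emulates the `with` statement on interpreters that lack it (the `Managed` class here is made up for illustration):

```python
from translate.misc.context import with_

class Managed(object):
    def __enter__(self):
        print "enter"
        return 42
    def __exit__(self, exc_type, exc_value, tb):
        print "exit"
        return False   # do not swallow exceptions

result = with_(Managed(), lambda value: value + 1)
print result   # 43, with "enter"/"exit" printed around the body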
#### File: translate/misc/file_discovery.py
```python
__all__ = ['get_abs_data_filename']
import sys
import os
def get_abs_data_filename(path_parts, basedirs=None):
"""Get the absolute path to the given file- or directory name in the current
running application's data directory.
@type path_parts: list
@param path_parts: The path parts that can be joined by os.path.join().
"""
if basedirs is None:
basedirs = []
if isinstance(path_parts, str):
path_parts = [path_parts]
BASE_DIRS = basedirs + [
os.path.dirname(unicode(__file__, sys.getfilesystemencoding())),
os.path.dirname(unicode(sys.executable, sys.getfilesystemencoding()))
]
# Freedesktop standard
if 'XDG_DATA_HOME' in os.environ:
BASE_DIRS += [os.environ['XDG_DATA_HOME']]
if 'XDG_DATA_DIRS' in os.environ:
BASE_DIRS += os.environ['XDG_DATA_DIRS'].split(os.path.pathsep)
# Mac OSX app bundles
if 'RESOURCEPATH' in os.environ:
BASE_DIRS += os.environ['RESOURCEPATH'].split(os.path.pathsep)
DATA_DIRS = [
["..", "share"],
["share"]
]
for basepath, data_dir in ((x, y) for x in BASE_DIRS for y in DATA_DIRS):
dir_and_filename = data_dir + path_parts
datafile = os.path.join(basepath or os.path.dirname(__file__), *dir_and_filename)
if os.path.exists(datafile):
return datafile
raise Exception('Could not find "%s"' % (os.path.join(*path_parts)))
```
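Usage is a single call; a sketch (the file name here is only illustrative, and an exception is raised when nothing matches):

```python
from translate.misc import file_discovery

try:
    # "langmodels/af.lm" is just an example path; substitute a real data file.
    print file_discovery.get_abs_data_filename(["langmodels", "af.lm"])
except Exception, e:
    print "no matching data file found:", e
```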
#### File: translate/misc/multistring.py
```python
from translate.misc import autoencode
class multistring(autoencode.autoencode):
def __new__(newtype, string=u"", encoding=None, errors=None):
if isinstance(string, list):
if not string:
raise ValueError("multistring must contain at least one string")
mainstring = string[0]
newstring = multistring.__new__(newtype, string[0], encoding, errors)
newstring.strings = [newstring] + [autoencode.autoencode.__new__(autoencode.autoencode, altstring, encoding, errors) for altstring in string[1:]]
else:
newstring = autoencode.autoencode.__new__(newtype, string, encoding, errors)
newstring.strings = [newstring]
return newstring
def __init__(self, *args, **kwargs):
super(multistring, self).__init__()
if not hasattr(self, "strings"):
self.strings = []
def __cmp__(self, otherstring):
if isinstance(otherstring, multistring):
parentcompare = cmp(autoencode.autoencode(self), otherstring)
if parentcompare:
return parentcompare
else:
return cmp(self.strings[1:], otherstring.strings[1:])
elif isinstance(otherstring, autoencode.autoencode):
return cmp(autoencode.autoencode(self), otherstring)
elif isinstance(otherstring, unicode):
return cmp(unicode(self), otherstring)
elif isinstance(otherstring, str):
return cmp(str(self), otherstring)
elif isinstance(otherstring, list):
return cmp(self, multistring(otherstring))
else:
return cmp(type(self), type(otherstring))
def __ne__(self, otherstring):
return self.__cmp__(otherstring) != 0
def __eq__(self, otherstring):
return self.__cmp__(otherstring) == 0
def __repr__(self):
parts = [autoencode.autoencode.__repr__(self)] + [repr(a) for a in self.strings[1:]]
return "multistring([" + ",".join(parts) + "])"
def replace(self, old, new, count=None):
if count is None:
newstr = multistring(super(multistring, self).replace(old, new), self.encoding)
else:
newstr = multistring(super(multistring, self).replace(old, new, count), self.encoding)
for s in self.strings[1:]:
if count is None:
newstr.strings.append(s.replace(old, new))
else:
newstr.strings.append(s.replace(old, new, count))
return newstr
```
#### File: translate/misc/test_multistring.py
```python
from translate.misc import multistring
from translate.misc import test_autoencode
class TestMultistring(test_autoencode.TestAutoencode):
type2test = multistring.multistring
def test_constructor(self):
t = self.type2test
s1 = t("test")
assert type(s1) == t
assert s1 == "test"
assert s1.strings == ["test"]
s2 = t(["test", "me"])
assert type(s2) == t
assert s2 == "test"
assert s2.strings == ["test", "me"]
assert s2 != s1
def test_replace(self):
t = self.type2test
s1 = t(["abcdef", "def"])
result = s1.replace("e", "")
assert type(result) == t
assert result == t(["abcdf", "df"])
result = s1.replace("e", "xx")
assert result == t(["abcdxxf", "dxxf"])
result = s1.replace("e", u"\xe9")
assert result == t([u"abcd\xe9f", u"d\xe9f"])
result = s1.replace("e", "\n")
assert result == t([u"abcd\nf", u"d\nf"])
result = result.replace("\n", "\\n")
assert result == t([u"abcd\\nf", u"d\\nf"])
result = result.replace("\\n", "\n")
assert result == t([u"abcd\nf", u"d\nf"])
```
#### File: translate/misc/zipfileext.py
```python
from zipfile import ZipFile, struct, structCentralDir, stringCentralDir, structEndArchive, stringEndArchive
class ZipFileExt(ZipFile, object):
"""a ZipFile that can handle replacing objects"""
def delete(self, name):
"""Delete the file from the archive. If it appears multiple
times only the first instance will be deleted."""
for i in range (0, len(self.filelist)):
if self.filelist[i].filename == name:
if self.debug:
print "Removing", name
deleted_offset = self.filelist[i].header_offset
# "file_offset" is only available in python up to 2.4
if hasattr(self.filelist[i], "file_offset"):
deleted_size = (self.filelist[i].file_offset - self.filelist[i].header_offset) + self.filelist[i].compress_size
else:
deleted_size = (len(self.filelist[i].FileHeader()) - self.filelist[i].header_offset) + self.filelist[i].compress_size
zinfo_size = struct.calcsize(structCentralDir) + len(self.filelist[i].filename) + len(self.filelist[i].extra)
# Remove the file's data from the archive.
current_offset = self.fp.tell()
# go to the end of the archive to calculate the total archive_size
self.fp.seek(0, 2)
archive_size = self.fp.tell()
self.fp.seek(deleted_offset + deleted_size)
buf = self.fp.read()
self.fp.seek(deleted_offset)
self.fp.write(buf)
self.fp.truncate(archive_size - deleted_size - zinfo_size)
# go to the end of the archive to calculate the total archive_size
self.fp.seek(0, 2)
if self.debug >= 2:
if self.fp.tell() != archive_size - deleted_size - zinfo_size:
print "truncation failed: %r != %r" % (self.fp.tell(), archive_size - deleted_size - zinfo_size)
if current_offset > deleted_offset + deleted_size:
current_offset -= deleted_size
elif current_offset > deleted_offset:
current_offset = deleted_offset
self.fp.seek(current_offset, 0)
# Remove file from central directory.
del self.filelist[i]
# Adjust the remaining offsets in the central directory.
for j in range (i, len(self.filelist)):
if self.filelist[j].header_offset > deleted_offset:
self.filelist[j].header_offset -= deleted_size
# "file_offset" is only available in python up to 2.4
if hasattr(self.filelist[i], "file_offset"):
if self.filelist[j].file_offset > deleted_offset:
self.filelist[j].file_offset -= deleted_size
del self.NameToInfo[name]
return
if self.debug:
print name, "not in archive"
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return
self.writeendrec()
if not self._filePassed:
self.fp.close()
self.fp = None
def writeendrec(self):
"""Write the ending records (without neccessarily closing the file)"""
if self.mode in ("w", "a"): # write ending records
count = 0
current_offset = self.fp.tell()
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
centdir = struct.pack(structCentralDir,
stringCentralDir, zinfo.create_version,
zinfo.create_system, zinfo.extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, zinfo.compress_size, zinfo.file_size,
len(zinfo.filename), len(zinfo.extra), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
zinfo.header_offset)
self.fp.write(centdir)
self.fp.write(zinfo.filename)
self.fp.write(zinfo.extra)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, count, count, pos2 - pos1, pos1, 0)
self.fp.write(endrec)
self.fp.seek(pos1)
```
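A minimal sketch of deleting a member in place (the archive name is hypothetical; `delete()` rewrites the member data and fixes up the central directory offsets):

```python
from translate.misc.zipfileext import ZipFileExt

archive = ZipFileExt("bundle.zip", "a")    # hypothetical archive
archive.writestr("notes.txt", "temporary content")
archive.delete("notes.txt")                # remove it again, adjusting offsets
archive.close()
```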
#### File: search/indexing/CommonIndexer.py
```python
import translate.lang.data
import os
__revision__ = "$Id: CommonIndexer.py 8507 2008-09-27 09:15:08Z dwaynebailey $"
def is_available():
"""check if this indexing engine interface is usable
this function must exist in every module that contains indexing engine
interfaces
@return: is this interface usable?
@rtype: bool
"""
return False
class CommonDatabase(object):
"""base class for indexing support
any real implementation must override most methods of this class
"""
field_analyzers = {}
"""mapping of field names and analyzers - see 'set_field_analyzers'"""
ANALYZER_EXACT = 0
"""exact matching: the query string must equal the whole term string"""
ANALYZER_PARTIAL = 1<<1
"""partial matching: a document matches, even if the query string only
matches the beginning of the term value."""
ANALYZER_TOKENIZE = 1<<2
"""tokenize terms and queries automatically"""
ANALYZER_DEFAULT = ANALYZER_TOKENIZE | ANALYZER_PARTIAL
"""the default analyzer to be used if nothing is configured"""
QUERY_TYPE = None
"""override this with the query class of the implementation"""
INDEX_DIRECTORY_NAME = None
"""override this with a string to be used as the name of the indexing
directory/file in the filesystem
"""
def __init__(self, basedir, analyzer=None, create_allowed=True):
"""initialize or open an indexing database
Any derived class must override __init__.
Any implementation can rely on the "self.location" attribute to be set
by the __init__ function of the super class.
@raise ValueError: the given location exists, but the database type
is incompatible (e.g. created by a different indexing engine)
@raise OSError: the database failed to initialize
@param basedir: the parent directory of the database
@type basedir: str
@param analyzer: bitwise combination of possible analyzer flags
to be used as the default analyzer for this database. Leave it empty
to use the system default analyzer (self.ANALYZER_DEFAULT).
see self.ANALYZER_TOKENIZE, self.ANALYZER_PARTIAL, ...
@type analyzer: int
@param create_allowed: create the database, if necessary; default: True
@type create_allowed: bool
"""
# just do some checks
if self.QUERY_TYPE is None:
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'QUERY_TYPE' is undefined")
if self.INDEX_DIRECTORY_NAME is None:
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'INDEX_DIRECTORY_NAME' is undefined")
self.location = os.path.join(basedir, self.INDEX_DIRECTORY_NAME)
if (not create_allowed) and (not os.path.exists(self.location)):
raise OSError("Indexer: the database does not exist - and I am" \
+ " not configured to create it.")
if analyzer is None:
self.analyzer = self.ANALYZER_DEFAULT
else:
self.analyzer = analyzer
self.field_analyzers = {}
def flush(self, optimize=False):
"""flush the content of the database - to force changes to be written
to disk
some databases also support index optimization
@param optimize: should the index be optimized if possible?
@type optimize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'flush' is missing")
def make_query(self, args, require_all=True, analyzer=None):
"""create simple queries (strings or field searches) or
combine multiple queries (AND/OR)
To specify rules for field searches, you may want to take a look at
'set_field_analyzers'. The parameter 'analyzer' can override
the previously defined default setting.
@param args: queries or search string or description of field query
examples::
[xapian.Query("foo"), xapian.Query("bar")]
xapian.Query("foo")
"bar"
{"foo": "bar", "foobar": "foo"}
@type args: list of queries | single query | str | dict
@param require_all: boolean operator
(True -> AND (default) / False -> OR)
@type require_all: boolean
@param analyzer: (only applicable for 'dict' or 'str')
Define query options (partial matching, exact matching, tokenizing,
...) as bitwise combinations of CommonIndexer.ANALYZER_???.
This can override previously defined field analyzer settings.
If analyzer is None (default), then the configured analyzer for the
field is used.
@type analyzer: int
@return: the combined query
@rtype: query type of the specific implemention
"""
# turn a dict into a list if necessary
if isinstance(args, dict):
args = args.items()
# turn 'args' into a list if necessary
if not isinstance(args, list):
args = [args]
# combine all given queries
result = []
for query in args:
# just add precompiled queries
if isinstance(query, self.QUERY_TYPE):
result.append(self._create_query_for_query(query))
# create field/value queries out of a tuple
elif isinstance(query, tuple):
field, value = query
# perform unicode normalization
field = translate.lang.data.normalize(unicode(field))
value = translate.lang.data.normalize(unicode(value))
# check for the chosen match type
if analyzer is None:
analyzer = self.get_field_analyzers(field)
result.append(self._create_query_for_field(field, value,
analyzer=analyzer))
# parse plaintext queries
elif isinstance(query, basestring):
if analyzer is None:
analyzer = self.analyzer
# perform unicode normalization
query = translate.lang.data.normalize(unicode(query))
result.append(self._create_query_for_string(query,
require_all=require_all, analyzer=analyzer))
else:
# other types of queries are not supported
raise ValueError("Unable to handle query type: %s" \
% str(type(query)))
# return the combined query
return self._create_query_combined(result, require_all)
def _create_query_for_query(self, query):
"""generate a query based on an existing query object
basically this function should just create a copy of the original
@param query: the original query object
@type query: xapian.Query
@return: the resulting query object
@rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_query' is missing")
def _create_query_for_string(self, text, require_all=True,
analyzer=None):
"""generate a query for a plain term of a string query
basically this function parses the string and returns the resulting
query
@param text: the query string
@type text: str
@param require_all: boolean operator
(True -> AND (default) / False -> OR)
@type require_all: bool
@param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
CommonIndexer.ANALYZER_???.
This can override previously defined field analyzer settings.
If analyzer is None (default), then the configured analyzer for the
field is used.
@type analyzer: int
@return: resulting query object
@rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_string' is missing")
def _create_query_for_field(self, field, value, analyzer=None):
"""generate a field query
this functions creates a field->value query
@param field: the fieldname to be used
@type field: str
@param value: the wanted value of the field
@type value: str
@param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
CommonIndexer.ANALYZER_???.
This can override previously defined field analyzer settings.
If analyzer is None (default), then the configured analyzer for the
field is used.
@type analyzer: int
@return: resulting query object
@rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_field' is missing")
def _create_query_combined(self, queries, require_all=True):
"""generate a combined query
@param queries: list of the original queries
@type queries: list of xapian.Query
@param require_all: boolean operator
(True -> AND (default) / False -> OR)
@type require_all: bool
@return: the resulting combined query object
@rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_combined' is missing")
def index_document(self, data):
"""add the given data to the database
@param data: the data to be indexed.
A dictionary will be treated as fieldname:value combinations.
If the fieldname is None then the value will be interpreted as a
plain term or as a list of plain terms.
Lists of terms are indexed separately.
Lists of strings are treated as plain terms.
@type data: dict | list of str
"""
doc = self._create_empty_document()
if isinstance(data, dict):
data = data.items()
# add all data
for dataset in data:
if isinstance(dataset, tuple):
# the dataset tuple consists of '(key, value)'
key, value = dataset
if key is None:
if isinstance(value, list):
terms = value[:]
elif isinstance(value, basestring):
terms = [value]
else:
raise ValueError("Invalid data type to be indexed: %s" \
% str(type(data)))
for one_term in terms:
self._add_plain_term(doc, self._decode(one_term),
(self.ANALYZER_DEFAULT & self.ANALYZER_TOKENIZE > 0))
else:
analyze_settings = self.get_field_analyzers(key)
# handle multiple terms
if not isinstance(value, list):
value = [value]
for one_term in value:
self._add_field_term(doc, key, self._decode(one_term),
(analyze_settings & self.ANALYZER_TOKENIZE > 0))
elif isinstance(dataset, basestring):
self._add_plain_term(doc, self._decode(dataset),
(self.ANALYZER_DEFAULT & self.ANALYZER_TOKENIZE > 0))
else:
raise ValueError("Invalid data type to be indexed: %s" \
% str(type(data)))
self._add_document_to_index(doc)
def _create_empty_document(self):
"""create an empty document to be filled and added to the index later
@return: the new document object
@rtype: xapian.Document | PyLucene.Document
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_empty_document' is missing")
def _add_plain_term(self, document, term, tokenize=True):
"""add a term to a document
@param document: the document to be changed
@type document: xapian.Document | PyLucene.Document
@param term: a single term to be added
@type term: str
@param tokenize: should the term be tokenized automatically
@type tokenize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_plain_term' is missing")
def _add_field_term(self, document, field, term, tokenize=True):
"""add a field term to a document
@param document: the document to be changed
@type document: xapian.Document | PyLucene.Document
@param field: name of the field
@type field: str
@param term: term to be associated to the field
@type term: str
@param tokenize: should the term be tokenized automatically
@type tokenize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_field_term' is missing")
def _add_document_to_index(self, document):
"""add a prepared document to the index database
@param document: the document to be added
@type document: xapian.Document | PyLucene.Document
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_document_to_index' is missing")
def begin_transaction(self):
"""begin a transaction
You can group multiple modifications of a database as a transaction.
This prevents time-consuming database flushing and helps if you want
a changeset to be committed either completely or not at all.
No changes will be written to disk until 'commit_transaction'.
'cancel_transaction' can be used to revert an ongoing transaction.
Database types that do not support transactions may silently ignore it.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'begin_transaction' is missing")
def cancel_transaction(self):
"""cancel an ongoing transaction
See 'begin_transaction' for details.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'cancel_transaction' is missing")
def commit_transaction(self):
"""submit the currently ongoing transaction and write changes to disk
See 'begin_transaction' for details.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'commit_transaction' is missing")
def get_query_result(self, query):
"""return an object containing the results of a query
@param query: a pre-compiled query
@type query: a query object of the real implementation
@return: an object that allows access to the results
@rtype: subclass of CommonEnquire
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'get_query_result' is missing")
def delete_document_by_id(self, docid):
"""delete a specified document
@param docid: the document ID to be deleted
@type docid: int
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'delete_document_by_id' is missing")
def search(self, query, fieldnames):
"""return a list of the contents of specified fields for all matches of
a query
@param query: the query to be issued
@type query: a query object of the real implementation
@param fieldnames: the name(s) of a field of the document content
@type fieldnames: string | list of strings
@return: a list of dicts containing the specified field(s)
@rtype: list of dicts
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'search' is missing")
def delete_doc(self, ident):
"""delete the documents returned by a query
@param ident: [list of] document IDs | dict describing a query | query
@type ident: int | list of tuples | dict | list of dicts |
query (e.g. xapian.Query) | list of queries
"""
# turn a doc-ID into a list of doc-IDs
if isinstance(ident, list):
# it is already a list
ident_list = ident
else:
ident_list = [ident]
if len(ident_list) == 0:
# no matching items
return 0
if isinstance(ident_list[0], int):
# create a list of IDs of all successfully removed documents
success_delete = [match for match in ident_list
if self.delete_document_by_id(match)]
return len(success_delete)
if isinstance(ident_list[0], dict):
# something like: { "msgid": "foobar" }
# assemble all queries
query = self.make_query([self.make_query(query_dict,
require_all=True) for query_dict in ident_list],
require_all=True)
elif isinstance(ident_list[0], object):
# assume a query object (with 'AND')
query = self.make_query(ident_list, require_all=True)
else:
# invalid element type in list (not necessarily caught in the
# lines above)
raise TypeError("description of documents to-be-deleted is not " \
+ "supported: list of %s" % type(ident_list[0]))
# we successfully created a query - now iterate through the result
# no documents deleted so far ...
remove_list = []
# delete all resulting documents step by step
def add_docid_to_list(match):
"""collect every document ID"""
remove_list.append(match["docid"])
self._walk_matches(query, add_docid_to_list)
return self.delete_doc(remove_list)
def _walk_matches(self, query, function, arg_for_function=None):
"""use this function if you want to do something with every single match
of a query
example::
self._walk_matches(query, function_for_match, arg_for_func)
'function_for_match' expects only one argument: the matched object
@param query: a query object of the real implementation
@type query: xapian.Query | PyLucene.Query
@param function: the function to execute with every match
@type function: function
@param arg_for_function: an optional argument for the function
@type arg_for_function: anything
"""
# execute the query
enquire = self.get_query_result(query)
# start with the first element
start = 0
# do the loop at least once
size, avail = (0, 1)
# how many results per 'get_matches'?
steps = 2
while start < avail:
(size, avail, matches) = enquire.get_matches(start, steps)
for match in matches:
if arg_for_function is None:
function(match)
else:
function(match, arg_for_function)
start += size
def set_field_analyzers(self, field_analyzers):
"""set the analyzers for different fields of the database documents
All bitwise combinations of CommonIndexer.ANALYZER_??? are possible.
@param field_analyzers: mapping of field names and analyzers
@type field_analyzers: dict containing field names and analyzers
@raise TypeError: invalid values in 'field_analyzers'
"""
for field, analyzer in field_analyzers.items():
# check for invalid input types
if not isinstance(field, (str, unicode)):
raise TypeError("field name must be a string")
if not isinstance(analyzer, int):
raise TypeError("the analyzer must be a whole number (int)")
# map the analyzer to the field name
self.field_analyzers[field] = analyzer
def get_field_analyzers(self, fieldnames=None):
"""return the analyzer that was mapped to a specific field
see 'set_field_analyzers' for details
@param fieldnames: the analyzer of this field (or all/multiple fields)
is requested; leave empty (or "None") to request all fields
@type fieldnames: str | list of str | None
@return: the analyzer setting of the field - see
CommonDatabase.ANALYZER_??? or a dict of field names and analyzers
@rtype: int | dict
"""
# all field analyzers are requested
if fieldnames is None:
# return a copy
return dict(self.field_analyzers)
# one field is requested
if isinstance(fieldnames, (str, unicode)):
if self.field_analyzers.has_key(fieldnames):
return self.field_analyzers[fieldnames]
else:
return self.analyzer
# a list of fields is requested
if isinstance(fieldnames, list):
result = {}
for field in fieldnames:
result[field] = self.get_field_analyzers(field)
return result
return self.analyzer
def _decode(self, text):
"""decode the string from utf-8 or charmap
perform unicode normalization
"""
if isinstance(text, str):
try:
result = unicode(text.decode("UTF-8"))
except UnicodeDecodeError, e:
result = unicode(text.decode("charmap"))
elif not isinstance(text, unicode):
result = unicode(text)
else:
result = text
# perform unicode normalization
return translate.lang.data.normalize(result)
class CommonEnquire(object):
"""an enquire object contains the information about the result of a request
"""
def __init__(self, enquire):
"""intialization of a wrapper around enquires of different backends
@param enquire: a previous enquire
@type enquire: xapian.Enquire | pylucene-enquire
"""
self.enquire = enquire
def get_matches(self, start, number):
"""return a specified number of qualified matches of a previous query
@param start: index of the first match to return (starting from zero)
@type start: int
@param number: the number of matching entries to return
@type number: int
@return: a set of matching entries and some statistics
@rtype: tuple of (returned number, available number, matches)
"matches" is a dictionary of::
["rank", "percent", "document", "docid"]
"""
raise NotImplementedError("Incomplete indexing implementation: " \
+ "'get_matches' for the 'Enquire' class is missing")
def get_matches_count(self):
"""return the estimated number of matches
use "CommonIndexer.search" to retrieve the exact number of matches
@return: the estimated number of matches
@rtype: int
"""
(returned, estimate_count, matches) = self.get_matches(0, 1)
return estimate_count
```
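The class is abstract, but the analyzer bookkeeping can be exercised with a tiny stand-in subclass. This is a sketch only: `DummyDatabase` is made up and merely satisfies the constructor checks, while the real backends provide the Xapian/PyLucene implementations:

```python
from translate.search.indexing import CommonIndexer

class DummyDatabase(CommonIndexer.CommonDatabase):
    QUERY_TYPE = object            # placeholder so __init__'s check passes
    INDEX_DIRECTORY_NAME = "dummy_index"

db = DummyDatabase("/tmp")
db.set_field_analyzers({"msgid": CommonIndexer.CommonDatabase.ANALYZER_EXACT})
print db.get_field_analyzers("msgid")   # 0 (ANALYZER_EXACT)
print db.get_field_analyzers("other")   # 6, the default (TOKENIZE | PARTIAL)
```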
#### File: translate/search/lshtein.py
```python
import math
import sys
def python_distance(a, b, stopvalue=-1):
"""Calculates the distance for use in similarity calculation. Python
version."""
l1 = len(a)
l2 = len(b)
if stopvalue == -1:
stopvalue = l2
current = range(l1+1)
for i in range(1, l2+1):
previous, current = current, [i]+[0]*l1
least = l2
for j in range(1, l1 + 1):
change = previous[j-1]
if a[j-1] != b[i-1]:
change = change + 1
insert = previous[j] + 1
delete = current[j-1] + 1
current[j] = min(insert, delete, change)
if least > current[j]:
least = current[j]
#The smallest value in the current array is the best (lowest) value
#that can be attained in the end if the strings are identical further
if least > stopvalue:
return least
return current[l1]
def native_distance(a, b, stopvalue=0):
"""Same as python_distance in functionality. This uses the fast C
version if we detected it earlier.
Note that this does not support arbitrary sequence types, but only
string types."""
return Levenshtein.distance(a, b)
try:
import Levenshtein as Levenshtein
distance = native_distance
except Exception:
import logging
logging.warning("Python-Levenshtein not found. Continuing with built-in (slower) fuzzy matching.")
distance = python_distance
class LevenshteinComparer:
def __init__(self, max_len=200):
self.MAX_LEN = max_len
def similarity(self, a, b, stoppercentage=40):
similarity = self.similarity_real(a, b, stoppercentage)
measurements = 1
# chr_a = segment.characters(a)
# chr_b = segment.characters(b)
# if chr_a and chr_b and abs(len(chr_a) - len(a)) + abs(len(chr_b) - len(b)):
# similarity += self.similarity_real(chr_a, chr_b, stoppercentage)
# measurements += 1
# else:
# similarity *= 2
# measurements += 1
#
# wrd_a = segment.words(a)
# wrd_b = segment.words(b)
# if len(wrd_a) + len(wrd_b) > 2:
# similarity += self.similarity_real(wrd_a, wrd_b, 0)
# measurements += 1
return similarity / measurements
def similarity_real(self, a, b, stoppercentage=40):
"""Returns the similarity between a and b based on Levenshtein distance. It
can stop prematurely as soon as it sees that a and b will be no more similar than
the percentage specified in stoppercentage.
The Levenshtein distance is calculated, but the following should be noted:
- Only the first MAX_LEN characters are considered. Long strings differing
at the end will therefore seem to match better than they should. See the
use of the variable penalty to lessen the effect of this.
- Strings with widely different lengths give the opportunity for shortcut.
This is by definition of the Levenshtein distance: the distance will be
at least as much as the difference in string length.
- Calculation is stopped as soon as a similarity of stoppercentage becomes
unattainable. See the use of the variable stopvalue.
- Implementation uses memory O(min(len(a), len(b)))
- Execution time is O(len(a)*len(b))
"""
l1, l2 = len(a), len(b)
if l1 == 0 or l2 == 0:
return 0
#Let's make l1 the smallest
if l1 > l2:
l1, l2 = l2, l1
a, b = b, a
#maxsimilarity is the maximum similarity that can be attained as constrained
#by the difference in string length
maxsimilarity = 100 - 100.0*abs(l1 - l2)/l2
if maxsimilarity < stoppercentage:
return maxsimilarity * 1.0
#Let's penalise the score in cases where we shorten strings
penalty = 0
if l2 > self.MAX_LEN:
b = b[:self.MAX_LEN]
l2 = self.MAX_LEN
penalty += 7
if l1 > self.MAX_LEN:
a = a[:self.MAX_LEN]
l1 = self.MAX_LEN
penalty += 7
#The actual value in the array that would represent a giveup situation:
stopvalue = math.ceil((100.0 - stoppercentage)/100 * l2)
dist = distance(a, b, stopvalue)
if dist > stopvalue:
return stoppercentage - 1.0
#If MAX_LEN came into play, we consider the calculated distance to be
#representative of the distance between the whole, untrimmed strings
if dist != 0:
penalty = 0
return 100 - (dist*1.0/l2)*100 - penalty
if __name__ == "__main__":
from sys import argv
comparer = LevenshteinComparer()
print "Similarity:\n%s" % comparer.similarity(argv[1], argv[2], 50)
```
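A quick sketch of the comparer in use (the strings are made up; the second call may bail out early once the stop percentage becomes unreachable):

```python
from translate.search.lshtein import LevenshteinComparer

comparer = LevenshteinComparer()
print comparer.similarity(u"Open the file", u"Open a file", 40)
print comparer.similarity(u"Save changes", u"Print preview", 40)
```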
#### File: translate/search/match.py
```python
import heapq
import re
from translate.search import lshtein
from translate.search import terminology
from translate.storage import base
from translate.storage import po
from translate.misc.multistring import multistring
def sourcelen(unit):
"""Returns the length of the source string"""
return len(unit.source)
class matcher(object):
"""A class that will do matching and store configuration for the matching process"""
sort_reverse = False
def __init__(self, store, max_candidates=10, min_similarity=75, max_length=70, comparer=None, usefuzzy=False):
"""max_candidates is the maximum number of candidates that should be assembled,
min_similarity is the minimum similarity that must be attained to be included in
the result, comparer is an optional Comparer with similarity() function"""
if comparer is None:
comparer = lshtein.LevenshteinComparer(max_length)
self.comparer = comparer
self.setparameters(max_candidates, min_similarity, max_length)
self.usefuzzy = usefuzzy
self.inittm(store)
self.addpercentage = True
def usable(self, unit):
"""Returns whether this translation unit is usable for TM"""
#TODO: We might want to consider more attributes, such as approved, reviewed, etc.
source = unit.source
target = unit.target
if source and target and (self.usefuzzy or not unit.isfuzzy()):
if len(source) < 2:
return False
if source in self.existingunits and self.existingunits[source] == target:
return False
else:
self.existingunits[source] = target
return True
return False
def inittm(self, stores, reverse=False):
"""Initialises the memory for later use. We use simple base units for
speedup."""
# reverse is deprecated - just use self.sort_reverse
self.existingunits = {}
self.candidates = base.TranslationStore()
if isinstance(stores, base.TranslationStore):
stores = [stores]
for store in stores:
self.extendtm(store.units, store=store, sort=False)
self.candidates.units.sort(key=sourcelen, reverse=self.sort_reverse)
# print "TM initialised with %d candidates (%d to %d characters long)" % \
# (len(self.candidates.units), len(self.candidates.units[0].source), len(self.candidates.units[-1].source))
def extendtm(self, units, store=None, sort=True):
"""Extends the memory with extra unit(s).
@param units: The units to add to the TM.
@param store: Optional store from where some metadata can be retrieved
and associated with each unit.
@param sort: Optional parameter that can be set to False to suppress
sorting of the candidates list. This should probably only be used in
inittm().
"""
if isinstance(units, base.TranslationUnit):
units = [units]
candidates = filter(self.usable, units)
for candidate in candidates:
simpleunit = base.TranslationUnit("")
# We need to ensure that we don't pass multistrings further, since
# some modules (like the native Levenshtein) can't use it.
if isinstance(candidate.source, multistring):
if len(candidate.source.strings) > 1:
simpleunit.orig_source = candidate.source
simpleunit.orig_target = candidate.target
simpleunit.source = unicode(candidate.source)
simpleunit.target = unicode(candidate.target)
else:
simpleunit.source = candidate.source
simpleunit.target = candidate.target
# If we now only get translator comments, we don't get programmer
# comments in TM suggestions (in Pootle, for example). If we get all
# notes, pot2po adds all previous comments as translator comments
# in the new po file
simpleunit.addnote(candidate.getnotes(origin="translator"))
simpleunit.fuzzy = candidate.isfuzzy()
self.candidates.units.append(simpleunit)
if sort:
self.candidates.units.sort(key=sourcelen, reverse=self.sort_reverse)
def setparameters(self, max_candidates=10, min_similarity=75, max_length=70):
"""Sets the parameters without reinitialising the tm. If a parameter
is not specified, it is set to the default, not ignored"""
self.MAX_CANDIDATES = max_candidates
self.MIN_SIMILARITY = min_similarity
self.MAX_LENGTH = max_length
def getstoplength(self, min_similarity, text):
"""Calculates a length beyond which we are not interested.
The extra fat is because we don't use plain character distance only."""
return min(len(text) / (min_similarity/100.0), self.MAX_LENGTH)
def getstartlength(self, min_similarity, text):
"""Calculates the minimum length we are interested in.
The extra fat is because we don't use plain character distance only."""
return max(len(text) * (min_similarity/100.0), 1)
def matches(self, text):
"""Returns a list of possible matches for given source text.
@type text: String
@param text: The text that will be searched for in the translation memory
@rtype: list
@return: a list of units with the source and target strings from the
translation memory. If self.addpercentage is true (default) the match
quality is given as a percentage in the notes.
"""
bestcandidates = [(0.0, None)]*self.MAX_CANDIDATES
#We use self.MIN_SIMILARITY, but if we already know we have max_candidates
#that are better, we can adjust min_similarity upwards for speedup
min_similarity = self.MIN_SIMILARITY
# We want to limit our search in self.candidates, so we want to ignore
# all units with a source string that is too short or too long. We use
# a binary search to find the shortest string, from where we start our
# search in the candidates.
# minimum source string length to be considered
startlength = self.getstartlength(min_similarity, text)
startindex = 0
endindex = len(self.candidates.units)
while startindex < endindex:
mid = (startindex + endindex) // 2
if sourcelen(self.candidates.units[mid]) < startlength:
startindex = mid + 1
else:
endindex = mid
# maximum source string length to be considered
stoplength = self.getstoplength(min_similarity, text)
lowestscore = 0
for candidate in self.candidates.units[startindex:]:
cmpstring = candidate.source
if len(cmpstring) > stoplength:
break
similarity = self.comparer.similarity(text, cmpstring, min_similarity)
if similarity < min_similarity:
continue
if similarity > lowestscore:
heapq.heapreplace(bestcandidates, (similarity, candidate))
lowestscore = bestcandidates[0][0]
if lowestscore >= 100:
break
if min_similarity < lowestscore:
min_similarity = lowestscore
stoplength = self.getstoplength(min_similarity, text)
#Remove the empty ones:
def notzero(item):
score = item[0]
return score != 0
bestcandidates = filter(notzero, bestcandidates)
#Sort for use as a general list, and reverse so the best one is at index 0
bestcandidates.sort(reverse=True)
return self.buildunits(bestcandidates)
def buildunits(self, candidates):
"""Builds a list of units conforming to base API, with the score in the comment"""
units = []
for score, candidate in candidates:
if hasattr(candidate, "orig_source"):
candidate.source = candidate.orig_source
candidate.target = candidate.orig_target
newunit = po.pounit(candidate.source)
newunit.target = candidate.target
newunit.markfuzzy(candidate.fuzzy)
candidatenotes = candidate.getnotes().strip()
if candidatenotes:
newunit.addnote(candidatenotes)
if self.addpercentage:
newunit.addnote("%d%%" % score)
units.append(newunit)
return units
# We don't want to miss certain forms of words that only change a little
# at the end. Now we are tying this code to English, but it should serve
# us well. For example "category" should be found in "categories",
# "copy" should be found in "copied"
#
# The tuples define a regular expression to search for, and with what it
# should be replaced.
ignorepatterns = [
("y\s*$", "ie"), #category/categories, identify/identifies, apply/applied
("[\s-]+", ""), #down time / downtime, pre-order / preorder
("-", " "), #pre-order / pre order
(" ", "-"), #pre order / pre-order
]
context_re = re.compile("\s+\(.*\)\s*$")
class terminologymatcher(matcher):
"""A matcher with settings specifically for terminology matching"""
sort_reverse = True
def __init__(self, store, max_candidates=10, min_similarity=75, max_length=500, comparer=None):
if comparer is None:
comparer = terminology.TerminologyComparer(max_length)
matcher.__init__(self, store, max_candidates, min_similarity=10, max_length=max_length, comparer=comparer)
self.addpercentage = False
self.match_info = {}
def inittm(self, store):
"""Normal initialisation, but convert all source strings to lower case"""
matcher.inittm(self, store)
extras = []
for unit in self.candidates.units:
source = unit.source = context_re.sub("", unit.source).lower()
for ignorepattern in ignorepatterns:
(newterm, occurrences) = re.subn(ignorepattern[0], ignorepattern[1], source)
if occurrences:
new_unit = type(unit).buildfromunit(unit)
new_unit.source = newterm
# We mark it fuzzy to indicate that it isn't pristine
unit.markfuzzy()
extras.append(new_unit)
self.candidates.units.sort(key=sourcelen, reverse=self.sort_reverse)
if extras:
# We don't sort, so that the altered forms are at the back and
# considered last.
self.extendtm(extras, sort=False)
def getstartlength(self, min_similarity, text):
# Let's reduce false matches by not working with terms of two
# characters or less
return 3
def getstoplength(self, min_similarity, text):
# Let's ignore terms with more than 30 characters. Perhaps someone
# gave a file with normal (long) translations
return 30
def usable(self, unit):
"""Returns whether this translation unit is usable for terminology."""
if not unit.istranslated():
return False
l = len(context_re.sub("", unit.source))
return l <= self.MAX_LENGTH and l >= self.getstartlength(None, None)
def matches(self, text):
"""Normal matching after converting text to lower case. Then replace
with the original unit to retain comments, etc."""
text = text.lower()
comparer = self.comparer
comparer.match_info = {}
matches = []
known = set()
for cand in self.candidates.units:
if (cand.source, cand.target) in known:
continue
source = cand.source
if comparer.similarity(text, source, self.MIN_SIMILARITY):
self.match_info[source] = {'pos': comparer.match_info[source]['pos']}
matches.append(cand)
known.add((cand.source, cand.target))
return matches
# utility functions used by virtaal and tmserver to convert matching units into easily marshallable dictionaries
def unit2dict(unit):
"""converts a pounit to a simple dict structure for use over the web"""
return {"source": unit.source, "target": unit.target,
"quality": _parse_quality(unit.getnotes()), "context": unit.getcontext()}
def _parse_quality(comment):
"""extracts match quality from po comments"""
quality = re.search('([0-9]+)%', comment)
if quality:
return quality.group(1)
```
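
A minimal usage sketch of the matchers above. The module path `translate.search.match` and the PO entry point `translate.storage.pofile` are assumptions (this file's header is not shown here); everything else uses only classes and methods defined above.

```python
# Hedged usage sketch: build a terminology matcher over a tiny PO store and
# query it. The import paths below are assumed, not confirmed by this document.
from translate.storage import pofile
from translate.search import match

posource = '''msgid "apply"
msgstr "toepas"

msgid "category"
msgstr "kategorie"
'''
store = pofile.pofile.parsestring(posource)
tmatcher = match.terminologymatcher(store)
for unit in tmatcher.matches("Apply the new category"):
    print unit.source, "->", unit.target
```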
#### File: translate/storage/base.py
```python
try:
import cPickle as pickle
except:
import pickle
from exceptions import NotImplementedError
import translate.i18n
from translate.storage.placeables import StringElem, general, parse as rich_parse
from translate.misc.typecheck import accepts, Self, IsOneOf
from translate.misc.multistring import multistring
def force_override(method, baseclass):
"""Forces derived classes to override method."""
if type(method.im_self) == type(baseclass):
# then this is a classmethod and im_self is the actual class
actualclass = method.im_self
else:
actualclass = method.im_class
if actualclass != baseclass:
raise NotImplementedError(
"%s does not reimplement %s as required by %s" % \
(actualclass.__name__, method.__name__, baseclass.__name__)
)
class ParseError(Exception):
def __init__(self, inner_exc):
self.inner_exc = inner_exc
def __str__(self):
return repr(self.inner_exc)
class TranslationUnit(object):
"""Base class for translation units.
Our concept of a I{translation unit} is influenced heavily by XLIFF:
U{http://www.oasis-open.org/committees/xliff/documents/xliff-specification.htm}
As such, most of the method and variable names borrow from XLIFF terminology.
A translation unit consists of the following:
- A I{source} string. This is the original translatable text.
- A I{target} string. This is the translation of the I{source}.
- Zero or more I{notes} on the unit. Notes would typically be some
comments from a translator on the unit, or some comments originating from
the source code.
- Zero or more I{locations}. Locations indicate where in the original
source code this unit came from.
- Zero or more I{errors}. Some tools (eg. L{pofilter <filters.pofilter>}) can run checks on
translations and produce error messages.
@group Source: *source*
@group Target: *target*
@group Notes: *note*
@group Locations: *location*
@group Errors: *error*
"""
rich_parsers = []
"""A list of functions to use for parsing a string into a rich string tree."""
def __init__(self, source):
"""Constructs a TranslationUnit containing the given source string."""
self.notes = ""
self._store = None
self.source = source
self._target = None
self._rich_source = None
self._rich_target = None
def __eq__(self, other):
"""Compares two TranslationUnits.
@type other: L{TranslationUnit}
@param other: Another L{TranslationUnit}
@rtype: Boolean
@return: Returns True if the supplied TranslationUnit equals this unit.
"""
return self.source == other.source and self.target == other.target
def __str__(self):
"""Converts to a string representation that can be parsed back using L{parsestring()}."""
# no point in pickling store object, so let's hide it for a while.
store = getattr(self, "_store", None)
self._store = None
dump = pickle.dumps(self)
self._store = store
return dump
def rich_to_multistring(cls, elem_list):
"""Convert a "rich" string tree to a C{multistring}:
>>> from translate.storage.placeables.interfaces import X
>>> rich = [StringElem(['foo', X(id='xxx', sub=[' ']), 'bar'])]
>>> TranslationUnit.rich_to_multistring(rich)
multistring(u'foo bar')
"""
return multistring([unicode(elem) for elem in elem_list])
rich_to_multistring = classmethod(rich_to_multistring)
def multistring_to_rich(cls, mulstring):
"""Convert a multistring to a list of "rich" string trees:
>>> target = multistring([u'foo', u'bar', u'baz'])
>>> TranslationUnit.multistring_to_rich(target)
[<StringElem([<StringElem([u'foo'])>])>,
<StringElem([<StringElem([u'bar'])>])>,
<StringElem([<StringElem([u'baz'])>])>]
"""
if isinstance(mulstring, multistring):
return [rich_parse(s, cls.rich_parsers) for s in mulstring.strings]
return [rich_parse(mulstring, cls.rich_parsers)]
def setsource(self, source):
"""Sets the source string to the given value."""
self._rich_source = None
self._source = source
source = property(lambda self: self._source, setsource)
def settarget(self, target):
"""Sets the target string to the given value."""
self._rich_target = None
self._target = target
target = property(lambda self: self._target, settarget)
def _get_rich_source(self):
if self._rich_source is None:
self._rich_source = self.multistring_to_rich(self.source)
return self._rich_source
def _set_rich_source(self, value):
if not hasattr(value, '__iter__'):
raise ValueError('value must be iterable')
if len(value) < 1:
raise ValueError('value must have at least one element.')
if not isinstance(value[0], StringElem):
raise ValueError('value[0] must be of type StringElem.')
self._rich_source = list(value)
self.source = self.rich_to_multistring(value)
rich_source = property(_get_rich_source, _set_rich_source)
""" @see: rich_to_multistring
@see: multistring_to_rich"""
def _get_rich_target(self):
if self._rich_target is None:
self._rich_target = self.multistring_to_rich(self.target)
return self._rich_target
def _set_rich_target(self, value):
if not hasattr(value, '__iter__'):
raise ValueError('value must be iterable')
if len(value) < 1:
raise ValueError('value must have at least one element.')
if not isinstance(value[0], StringElem):
raise ValueError('value[0] must be of type StringElem.')
self._rich_target = list(value)
self.target = self.rich_to_multistring(value)
rich_target = property(_get_rich_target, _set_rich_target)
""" @see: rich_to_multistring
@see: multistring_to_rich"""
def gettargetlen(self):
"""Returns the length of the target string.
@note: Plural forms might be combined.
@rtype: Integer
"""
length = len(self.target or "")
strings = getattr(self.target, "strings", [])
if strings:
length += sum([len(pluralform) for pluralform in strings[1:]])
return length
def getid(self):
"""A unique identifier for this unit.
@rtype: string
@return: an identifier for this unit that is unique in the store
Derived classes should override this in a way that guarantees a unique
identifier for each unit in the store.
"""
return self.source
def setid(self, value):
"""Sets the unique identified for this unit.
only implemented if format allows ids independant from other
unit properties like source or context"""
pass
def getlocations(self):
"""A list of source code locations.
@note: Shouldn't be implemented if the format doesn't support it.
@rtype: List
"""
return []
def addlocation(self, location):
"""Add one location to the list of locations.
@note: Shouldn't be implemented if the format doesn't support it.
"""
pass
def addlocations(self, location):
"""Add a location or a list of locations.
@note: Most classes shouldn't need to implement this,
but should rather implement L{addlocation()}.
@warning: This method might be removed in future.
"""
if isinstance(location, list):
for item in location:
self.addlocation(item)
else:
self.addlocation(location)
def getcontext(self):
"""Get the message context."""
return ""
def setcontext(self, context):
"""Set the message context"""
pass
def getnotes(self, origin=None):
"""Returns all notes about this unit.
It will probably be freeform text or something reasonable that can be
synthesised by the format.
It should not include location comments (see L{getlocations()}).
"""
return getattr(self, "notes", "")
def addnote(self, text, origin=None, position="append"):
"""Adds a note (comment).
@type text: string
@param text: Usually just a sentence or two.
@type origin: string
@param origin: Specifies who/where the comment comes from.
Origin can be one of the following text strings:
- 'translator'
- 'developer', 'programmer', 'source code' (synonyms)
"""
if getattr(self, "notes", None):
self.notes += '\n'+text
else:
self.notes = text
def removenotes(self):
"""Remove all the translator's notes."""
self.notes = u''
def adderror(self, errorname, errortext):
"""Adds an error message to this unit.
@type errorname: string
@param errorname: A single word to id the error.
@type errortext: string
@param errortext: The text describing the error.
"""
pass
def geterrors(self):
"""Get all error messages.
@rtype: Dictionary
"""
return {}
def markreviewneeded(self, needsreview=True, explanation=None):
"""Marks the unit to indicate whether it needs review.
@keyword needsreview: Defaults to True.
@keyword explanation: Adds an optional explanation as a note.
"""
pass
def istranslated(self):
"""Indicates whether this unit is translated.
This should be used rather than deducing it from .target,
to ensure that other classes can implement more functionality
(as XLIFF does).
"""
return bool(self.target) and not self.isfuzzy()
def istranslatable(self):
"""Indicates whether this unit can be translated.
This should be used to distinguish real units for translation from
header, obsolete, binary or other blank units.
"""
return True
def isfuzzy(self):
"""Indicates whether this unit is fuzzy."""
return False
def markfuzzy(self, value=True):
"""Marks the unit as fuzzy or not."""
pass
def isobsolete(self):
"""indicate whether a unit is obsolete"""
return False
def makeobsolete(self):
"""Make a unit obsolete"""
pass
def isheader(self):
"""Indicates whether this unit is a header."""
return False
def isreview(self):
"""Indicates whether this unit needs review."""
return False
def isblank(self):
"""Used to see if this unit has no source or target string.
@note: This is probably used more to find translatable units,
and we might rather want to move in that direction and get rid of this.
"""
return not (self.source or self.target)
def hasplural(self):
"""Tells whether or not this specific unit has plural strings."""
#TODO: Reconsider
return False
def getsourcelanguage(self):
return getattr(self._store, "sourcelanguage", "en")
def gettargetlanguage(self):
return getattr(self._store, "targetlanguage", None)
def merge(self, otherunit, overwrite=False, comments=True, authoritative=False):
"""Do basic format agnostic merging."""
if not self.target or overwrite:
self.rich_target = otherunit.rich_target
def unit_iter(self):
"""Iterator that only returns this unit."""
yield self
def getunits(self):
"""This unit in a list."""
return [self]
def buildfromunit(cls, unit):
"""Build a native unit from a foreign unit, preserving as much
information as possible."""
if type(unit) == cls and hasattr(unit, "copy") and callable(unit.copy):
return unit.copy()
newunit = cls(unit.source)
newunit.target = unit.target
newunit.markfuzzy(unit.isfuzzy())
locations = unit.getlocations()
if locations:
newunit.addlocations(locations)
notes = unit.getnotes()
if notes:
newunit.addnote(notes)
return newunit
buildfromunit = classmethod(buildfromunit)
xid = property(lambda self: None, lambda self, value: None)
rid = property(lambda self: None, lambda self, value: None)
class TranslationStore(object):
"""Base class for stores for multiple translation units of type UnitClass."""
UnitClass = TranslationUnit
"""The class of units that will be instantiated and used by this class"""
Name = "Base translation store"
"""The human usable name of this store type"""
Mimetypes = None
"""A list of MIME types associated with this store type"""
Extensions = None
"""A list of file extentions associated with this store type"""
_binary = False
"""Indicates whether a file should be accessed as a binary file."""
suggestions_in_format = False
"""Indicates if format can store suggestions and alternative translation for a unit"""
def __init__(self, unitclass=None):
"""Constructs a blank TranslationStore."""
self.units = []
self.sourcelanguage = None
self.targetlanguage = None
if unitclass:
self.UnitClass = unitclass
super(TranslationStore, self).__init__()
def getsourcelanguage(self):
"""Gets the source language for this store"""
return self.sourcelanguage
def setsourcelanguage(self, sourcelanguage):
"""Sets the source language for this store"""
self.sourcelanguage = sourcelanguage
def gettargetlanguage(self):
"""Gets the target language for this store"""
return self.targetlanguage
def settargetlanguage(self, targetlanguage):
"""Sets the target language for this store"""
self.targetlanguage = targetlanguage
def unit_iter(self):
"""Iterator over all the units in this store."""
for unit in self.units:
yield unit
def getunits(self):
"""Return a list of all units in this store."""
return [unit for unit in self.unit_iter()]
def addunit(self, unit):
"""Appends the given unit to the object's list of units.
This method should always be used rather than trying to modify the
list manually.
@type unit: L{TranslationUnit}
@param unit: The unit that will be added.
"""
unit._store = self
self.units.append(unit)
def addsourceunit(self, source):
"""Adds and returns a new unit with the given source string.
@rtype: L{TranslationUnit}
"""
unit = self.UnitClass(source)
self.addunit(unit)
return unit
def findid(self, id):
"""find unit with matching id by checking id_index"""
self.require_index()
return self.id_index.get(id, None)
def findunit(self, source):
"""Finds the unit with the given source string.
@rtype: L{TranslationUnit} or None
"""
if len(getattr(self, "sourceindex", [])):
if source in self.sourceindex:
return self.sourceindex[source][0]
else:
for unit in self.units:
if unit.source == source:
return unit
return None
def findunits(self, source):
"""Finds the units with the given source string.
@rtype: L{TranslationUnit} or None
"""
if len(getattr(self, "sourceindex", [])):
if source in self.sourceindex:
return self.sourceindex[source]
else:
#FIXME: maybe we should generate index here instead since
#we'll scan all units anyway
result = []
for unit in self.units:
if unit.source == source:
result.append(unit)
return result
return None
def translate(self, source):
"""Returns the translated string for a given source string.
@rtype: String or None
"""
unit = self.findunit(source)
if unit and unit.target:
return unit.target
else:
return None
def remove_unit_from_index(self, unit):
"""Remove a unit from source and locaton indexes"""
def remove_unit(source):
if source in self.sourceindex:
try:
self.sourceindex[source].remove(unit)
if len(self.sourceindex[source]) == 0:
del(self.sourceindex[source])
except ValueError:
pass
if unit.hasplural():
for source in unit.source.strings:
remove_unit(source)
else:
remove_unit(unit.source)
for location in unit.getlocations():
if location in self.locationindex and self.locationindex[location] is not None \
and self.locationindex[location] == unit:
del(self.locationindex[location])
def add_unit_to_index(self, unit):
"""Add a unit to source and location idexes"""
self.id_index[unit.getid()] = unit
def insert_unit(source):
if not source in self.sourceindex:
self.sourceindex[source] = [unit]
else:
self.sourceindex[source].append(unit)
if unit.hasplural():
for source in unit.source.strings:
insert_unit(source)
else:
insert_unit(unit.source)
for location in unit.getlocations():
if location in self.locationindex:
# if locations aren't unique, don't use them
#FIXME: maybe better store a list of units like sourceindex
self.locationindex[location] = None
else:
self.locationindex[location] = unit
def makeindex(self):
"""Indexes the items in this store. At least .sourceindex should be usefull."""
self.locationindex = {}
self.sourceindex = {}
self.id_index = {}
for index, unit in enumerate(self.units):
unit.index = index
if unit.istranslatable():
self.add_unit_to_index(unit)
def require_index(self):
"""make sure source index exists"""
if not hasattr(self, "sourceindex"):
self.makeindex()
def getids(self):
"""return a list of unit ids"""
self.require_index()
return self.id_index.keys()
def __getstate__(self):
odict = self.__dict__.copy()
odict['fileobj'] = None
return odict
def __setstate__(self, dict):
self.__dict__.update(dict)
if getattr(self, "filename", False):
self.fileobj = open(self.filename)
def __str__(self):
"""Converts to a string representation that can be parsed back using L{parsestring()}."""
# We can't pickle fileobj if it is there, so let's hide it for a while.
fileobj = getattr(self, "fileobj", None)
self.fileobj = None
dump = pickle.dumps(self)
self.fileobj = fileobj
return dump
def isempty(self):
"""Returns True if the object doesn't contain any translation units."""
if len(self.units) == 0:
return True
for unit in self.units:
if unit.istranslatable():
return False
return True
def _assignname(self):
"""Tries to work out what the name of the filesystem file is and
assigns it to .filename."""
fileobj = getattr(self, "fileobj", None)
if fileobj:
filename = getattr(fileobj, "name", getattr(fileobj, "filename", None))
if filename:
self.filename = filename
def parsestring(cls, storestring):
"""Converts the string representation back to an object."""
newstore = cls()
if storestring:
newstore.parse(storestring)
return newstore
parsestring = classmethod(parsestring)
def parse(self, data):
"""parser to process the given source string"""
self.units = pickle.loads(data).units
def savefile(self, storefile):
"""Writes the string representation to the given file (or filename)."""
if isinstance(storefile, basestring):
mode = 'w'
if self._binary:
mode = 'wb'
storefile = open(storefile, mode)
self.fileobj = storefile
self._assignname()
storestring = str(self)
storefile.write(storestring)
storefile.close()
def save(self):
"""Save to the file that data was originally read from, if available."""
fileobj = getattr(self, "fileobj", None)
mode = 'w'
if self._binary:
mode = 'wb'
if not fileobj:
filename = getattr(self, "filename", None)
if filename:
fileobj = file(filename, mode)
else:
fileobj.close()
filename = getattr(fileobj, "name", getattr(fileobj, "filename", None))
if not filename:
raise ValueError("No file or filename to save to")
fileobj = fileobj.__class__(filename, mode)
self.savefile(fileobj)
def parsefile(cls, storefile):
"""Reads the given file (or opens the given filename) and parses back to an object."""
mode = 'r'
if cls._binary:
mode = 'rb'
if isinstance(storefile, basestring):
storefile = open(storefile, mode)
mode = getattr(storefile, "mode", mode)
#For some reason GzipFile returns 1, so we have to test for that here
if mode == 1 or "r" in mode:
storestring = storefile.read()
storefile.close()
else:
storestring = ""
newstore = cls.parsestring(storestring)
newstore.fileobj = storefile
newstore._assignname()
return newstore
parsefile = classmethod(parsefile)
```
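
The store and unit classes above are designed to be subclassed by concrete formats, but the base implementations are already usable for quick in-memory work. A minimal sketch using only methods defined in this file:

```python
# Minimal in-memory sketch of the base API defined above.
from translate.storage import base

store = base.TranslationStore()
unit = store.addsourceunit("Hello")
unit.target = "Hallo"
unit.addnote("greeting", origin="developer")

assert store.findunit("Hello") is unit   # linear scan; no index built yet
assert store.translate("Hello") == "Hallo"
assert not store.isempty()
print unit.getnotes()                    # greeting
```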
#### File: translate/storage/odf_shared.py
```python
def define_tables():
# Copied from git commit <PASSWORD> from
# git://hforge.org/itools.git
config_uri = 'urn:oasis:names:tc:opendocument:xmlns:config:1.0'
dc_uri = 'http://purl.org/dc/elements/1.1/'
form_uri = 'urn:oasis:names:tc:opendocument:xmlns:form:1.0'
meta_uri = 'urn:oasis:names:tc:opendocument:xmlns:meta:1.0'
number_uri = 'urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0'
office_uri = 'urn:oasis:names:tc:opendocument:xmlns:office:1.0'
presentation_uri = 'urn:oasis:names:tc:opendocument:xmlns:presentation:1.0'
text_uri = 'urn:oasis:names:tc:opendocument:xmlns:text:1.0'
svg_uri = 'urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0'
inline_elements = [
(text_uri, 'page-count'),
(text_uri, 'page-number'),
(text_uri, 'a'),
(text_uri, 'line-break'),
(text_uri, 'ruby-base'),
(text_uri, 's'),
(text_uri, 'span'),
(text_uri, 'tab')]
no_translate_content_elements = [
# Config
(config_uri, 'config-item'),
# Dublin core
(dc_uri, 'creator'),
(dc_uri, 'date'),
#(dc_uri, 'description'),
(dc_uri, 'language'),
#(dc_uri, 'subject'),
#(dc_uri, 'title'),
# Form
(form_uri, 'item'),
(form_uri, 'option'),
# Meta
(meta_uri, 'creation-date'),
(meta_uri, 'date-string'),
(meta_uri, 'editing-cycles'),
(meta_uri, 'editing-duration'),
(meta_uri, 'generator'),
(meta_uri, 'initial-creator'),
#(meta_uri, 'keyword'),
(meta_uri, 'printed-by'),
(meta_uri, 'print-date'),
(meta_uri, 'user-defined'),
# Number
(number_uri, 'currency-symbol'),
(number_uri, 'embedded-text'),
(number_uri, 'text'),
# Office
(office_uri, 'binary-data'),
# Presentation
(presentation_uri, 'date-time-decl'),
#(presentation_uri, 'footer-decl'),
#(presentation_uri, 'header-decl'),
# Text
(text_uri, 'author-initials'),
(text_uri, 'author-name'),
# XXX (text_uri, 'bibliography-mark'),
(text_uri, 'bookmark-ref'),
#(text_uri, 'chapter'),
(text_uri, 'character-count'),
#(text_uri, 'conditional-text'),
(text_uri, 'creation-date'),
(text_uri, 'creation-time'),
(text_uri, 'creator'),
(text_uri, 'date'),
(text_uri, 'dde-connection'),
#(text_uri, 'description'),
(text_uri, 'editing-cycles'),
(text_uri, 'editing-duration'),
(text_uri, 'expression'),
(text_uri, 'file-name'),
#(text_uri, 'hidden-paragraph'),
#(text_uri, 'hidden-text'),
(text_uri, 'image-count'),
#(text_uri, 'index-entry-span'),
(text_uri, 'index-title-template'),
(text_uri, 'initial-creator'),
#(text_uri, 'keywords'),
(text_uri, 'linenumbering-separator'),
(text_uri, 'measure'),
(text_uri, 'modification-date'),
(text_uri, 'modification-time'),
#(text_uri, 'note-citation'),
#(text_uri, 'note-continuation-notice-backward'),
#(text_uri, 'note-continuation-notice-forward'),
(text_uri, 'note-ref'),
(text_uri, 'number'),
(text_uri, 'object-count'),
(text_uri, 'page-continuation'),
(text_uri, 'page-count'),
(text_uri, 'page-number'),
(text_uri, 'page-variable-get'),
(text_uri, 'page-variable-set'),
(text_uri, 'paragraph-count'),
#(text_uri, 'placeholder'),
(text_uri, 'print-date'),
(text_uri, 'print-time'),
(text_uri, 'printed-by'),
(text_uri, 'reference-ref'),
#(text_uri, 'ruby-text'),
(text_uri, 'script'),
(text_uri, 'sender-city'),
(text_uri, 'sender-company'),
(text_uri, 'sender-country'),
(text_uri, 'sender-email'),
(text_uri, 'sender-fax'),
(text_uri, 'sender-firstname'),
(text_uri, 'sender-initials'),
(text_uri, 'sender-lastname'),
(text_uri, 'sender-phone-private'),
(text_uri, 'sender-phone-work'),
#(text_uri, 'sender-position'),
(text_uri, 'sender-postal-code'),
(text_uri, 'sender-state-or-province'),
(text_uri, 'sender-street'),
#(text_uri, 'sender-title'),
(text_uri, 'sequence'),
(text_uri, 'sequence-ref'),
(text_uri, 'sheet-name'),
#(text_uri, 'subject'),
(text_uri, 'table-count'),
(text_uri, 'table-formula'),
(text_uri, 'template-name'),
(text_uri, 'text-input'),
(text_uri, 'time'),
#(text_uri, 'title'),
(text_uri, 'user-defined'),
(text_uri, 'user-field-get'),
(text_uri, 'user-field-input'),
(text_uri, 'variable-get'),
(text_uri, 'variable-input'),
(text_uri, 'variable-set'),
(text_uri, 'word-count'),
# SVG
#(svg_uri, 'title'),
#(svg_uri, 'desc')
# From translate
(text_uri, 'tracked-changes')
]
globals()['inline_elements'] = inline_elements
globals()['no_translate_content_elements'] = no_translate_content_elements
try:
from itools.odf.schema import inline_elements
from itools.odf.schema import no_translate_content_elements
except:
define_tables()
```
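
Whichever source the tables come from (itools or the fallback in `define_tables()`), consumers treat them as sequences of (namespace URI, tag) pairs. An illustrative check, assuming the fallback tables above are in effect (itools not installed):

```python
# Illustrative membership checks against the fallback tables defined above.
from translate.storage import odf_shared

text_uri = 'urn:oasis:names:tc:opendocument:xmlns:text:1.0'
print (text_uri, 'span') in odf_shared.inline_elements                      # True
print (text_uri, 'page-count') in odf_shared.no_translate_content_elements  # True
```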
#### File: translate/storage/php.py
```python
from translate.storage import base
import re
def phpencode(text, quotechar="'"):
"""convert Python string to PHP escaping
The encoding is implemented for
U{'single quote'<http://www.php.net/manual/en/language.types.string.php#language.types.string.syntax.single>}
and U{"double quote"<http://www.php.net/manual/en/language.types.string.php#language.types.string.syntax.double>}
syntax.
heredoc and nowdoc are not implemented and it is not certain whether this would
ever be needed for PHP localisation needs.
"""
if not text:
return text
if quotechar == '"':
# \n may be converted to \\n but we don't. This allows us to preserve pretty layout that might have appeared in multiline entries
# we might lose some "blah\nblah" layouts but that's probably not the most frequent use case. See bug 588
escapes = (("\\", "\\\\"), ("\r", "\\r"), ("\t", "\\t"), ("\v", "\\v"), ("\f", "\\f"), ("\\\\$", "\\$"), ('"', '\\"'), ("\\\\", "\\"))
for a, b in escapes:
text = text.replace(a, b)
return text
else:
return text.replace("%s" % quotechar, "\\%s" % quotechar)
def phpdecode(text, quotechar="'"):
"""convert PHP escaped string to a Python string"""
def decode_octal_hex(match):
"""decode Octal \NNN and Hex values"""
if match.groupdict().has_key("octal"):
return match.groupdict()['octal'].decode("string_escape")
elif match.groupdict().has_key("hex"):
return match.groupdict()['hex'].decode("string_escape")
else:
return match.group
if not text:
return text
if quotechar == '"':
# We do not escape \$ as it is used by variables and we can't roundtrip that item.
text = text.replace('\\"', '"').replace("\\\\", "\\")
text = text.replace("\\n", "\n").replace("\\r", "\r").replace("\\t", "\t").replace("\\v", "\v").replace("\\f", "\f")
text = re.sub(r"(?P<octal>\\[0-7]{1,3})", decode_octal_hex, text)
text = re.sub(r"(?P<hex>\\x[0-9A-Fa-f]{1,2})", decode_octal_hex, text)
else:
text = text.replace("\\'", "'").replace("\\\\", "\\")
return text
class phpunit(base.TranslationUnit):
"""a unit of a PHP file i.e. a name and value, and any comments
associated"""
def __init__(self, source=""):
"""construct a blank phpunit"""
self.escape_type = None
super(phpunit, self).__init__(source)
self.name = ""
self.value = ""
self.translation = ""
self._comments = []
self.source = source
def setsource(self, source):
"""Sets the source AND the target to be equal"""
self.value = phpencode(source, self.escape_type)
def getsource(self):
return phpdecode(self.value, self.escape_type)
source = property(getsource, setsource)
def settarget(self, target):
self.translation = phpencode(target, self.escape_type)
def gettarget(self):
return phpdecode(self.translation, self.escape_type)
target = property(gettarget, settarget)
def __str__(self):
"""convert to a string. double check that unicode is handled somehow here"""
source = self.getoutput()
if isinstance(source, unicode):
return source.encode(getattr(self, "encoding", "UTF-8"))
return source
def getoutput(self):
"""convert the unit back into formatted lines for a php file"""
return "".join(self._comments + ["%s='%s';\n" % (self.name, self.translation or self.value)])
def addlocation(self, location):
self.name = location
def getlocations(self):
return [self.name]
def addnote(self, text, origin=None, position="append"):
if origin in ['programmer', 'developer', 'source code', None]:
if position == "append":
self._comments.append(text)
else:
self._comments = [text]
else:
return super(phpunit, self).addnote(text, origin=origin, position=position)
def getnotes(self, origin=None):
if origin in ['programmer', 'developer', 'source code', None]:
return '\n'.join(self._comments)
else:
return super(phpunit, self).getnotes(origin)
def removenotes(self):
self._comments = []
def isblank(self):
"""Returns whether this is a blank element, containing only comments."""
return not (self.name or self.value)
def getid(self):
return self.name
class phpfile(base.TranslationStore):
"""This class represents a PHP file, made up of phpunits"""
UnitClass = phpunit
def __init__(self, inputfile=None, encoding='utf-8'):
"""construct a phpfile, optionally reading in from inputfile"""
super(phpfile, self).__init__(unitclass = self.UnitClass)
self.filename = getattr(inputfile, 'name', '')
self._encoding = encoding
if inputfile is not None:
phpsrc = inputfile.read()
inputfile.close()
self.parse(phpsrc)
def parse(self, phpsrc):
"""Read the source of a PHP file in and include them as units"""
newunit = phpunit()
lastvalue = ""
value = ""
comment = []
invalue = False
incomment = False
valuequote = "" # either ' or "
for line in phpsrc.decode(self._encoding).split("\n"):
commentstartpos = line.find("/*")
commentendpos = line.rfind("*/")
if commentstartpos != -1:
incomment = True
if commentendpos != -1:
newunit.addnote(line[commentstartpos:commentendpos].strip(), "developer")
incomment = False
else:
newunit.addnote(line[commentstartpos:].strip(), "developer")
if commentendpos != -1 and incomment:
newunit.addnote(line[:commentendpos+2].strip(), "developer")
incomment = False
if incomment and commentstartpos == -1:
newunit.addnote(line.strip(), "developer")
continue
equalpos = line.find("=")
hashpos = line.find("#")
if 0 <= hashpos < equalpos:
# Assume that this is a '#' comment line
newunit.addnote(line.strip(), "developer")
continue
if equalpos != -1 and not invalue:
newunit.addlocation(line[:equalpos].strip().replace(" ", ""))
value = line[equalpos+1:].lstrip()[1:]
valuequote = line[equalpos+1:].lstrip()[0]
lastvalue = ""
invalue = True
else:
if invalue:
value = line
colonpos = value.rfind(";")
while colonpos != -1:
if value[colonpos-1] == valuequote:
newunit.value = lastvalue + value[:colonpos-1]
newunit.escape_type = valuequote
lastvalue = ""
invalue = False
if not invalue and colonpos != len(value)-1:
commentinlinepos = value.find("//", colonpos)
if commentinlinepos != -1:
newunit.addnote(value[commentinlinepos+2:].strip(), "developer")
if not invalue:
self.addunit(newunit)
value = ""
newunit = phpunit()
colonpos = value.rfind(";", 0, colonpos)
if invalue:
lastvalue = lastvalue + value + "\n"
def __str__(self):
"""Convert the units back to lines."""
lines = []
for unit in self.units:
lines.append(str(unit))
return "".join(lines)
```
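
A hedged round-trip sketch of the parser above on a small single-quoted PHP language file; `parse()` takes raw bytes in the store's encoding and `str()` serialises the units back out:

```python
# Round-trip sketch for phpfile/phpunit as defined above.
from translate.storage import php

phpsource = """$lang['welcome'] = 'Welcome';
/* The site title */
$lang['title'] = 'My site';
"""
store = php.phpfile()
store.parse(phpsource)
for unit in store.units:
    print unit.getid(), "=>", unit.source, "|", unit.getnotes()
print str(store)
```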
#### File: storage/placeables/base.py
```python
from strelem import StringElem
from interfaces import *
__all__ = ['Bpt', 'Ept', 'Ph', 'It', 'G', 'Bx', 'Ex', 'X', 'Sub', 'to_base_placeables']
# Basic placeable types.
class Bpt(MaskingPlaceable, PairedDelimiter):
has_content = True
class Ept(MaskingPlaceable, PairedDelimiter):
has_content = True
class Ph(MaskingPlaceable):
has_content = True
istranslatable = False
class It(MaskingPlaceable, Delimiter):
has_content = True
class G(ReplacementPlaceable):
has_content = True
class Bx(ReplacementPlaceable, PairedDelimiter):
has_content = False
istranslatable = False
def __init__(self, id=None, xid=None, **kwargs):
# kwargs is ignored
ReplacementPlaceable.__init__(self, id=id, xid=xid, **kwargs)
class Ex(ReplacementPlaceable, PairedDelimiter):
has_content = False
istranslatable = False
def __init__(self, id=None, xid=None, **kwargs):
# kwargs is ignored
ReplacementPlaceable.__init__(self, id=id, xid=xid, **kwargs)
class X(ReplacementPlaceable, Delimiter):
has_content = False
iseditable = False
isfragile = True
istranslatable = False
def __init__(self, id=None, xid=None, **kwargs):
ReplacementPlaceable.__init__(self, id=id, xid=xid, **kwargs)
class Sub(SubflowPlaceable):
has_content = True
def to_base_placeables(tree):
if not isinstance(tree, StringElem):
return tree
base_class = [klass for klass in tree.__class__.__bases__ \
if klass in [Bpt, Ept, Ph, It, G, Bx, Ex, X, Sub]]
if not base_class:
base_class = tree.__class__
else:
base_class = base_class[0]
newtree = base_class()
newtree.id = tree.id
newtree.rid = tree.rid
newtree.xid = tree.xid
newtree.sub = []
for subtree in tree.sub:
newtree.sub.append(to_base_placeables(subtree))
return newtree
```
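
A small sketch of `to_base_placeables()`, mirroring how the XML extraction code later in this document builds `xliff.G`/`xliff.X` placeables (the `translate.storage.placeables.xliff` path is assumed from the imports used in extract.py below):

```python
# Collapse an XLIFF-flavoured placeable tree back to the base classes above.
from translate.storage.placeables import base as placeables_base
from translate.storage.placeables import xliff

tree = xliff.G(id=u"1", sub=[u"bold text"])
basetree = placeables_base.to_base_placeables(tree)
print type(basetree).__module__, type(basetree).__name__   # ...placeables.base G
print unicode(basetree)                                     # bold text
```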
#### File: translate/storage/test_oo.py
```python
from translate.storage import oo
from translate.misc import wStringIO
import warnings
def test_makekey():
"""checks the makekey function for consistency"""
assert oo.makekey(('project', r'path\to\the\sourcefile.src', 'resourcetype', 'GROUP_ID', 'LOCAL_ID', 'platform'), False) == "sourcefile.src#GROUP_ID.LOCAL_ID.resourcetype"
# Test with long_key, i.e. as used in multifile options
assert oo.makekey(('project', r'path\to\the\sourcefile.src', 'resourcetype', 'GROUP_ID', 'LOCAL_ID', 'platform'), True) == "project/path/to/the/sourcefile.src#GROUP_ID.LOCAL_ID.resourcetype"
assert oo.makekey(('project', r'path\to\the\sourcefile.src', 'resourcetype', 'GROUP_ID', '', 'platform'), False) == "sourcefile.src#GROUP_ID.resourcetype"
assert oo.makekey(('project', r'path\to\the\sourcefile.src', 'resourcetype', '', 'LOCAL_ID', 'platform'), False) == "sourcefile.src#LOCAL_ID.resourcetype"
assert oo.makekey(('project', r'path\to\the\sourcefile.src', '', 'GROUP_ID', 'LOCAL_ID', 'platform'), False) == "sourcefile.src#GROUP_ID.LOCAL_ID"
assert oo.makekey(('project', r'path\to\the\sourcefile.src', '', 'GROUP_ID', '', 'platform'), False) == "sourcefile.src#GROUP_ID"
def test_escape_help_text():
"""Check the help text escape function"""
assert oo.escape_help_text("If we don't know <tag> we don't <br> escape it") == "If we don't know <tag> we don't <br> escape it"
# Bug 694
assert oo.escape_help_text("A szó: <nyelv>") == "A szó: <nyelv>"
assert oo.escape_help_text("""...következő: "<kiszolgáló> <témakör> <elem>", ahol...""") == """...következő: "<kiszolgáló> <témakör> <elem>", ahol..."""
# See bug 694 comments 8-10 not fully resolved.
assert oo.escape_help_text(r"...törtjel (\) létrehozásához...") == r"...törtjel (\\) létrehozásához..."
class TestOO:
def setup_method(self, method):
warnings.resetwarnings()
def teardown_method(self, method):
warnings.resetwarnings()
def ooparse(self, oosource):
"""helper that parses oo source without requiring files"""
dummyfile = wStringIO.StringIO(oosource)
oofile = oo.oofile(dummyfile)
return oofile
def ooregen(self, oosource):
"""helper that converts oo source to oofile object and back"""
return str(self.ooparse(oosource))
def test_simpleentry(self):
"""checks that a simple oo entry is parsed correctly"""
oosource = r'svx source\dialog\numpages.src 0 string RID_SVXPAGE_NUM_OPTIONS STR_BULLET 0 en-US Character 20050924 09:13:58'
oofile = self.ooparse(oosource)
assert len(oofile.units) == 1
oe = oofile.units[0]
assert oe.languages.keys() == ["en-US"]
ol = oofile.oolines[0]
assert ol.getkey() == ('svx', r'source\dialog\numpages.src', 'string', 'RID_SVXPAGE_NUM_OPTIONS', 'STR_BULLET', '')
assert ol.text == 'Character'
assert str(ol) == oosource
def test_simpleentry_quickhelptest(self):
"""checks that a simple entry with quickhelptext is parsed correctly"""
oosource = r'sd source\ui\dlg\sdobjpal.src 0 imagebutton FLTWIN_SDOBJPALETTE BTN_SYMSIZE 16 en-US - Toggle Symbol Size 20051017 21:40:56'
oofile = self.ooparse(oosource)
assert len(oofile.units) == 1
oe = oofile.units[0]
assert oe.languages.keys() == ["en-US"]
ol = oofile.oolines[0]
assert ol.getkey() == ('sd', r'source\ui\dlg\sdobjpal.src', 'imagebutton', 'FLTWIN_SDOBJPALETTE', 'BTN_SYMSIZE', '')
assert ol.quickhelptext == 'Toggle Symbol Size'
assert str(ol) == oosource
def test_simpleentry_title(self):
"""checks that a simple entry with title text is parsed correctly"""
oosource = r'dbaccess source\ui\dlg\indexdialog.src 0 querybox QUERY_SAVE_CURRENT_INDEX 0 en-US Do you want to save the changes made to the current index? Exit Index Design 20051017 21:40:56'
oofile = self.ooparse(oosource)
assert len(oofile.units) == 1
oe = oofile.units[0]
assert oe.languages.keys() == ["en-US"]
ol = oofile.oolines[0]
assert ol.getkey() == ('dbaccess', r'source\ui\dlg\indexdialog.src', 'querybox', 'QUERY_SAVE_CURRENT_INDEX', '', '')
assert ol.title == 'Exit Index Design'
assert str(ol) == oosource
def test_blankline(self):
"""checks that a blank line is parsed correctly"""
oosource = '\n'
warnings.simplefilter("error")
oofile = self.ooparse(oosource)
assert len(oofile.units) == 0
def test_fieldlength(self):
"""checks that we process the length field correctly"""
# Since the actual field is 18 characters long and the field width in this example is 16 we're not sure if they even use this!
oosource = r'sd source\ui\dlg\sdobjpal.src 0 imagebutton FLTWIN_SDOBJPALETTE BTN_SYMSIZE 16 en-US - Toggle Symbol Size 20051017 21:40:56'
oofile = self.ooparse(oosource)
assert len(oofile.units) == 1
oe = oofile.units[0]
assert oe.languages.keys() == ["en-US"]
ol = oofile.oolines[0]
assert int(ol.width) == 16
def test_escapes(self):
"""checks that we escape properly"""
oosource = r'svx source\dialog\numpages.src 0 string RID_SVXPAGE_NUM_OPTIONS STR_BULLET 0 en-US size *2 \\langle x \\rangle 20050924 09:13:58'
oofile = self.ooregen(oosource)
assert r'size *2 \\langle x \\rangle' in oofile
```
#### File: translate/storage/test_ts.py
```python
from translate.storage import ts
class TestTS:
def test_construct(self):
tsfile = ts.QtTsParser()
tsfile.addtranslation("ryan", "Bread", "Brood", "Wit", createifmissing=True)
```
#### File: translate/storage/test_zipfile.py
```python
import zlib # implied prerequisite
import zipfile, os, StringIO, tempfile
from test.test_support import TestFailed
from py import test
from translate.misc import zipfileext
BrokenStringIO = StringIO.StringIO
class FixedStringIO(BrokenStringIO):
def truncate(self, size=None):
BrokenStringIO.truncate(self, size)
self.len = len(self.buf)
StringIO.StringIO = FixedStringIO
# these tests won't all pass on zipfile module in Python 2.4
# there are extensions in zipfileext to add the delete method etc
# to test the underlying zipfile module, uncomment the following line:
zipfile.ZipFile = zipfileext.ZipFileExt
class TestZipFiles:
def setup_method(self, method):
self.srcname = "%s-%s%stmp" % (self.__class__.__name__, method.__name__, os.extsep)
self.zipname = "%s-%s%szip" % (self.__class__.__name__, method.__name__, os.extsep)
def teardown_method(self, method):
# Remove temporary files
if os.path.isfile(self.srcname):
os.unlink(self.srcname)
if os.path.isfile(self.zipname):
os.unlink(self.zipname)
def zipTest(self, f, compression, srccontents):
zip = zipfile.ZipFile(f, "w", compression) # Create the ZIP archive
zip.write(self.srcname, "another"+os.extsep+"name")
zip.write(self.srcname, self.srcname)
zip.close()
zip = zipfile.ZipFile(f, "r", compression) # Read the ZIP archive
readData2 = zip.read(self.srcname)
readData1 = zip.read("another"+os.extsep+"name")
zip.close()
if readData1 != srccontents or readData2 != srccontents:
raise TestFailed("Written data doesn't equal read data.")
def deleteTest(self, f, compression, srccontents):
zip = zipfile.ZipFile(f, "w", compression) # Create the ZIP archive
othername = "another"+os.extsep+"name"
finalname = "adifferent"+os.extsep+"name"
leftname, deletenames = othername, [self.srcname, finalname]
zip.write(self.srcname, self.srcname)
zip.write(self.srcname, othername)
zip.write(self.srcname, finalname)
zip.close()
zip = zipfile.ZipFile(f, "a", compression) # Modify the ZIP archive
try:
for deletename in deletenames:
zip.delete(deletename)
finally:
zip.close()
zip = zipfile.ZipFile(f, "r", compression) # Read the ZIP archive
try:
testfailed = zip.testzip()
readData = zip.read(leftname)
finally:
zip.close()
assert not testfailed
assert readData == srccontents
def test_create_zip(self):
fp = open(self.srcname, "wb") # Make a source file with some lines
for i in range(0, 1000):
fp.write("Test of zipfile line %d.\n" % i)
fp.close()
fp = open(self.srcname, "rb")
writtenData = fp.read()
fp.close()
for file in (self.zipname, tempfile.TemporaryFile(), StringIO.StringIO()):
self.zipTest(file, zipfile.ZIP_STORED, writtenData)
for file in (self.zipname, tempfile.TemporaryFile(), StringIO.StringIO()):
self.zipTest(file, zipfile.ZIP_DEFLATED, writtenData)
def test_delete_member(self):
fp = open(self.srcname, "wb") # Make a source file with some lines
for i in range(0, 1000):
fp.write("Test of zipfile line %d.\n" % i)
fp.close()
fp = open(self.srcname, "rb")
writtenData = fp.read()
fp.close()
self.deleteTest(self.zipname, zipfile.ZIP_STORED, writtenData)
self.deleteTest(tempfile.TemporaryFile(), zipfile.ZIP_STORED, writtenData)
self.deleteTest(StringIO.StringIO(), zipfile.ZIP_STORED, writtenData)
self.deleteTest(self.zipname, zipfile.ZIP_DEFLATED, writtenData)
self.deleteTest(tempfile.TemporaryFile(), zipfile.ZIP_DEFLATED, writtenData)
self.deleteTest(StringIO.StringIO(), zipfile.ZIP_DEFLATED, writtenData)
def test_handles_error(self):
"""This test checks that the ZipFile constructor closes the file object"""
"""it opens if there's an error in the file. If it doesn't, the traceback"""
"""holds a reference to the ZipFile object and, indirectly, the file object."""
"""On Windows, this causes the os.unlink() call to fail because the"""
"""underlying file is still open. This is SF bug #412214."""
fp = open(self.srcname, "w")
fp.write("this is not a legal zip file\n")
fp.close()
assert test.raises(zipfile.BadZipfile, zipfile.ZipFile, self.srcname)
os.unlink(self.srcname)
def test_finalize(self):
"""make sure we don't raise an AttributeError when a partially-constructed"""
"""ZipFile instance is finalized; this tests for regression on SF tracker"""
"""bug #403871."""
assert test.raises(IOError, zipfile.ZipFile, self.srcname)
# The bug we're testing for caused an AttributeError to be raised
# when a ZipFile instance was created for a file that did not
# exist; the .fp member was not initialized but was needed by the
# __del__() method. Since the AttributeError is in the __del__(),
# it is ignored, but the user should be sufficiently annoyed by
# the message on the output that regression will be noticed
# quickly.
def test_fail_read_closed(self):
# Verify that testzip() doesn't swallow inappropriate exceptions.
data = StringIO.StringIO()
zipf = zipfile.ZipFile(data, mode="w")
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
zipf.close()
zipf = zipfile.ZipFile(data, mode="r")
zipf.close()
# This is correct; calling .read on a closed ZipFile should throw
# a RuntimeError, and so should calling .testzip. An earlier
# version of .testzip would swallow this exception (and any other)
# and report that the first file in the archive was corrupt.
assert test.raises(RuntimeError, zipf.testzip)
del data, zipf
```
#### File: translate/storage/wordfast.py
```python
import csv
import sys
import time
from translate.storage import base
WF_TIMEFORMAT = "%Y%m%d~%H%M%S"
"""Time format used by Wordfast"""
WF_FIELDNAMES_HEADER = ["date", "userlist", "tucount", "src-lang", "version", "target-lang", "license", "attr1list", "attr2list", "attr3list", "attr4list", "attr5list"]
"""Field names for the Wordfast header"""
WF_FIELDNAMES = ["date", "user", "reuse", "src-lang", "source", "target-lang", "target", "attr1", "attr2", "attr3", "attr4"]
"""Field names for a Wordfast TU"""
WF_FIELDNAMES_HEADER_DEFAULTS = {
"date": "%19000101~121212",
"userlist": "%User ID,TT,TT Translate-Toolkit",
"tucount": "%TU=00000001",
"src-lang": "%EN-US",
"version": "%Wordfast TM v.5.51w9/00",
"target-lang": "",
"license": "%---00000001",
"attr1list": "",
"attr2list": "",
"attr3list": "",
"attr4list": "" }
"""Default or minimum header entries for a Wordfast file"""
# TODO Needs validation. The following need to be checked against a WF TM file to ensure
# that the correct Unicode values have been chosen for the characters. For now these look
# correct and have been taken from Windows CP1252 and Macintosh code points found for
# the respective character sets on Linux.
WF_ESCAPE_MAP = (
("&'26;", u"\u0026"), # & - Ampersand (must be first to prevent escaping of escapes)
("&'82;", u"\u201A"), # ‚ - Single low-9 quotation mark
("&'85;", u"\u2026"), # … - Elippsis
("&'91;", u"\u2018"), # ‘ - left single quotation mark
("&'92;", u"\u2019"), # ’ - right single quotation mark
("&'93;", u"\u201C"), # “ - left double quotation mark
("&'94;", u"\u201D"), # ” - right double quotation mark
("&'96;", u"\u2013"), # – - en dash (validate)
("&'97;", u"\u2014"), # — - em dash (validate)
("&'99;", u"\u2122"), # ™ - Trade mark
# Windows only
("&'A0;", u"\u00A0"), # - Non breaking space
("&'A9;", u"\u00A9"), # © - Copyright
("&'AE;", u"\u00AE"), # ® - Registered
("&'BC;", u"\u00BC"), # ¼
("&'BD;", u"\u00BD"), # ½
("&'BE;", u"\u00BE"), # ¾
# Mac only
("&'A8;", u"\u00AE"), # ® - Registered
("&'AA;", u"\u2122"), # ™ - Trade mark
("&'C7;", u"\u00AB"), # « - Left-pointing double angle quotation mark
("&'C8;", u"\u00BB"), # » - Right-pointing double angle quotation mark
("&'C9;", u"\u2026"), # … - Horizontal Elippsis
("&'CA;", u"\u00A0"), # - Non breaking space
("&'D0;", u"\u2013"), # – - en dash (validate)
("&'D1;", u"\u2014"), # — - em dash (validate)
("&'D2;", u"\u201C"), # “ - left double quotation mark
("&'D3;", u"\u201D"), # ” - right double quotation mark
("&'D4;", u"\u2018"), # ‘ - left single quotation mark
("&'D5;", u"\u2019"), # ’ - right single quotation mark
("&'E2;", u"\u201A"), # ‚ - Single low-9 quotation mark
("&'E3;", u"\u201E"), # „ - Double low-9 quotation mark
# Other markers
#("&'B;", u"\n"), # Soft-break - XXX creates a problem with roundtripping could also be represented by \u2028
)
"""Mapping of Wordfast &'XX; escapes to correct Unicode characters"""
TAB_UTF16 = "\x00\x09"
"""The tab \\t character as it would appear in UTF-16 encoding"""
def _char_to_wf(string):
"""Char -> Wordfast &'XX; escapes
Full roundtripping is not possible because of the escaping of NEWLINE \\n
and TAB \\t"""
# FIXME there is no platform check to ensure that we use Mac encodings when running on a Mac
if string:
for code, char in WF_ESCAPE_MAP:
string = string.replace(char.encode('utf-8'), code)
string = string.replace("\n", "\\n").replace("\t", "\\t")
return string
def _wf_to_char(string):
"""Wordfast &'XX; escapes -> Char"""
if string:
for code, char in WF_ESCAPE_MAP:
string = string.replace(code, char.encode('utf-8'))
string = string.replace("\\n", "\n").replace("\\t", "\t")
return string
class WordfastDialect(csv.Dialect):
"""Describe the properties of a Wordfast generated TAB-delimited file."""
delimiter = "\t"
lineterminator = "\r\n"
quoting = csv.QUOTE_NONE
if sys.version_info < (2, 5, 0):
# We need to define the following items for csv in Python < 2.5
quoting = csv.QUOTE_MINIMAL # Wordfast does not quote anything, since we escape
# \t anyway in _char_to_wf this should not be a problem
doublequote = False
skipinitialspace = False
escapechar = None
quotechar = '"'
csv.register_dialect("wordfast", WordfastDialect)
class WordfastTime(object):
"""Manages time stamps in the Wordfast format of YYYYMMDD~hhmmss"""
def __init__(self, newtime=None):
self._time = None
if not newtime:
self.time = None
elif isinstance(newtime, basestring):
self.timestring = newtime
elif isinstance(newtime, time.struct_time):
self.time = newtime
def get_timestring(self):
"""Get the time in the Wordfast time format"""
if not self._time:
return None
else:
return time.strftime(WF_TIMEFORMAT, self._time)
def set_timestring(self, timestring):
"""Set the time_sturct object using a Wordfast time formated string
@param timestring: A Wordfast time string (YYYMMDD~hhmmss)
@type timestring: String
"""
self._time = time.strptime(timestring, WF_TIMEFORMAT)
timestring = property(get_timestring, set_timestring)
def get_time(self):
"""Get the time_struct object"""
return self._time
def set_time(self, newtime):
"""Set the time_struct object
@param newtime: a new time object
@type newtime: time.time_struct
"""
if newtime and isinstance(newtime, time.struct_time):
self._time = newtime
else:
self._time = None
time = property(get_time, set_time)
def __str__(self):
if not self.timestring:
return ""
else:
return self.timestring
class WordfastHeader(object):
"""A wordfast translation memory header"""
def __init__(self, header=None):
self._header_dict = []
if not header:
self.header = self._create_default_header()
elif isinstance(header, dict):
self.header = header
def _create_default_header(self):
"""Create a default Wordfast header with the date set to the current time"""
defaultheader = WF_FIELDNAMES_HEADER_DEFAULTS
defaultheader['date'] = '%%%s' % WordfastTime(time.localtime()).timestring
return defaultheader
def getheader(self):
"""Get the header dictionary"""
return self._header_dict
def setheader(self, newheader):
self._header_dict = newheader
header = property(getheader, setheader)
def settargetlang(self, newlang):
self._header_dict['target-lang'] = '%%%s' % newlang
targetlang = property(None, settargetlang)
def settucount(self, count):
self._header_dict['tucount'] = '%%TU=%08d' % count
tucount = property(None, settucount)
class WordfastUnit(base.TranslationUnit):
"""A Wordfast translation memory unit"""
def __init__(self, source=None):
self._dict = {}
if source:
self.source = source
super(WordfastUnit, self).__init__(source)
def _update_timestamp(self):
"""Refresh the timestamp for the unit"""
self._dict['date'] = WordfastTime(time.localtime()).timestring
def getdict(self):
"""Get the dictionary of values for a Wordfast line"""
return self._dict
def setdict(self, newdict):
"""Set the dictionary of values for a Wordfast line
@param newdict: a new dictionary with Wordfast line elements
@type newdict: Dict
"""
# TODO First check that the values are OK
self._dict = newdict
dict = property(getdict, setdict)
def _get_source_or_target(self, key):
if self._dict.get(key, None) is None:
return None
elif self._dict[key]:
return _wf_to_char(self._dict[key]).decode('utf-8')
else:
return ""
def _set_source_or_target(self, key, newvalue):
if newvalue is None:
self._dict[key] = None
if isinstance(newvalue, unicode):
newvalue = newvalue.encode('utf-8')
newvalue = _char_to_wf(newvalue)
if not key in self._dict or newvalue != self._dict[key]:
self._dict[key] = newvalue
self._update_timestamp()
def getsource(self):
return self._get_source_or_target('source')
def setsource(self, newsource):
return self._set_source_or_target('source', newsource)
source = property(getsource, setsource)
def gettarget(self):
return self._get_source_or_target('target')
def settarget(self, newtarget):
return self._set_source_or_target('target', newtarget)
target = property(gettarget, settarget)
def settargetlang(self, newlang):
self._dict['target-lang'] = newlang
targetlang = property(None, settargetlang)
def __str__(self):
return str(self._dict)
def istranslated(self):
if not self._dict.get('source', None):
return False
return bool(self._dict.get('target', None))
class WordfastTMFile(base.TranslationStore):
"""A Wordfast translation memory file"""
Name = _("Wordfast Translation Memory")
Mimetypes = ["application/x-wordfast"]
Extensions = ["txt"]
def __init__(self, inputfile=None, unitclass=WordfastUnit):
"""construct a Wordfast TM, optionally reading in from inputfile."""
self.UnitClass = unitclass
base.TranslationStore.__init__(self, unitclass=unitclass)
self.filename = ''
self.header = WordfastHeader()
self._encoding = 'iso-8859-1'
if inputfile is not None:
self.parse(inputfile)
def parse(self, input):
"""parsese the given file or file source string"""
if hasattr(input, 'name'):
self.filename = input.name
elif not getattr(self, 'filename', ''):
self.filename = ''
if hasattr(input, "read"):
tmsrc = input.read()
input.close()
input = tmsrc
if TAB_UTF16 in input.split("\n")[0]:
self._encoding = 'utf-16'
else:
self._encoding = 'iso-8859-1'
try:
input = input.decode(self._encoding).encode('utf-8')
except:
raise ValueError("Wordfast files are either UTF-16 (UCS2) or ISO-8859-1 encoded")
for header in csv.DictReader(input.split("\n")[:1], fieldnames=WF_FIELDNAMES_HEADER, dialect="wordfast"):
self.header = WordfastHeader(header)
lines = csv.DictReader(input.split("\n")[1:], fieldnames=WF_FIELDNAMES, dialect="wordfast")
for line in lines:
newunit = WordfastUnit()
newunit.dict = line
self.addunit(newunit)
def __str__(self):
output = csv.StringIO()
header_output = csv.StringIO()
writer = csv.DictWriter(output, fieldnames=WF_FIELDNAMES, dialect="wordfast")
unit_count = 0
for unit in self.units:
if unit.istranslated():
unit_count += 1
writer.writerow(unit.dict)
if unit_count == 0:
return ""
output.reset()
self.header.tucount = unit_count
outheader = csv.DictWriter(header_output, fieldnames=WF_FIELDNAMES_HEADER, dialect="wordfast")
outheader.writerow(self.header.header)
header_output.reset()
decoded = "".join(header_output.readlines() + output.readlines()).decode('utf-8')
try:
return decoded.encode(self._encoding)
except UnicodeEncodeError:
return decoded.encode('utf-16')
```
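
Two of the helpers above in action; a hedged sketch of the timestamp wrapper and the &'XX; escape round trip (subject to the newline/tab caveat noted in `_char_to_wf`):

```python
# Sketch: WordfastTime formatting and Wordfast escape round-tripping.
import time
from translate.storage import wordfast

stamp = wordfast.WordfastTime(time.strptime("20100131~235959", wordfast.WF_TIMEFORMAT))
print stamp.timestring                                                # 20100131~235959

escaped = wordfast._char_to_wf(u"caf\u2026".encode("utf-8"))
print escaped                                                         # caf&'85;
print wordfast._wf_to_char(escaped).decode("utf-8") == u"caf\u2026"   # True
```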
#### File: storage/xml_extract/extract.py
```python
from lxml import etree
from translate.storage import base
from translate.misc.typecheck import accepts, Self, IsCallable, IsOneOf, Any, Class
from translate.misc.typecheck.typeclasses import Number
from translate.misc.contextlib import contextmanager, nested
from translate.misc.context import with_
from translate.storage.xml_extract import xpath_breadcrumb
from translate.storage.xml_extract import misc
from translate.storage.placeables import xliff, StringElem
def Nullable(t):
return IsOneOf(t, type(None))
TranslatableClass = Class('Translatable')
class Translatable(object):
"""A node corresponds to a translatable element. A node may
have children, which correspond to placeables."""
@accepts(Self(), unicode, unicode, etree._Element, [IsOneOf(TranslatableClass, unicode)])
def __init__(self, placeable_name, xpath, dom_node, source):
self.placeable_name = placeable_name
self.source = source
self.xpath = xpath
self.is_inline = False
self.dom_node = dom_node
def _get_placeables(self):
return [placeable for placeable in self.source if isinstance(placeable, Translatable)]
placeables = property(_get_placeables)
@accepts(IsCallable(), Translatable, state=[Any()])
def reduce_unit_tree(f, unit_node, *state):
return misc.reduce_tree(f, unit_node, unit_node, lambda unit_node: unit_node.placeables, *state)
class ParseState(object):
"""Maintain constants and variables used during the walking of a
DOM tree (via the function apply)."""
def __init__(self, no_translate_content_elements, inline_elements = {}, nsmap = {}):
self.no_translate_content_elements = no_translate_content_elements
self.inline_elements = inline_elements
self.is_inline = False
self.xpath_breadcrumb = xpath_breadcrumb.XPathBreadcrumb()
self.placeable_name = u"<top-level>"
self.nsmap = nsmap
@accepts(etree._Element, ParseState)
def _process_placeable(dom_node, state):
"""Run find_translatable_dom_nodes on the current dom_node"""
placeable = find_translatable_dom_nodes(dom_node, state)
# This happens if there were no recognized child tags and thus
# no translatable is returned. Make a placeable with the name
# "placeable"
if len(placeable) == 0:
return Translatable(u"placeable", state.xpath_breadcrumb.xpath, dom_node, [])
# The ideal situation: we got exactly one translatable back
# when processing this tree.
elif len(placeable) == 1:
return placeable[0]
else:
raise Exception("BUG: find_translatable_dom_nodes should never return more than a single translatable")
@accepts(etree._Element, ParseState)
def _process_placeables(dom_node, state):
"""Return a list of placeables and list with
alternating string-placeable objects. The former is
useful for directly working with placeables and the latter
is what will be used to build the final translatable string."""
source = []
for child in dom_node:
source.extend([_process_placeable(child, state), unicode(child.tail or u"")])
return source
@accepts(etree._Element, ParseState)
def _process_translatable(dom_node, state):
source = [unicode(dom_node.text or u"")] + _process_placeables(dom_node, state)
translatable = Translatable(state.placeable_name, state.xpath_breadcrumb.xpath, dom_node, source)
translatable.is_inline = state.is_inline
return [translatable]
@accepts(etree._Element, ParseState)
def _process_children(dom_node, state):
_namespace, tag = misc.parse_tag(dom_node.tag)
children = [find_translatable_dom_nodes(child, state) for child in dom_node]
# Flatten a list of lists into a list of elements
children = [child for child_list in children for child in child_list]
if len(children) > 1:
intermediate_translatable = Translatable(tag, state.xpath_breadcrumb.xpath, dom_node, children)
return [intermediate_translatable]
else:
return children
def compact_tag(nsmap, namespace, tag):
if namespace in nsmap:
return u'%s:%s' % (nsmap[namespace], tag)
else:
return u'{%s}%s' % (namespace, tag)
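# Illustration (assumed values): with nsmap {'urn:x': 't'}, compact_tag(nsmap, 'urn:x', 'p')
# gives u't:p'; for a namespace missing from the map it falls back to u'{urn:x}p'.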
@accepts(etree._Element, ParseState)
def find_translatable_dom_nodes(dom_node, state):
# For now, we only want to deal with XML elements.
# And we want to avoid processing instructions, which
# are XML elements (in the inheritance hierarchy).
if not isinstance(dom_node, etree._Element) or \
isinstance(dom_node, etree._ProcessingInstruction):
return []
namespace, tag = misc.parse_tag(dom_node.tag)
@contextmanager
def xpath_set():
state.xpath_breadcrumb.start_tag(compact_tag(state.nsmap, namespace, tag))
yield state.xpath_breadcrumb
state.xpath_breadcrumb.end_tag()
@contextmanager
def placeable_set():
old_placeable_name = state.placeable_name
state.placeable_name = tag
yield state.placeable_name
state.placeable_name = old_placeable_name
@contextmanager
def inline_set():
old_inline = state.is_inline
if (namespace, tag) in state.inline_elements:
state.is_inline = True
else:
state.is_inline = False
yield state.is_inline
state.is_inline = old_inline
def with_block(xpath_breadcrumb, placeable_name, is_inline):
if (namespace, tag) not in state.no_translate_content_elements:
return _process_translatable(dom_node, state)
else:
return _process_children(dom_node, state)
return with_(nested(xpath_set(), placeable_set(), inline_set()), with_block)
class IdMaker(object):
def __init__(self):
self._max_id = 0
self._obj_id_map = {}
def get_id(self, obj):
if not self.has_id(obj):
self._obj_id_map[obj] = self._max_id
self._max_id += 1
return self._obj_id_map[obj]
def has_id(self, obj):
return obj in self._obj_id_map
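# IdMaker hands out consecutive integers per object: the first object asked about
# gets id 0, the next new object gets 1, and repeated calls for the same object
# always return the same id.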
@accepts(Nullable(Translatable), Translatable, IdMaker)
def _to_placeables(parent_translatable, translatable, id_maker):
result = []
for chunk in translatable.source:
if isinstance(chunk, unicode):
result.append(chunk)
else:
id = unicode(id_maker.get_id(chunk))
if chunk.is_inline:
result.append(xliff.G(sub=_to_placeables(parent_translatable, chunk, id_maker), id=id))
else:
result.append(xliff.X(id=id, xid=chunk.xpath))
return result
@accepts(base.TranslationStore, Nullable(Translatable), Translatable, IdMaker)
def _add_translatable_to_store(store, parent_translatable, translatable, id_maker):
"""Construct a new translation unit, set its source and location
information and add it to 'store'.
"""
unit = store.UnitClass(u'')
unit.rich_source = [StringElem(_to_placeables(parent_translatable, translatable, id_maker))]
unit.addlocation(translatable.xpath)
store.addunit(unit)
@accepts(Translatable)
def _contains_translatable_text(translatable):
"""Checks whether translatable contains any chunks of text which contain
more than whitespace.
If not, then there's nothing to translate."""
for chunk in translatable.source:
if isinstance(chunk, unicode):
if chunk.strip() != u"":
return True
return False
@accepts(base.TranslationStore)
def _make_store_adder(store):
"""Return a function which, when called with a Translatable will add
a unit to 'store'. The placeables will represented as strings according
to 'placeable_quoter'."""
id_maker = IdMaker()
def add_to_store(parent_translatable, translatable, rid):
_add_translatable_to_store(store, parent_translatable, translatable, id_maker)
return add_to_store
@accepts([Translatable], IsCallable(), Nullable(Translatable), Number)
def _walk_translatable_tree(translatables, f, parent_translatable, rid):
for translatable in translatables:
if _contains_translatable_text(translatable) and not translatable.is_inline:
rid = rid + 1
new_parent_translatable = translatable
f(parent_translatable, translatable, rid)
else:
new_parent_translatable = parent_translatable
_walk_translatable_tree(translatable.placeables, f, new_parent_translatable, rid)
def reverse_map(a_map):
return dict((value, key) for key, value in a_map.iteritems())
@accepts(lambda obj: hasattr(obj, "read"), base.TranslationStore, ParseState, Nullable(IsCallable()))
def build_store(odf_file, store, parse_state, store_adder = None):
"""Utility function for loading xml_filename"""
store_adder = store_adder or _make_store_adder(store)
tree = etree.parse(odf_file)
root = tree.getroot()
parse_state.nsmap = reverse_map(root.nsmap)
translatables = find_translatable_dom_nodes(root, parse_state)
_walk_translatable_tree(translatables, store_adder, None, 0)
return tree
```
#### File: translate/storage/xpi.py
```python
from __future__ import generators
import zipfile
import os.path
from translate import __version__
import StringIO
import re
# we have some enhancements to zipfile in a file called zipfileext
# hopefully they will be included in a future version of python
from translate.misc import zipfileext
ZipFileBase = zipfileext.ZipFileExt
from translate.misc import wStringIO
# this is a fix to the StringIO in Python 2.3.3
# submitted as patch 951915 on sourceforge
class FixedStringIO(wStringIO.StringIO):
def truncate(self, size=None):
StringIO.StringIO.truncate(self, size)
self.len = len(self.buf)
NamedStringInput = wStringIO.StringIO
NamedStringOutput = wStringIO.StringIO
def _commonprefix(itemlist):
def cp(a, b):
l = min(len(a), len(b))
for n in range(l):
if a[n] != b[n]: return a[:n]
return a[:l]
if itemlist:
return reduce(cp, itemlist)
else:
return ''
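# Rough sketch of what this computes: reduce(cp, ["zu-win", "zu-mac", "zu-unix"])
# keeps only the shared leading characters and yields "zu-".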
def rememberchanged(self, method):
def changed(*args, **kwargs):
self.changed = True
method(*args, **kwargs)
return changed
class CatchPotentialOutput(NamedStringInput, object):
"""catches output if there has been, before closing"""
def __init__(self, contents, onclose):
"""Set up the output stream, and remember a method to call on closing"""
NamedStringInput.__init__(self, contents)
self.onclose = onclose
self.changed = False
s = super(CatchPotentialOutput, self)
self.write = rememberchanged(self, s.write)
self.writelines = rememberchanged(self, s.writelines)
self.truncate = rememberchanged(self, s.truncate)
def close(self):
"""wrap the underlying close method, to pass the value to onclose before it goes"""
if self.changed:
value = self.getvalue()
self.onclose(value)
NamedStringInput.close(self)
def flush(self):
"""zip files call flush, not close, on file-like objects"""
value = self.getvalue()
self.onclose(value)
NamedStringInput.flush(self)
def slam(self):
"""use this method to force the closing of the stream if it isn't closed yet"""
if not self.closed:
self.close()
class ZipFileCatcher(ZipFileBase, object):
"""a ZipFile that calls any methods its instructed to before closing (useful for catching stream output)"""
def __init__(self, *args, **kwargs):
"""initialize the ZipFileCatcher"""
# storing oldclose as attribute, since if close is called from __del__ it has no access to external variables
self.oldclose = super(ZipFileCatcher, self).close
super(ZipFileCatcher, self).__init__(*args, **kwargs)
def addcatcher(self, pendingsave):
"""remember to call the given method before closing"""
if hasattr(self, "pendingsaves"):
if not pendingsave in self.pendingsaves:
self.pendingsaves.append(pendingsave)
else:
self.pendingsaves = [pendingsave]
def close(self):
"""close the stream, remembering to call any addcatcher methods first"""
if hasattr(self, "pendingsaves"):
for pendingsave in self.pendingsaves:
pendingsave()
# if close is called from __del__, it somehow can't see ZipFileCatcher, so we've cached oldclose...
if ZipFileCatcher is None:
self.oldclose()
else:
super(ZipFileCatcher, self).close()
def overwritestr(self, zinfo_or_arcname, bytes):
"""writes the string into the archive, overwriting the file if it exists..."""
if isinstance(zinfo_or_arcname, zipfile.ZipInfo):
filename = zinfo_or_arcname.filename
else:
filename = zinfo_or_arcname
if filename in self.NameToInfo:
self.delete(filename)
self.writestr(zinfo_or_arcname, bytes)
self.writeendrec()
class XpiFile(ZipFileCatcher):
def __init__(self, *args, **kwargs):
"""sets up the xpi file"""
self.includenonloc = kwargs.get("includenonloc", True)
if "includenonloc" in kwargs:
del kwargs["includenonloc"]
if "compression" not in kwargs:
kwargs["compression"] = zipfile.ZIP_DEFLATED
self.locale = kwargs.pop("locale", None)
self.region = kwargs.pop("region", None)
super(XpiFile, self).__init__(*args, **kwargs)
self.jarfiles = {}
self.findlangreg()
self.jarprefixes = self.findjarprefixes()
self.reverseprefixes = dict([
(prefix,jarfilename) for jarfilename, prefix in self.jarprefixes.iteritems() if prefix])
self.reverseprefixes["package/"] = None
def iterjars(self):
"""iterate through the jar files in the xpi as ZipFile objects"""
for filename in self.namelist():
if filename.lower().endswith('.jar'):
if filename not in self.jarfiles:
jarstream = self.openinputstream(None, filename)
jarfile = ZipFileCatcher(jarstream, mode=self.mode)
self.jarfiles[filename] = jarfile
else:
jarfile = self.jarfiles[filename]
yield filename, jarfile
def islocfile(self, filename):
"""returns whether the given file is needed for localization (basically .dtd and .properties)"""
base, ext = os.path.splitext(filename)
return ext in (os.extsep + "dtd", os.extsep + "properties")
def findlangreg(self):
"""finds the common prefix of all the files stored in the jar files"""
dirstructure = {}
locale = self.locale
region = self.region
localematch = re.compile("^[a-z]{2,3}(-[a-zA-Z]{2,3}|)$")
regionmatch = re.compile("^[a-zA-Z]{2,3}$")
# exclude en-mac, en-win, en-unix for seamonkey
osmatch = re.compile("^[a-z]{2,3}-(mac|unix|win)$")
for jarfilename, jarfile in self.iterjars():
jarname = "".join(jarfilename.split('/')[-1:]).replace(".jar", "", 1)
if localematch.match(jarname) and not osmatch.match(jarname):
if locale is None:
locale = jarname
elif locale != jarname:
locale = 0
elif regionmatch.match(jarname):
if region is None:
region = jarname
elif region != jarname:
region = 0
for filename in jarfile.namelist():
if filename.endswith('/'): continue
if not self.islocfile(filename) and not self.includenonloc: continue
parts = filename.split('/')[:-1]
treepoint = dirstructure
for partnum in range(len(parts)):
part = parts[partnum]
if part in treepoint:
treepoint = treepoint[part]
else:
treepoint[part] = {}
treepoint = treepoint[part]
localeentries = {}
if 'locale' in dirstructure:
for dirname in dirstructure['locale']:
localeentries[dirname] = 1
if localematch.match(dirname) and not osmatch.match(dirname):
if locale is None:
locale = dirname
elif locale != dirname:
print "locale dir mismatch - ", dirname, "but locale is", locale, "setting to 0"
locale = 0
elif regionmatch.match(dirname):
if region is None:
region = dirname
elif region != dirname:
region = 0
if locale and locale in localeentries:
del localeentries[locale]
if region and region in localeentries:
del localeentries[region]
if locale and not region:
if "-" in locale:
region = locale.split("-", 1)[1]
else:
region = ""
self.setlangreg(locale, region)
def setlangreg(self, locale, region):
"""set the locale and region of this xpi"""
if locale == 0 or locale is None:
raise ValueError("unable to determine locale")
self.locale = locale
self.region = region
self.dirmap = {}
if self.locale is not None:
self.dirmap[('locale', self.locale)] = ('lang-reg',)
if self.region:
self.dirmap[('locale', self.region)] = ('reg',)
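# Illustrative example (values are assumptions, not taken from a real xpi): with
# locale "en-US" and region "US", mapfilename() below turns
# "locale/en-US/foo.dtd" into "lang-reg/foo.dtd" and "locale/US/bar.dtd" into "reg/bar.dtd".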
def findjarprefixes(self):
"""checks the uniqueness of the jar files contents"""
uniquenames = {}
jarprefixes = {}
for jarfilename, jarfile in self.iterjars():
jarprefixes[jarfilename] = ""
for filename in jarfile.namelist():
if filename.endswith('/'): continue
if filename in uniquenames:
jarprefixes[jarfilename] = True
jarprefixes[uniquenames[filename]] = True
else:
uniquenames[filename] = jarfilename
for jarfilename, hasconflicts in jarprefixes.items():
if hasconflicts:
shortjarfilename = os.path.split(jarfilename)[1]
shortjarfilename = os.path.splitext(shortjarfilename)[0]
jarprefixes[jarfilename] = shortjarfilename+'/'
# this is a clever trick that will e.g. remove zu- from zu-win, zu-mac, zu-unix
commonjarprefix = _commonprefix([prefix for prefix in jarprefixes.itervalues() if prefix])
if commonjarprefix:
for jarfilename, prefix in jarprefixes.items():
if prefix:
jarprefixes[jarfilename] = prefix.replace(commonjarprefix, '', 1)
return jarprefixes
def ziptoospath(self, zippath):
"""converts a zipfile filepath to an os-style filepath"""
return os.path.join(*zippath.split('/'))
def ostozippath(self, ospath):
"""converts an os-style filepath to a zipfile filepath"""
return '/'.join(ospath.split(os.sep))
def mapfilename(self, filename):
"""uses a map to simplify the directory structure"""
parts = tuple(filename.split('/'))
possiblematch = None
for prefix, mapto in self.dirmap.iteritems():
if parts[:len(prefix)] == prefix:
if possiblematch is None or len(possiblematch[0]) < len(prefix):
possiblematch = prefix, mapto
if possiblematch is not None:
prefix, mapto = possiblematch
mapped = mapto + parts[len(prefix):]
return '/'.join(mapped)
return filename
def mapxpifilename(self, filename):
"""uses a map to rename files that occur straight in the xpi"""
if filename.startswith('bin/chrome/') and filename.endswith(".manifest"):
return 'bin/chrome/lang-reg.manifest'
return filename
def reversemapfile(self, filename):
"""unmaps the filename..."""
possiblematch = None
parts = tuple(filename.split('/'))
for prefix, mapto in self.dirmap.iteritems():
if parts[:len(mapto)] == mapto:
if possiblematch is None or len(possiblematch[0]) < len(mapto):
possiblematch = (mapto, prefix)
if possiblematch is None:
return filename
mapto, prefix = possiblematch
reversemapped = prefix + parts[len(mapto):]
return '/'.join(reversemapped)
def reversemapxpifilename(self, filename):
"""uses a map to rename files that occur straight in the xpi"""
if filename == 'bin/chrome/lang-reg.manifest':
if self.locale:
return '/'.join(('bin', 'chrome', self.locale + '.manifest'))
else:
for otherfilename in self.namelist():
if otherfilename.startswith("bin/chrome/") and otherfilename.endswith(".manifest"):
return otherfilename
return filename
def jartoospath(self, jarfilename, filename):
"""converts a filename from within a jarfile to an os-style filepath"""
if jarfilename:
jarprefix = self.jarprefixes[jarfilename]
return self.ziptoospath(jarprefix+self.mapfilename(filename))
else:
return self.ziptoospath(os.path.join("package", self.mapxpifilename(filename)))
def ostojarpath(self, ospath):
"""converts an extracted os-style filepath to a jarfilename and filename"""
zipparts = ospath.split(os.sep)
prefix = zipparts[0] + '/'
if prefix in self.reverseprefixes:
jarfilename = self.reverseprefixes[prefix]
filename = self.reversemapfile('/'.join(zipparts[1:]))
if jarfilename is None:
filename = self.reversemapxpifilename(filename)
return jarfilename, filename
else:
filename = self.ostozippath(ospath)
if filename in self.namelist():
return None, filename
filename = self.reversemapfile('/'.join(zipparts))
possiblejarfilenames = [jarfilename for jarfilename, prefix in self.jarprefixes.iteritems() if not prefix]
for jarfilename in possiblejarfilenames:
jarfile = self.jarfiles[jarfilename]
if filename in jarfile.namelist():
return jarfilename, filename
raise IndexError("ospath not found in xpi file, could not guess location: %r" % ospath)
def jarfileexists(self, jarfilename, filename):
"""checks whether the given file exists inside the xpi"""
if jarfilename is None:
return filename in self.namelist()
else:
jarfile = self.jarfiles[jarfilename]
return filename in jarfile.namelist()
def ospathexists(self, ospath):
"""checks whether the given file exists inside the xpi"""
jarfilename, filename = self.ostojarpath(ospath)
if jarfilename is None:
return filename in self.namelist()
else:
jarfile = self.jarfiles[jarfilename]
return filename in jarfile.namelist()
def openinputstream(self, jarfilename, filename):
"""opens a file (possibly inside a jarfile as a StringIO"""
if jarfilename is None:
contents = self.read(filename)
def onclose(contents):
if contents != self.read(filename):
self.overwritestr(filename, contents)
inputstream = CatchPotentialOutput(contents, onclose)
self.addcatcher(inputstream.slam)
else:
jarfile = self.jarfiles[jarfilename]
contents = jarfile.read(filename)
inputstream = NamedStringInput(contents)
inputstream.name = self.jartoospath(jarfilename, filename)
if hasattr(self.fp, 'name'):
inputstream.name = "%s:%s" % (self.fp.name, inputstream.name)
return inputstream
def openoutputstream(self, jarfilename, filename):
"""opens a file for writing (possibly inside a jarfile as a StringIO"""
if jarfilename is None:
def onclose(contents):
self.overwritestr(filename, contents)
else:
if jarfilename in self.jarfiles:
jarfile = self.jarfiles[jarfilename]
else:
jarstream = self.openoutputstream(None, jarfilename)
jarfile = ZipFileCatcher(jarstream, "w")
self.jarfiles[jarfilename] = jarfile
self.addcatcher(jarstream.slam)
def onclose(contents):
jarfile.overwritestr(filename, contents)
outputstream = wStringIO.CatchStringOutput(onclose)
outputstream.name = "%s %s" % (jarfilename, filename)
if jarfilename is None:
self.addcatcher(outputstream.slam)
else:
jarfile.addcatcher(outputstream.slam)
return outputstream
def close(self):
"""Close the file, and for mode "w" and "a" write the ending records."""
for jarfile in self.jarfiles.itervalues():
jarfile.close()
super(XpiFile, self).close()
def testzip(self):
"""test the xpi zipfile and all enclosed jar files..."""
for jarfile in self.jarfiles.itervalues():
jarfile.testzip()
super(XpiFile, self).testzip()
def restructurejar(self, origjarfilename, newjarfilename, otherxpi, newlang, newregion):
"""Create a new .jar file with the same contents as the given name, but rename directories, write to outputstream"""
jarfile = self.jarfiles[origjarfilename]
origlang = self.locale[:self.locale.find("-")]
if newregion:
newlocale = "%s-%s" % (newlang, newregion)
else:
newlocale = newlang
for filename in jarfile.namelist():
filenameparts = filename.split("/")
for i in range(len(filenameparts)):
part = filenameparts[i]
if part == origlang:
filenameparts[i] = newlang
elif part == self.locale:
filenameparts[i] = newlocale
elif part == self.region:
filenameparts[i] = newregion
newfilename = '/'.join(filenameparts)
fileoutputstream = otherxpi.openoutputstream(newjarfilename, newfilename)
fileinputstream = self.openinputstream(origjarfilename, filename)
fileoutputstream.write(fileinputstream.read())
fileinputstream.close()
fileoutputstream.close()
def clone(self, newfilename, newmode=None, newlang=None, newregion=None):
"""Create a new .xpi file with the same contents as this one..."""
other = XpiFile(newfilename, "w", locale=newlang, region=newregion)
origlang = self.locale[:self.locale.find("-")]
# TODO: check if this language replacement code is still necessary
if newlang is None:
newlang = origlang
if newregion is None:
newregion = self.region
if newregion:
newlocale = "%s-%s" % (newlang, newregion)
else:
newlocale = newlang
for filename in self.namelist():
filenameparts = filename.split('/')
basename = filenameparts[-1]
if basename.startswith(self.locale):
newbasename = basename.replace(self.locale, newlocale)
elif basename.startswith(origlang):
newbasename = basename.replace(origlang, newlang)
elif basename.startswith(self.region):
newbasename = basename.replace(self.region, newregion)
else:
newbasename = basename
if newbasename != basename:
filenameparts[-1] = newbasename
renamefilename = "/".join(filenameparts)
print "cloning", filename, "and renaming to", renamefilename
else:
print "cloning", filename
renamefilename = filename
if filename.lower().endswith(".jar"):
self.restructurejar(filename, renamefilename, other, newlang, newregion)
else:
inputstream = self.openinputstream(None, filename)
outputstream = other.openoutputstream(None, renamefilename)
outputstream.write(inputstream.read())
inputstream.close()
outputstream.close()
other.close()
if newmode is None: newmode = self.mode
if newmode == "w": newmode = "a"
other = XpiFile(newfilename, newmode)
other.setlangreg(newlocale, newregion)
return other
def iterextractnames(self, includenonjars=False, includedirs=False):
"""iterates through all the localization files with the common prefix stripped and a jarfile name added if neccessary"""
if includenonjars:
for filename in self.namelist():
if filename.endswith('/') and not includedirs: continue
if not self.islocfile(filename) and not self.includenonloc: continue
if not filename.lower().endswith(".jar"):
yield self.jartoospath(None, filename)
for jarfilename, jarfile in self.iterjars():
for filename in jarfile.namelist():
if filename.endswith('/'):
if not includedirs: continue
if not self.islocfile(filename) and not self.includenonloc: continue
yield self.jartoospath(jarfilename, filename)
# the following methods are required by translate.convert.ArchiveConvertOptionParser #
def __iter__(self):
"""iterates through all the files. this is the method use by the converters"""
for inputpath in self.iterextractnames(includenonjars=True):
yield inputpath
def __contains__(self, fullpath):
"""returns whether the given pathname exists in the archive"""
try:
jarfilename, filename = self.ostojarpath(fullpath)
except IndexError:
return False
return self.jarfileexists(jarfilename, filename)
def openinputfile(self, fullpath):
"""opens an input file given the full pathname"""
jarfilename, filename = self.ostojarpath(fullpath)
return self.openinputstream(jarfilename, filename)
def openoutputfile(self, fullpath):
"""opens an output file given the full pathname"""
try:
jarfilename, filename = self.ostojarpath(fullpath)
except IndexError:
return None
return self.openoutputstream(jarfilename, filename)
if __name__ == '__main__':
import optparse
optparser = optparse.OptionParser(version="%prog "+__version__.sver)
optparser.usage = "%prog [-l|-x] [options] file.xpi"
optparser.add_option("-l", "--list", help="list files", \
action="store_true", dest="listfiles", default=False)
optparser.add_option("-p", "--prefix", help="show common prefix", \
action="store_true", dest="showprefix", default=False)
optparser.add_option("-x", "--extract", help="extract files", \
action="store_true", dest="extractfiles", default=False)
optparser.add_option("-d", "--extractdir", help="extract into EXTRACTDIR", \
default=".", metavar="EXTRACTDIR")
(options, args) = optparser.parse_args()
if len(args) < 1:
optparser.error("need at least one argument")
xpifile = XpiFile(args[0])
if options.showprefix:
for prefix, mapto in xpifile.dirmap.iteritems():
print "/".join(prefix), "->", "/".join(mapto)
if options.listfiles:
for name in xpifile.iterextractnames(includenonjars=True, includedirs=True):
print name #, xpifile.ostojarpath(name)
if options.extractfiles:
if options.extractdir and not os.path.isdir(options.extractdir):
os.mkdir(options.extractdir)
for name in xpifile.iterextractnames(includenonjars=True, includedirs=False):
abspath = os.path.join(options.extractdir, name)
# check necessary directories exist - this way we don't create empty directories
currentpath = options.extractdir
subparts = os.path.dirname(name).split(os.sep)
for part in subparts:
currentpath = os.path.join(currentpath, part)
if not os.path.isdir(currentpath):
os.mkdir(currentpath)
outputstream = open(abspath, 'w')
jarfilename, filename = xpifile.ostojarpath(name)
inputstream = xpifile.openinputstream(jarfilename, filename)
outputstream.write(inputstream.read())
outputstream.close()
```
#### File: translate/tools/poconflicts.py
```python
from translate.storage import factory
from translate.storage import po
from translate.misc import optrecurse
import sys
import os
class ConflictOptionParser(optrecurse.RecursiveOptionParser):
"""a specialized Option Parser for the conflict tool..."""
def parse_args(self, args=None, values=None):
"""parses the command line options, handling implicit input/output args"""
(options, args) = optrecurse.optparse.OptionParser.parse_args(self, args, values)
# some intelligence as to what reasonable people might give on the command line
if args and not options.input:
if not options.output:
options.input = args[:-1]
args = args[-1:]
else:
options.input = args
args = []
if args and not options.output:
options.output = args[-1]
args = args[:-1]
if not options.output:
self.error("output file is required")
if args:
self.error("You have used an invalid combination of --input, --output and freestanding args")
if isinstance(options.input, list) and len(options.input) == 1:
options.input = options.input[0]
return (options, args)
def set_usage(self, usage=None):
"""sets the usage string - if usage not given, uses getusagestring for each option"""
if usage is None:
self.usage = "%prog " + " ".join([self.getusagestring(option) for option in self.option_list]) + \
"\n input directory is searched for PO files, PO files with name of conflicting string are output in output directory"
else:
super(ConflictOptionParser, self).set_usage(usage)
def run(self):
"""parses the arguments, and runs recursiveprocess with the resulting options"""
(options, args) = self.parse_args()
options.inputformats = self.inputformats
options.outputoptions = self.outputoptions
self.usepsyco(options)
self.recursiveprocess(options)
def recursiveprocess(self, options):
"""recurse through directories and process files"""
if self.isrecursive(options.input, 'input') and getattr(options, "allowrecursiveinput", True):
if not self.isrecursive(options.output, 'output'):
try:
self.warning("Output directory does not exist. Attempting to create")
os.mkdir(options.output)
except:
self.error(optrecurse.optparse.OptionValueError("Output directory does not exist, attempt to create failed"))
if isinstance(options.input, list):
inputfiles = self.recurseinputfilelist(options)
else:
inputfiles = self.recurseinputfiles(options)
else:
if options.input:
inputfiles = [os.path.basename(options.input)]
options.input = os.path.dirname(options.input)
else:
inputfiles = [options.input]
self.textmap = {}
self.initprogressbar(inputfiles, options)
for inputpath in inputfiles:
fullinputpath = self.getfullinputpath(options, inputpath)
try:
success = self.processfile(None, options, fullinputpath)
except Exception, error:
if isinstance(error, KeyboardInterrupt):
raise
self.warning("Error processing: input %s" % (fullinputpath), options, sys.exc_info())
success = False
self.reportprogress(inputpath, success)
del self.progressbar
self.buildconflictmap()
self.outputconflicts(options)
def clean(self, string, options):
"""returns the cleaned string that contains the text to be matched"""
if options.ignorecase:
string = string.lower()
for accelerator in options.accelchars:
string = string.replace(accelerator, "")
string = string.strip()
return string
def processfile(self, fileprocessor, options, fullinputpath):
"""process an individual file"""
inputfile = self.openinputfile(options, fullinputpath)
inputfile = factory.getobject(inputfile)
for unit in inputfile.units:
if unit.isheader() or not unit.istranslated():
continue
if unit.hasplural():
continue
if not options.invert:
source = self.clean(unit.source, options)
target = self.clean(unit.target, options)
else:
target = self.clean(unit.source, options)
source = self.clean(unit.target, options)
self.textmap.setdefault(source, []).append((target, unit, fullinputpath))
def flatten(self, text, joinchar):
"""flattens text to just be words"""
flattext = ""
for c in text:
if c.isalnum():
flattext += c
elif flattext[-1:].isalnum():
flattext += joinchar
return flattext.rstrip(joinchar)
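# Rough example: flatten("Hello, world!", " ") gives "Hello world" and
# flatten("Hello, world!", "-") gives "Hello-world".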
def buildconflictmap(self):
"""work out which strings are conflicting"""
self.conflictmap = {}
for source, translations in self.textmap.iteritems():
source = self.flatten(source, " ")
if len(source) <= 1:
continue
if len(translations) > 1:
uniquetranslations = dict.fromkeys([target for target, unit, filename in translations])
if len(uniquetranslations) > 1:
self.conflictmap[source] = translations
def outputconflicts(self, options):
"""saves the result of the conflict match"""
print "%d/%d different strings have conflicts" % (len(self.conflictmap), len(self.textmap))
reducedmap = {}
for source, translations in self.conflictmap.iteritems():
words = source.split()
words.sort(lambda x, y: cmp(len(x), len(y)))
source = words[-1]
reducedmap.setdefault(source, []).extend(translations)
# reduce plurals
plurals = {}
for word in reducedmap:
if word + "s" in reducedmap:
plurals[word] = word + "s"
for word, pluralword in plurals.iteritems():
reducedmap[word].extend(reducedmap.pop(pluralword))
for source, translations in reducedmap.iteritems():
flatsource = self.flatten(source, "-")
fulloutputpath = os.path.join(options.output, flatsource + os.extsep + "po")
conflictfile = po.pofile()
for target, unit, filename in translations:
unit.othercomments.append("# (poconflicts) %s\n" % filename)
conflictfile.units.append(unit)
open(fulloutputpath, "w").write(str(conflictfile))
def main():
formats = {"po":("po", None), None:("po", None)}
parser = ConflictOptionParser(formats)
parser.add_option("-I", "--ignore-case", dest="ignorecase",
action="store_true", default=False, help="ignore case distinctions")
parser.add_option("-v", "--invert", dest="invert",
action="store_true", default=False, help="invert the conflicts thus extracting conflicting destination words")
parser.add_option("", "--accelerator", dest="accelchars", default="",
metavar="ACCELERATORS", help="ignores the given accelerator characters when matching")
parser.set_usage()
parser.description = __doc__
parser.run()
if __name__ == '__main__':
main()
```
#### File: translate/tools/podebug.py
```python
import os
import re
from translate.misc import hash
from translate.storage import factory
from translate.storage.placeables import StringElem, general
from translate.storage.placeables import parse as rich_parse
from translate.convert import dtd2po
def add_prefix(prefix, stringelems):
for stringelem in stringelems:
for string in stringelem.flatten():
if len(string.sub) > 0:
string.sub[0] = prefix + string.sub[0]
return stringelems
podebug_parsers = general.parsers
podebug_parsers.remove(general.CapsPlaceable.parse)
podebug_parsers.remove(general.CamelCasePlaceable.parse)
class podebug:
def __init__(self, format=None, rewritestyle=None, ignoreoption=None):
if format is None:
self.format = ""
else:
self.format = format
self.rewritefunc = getattr(self, "rewrite_%s" % rewritestyle, None)
self.ignorefunc = getattr(self, "ignore_%s" % ignoreoption, None)
def apply_to_translatables(self, string, func):
"""Applies func to all translatable strings in string."""
string.map(
lambda e: e.apply_to_strings(func),
lambda e: e.isleaf() and e.istranslatable
)
def rewritelist(cls):
return [rewrite.replace("rewrite_", "") for rewrite in dir(cls) if rewrite.startswith("rewrite_")]
rewritelist = classmethod(rewritelist)
def _rewrite_prepend_append(self, string, prepend, append=None):
if append is None:
append = prepend
if not isinstance(string, StringElem):
string = StringElem(string)
string.sub.insert(0, prepend)
if unicode(string).endswith(u'\n'):
# Try and remove the last character from the tree
try:
lastnode = string.flatten()[-1]
if isinstance(lastnode.sub[-1], unicode):
lastnode.sub[-1] = lastnode.sub[-1].rstrip(u'\n')
except IndexError:
pass
string.sub.append(append + u'\n')
else:
string.sub.append(append)
return string
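# Sketch: with prepend u"xxx", a source of u"Save" renders as u"xxxSavexxx";
# a trailing newline stays at the very end, after the closing marker.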
def rewrite_xxx(self, string):
return self._rewrite_prepend_append(string, u"xxx")
def rewrite_bracket(self, string):
return self._rewrite_prepend_append(string, u"[", u"]")
def rewrite_en(self, string):
if not isinstance(string, StringElem):
string = StringElem(string)
return string
def rewrite_blank(self, string):
return StringElem(u"")
def rewrite_chef(self, string):
"""Rewrite using Mock Swedish as made famous by Monty Python"""
if not isinstance(string, StringElem):
string = StringElem(string)
# From Dive into Python which itself got it elsewhere
# http://www.renderx.com/demos/examples/diveintopython.pdf
subs = (
(r'a([nu])', r'u\1'),
(r'A([nu])', r'U\1'),
(r'a\B', r'e'),
(r'A\B', r'E'),
(r'en\b', r'ee'),
(r'\Bew', r'oo'),
(r'\Be\b', r'e-a'),
(r'\be', r'i'),
(r'\bE', r'I'),
(r'\Bf', r'ff'),
(r'\Bir', r'ur'),
(r'(\w*?)i(\w*?)$', r'\1ee\2'),
(r'\bow', r'oo'),
(r'\bo', r'oo'),
(r'\bO', r'Oo'),
(r'the', r'zee'),
(r'The', r'Zee'),
(r'th\b', r't'),
(r'\Btion', r'shun'),
(r'\Bu', r'oo'),
(r'\BU', r'Oo'),
(r'v', r'f'),
(r'V', r'F'),
(r'w', r'w'),
(r'W', r'W'),
(r'([a-z])[.]', r'\1. Bork Bork Bork!'))
for a, b in subs:
self.apply_to_translatables(string, lambda s: re.sub(a, b, s))
return string
REWRITE_UNICODE_MAP = u"ȦƁƇḒḖƑƓĦĪĴĶĿḾȠǾƤɊŘŞŦŬṼẆẊẎẐ" + u"[\\]^_`" + u"ȧƀƈḓḗƒɠħīĵķŀḿƞǿƥɋřşŧŭṽẇẋẏẑ"
def rewrite_unicode(self, string):
"""Convert to Unicode characters that look like the source string"""
if not isinstance(string, StringElem):
string = StringElem(string)
def transpose(char):
loc = ord(char)-65
if loc < 0 or loc > 56:
return char
return self.REWRITE_UNICODE_MAP[loc]
def transformer(s):
return ''.join([transpose(c) for c in s])
self.apply_to_translatables(string, transformer)
return string
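# Illustration: transpose(u"A") is u"Ȧ" and transpose(u"B") is u"Ɓ", so a source of
# u"ABC" comes back looking like u"ȦƁƇ".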
REWRITE_FLIPPED_MAP = u"¡„#$%⅋,()⁎+´-·/012Ɛᔭ59Ƚ86:;<=>?@" + \
u"∀ԐↃᗡƎℲ⅁HIſӼ⅂WNOԀÒᴚS⊥∩ɅMX⅄Z" + u"[\\]ᵥ_," + \
u"ɐqɔpǝɟƃɥıɾʞʅɯuodbɹsʇnʌʍxʎz"
# Brackets should be swapped if the string will be reversed in memory.
# If a right-to-left override is used, the brackets should be
# unchanged.
#Some alternatives:
# D: ᗡ◖
# K: Ж⋊Ӽ
# @: Ҩ - Seems only related in Dejavu Sans
# Q: Ὄ Ό Ὀ Ὃ Ὄ Ṑ Ò Ỏ
# _: ‾ - left out for now for the sake of GTK accelerators
def rewrite_flipped(self, string):
"""Convert the string to look flipped upside down."""
if not isinstance(string, StringElem):
string = StringElem(string)
def transpose(char):
loc = ord(char)-33
if loc < 0 or loc > 89:
return char
return self.REWRITE_FLIPPED_MAP[loc]
def transformer(s):
return u"\u202e" + u''.join([transpose(c) for c in s])
# To reverse instead of using the RTL override:
#return u''.join(reversed([transpose(c) for c in s]))
self.apply_to_translatables(string, transformer)
return string
def ignorelist(cls):
return [ignore.replace("ignore_", "") for ignore in dir(cls) if ignore.startswith("ignore_")]
ignorelist = classmethod(ignorelist)
def ignore_openoffice(self, unit):
for location in unit.getlocations():
if location.startswith("Common.xcu#..Common.View.Localisation"):
return True
elif location.startswith("profile.lng#STR_DIR_MENU_NEW_"):
return True
elif location.startswith("profile.lng#STR_DIR_MENU_WIZARD_"):
return True
return False
def ignore_mozilla(self, unit):
locations = unit.getlocations()
if len(locations) == 1 and locations[0].lower().endswith(".accesskey"):
return True
for location in locations:
if dtd2po.is_css_entity(location):
return True
if location in ["brandShortName", "brandFullName", "vendorShortName"]:
return True
if location.lower().endswith(".commandkey") or location.endswith(".key"):
return True
return False
def ignore_gtk(self, unit):
if unit.source == "default:LTR":
return True
return False
def ignore_kde(self, unit):
if unit.source == "LTR":
return True
return False
def convertunit(self, unit, prefix):
if self.ignorefunc:
if self.ignorefunc(unit):
return unit
if prefix.find("@hash_placeholder@") != -1:
if unit.getlocations():
hashable = unit.getlocations()[0]
else:
hashable = unit.source
prefix = prefix.replace("@hash_placeholder@", hash.md5_f(hashable).hexdigest()[:self.hash_len])
rich_source = unit.rich_source
if not isinstance(rich_source, StringElem):
rich_source = [rich_parse(string, podebug_parsers) for string in rich_source]
if self.rewritefunc:
rewritten = [self.rewritefunc(string) for string in rich_source]
if rewritten:
unit.rich_target = rewritten
elif not unit.istranslated():
unit.rich_target = unit.rich_source
unit.rich_target = add_prefix(prefix, unit.rich_target)
return unit
def convertstore(self, store):
filename = self.shrinkfilename(store.filename)
prefix = self.format
for formatstr in re.findall("%[0-9c]*[sfFbBdh]", self.format):
if formatstr.endswith("s"):
formatted = self.shrinkfilename(store.filename)
elif formatstr.endswith("f"):
formatted = store.filename
formatted = os.path.splitext(formatted)[0]
elif formatstr.endswith("F"):
formatted = store.filename
elif formatstr.endswith("b"):
formatted = os.path.basename(store.filename)
formatted = os.path.splitext(formatted)[0]
elif formatstr.endswith("B"):
formatted = os.path.basename(store.filename)
elif formatstr.endswith("d"):
formatted = os.path.dirname(store.filename)
elif formatstr.endswith("h"):
try:
self.hash_len = int(filter(str.isdigit, formatstr[1:-1]))
except ValueError:
self.hash_len = 4
formatted = "@hash_placeholder@"
else:
continue
formatoptions = formatstr[1:-1]
if formatoptions and not formatstr.endswith("h"):
if "c" in formatoptions and formatted:
formatted = formatted[0] + filter(lambda x: x.lower() not in "aeiou", formatted[1:])
length = filter(str.isdigit, formatoptions)
if length:
formatted = formatted[:int(length)]
prefix = prefix.replace(formatstr, formatted)
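# Sketch with assumed values: for store.filename "dir/file.po" and a format of
# "[%b] ", the loop above expands the prefix to "[file] ", which is then
# prepended to each translatable unit below.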
for unit in store.units:
if not unit.istranslatable():
continue
unit = self.convertunit(unit, prefix)
return store
def shrinkfilename(self, filename):
if filename.startswith("." + os.sep):
filename = filename.replace("." + os.sep, "", 1)
dirname = os.path.dirname(filename)
dirparts = dirname.split(os.sep)
if not dirparts:
dirshrunk = ""
else:
dirshrunk = dirparts[0][:4] + "-"
if len(dirparts) > 1:
dirshrunk += "".join([dirpart[0] for dirpart in dirparts[1:]]) + "-"
baseshrunk = os.path.basename(filename)[:4]
if "." in baseshrunk:
baseshrunk = baseshrunk[:baseshrunk.find(".")]
return dirshrunk + baseshrunk
def convertpo(inputfile, outputfile, templatefile, format=None, rewritestyle=None, ignoreoption=None):
"""Reads in inputfile, changes it to have debug strings, writes to outputfile."""
# note that templatefile is not used, but it is required by the converter...
inputstore = factory.getobject(inputfile)
if inputstore.isempty():
return 0
convertor = podebug(format=format, rewritestyle=rewritestyle, ignoreoption=ignoreoption)
outputstore = convertor.convertstore(inputstore)
outputfile.write(str(outputstore))
return 1
def main():
from translate.convert import convert
formats = {"po":("po", convertpo), "pot":("po", convertpo), "xlf":("xlf", convertpo)}
parser = convert.ConvertOptionParser(formats, description=__doc__)
# TODO: add documentation on format strings...
parser.add_option("-f", "--format", dest="format", default="",
help="specify format string")
parser.add_option("", "--rewrite", dest="rewritestyle",
type="choice", choices=podebug.rewritelist(), metavar="STYLE",
help="the translation rewrite style: %s" % ", ".join(podebug.rewritelist()))
parser.add_option("", "--ignore", dest="ignoreoption",
type="choice", choices=podebug.ignorelist(), metavar="APPLICATION",
help="apply tagging ignore rules for the given application: %s" % ", ".join(podebug.ignorelist()))
parser.passthrough.append("format")
parser.passthrough.append("rewritestyle")
parser.passthrough.append("ignoreoption")
parser.run()
if __name__ == '__main__':
main()
```
#### File: tests/contrib/test_iterio.py
```python
from nose.tools import assert_raises
from werkzeug.contrib.iterio import IterIO
def test_itero():
"""Test the IterIO"""
iterable = iter(["Hello", "World", "1", "2", "3"])
io = IterIO(iterable)
assert io.tell() == 0
assert io.read(2) == "He"
assert io.tell() == 2
assert io.read(3) == "llo"
assert io.tell() == 5
io.seek(0)
assert io.read(5) == "Hello"
assert io.tell() == 5
assert io._buf == "Hello"
assert io.read() == "World123"
assert io.tell() == 13
io.close()
assert io.closed
io = IterIO(iter(["Hello\n", "World!"]))
assert io.readline() == 'Hello\n'
assert io._buf == 'Hello\n'
assert io.read() == 'World!'
assert io._buf == 'Hello\nWorld!'
assert io.tell() == 12
io.seek(0)
assert io.readlines() == ['Hello\n', 'World!']
io = IterIO(iter(["foo\n", "bar"]))
io.seek(-4, 2)
assert io.read(4) == '\nbar'
assert_raises(IOError, io.seek, 2, 100)
io.close()
assert_raises(ValueError, io.read)
```
#### File: tests/multipart/collect.py
```python
from werkzeug import Request, Response, run_simple
def copy_stream(request):
from os import mkdir
from time import time
folder = 'request-%d' % time()
mkdir(folder)
environ = request.environ
f = file(folder + '/request.txt', 'wb+')
f.write(environ['wsgi.input'].read(int(environ['CONTENT_LENGTH'])))
f.flush()
f.seek(0)
environ['wsgi.input'] = f
request.stat_folder = folder
def stats(request):
copy_stream(request)
f1 = request.files['file1']
f2 = request.files['file2']
text = request.form['text']
f1.save(request.stat_folder + '/file1.bin')
f2.save(request.stat_folder + '/file2.bin')
file(request.stat_folder + '/text.txt', 'w').write(text.encode('utf-8'))
return Response('Done.')
def upload_file(request):
return Response('''
<h1>Upload File</h1>
<form action="" method="post" enctype="multipart/form-data">
<input type="file" name="file1"><br>
<input type="file" name="file2"><br>
<textarea name="text"></textarea><br>
<input type="submit" value="Send">
</form>
''', mimetype='text/html')
def application(environ, start_response):
request = Request(environ)
if request.method == 'POST':
response = stats(request)
else:
response = upload_file(request)
return response(environ, start_response)
if __name__ == '__main__':
run_simple('localhost', 5000, application, use_debugger=True)
```
#### File: werkzeug/debug/render.py
```python
import pprint
from os.path import dirname, join
from werkzeug.templates import Template
def get_template(name):
return Template.from_file(join(dirname(__file__), 'shared', name),
unicode_mode=False, errors='ignore')
def load_resource(res):
try:
f = file(join(dirname(__file__), 'shared', res))
except IOError:
return ''
try:
return f.read()
finally:
f.close()
t_body = get_template('body.tmpl')
t_codetable = get_template('codetable.tmpl')
t_vartable = get_template('vartable.tmpl')
def code_table(frame):
from werkzeug.debug.util import Namespace
lines = []
lineno = frame['context_lineno']
if lineno is not None:
lineno += 1
for l in frame['pre_context']:
lines.append(Namespace(mode='pre', lineno=lineno, code=l))
lineno += 1
lines.append(Namespace(mode='cur', lineno=lineno,
code=frame['context_line']))
lineno += 1
for l in frame['post_context']:
lines.append(Namespace(mode='post', lineno=lineno, code=l))
lineno += 1
else:
lines.append(Namespace(mode='cur', lineno=1,
code='Sourcecode not available'))
return t_codetable.render(lines=lines)
def var_table(var):
def safe_pformat(x):
try:
lines = pprint.pformat(x).splitlines()
except:
return '?'
tmp = []
for line in lines:
if len(line) > 79:
line = line[:79] + '...'
tmp.append(line)
return '\n'.join(tmp)
# dicts
if isinstance(var, dict) or hasattr(var, 'items'):
value = var.items()
if not value:
typ = 'empty'
else:
typ = 'dict'
value.sort()
value = [(repr(key), safe_pformat(val)) for key, val in value]
# lists
elif isinstance(var, list):
if not var:
typ = 'empty'
else:
typ = 'list'
value = [safe_pformat(item) for item in var]
# others
else:
typ = 'simple'
value = repr(var)
return t_vartable.render(type=typ, value=value)
def debug_page(context):
tc = context.to_dict()
tc['var_table'] = var_table
tc['code_table'] = code_table
return t_body.render(tc)
```
|
{
"source": "jgmize/nucleus",
"score": 2
}
|
#### File: django_browserid/tests/__init__.py
```python
import json
from django.test import TestCase as DjangoTestCase
from django.utils.encoding import smart_text
from django.utils.functional import wraps
from mock import patch
from nose.tools import eq_
from django_browserid.auth import BrowserIDBackend
from django_browserid.base import MockVerifier
def fake_create_user(email):
pass
class mock_browserid(object):
"""
Mock verification in :class:`django_browserid.auth.BrowserIDBackend`.
Can be used as a context manager or as a decorator:
with mock_browserid('<EMAIL>'):
django_browserid.verify('random-token') # = {'status': 'okay',
# 'email': '<EMAIL>',
# ...}
@mock_browserid(None)
def browserid_test():
django_browserid.verify('random-token') # = False
"""
def __init__(self, email, **kwargs):
"""
:param email:
Email to return in the verification result. If None, the verification will fail.
:param kwargs:
Keyword arguments are passed on to :class:`django_browserid.base.MockVerifier`, which
updates the verification result with them.
"""
self.patcher = patch.object(BrowserIDBackend, 'get_verifier')
self.return_value = MockVerifier(email, **kwargs)
def __enter__(self):
mock = self.patcher.start()
mock.return_value = self.return_value
return mock
def __exit__(self, exc_type, exc_value, traceback):
self.patcher.stop()
def __call__(self, func):
@wraps(func)
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
class TestCase(DjangoTestCase):
def assert_json_equals(self, json_str, value):
return eq_(json.loads(smart_text(json_str)), value)
```
#### File: django_browserid/tests/test_views.py
```python
import json
from django.contrib import auth
from django.contrib.auth.backends import ModelBackend
from django.contrib.sessions.backends.cache import SessionStore
from django.test.client import RequestFactory
from django.utils.encoding import smart_text
from django.utils.functional import lazy
from mock import patch
from nose.tools import eq_, ok_
from django_browserid import BrowserIDException, views
from django_browserid.auth import BrowserIDBackend
from django_browserid.tests import mock_browserid, TestCase
class JSONViewTests(TestCase):
def test_http_method_not_allowed(self):
class TestView(views.JSONView):
def get(self, request, *args, **kwargs):
return 'asdf'
response = TestView().http_method_not_allowed()
eq_(response.status_code, 405)
ok_(set(['GET']).issubset(set(response['Allow'].split(', '))))
self.assert_json_equals(response.content, {'error': 'Method not allowed.'})
def test_http_method_not_allowed_allowed_methods(self):
class GetPostView(views.JSONView):
def get(self, request, *args, **kwargs):
return 'asdf'
def post(self, request, *args, **kwargs):
return 'qwer'
response = GetPostView().http_method_not_allowed()
ok_(set(['GET', 'POST']).issubset(set(response['Allow'].split(', '))))
class GetPostPutDeleteHeadView(views.JSONView):
def get(self, request, *args, **kwargs):
return 'asdf'
def post(self, request, *args, **kwargs):
return 'qwer'
def put(self, request, *args, **kwargs):
return 'qwer'
def delete(self, request, *args, **kwargs):
return 'qwer'
def head(self, request, *args, **kwargs):
return 'qwer'
response = GetPostPutDeleteHeadView().http_method_not_allowed()
expected_methods = set(['GET', 'POST', 'PUT', 'DELETE', 'HEAD'])
actual_methods = set(response['Allow'].split(', '))
ok_(expected_methods.issubset(actual_methods))
class VerifyTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def verify(self, request_type, **kwargs):
"""
Call the verify view function. Kwargs are passed as GET or POST
arguments.
"""
if request_type == 'get':
request = self.factory.get('/browserid/verify', kwargs)
else:
request = self.factory.post('/browserid/verify', kwargs)
verify_view = views.Verify.as_view()
with patch.object(auth, 'login'):
response = verify_view(request)
return response
def test_no_assertion(self):
# If no assertion is given, return a failure result.
with self.settings(LOGIN_REDIRECT_URL_FAILURE='/fail'):
response = self.verify('post', blah='asdf')
eq_(response.status_code, 403)
self.assert_json_equals(response.content, {'redirect': '/fail?bid_login_failed=1'})
@mock_browserid(None)
def test_auth_fail(self):
# If authentication fails, redirect to the failure URL.
with self.settings(LOGIN_REDIRECT_URL_FAILURE='/fail'):
response = self.verify('post', assertion='asdf')
eq_(response.status_code, 403)
self.assert_json_equals(response.content, {'redirect': '/fail?bid_login_failed=1'})
@mock_browserid(None)
def test_auth_fail_url_parameters(self):
# Ensure that bid_login_failed=1 is appended to the failure url.
with self.settings(LOGIN_REDIRECT_URL_FAILURE='/fail'):
response = self.verify('post', assertion='asdf')
self.assert_json_equals(response.content, {'redirect': '/fail?bid_login_failed=1'})
with self.settings(LOGIN_REDIRECT_URL_FAILURE='/fail?'):
response = self.verify('post', assertion='asdf')
self.assert_json_equals(response.content, {'redirect': '/fail?bid_login_failed=1'})
with self.settings(LOGIN_REDIRECT_URL_FAILURE='/fail?asdf'):
response = self.verify('post', assertion='asdf')
self.assert_json_equals(response.content, {'redirect': '/fail?asdf&bid_login_failed=1'})
with self.settings(LOGIN_REDIRECT_URL_FAILURE='/fail?asdf=4'):
response = self.verify('post', assertion='asdf')
self.assert_json_equals(response.content, {'redirect': '/fail?asdf=4&bid_login_failed=1'})
with self.settings(LOGIN_REDIRECT_URL_FAILURE='/fail?asdf=4&bid_login_failed=1'):
response = self.verify('post', assertion='asdf')
self.assert_json_equals(response.content,
{'redirect': '/fail?asdf=4&bid_login_failed=1&bid_login_failed=1'})
@mock_browserid(None)
@patch('django_browserid.views.logger.error')
@patch('django_browserid.views.auth.authenticate')
def test_authenticate_browserid_exception(self, authenticate, logger_error):
# If authenticate raises a BrowserIDException, return a failure response.
excpt = BrowserIDException(Exception('hsakjw'))
authenticate.side_effect = excpt
with self.settings(LOGIN_REDIRECT_URL_FAILURE='/fail'):
response = self.verify('post', assertion='asdf')
eq_(response.status_code, 403)
self.assert_json_equals(response.content, {'redirect': '/fail?bid_login_failed=1'})
logger_error.assert_called_with(excpt)
def test_failure_url_reverse(self):
# If the failure URL is a view name, it should be reversed
# to get the real failure URL.
with self.settings(LOGIN_REDIRECT_URL_FAILURE='epic_fail'):
response = self.verify('post')
eq_(response.status_code, 403)
self.assert_json_equals(response.content, {'redirect': '/epic-fail/?bid_login_failed=1'})
@mock_browserid('<EMAIL>')
def test_auth_success_redirect_success(self):
# If authentication succeeds, redirect to the success URL.
user = auth.models.User.objects.create_user('asdf', '<EMAIL>')
request = self.factory.post('/browserid/verify', {'assertion': 'asdf'})
with self.settings(LOGIN_REDIRECT_URL='/success'):
with patch('django_browserid.views.auth.login') as login:
verify = views.Verify.as_view()
response = verify(request)
login.assert_called_with(request, user)
eq_(response.status_code, 200)
self.assert_json_equals(response.content,
{'email': '<EMAIL>', 'redirect': '/success'})
def test_sanity_checks(self):
# Run sanity checks on all incoming requests.
with patch('django_browserid.views.sanity_checks') as sanity_checks:
self.verify('post')
ok_(sanity_checks.called)
class BrowserIDInfoTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def info(self, user=None, backend=None):
request = self.factory.get('/')
request.session = SessionStore()
if user:
if backend:
user.backend = '{0}.{1}'.format(backend.__module__,
backend.__class__.__name__)
auth.login(request, user)
request.user = user
info = views.Info.as_view()
return info(request)
def test_anonymous_user_no_request_args(self):
with patch('django_browserid.views.RequestContext') as RequestContext:
RequestContext.return_value.get.return_value = 'asdf'
response = self.info()
RequestContext.return_value.get.assert_called_with('csrf_token', None)
self.assert_json_equals(response.content, {
'userEmail': '',
'loginUrl': '/browserid/login/',
'logoutUrl': '/browserid/logout/',
'requestArgs': {},
'csrfToken': '<PASSWORD>',
})
def test_authenticated_user(self):
user = auth.models.User.objects.create_user('asdf', '<EMAIL>')
response = self.info(user, BrowserIDBackend())
response_data = json.loads(smart_text(response.content))
eq_(response_data['userEmail'], '<EMAIL>')
def test_request_args(self):
with self.settings(BROWSERID_REQUEST_ARGS={'siteName': 'asdf'}):
response = self.info()
response_data = json.loads(smart_text(response.content))
eq_(response_data['requestArgs'], {'siteName': 'asdf'})
def test_non_browserid_user(self):
# If the current user was not authenticated via
# django-browserid, userEmail should be empty.
user = auth.models.User.objects.create_user('asdf', '<EMAIL>')
response = self.info(user, ModelBackend())
response_data = json.loads(smart_text(response.content))
eq_(response_data['userEmail'], '')
def test_lazy_request_args(self):
# Ensure that request_args can be a lazy-evaluated dictionary.
def _lazy_request_args():
return {'siteName': 'asdf'}
lazy_request_args = lazy(_lazy_request_args, dict)
with self.settings(BROWSERID_REQUEST_ARGS=lazy_request_args()):
response = self.info()
response_data = json.loads(smart_text(response.content))
eq_(response_data['requestArgs'], {'siteName': 'asdf'})
class LogoutTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def test_redirect(self):
# Include LOGOUT_REDIRECT_URL in the response.
request = self.factory.post('/')
logout = views.Logout.as_view()
with self.settings(LOGOUT_REDIRECT_URL='/test/foo'):
with patch('django_browserid.views.auth.logout') as auth_logout:
response = logout(request)
auth_logout.assert_called_with(request)
eq_(response.status_code, 200)
self.assert_json_equals(response.content, {'redirect': '/test/foo'})
```
|
{
"source": "JGMorgan/Pong",
"score": 3
}
|
#### File: JGMorgan/Pong/main.py
```python
import pygame
import math
import ssl
import json
import random
from websocket import create_connection
class Ball():
def __init__(self, x, y, x_0, y_0):
self.x = x
self.y = y
self.xMove = x_0
self.yMove = y_0
def update(self, paddle1, paddle2):
self.x += self.xMove
self.y += self.yMove
if self.y < 0 or self.y > 720:
self.yMove *= -1
if self.x >= 10 and self.x <= 20 and self.y <= (paddle1[1] + 180) and self.y >= paddle1[1]:
self.xMove = int(self.xMove * 1.2)
self.yMove = int(self.yMove * 1.2)
self.xMove *= -1
elif self.x >= 1250 and self.x <= 1260 and self.y <= (paddle2[1] + 180) and self.y >= paddle2[1]:
self.xMove = int(self.xMove * 1.2)
self.yMove = int(self.yMove * 1.2)
self.xMove *= -1
return (self.x, self.y)
def reset(self, score1, score2):
if self.x < 0 or self.x > 1280:
if (self.x < 0):
score2 += 1
if (self.x > 1280):
score1 += 1
self.x = 640
self.y = 360
self.xMove = 8
self.yMove = 8
return (score1, score2)
return (score1, score2)
class Paddle():
x = 10
y = 10
def __init__(self, x, y):
self.x = x
self.y = y
def update(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
pressed = pygame.key.get_pressed()
mouse_pos = pygame.mouse.get_pos()
self.y = mouse_pos[1] - 90
return (self.x, self.y)
def updateRemote(self, x, y):
self.x = x
self.y = y
return (self.x, self.y)
def main():
pygame.init()
#ws = create_connection("ws://localhost:5000/", sslopt={"cert_reqs": ssl.CERT_NONE})
ws = create_connection("ws://172.16.17.32:5000/", sslopt={"cert_reqs": ssl.CERT_NONE})
screen = pygame.display.set_mode((1280, 720))
myfont = pygame.font.Font(None, 72)
done = False
x = 10
y = 10
score1 = 0;
score2 = 0;
balls = []
balls.append(Ball(640, 360, 8, 8))
balls.append(Ball(640, 360, -8, -8))
balls.append(Ball(640, 360, 23, -8))
balls.append(Ball(640, 360, 18, 3))
balls.append(Ball(640, 360, -76, 7))
balls.append(Ball(640, 360, -1, -1))
balls.append(Ball(640, 360, -2, 8))
balls.append(Ball(640, 360, -98, 8))
ball1 = balls[0]
paddle = Paddle(x, y)
paddle2 = Paddle(1250, 10)
clock = pygame.time.Clock()
paddle_coords2 = (1250, 10)
value = random.random()
tick = 0
while not done:
tick += 1
screen.fill((0x27, 0x2B, 0x33))
paddle_coords = paddle.update()
temp = json.dumps({"paddleX": paddle_coords[0],
"paddleY": paddle_coords[1],
"value": value})
newscores = ball1.reset(score1, score2)
score1 = newscores[0]
score2 = newscores[1]
for i in xrange(1, len(balls)):
ball = balls[i]
if((score1 > i*2) or (score2 > i*2)):
newscores = ball.reset(score1, score2)
score1 = newscores[0]
score2 = newscores[1]
ws.send(temp)
tick = 0
result = ws.recv()
coords = json.loads(result)
if value != coords['value']:
coords = json.loads(result)
print coords['paddleY']
paddle_coords2 = paddle2.updateRemote(1250, coords['paddleY'])
ball1_coords = ball1.update(paddle_coords, paddle_coords2)
balls_coords = []
balls_coords.append(ball1_coords)
for i in xrange(1, len(balls)):
ball = balls[i]
if((score1 > i*2) or (score2 > i*2)):
balls_coords.append(ball.update(paddle_coords, paddle_coords2))
# ball_coords = ball2.update(paddle_coords, paddle_coords2)
pygame.draw.rect(screen, (0x87, 0xC0, 0x78), pygame.Rect(paddle_coords[0], paddle_coords[1] , 20, 180))
pygame.draw.rect(screen, (0x87, 0xC0, 0x78), pygame.Rect(paddle_coords2[0], paddle_coords2[1] , 20, 180))
pygame.draw.rect(screen, (0xF4, 0x43, 0x36), pygame.Rect(ball1_coords[0], ball1_coords[1], 20, 20))
# if(score1 > 4 or score2 > 4):
# pygame.draw.rect(screen, (0, 255, 0), pygame.Rect(ball2_coords[0], ball2_coords[1], 20, 20))
        for i in range(1, len(balls)):
            ball = balls[i]
            if((score1 > i*2) or (score2 > i*2)):
                # NOTE: these extra balls were already updated in the loop above,
                # so they are effectively updated twice per frame
                balls_coords[i] = ball.update(paddle_coords, paddle_coords2)
                pygame.draw.rect(screen, (0xF4, 0x43, 0x36), pygame.Rect(balls_coords[i][0], balls_coords[i][1], 20, 20))
scoretext = myfont.render("{0} {1}".format(score1, score2), 1, (0xE5, 0xC1, 0x7C))
screen.blit(scoretext, (1280/2 - 50, 25))
pygame.display.flip()
clock.tick(60)
if __name__ == '__main__':
main()
```
|
{
"source": "jgnav/BSc-Lego-Mindstorm-Robot",
"score": 2
}
|
#### File: BSc-Lego-Mindstorm-Robot/Codigo_auxiliar/codigo_de_prueba.py
```python
# imports required by the code below (numpy and math functions are used throughout)
from math import sin, cos, atan, pi
import numpy as np

posicion_robot = [0.0, 0.0, 0.0, pi/2]
punto_global = [1.0, 1.0, 0.0]
R = np.array([[cos(posicion_robot[3]), -sin(posicion_robot[3]), 0],
[sin(posicion_robot[3]), cos(posicion_robot[3]), 0],
[0.0, 0.0, 1.0]])
Rt = np.transpose(R)
aux = -(Rt @ posicion_robot[:3])
T = np.array([[Rt[0][0], Rt[0][1], Rt[0][2], aux[0]],
[Rt[1][0], Rt[1][1], Rt[1][2], aux[1]],
[Rt[2][0], Rt[2][1], Rt[2][2], aux[2]],
[0, 0, 0, 1]])
resultado = T @ np.append(np.array(punto_global), 1)
print(resultado[:3].tolist())
def _theta_a_ejes(theta):
ejes = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]
ejes[0][0] = cos(theta - (pi/2))
ejes[1][0] = sin(theta - (pi/2))
ejes[0][1] = cos(theta)
ejes[1][1] = sin(theta)
return ejes
def _matriz_de_rotacion(ejesA, ejesB):
axisA = np.array(ejesA)
axisB = np.array(ejesB)
bRa = np.array([[(axisA[:, 0] @ axisB[:, 0]), (axisA[:, 1] @ axisB[:, 0]), (axisA[:, 2] @ axisB[:, 0])],
[(axisA[:, 0] @ axisB[:, 1]), (axisA[:, 1] @ axisB[:, 1]), (axisA[:, 2] @ axisB[:, 1])],
[(axisA[:, 0] @ axisB[:, 2]), (axisA[:, 1] @ axisB[:, 2]), (axisA[:, 2] @ axisB[:, 2])]])
return bRa.tolist()
def _matriz_de_translacion(cero, m_rotacion):
bPa0 = np.array(cero)
bRa = np.array(m_rotacion)
bTa = np.array([[bRa[0][0], bRa[0][1], bRa[0][2], bPa0[0]],
[bRa[1][0], bRa[1][1], bRa[1][2], bPa0[1]],
[bRa[2][0], bRa[2][1], bRa[2][2], bPa0[2]],
[0, 0, 0, 1]])
return bTa.tolist()
def _matriz_de_translacion_inversa(cero, m_rotacion):
bPa0 = np.array(cero)
bRa = np.array(m_rotacion)
bRaT = np.transpose(bRa)
aux = -(bRaT @ bPa0)
aTb = np.array([[bRaT[0][0], bRaT[0][1], bRaT[0][2], aux[0]],
[bRaT[1][0], bRaT[1][1], bRaT[1][2], aux[1]],
[bRaT[2][0], bRaT[2][1], bRaT[2][2], aux[2]],
[0, 0, 0, 1]])
return aTb.tolist()
def _translacion_de_punto(aP, aPb0, axisA, axisB):
bRa = _matriz_de_rotacion(axisA, axisB)
aTb = _matriz_de_translacion_inversa(aPb0, bRa)
aPprima = np.append(np.array(aP), 1)
bP = np.array(aTb) @ aPprima
return bP[:3].tolist()
def rotation_matrix(axisA, axisB):
bRa = np.array([[(axisA[0] @ axisB[0]), (axisA[1] @ axisB[0]), (axisA[2] @ axisB[0])],
[(axisA[0] @ axisB[1]), (axisA[1] @ axisB[1]), (axisA[2] @ axisB[1])],
[(axisA[0] @ axisB[2]), (axisA[1] @ axisB[2]), (axisA[2] @ axisB[2])]])
return bRa
def translation_matrix(bPa0, bRa):
bTa = np.array([[bRa[0][0], bRa[1][0], bRa[2][0], bPa0[0]],
[bRa[0][1], bRa[1][1], bRa[2][1], bPa0[1]],
[bRa[0][2], bRa[1][2], bRa[2][2], bPa0[2]],
[0, 0, 0, 1]])
return bTa
def inverted_translation_matrix(bPa0, bRa):
bRaT = np.transpose(bRa)
aux = -(bRaT @ bPa0)
aTb = np.array([[bRaT[0][0], bRaT[1][0], bRaT[2][0], aux[0]],
[bRaT[0][1], bRaT[1][1], bRaT[2][1], aux[1]],
[bRaT[0][2], bRaT[1][2], bRaT[2][2], aux[2]],
[0, 0, 0, 1]])
return aTb
def point_translation(aP, bPa0, axisA, axisB):
bRa = rotation_matrix(axisA, axisB)
bTa = translation_matrix(bPa0, bRa)
aPprima = np.append(aP, 1)
bP = bTa @ aPprima
return bP[:3]
# PRUEBA:
axisA = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
axisB = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
bPa0 = np.array([1, 1, 1])
aP = np.array([1, 1, 1])
bRa = rotation_matrix(axisA, axisB)
# print(bRa)
bTa = inverted_translation_matrix(bPa0, bRa)
# print(bTa)
bP = point_translation(aP, bPa0, axisA, axisB)
print(bP)
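# Sanity note: with identity axes and bPa0 = aP = [1, 1, 1], point_translation
# computes bP = bRa.T @ aP + bPa0, so the print above should show [2 2 2].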
# NOTE: 'ejes_a_theta' and 'theta_a_ejes' are not defined at module level in this
# snippet (only the underscore-prefixed versions above), so these two calls fail as-is.
theta = ejes_a_theta(axisA)
axis = theta_a_ejes(theta)
print(axis)
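# The lines below look like excerpts of class methods pasted into this test file
# (note the use of 'self'); they are not runnable at module level as-is.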
self._cero_universal = np.array([0.0, 0.0, 0.0])
self._ejes_universales = np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]])
self._cero_robot = np.array([0.0, 0.0, 0.0])
self._theta_robot = 0.0
self._ejes_robot = np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]])
def _ejes_a_theta(self, ejes):
if ((ejes[0][1] > 0) and (ejes[1][1] >= 0)):
theta = atan(ejes[1][1]/ejes[0][1])
elif ((ejes[0][1] < 0) and (ejes[1][1] > 0)):
theta = atan(ejes[1][1]/ejes[0][1]) + pi
elif ((ejes[0][1] < 0) and (ejes[1][1] <= 0)):
theta = atan(ejes[1][1]/ejes[0][1]) + pi
elif ((ejes[0][1] > 0) and (ejes[1][1] < 0)):
theta = 2*pi + atan(ejes[1][1]/ejes[0][1])
elif ((ejes[0][1] == 0) and (ejes[1][1] > 0)):
theta = pi/2
elif ((ejes[0][1] == 0) and (ejes[1][1] < 0)):
theta = (3/2)*pi
return theta
def _theta_a_ejes(self, theta):
ejes = np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]])
if ((theta > 0) and (theta < pi/2)):
ejes[0][0] = 1
ejes[0][1] = 1
ejes[1][0] = 1
ejes[1][1] = 1
elif ((theta > pi/2) and (theta < pi)):
ejes[0][0] = 1
ejes[0][1] = 1
ejes[1][0] = 1
ejes[1][1] = 1
elif ((theta > pi) and (theta < (3/2)*pi)):
ejes[0][0] = 1
ejes[0][1] = 1
ejes[1][0] = 1
ejes[1][1] = 1
elif ((theta > (3/2)*pi) and (theta < 2*pi)):
ejes[0][0] = 1
ejes[0][1] = 1
ejes[1][0] = 1
ejes[1][1] = 1
return ejes
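    # NOTE: the next definition overrides the quadrant stub of the same name above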
def _theta_a_ejes(self, theta):
ejes = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]
ejes[0][0] = cos(theta - (pi/2))
ejes[1][0] = sin(theta - (pi/2))
ejes[0][1] = cos(theta)
ejes[1][1] = sin(theta)
return ejes
def _matriz_de_rotacion(self, ejesA, ejesB):
axisA = np.array(ejesA)
axisB = np.array(ejesB)
bRa = np.array([[(axisA[:, 0] @ axisB[:, 0]), (axisA[:, 1] @ axisB[:, 0]), (axisA[:, 2] @ axisB[:, 0])],
[(axisA[:, 0] @ axisB[:, 1]), (axisA[:, 1] @ axisB[:, 1]), (axisA[:, 2] @ axisB[:, 1])],
[(axisA[:, 0] @ axisB[:, 2]), (axisA[:, 1] @ axisB[:, 2]), (axisA[:, 2] @ axisB[:, 2])]])
return bRa.tolist()
def _matriz_de_translacion(self, cero, m_rotacion):
bPa0 = np.array(cero)
bRa = np.array(m_rotacion)
bTa = np.array([[bRa[0][0], bRa[0][1], bRa[0][2], bPa0[0]],
[bRa[1][0], bRa[1][1], bRa[1][2], bPa0[1]],
[bRa[2][0], bRa[2][1], bRa[2][2], bPa0[2]],
[0, 0, 0, 1]])
return bTa.tolist()
def _matriz_de_translacion_inversa(self, cero, m_rotacion):
bPa0 = np.array(cero)
bRa = np.array(m_rotacion)
bRaT = np.transpose(bRa)
aux = -(bRaT @ bPa0)
aTb = np.array([[bRaT[0][0], bRaT[0][1], bRaT[0][2], aux[0]],
[bRaT[1][0], bRaT[1][1], bRaT[1][2], aux[1]],
[bRaT[2][0], bRaT[2][1], bRaT[2][2], aux[2]],
[0, 0, 0, 1]])
return aTb.tolist()
def _translacion_de_punto(self, aP, aPb0, axisA, axisB):
bRa = self._matriz_de_rotacion(axisA, axisB)
aTb = self._matriz_de_translacion(aPb0, bRa)
aPprima = np.append(np.array(aP), 1)
bP = np.array(aTb) @ aPprima
return bP[:3].tolist()
def _c_globales_a_robot(self, coordenadas):
ejes_robot = self._theta_a_ejes(self._posicion_robot[3])
return self._translacion_de_punto(coordenadas, self._posicion_robot[:3], self._ejes_universales, ejes_robot)
```
|
{
"source": "JGNieto/JavierFTCDebugger",
"score": 4
}
|
#### File: JGNieto/JavierFTCDebugger/field.py
```python
width = 3657.6
height = 3657.6
def field_to_pixels(point, screen_size):
""" Transform a point of field coordinates to pixels on a screen """
field_x, field_y = point
screen_w, screen_h = screen_size
# x and y are flipped
screen_x = field_y / width * screen_w
screen_y = field_x / height * screen_h
# Account for pygame's weird axes.
screen_x = - screen_x + screen_w // 2
screen_y = - screen_y + screen_h // 2
return screen_x, screen_y
def get_robot_size(screen_size):
""" Compute size of the robot in pixels. """
robot_w, robot_h = 337, 433
screen_w, screen_h = screen_size
screen_x = robot_w / width * screen_w
screen_y = robot_h / height * screen_h
return screen_x, screen_y
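
# Illustrative usage sketch (assumed 800x800 window; not part of the original module).
# The field is 3657.6 units (presumably millimetres) per side.
if __name__ == "__main__":
    screen_size = (800, 800)
    px = field_to_pixels((1000.0, 500.0), screen_size)   # approx (290.6, 181.3)
    robot_px = get_robot_size(screen_size)               # approx (73.7, 94.7)
    print(px, robot_px)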
```
|
{
"source": "JGNieto/tierpsy-tracker",
"score": 2
}
|
#### File: tierpsy/gui/MWTrackerViewer.py
```python
import os
from functools import partial
import numpy as np
import pandas as pd
import tables
import matplotlib
import warnings
from PyQt5.QtCore import Qt, QPointF
from PyQt5.QtGui import QPixmap, QPainter, QFont, QPen, QPolygonF, QColor, QKeySequence, QBrush
from PyQt5.QtWidgets import QApplication, QMessageBox
from tierpsy.analysis.ske_create.helperIterROI import getWormROI
from tierpsy.analysis.split_fov.FOVMultiWellsSplitter import FOVMultiWellsSplitter
from tierpsy.gui.MWTrackerViewer_ui import Ui_MWTrackerViewer
from tierpsy.gui.TrackerViewerAux import (
TrackerViewerAuxGUI, BAD_SKEL_COLOURS, GOOD_SKEL_COLOURS)
from tierpsy.gui.PlotFeatures import PlotFeatures
from tierpsy.helper.misc import WLAB, save_modified_table
from tierpsy.analysis.split_fov.helper import get_well_color
class WellsDrawer(TrackerViewerAuxGUI):
'''
Dummy class with the wells division drawer functions
'''
def __init__(self, ui):
super().__init__(ui)
# colour
self.fovsplitter_mask = None
self.fovsplitter_feat = None
self.fovsplitter = None
self.is_fov_tosplit = None
def updateVideoFile(self, vfilename):
super().updateVideoFile(vfilename)
# check if /fov_wells exists in masked video
if self.fid is not None:
if '/fov_wells' not in self.fid:
self.is_fov_tosplit = False
else:
self.is_fov_tosplit = True
# if it exists, read it
if self.is_fov_tosplit:
# self.wells_in_mask = pd.DataFrame(
# self.fid.get_node('/fov_wells').read())
self.fovsplitter_mask = FOVMultiWellsSplitter(self.vfilename)
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
# if no skeletons, skip
if not self.skeletons_file:
return
# check if /fov_wells exists in features video
with tables.File(self.skeletons_file, 'r') as fid:
if '/fov_wells' not in fid:
self.is_fov_tosplit = False
# print("didn't find fov wells though")
else:
self.is_fov_tosplit = True
# print("found fov wells in featuresN")
# if it exists, read it
if self.is_fov_tosplit:
# print('reading fov_wells from featuresN')
# print('pre-reading:')
# print(self.wells)
# self.wells_in_feat = pd.DataFrame(
# fid.get_node('/fov_wells').read())
self.fovsplitter_feat = FOVMultiWellsSplitter(self.skeletons_file)
def draw_wells(self, image):
'''
Draw wells.
'''
if self.is_fov_tosplit:
if self.fovsplitter_feat is not None:
self.fovsplitter = self.fovsplitter_feat
else: # fall back to mask ones
print('falling back')
self.fovsplitter = self.fovsplitter_mask
# prepare constants for drawing
self.fontsize = max(1, max(image.height(), image.width()) // 60)
penwidth = max(1, max(image.height(), image.width()) // 400)
self.penwidth = penwidth if penwidth % 2 == 1 else penwidth + 1
# self.wellsC = QColor(250, 140, 0)
if 'is_good_well' in self.fovsplitter.wells.columns:
is_color_by_well = True
else:
is_color_by_well = False
# Qt drawing code
painter = QPainter()
painter.begin(image)
pen = QPen()
pen.setWidth(self.penwidth)
painter.setFont(QFont('Decorative', self.fontsize))
# loop on wells
for _, well in self.fovsplitter.wells.iterrows():
# update color every time
if is_color_by_well:
wellC = get_well_color(well['is_good_well'], forCV=True)
wellC = QColor(*wellC)
else:
wellC = QColor(250, 140, 0)
pen.setColor(wellC)
painter.setPen(pen)
# draw well name
painter.drawText(well['x_min'] + self.fontsize*0.4,
well['y_min'] + self.fontsize*1.2,
well['well_name'])
# draw rectangle
painter.drawRect(well['x_min'],
well['y_min'],
well['x_max'] - well['x_min'],
well['y_max'] - well['y_min'])
if well['is_good_well'] == False:
painter.drawLine(well['x_min'],
well['y_min'],
well['x_max'],
well['y_max'])
painter.end()
# super().keyPressEvent(event)
class FoodContourDrawer(TrackerViewerAuxGUI):
'''
Dummy class with the contour functions
'''
def __init__(self, ui):
super().__init__(ui)
self.food_coordinates = None
self.wlabC = {
WLAB['U']: Qt.white,
WLAB['WORM']: Qt.green,
WLAB['WORMS']: Qt.blue,
WLAB['BAD']: Qt.darkRed,
WLAB['GOOD_SKE']: Qt.darkCyan
}
self.ui.checkBox_showFood.stateChanged.connect(self.updateImage)
self.ui.checkBox_showFood.setEnabled(False)
self.ui.checkBox_showFood.setChecked(True)
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
if not self.skeletons_file or self.trajectories_data is None:
self.food_coordinates = None
return
with tables.File(self.skeletons_file, 'r') as fid:
if not '/food_cnt_coord' in fid:
self.food_coordinates = None
self.ui.checkBox_showFood.setEnabled(False)
else:
#change from microns to pixels
self.food_coordinates = fid.get_node('/food_cnt_coord')[:]
self.food_coordinates /= self.microns_per_pixel
self.ui.checkBox_showFood.setEnabled(True)
def draw_food_contour(self, image):
if self.food_coordinates is None or not self.ui.checkBox_showFood.isChecked():
return
painter = QPainter()
painter.begin(image)
penwidth = max(1, max(image.height(), image.width()) // 800)
col = Qt.darkMagenta
p = QPolygonF()
for x,y in self.food_coordinates:
p.append(QPointF(x,y))
pen = QPen()
pen.setWidth(penwidth)
pen.setColor(col)
painter.setPen(pen)
painter.drawPolyline(p)
painter.end()
class IntensityLabeler(TrackerViewerAuxGUI):
def __init__(self, ui):
super().__init__(ui)
self.mean_intensity = None
        self.ui.intensity_label.setStyleSheet('') #avoid displaying color at the start of the program
def updateVideoFile(self, vfilename):
super().updateVideoFile(vfilename)
if self.fid is not None:
#get mean intensity information.
#Useful for the optogenetic experiments.
try:
mean_int = self.fid.get_node('/mean_intensity')[:]
#calculate the intensity range and normalize the data.
#I am ignoring any value less than 1. The viewer only works with uint8 data.
dd = mean_int[mean_int>=1]
if dd.size == 0:
raise ValueError
bot = np.min(dd)
top = np.max(dd)
rr = top-bot
                # if the mean intensity changes by less than 1 (likely a continuous image), do nothing
if rr <= 1:
raise ValueError
self.mean_intensity = (mean_int-bot)/(rr)
except (tables.exceptions.NoSuchNodeError, ValueError):
self.mean_intensity = None
self.ui.intensity_label.setStyleSheet('')
def display_intensity(self):
if self.mean_intensity is not None and self.frame_number < self.mean_intensity.size:
d = int(self.mean_intensity[self.frame_number]*255)
self.ui.intensity_label.setStyleSheet('QLabel {background-color: rgb(%i, %i, %i);}' % (0, 0, d))
class BlobLabeler(TrackerViewerAuxGUI):
def __init__(self, ui):
super().__init__(ui)
self.wlab = WLAB
self.label_type = 'worm_label'
self.ui.pushButton_U.clicked.connect(
partial(self._h_tag_worm, self.wlab['U']))
self.ui.pushButton_W.clicked.connect(
partial(self._h_tag_worm, self.wlab['WORM']))
self.ui.pushButton_WS.clicked.connect(
partial(self._h_tag_worm, self.wlab['WORMS']))
self.ui.pushButton_B.clicked.connect(
partial(self._h_tag_worm, self.wlab['BAD']))
self.ui.pushButton_W.setShortcut(QKeySequence(Qt.Key_W))
self.ui.pushButton_U.setShortcut(QKeySequence(Qt.Key_U))
self.ui.pushButton_WS.setShortcut(QKeySequence(Qt.Key_C))
self.ui.pushButton_B.setShortcut(QKeySequence(Qt.Key_B))
def enable_label_buttons(self, value):
self.ui.pushButton_U.setEnabled(value)
self.ui.pushButton_W.setEnabled(value)
self.ui.pushButton_WS.setEnabled(value)
self.ui.pushButton_B.setEnabled(value)
def _h_tag_worm(self, label_ind):
if not self.worm_index_type == 'worm_index_manual':
return
worm_ind = self.current_worm_index
if self.frame_data is None:
return
if not worm_ind in self.frame_data['worm_index_manual'].values:
QMessageBox.critical(
self,
'The selected worm is not in this frame.',
'Select a worm in the current frame to label.',
QMessageBox.Ok)
return
good = self.trajectories_data['worm_index_manual'] == worm_ind
self.trajectories_data.loc[good, 'worm_label'] = label_ind
self.updateImage()
class ROIWorm():
def __init__(self, wormCanvas, comboBox_ROI, checkBox_ROI):
self.worm_index = None
self.wormCanvas = wormCanvas
self.comboBox_ROI = comboBox_ROI
self.checkBox_ROI = checkBox_ROI
self.comboBox_ROI.activated.connect(self.selectROI)
def selectROI(self, index):
try:
self.worm_index = int(self.comboBox_ROI.itemText(index))
except ValueError:
self.worm_index = None
@property
def isDrawSkel(self):
return self.checkBox_ROI.isChecked()
class ROIManager(TrackerViewerAuxGUI):
def __init__(self, ui):
super().__init__(ui)
self.rois = [
ROIWorm(
self.ui.wormCanvas1,
self.ui.comboBox_ROI1,
self.ui.checkBox_ROI1
),
ROIWorm(
self.ui.wormCanvas2,
self.ui.comboBox_ROI2,
self.ui.checkBox_ROI2
)
]
self.ui.radioButton_ROI1.setShortcut(QKeySequence(Qt.Key_Up))
self.ui.radioButton_ROI2.setShortcut(QKeySequence(Qt.Key_Down))
self.ui.checkBox_ROI1.stateChanged.connect(partial(self._updateROI, self.rois[0]))
self.ui.checkBox_ROI2.stateChanged.connect(partial(self._updateROI, self.rois[1]))
self.ui.comboBox_ROI1.activated.connect(partial(self._updateROI, self.rois[0]))
self.ui.comboBox_ROI2.activated.connect(partial(self._updateROI, self.rois[1]))
# flags for RW and FF
self.RW, self.FF = 1, 2
self.ui.pushButton_ROI1_RW.clicked.connect(partial(self.roiRWFF, self.RW, self.rois[0]))
self.ui.pushButton_ROI1_FF.clicked.connect(partial(self.roiRWFF, self.FF, self.rois[0]))
self.ui.pushButton_ROI2_RW.clicked.connect(partial(self.roiRWFF, self.RW, self.rois[1]))
self.ui.pushButton_ROI2_FF.clicked.connect(partial(self.roiRWFF, self.FF, self.rois[1]))
@property
def current_roi(self):
if self.ui.radioButton_ROI1.isChecked():
return self.rois[0]
elif self.ui.radioButton_ROI2.isChecked():
return self.rois[1]
else:
raise ValueError("I shouldn't be here")
@property
def current_worm_index(self):
return self.current_roi.worm_index
def updateSkelFile(self, skeletons_file):
for roi in self.rois:
roi.worm_index = None
super().updateSkelFile(skeletons_file)
def keyPressEvent(self, event):
#MORE SHORTCUTS
# go the the start of end of a trajectory
if event.key() == Qt.Key_BracketLeft:
self.roiRWFF(self.RW, self.current_roi)
elif event.key() == Qt.Key_BracketRight:
self.roiRWFF(self.FF, self.current_roi)
super().keyPressEvent(event)
def updateROIcomboBox(self, roi):
# update valid index for the comboBox
roi.comboBox_ROI.clear()
if roi.worm_index is not None:
roi.comboBox_ROI.addItem(str(int(roi.worm_index)))
for ind in self.frame_data[self.worm_index_type]:
roi.comboBox_ROI.addItem(str(int(ind)))
if roi.worm_index is None:
w_ind = float(roi.comboBox_ROI.itemText(0))
roi.worm_index = int(w_ind)
# function that generalized the updating of the ROI
def _updateROI(self, roi):
if self.frame_data is None or not self.worm_index_type:
# no trajectories data presented, nothing to do here
roi.wormCanvas.clear()
return
self.updateROIcomboBox(roi)
# extract individual worm ROI
good = self.frame_data[self.worm_index_type] == roi.worm_index
row_data = self.frame_data.loc[good].squeeze()
if row_data.size == 0 or \
np.isnan(row_data['coord_x']) or \
np.isnan(row_data['coord_y']):
# invalid data nothing to do here
roi.wormCanvas.clear()
return
worm_img, roi_corner = getWormROI(self.frame_img,
row_data['coord_x'],
row_data['coord_y'],
row_data['roi_size']
)
roi_ori_size = worm_img.shape
worm_img = np.ascontiguousarray(worm_img)
worm_qimg = self._convert2Qimg(worm_img)
canvas_size = min(roi.wormCanvas.height(), roi.wormCanvas.width())
worm_qimg = worm_qimg.scaled(
canvas_size, canvas_size, Qt.KeepAspectRatio)
worm_qimg = self.drawSkelResult(worm_img, worm_qimg, row_data, roi.isDrawSkel, roi_corner, read_center=False)
pixmap = QPixmap.fromImage(worm_qimg)
roi.wormCanvas.setPixmap(pixmap)
def updateROIs(self):
for roi in self.rois:
self._updateROI(roi)
def clearROIs(self):
for roi in self.rois:
roi.wormCanvas.clear()
# move to the first or the last frames of a trajectory
def roiRWFF(self, rwff, roi):
if self.frame_data is None:
return
# use 1 for rewind RW or 2 of fast forward
good = self.trajectories_data[self.worm_index_type] == roi.worm_index
frames = self.trajectories_data.loc[good, 'frame_number']
if frames.size == 0:
return
if rwff == self.RW:
self.frame_number = frames.min()
elif rwff == self.FF:
self.frame_number = frames.max()
else:
raise ValueError('Invalid rwff value : {} '.format(rwff))
self.ui.spinBox_frame.setValue(self.frame_number)
class TrajectoryEditor(ROIManager):
def __init__(self, ui):
super().__init__(ui)
self.ui.pushButton_join.clicked.connect(self.joinTraj)
self.ui.pushButton_split.clicked.connect(self.splitTraj)
#SHORTCUTS
self.ui.pushButton_join.setShortcut(QKeySequence(Qt.Key_J))
self.ui.pushButton_split.setShortcut(QKeySequence(Qt.Key_S))
def enable_trajectories_buttons(self, value):
self.ui.pushButton_join.setEnabled(value)
self.ui.pushButton_split.setEnabled(value)
def joinTraj(self):
if self.worm_index_type != 'worm_index_manual' \
or self.frame_data is None:
return
worm_ind1 = self.rois[0].worm_index
worm_ind2 = self.rois[1].worm_index
if worm_ind1 == worm_ind2:
QMessageBox.critical(
self,
'Cannot join the same trajectory with itself',
'Cannot join the same trajectory with itself.',
QMessageBox.Ok)
return
index1 = (self.trajectories_data[
'worm_index_manual'] == worm_ind1).values
index2 = (self.trajectories_data[
'worm_index_manual'] == worm_ind2).values
        # if the trajectories do not overlap they shouldn't have frame_number
        # indexes in common
frame_number = self.trajectories_data.loc[
index1 | index2, 'frame_number']
if frame_number.size != np.unique(frame_number).size:
QMessageBox.critical(
self,
                'Cannot join overlapping trajectories',
                'Cannot join overlapping trajectories.',
QMessageBox.Ok)
return
if not (worm_ind1 in self.frame_data[
'worm_index_manual'].values or worm_ind2 in self.frame_data['worm_index_manual'].values):
reply = QMessageBox.question(
self,
'Message',
"The none of the selected worms to join is not in this frame. Are you sure to continue?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply == QMessageBox.No:
return
# get the first row for each segment to extract some data
first_row1 = self.trajectories_data.loc[index1, :].iloc[0]
first_row2 = self.trajectories_data.loc[index2, :].iloc[0]
# join trajectories
self.trajectories_data.loc[
index2, 'worm_label'] = first_row1['worm_label']
self.trajectories_data.loc[index2, 'worm_index_manual'] = worm_ind1
self.rois[0].worm_index = worm_ind1
self.rois[1].worm_index = worm_ind1
#this might be too slow. I might need to change it
self.traj_worm_index_grouped = self.trajectories_data.groupby(self.worm_index_type)
self.updateImage()
def splitTraj(self):
if self.worm_index_type != 'worm_index_manual' \
or self.frame_data is None:
return
worm_ind = self.current_worm_index
        if not worm_ind in self.frame_data['worm_index_manual'].values:
QMessageBox.critical(
self,
'Worm index is not in the current frame.',
'Worm index is not in the current frame. Select a valid index.',
QMessageBox.Ok)
return
last_index = self.trajectories_data['worm_index_manual'].max()
new_ind1 = last_index + 1
new_ind2 = last_index + 2
good = self.trajectories_data['worm_index_manual'] == worm_ind
frames = self.trajectories_data.loc[good, 'frame_number']
frames = frames.sort_values(inplace=False)
good = frames < self.frame_number
index1 = frames[good].index
index2 = frames[~good].index
        self.trajectories_data.loc[index1, 'worm_index_manual'] = new_ind1
        self.trajectories_data.loc[index2, 'worm_index_manual'] = new_ind2
self.rois[0].index = new_ind1
self.rois[1].index = new_ind2
#this might be too slow. I might need to change it
self.traj_worm_index_grouped = self.trajectories_data.groupby(self.worm_index_type)
self.updateImage()
class FeatureReaderBase(TrackerViewerAuxGUI):
index_cols = ['worm_index', 'timestamp', 'motion_modes', 'skeleton_id', 'well_name']
valid_fields = ['/timeseries_data', '/features_timeseries']
def __init__(self, ui):
self.timeseries_data = None
self.feat_column = ''
super().__init__(ui)
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
try:
self.traj_colors = {}
with pd.HDFStore(self.skeletons_file, 'r') as ske_file_id:
for field in self.valid_fields:
if field in ske_file_id:
self.timeseries_data = ske_file_id[field]
if field == '/timeseries_data':
blob_features = ske_file_id['/blob_features']
blob_features.columns = ['blob_' + x for x in blob_features.columns]
self.timeseries_data = pd.concat((self.timeseries_data, blob_features), axis=1)
break
else:
raise KeyError
            if len(self.timeseries_data) != len(self.trajectories_data):
                raise ValueError('timeseries_data and trajectories_data do not match. You might be using an old version of featuresN.hdf5')
self.valid_features = [x for x in self.timeseries_data.columns if x not in self.index_cols]
except (TypeError, AttributeError, IOError, KeyError, tables.exceptions.HDF5ExtError):
self.valid_features = None
self.timeseries_data = None
class MarkersDrawer(FeatureReaderBase):
def __init__(self, ui):
super().__init__(ui)
self.traj_colors = {}
self.n_points_traj = 250
self.n_colors = 256
cmap = matplotlib.cm.get_cmap("bwr")
palette = [cmap(x) for x in np.linspace(0, 1, self.n_colors)]
#palette = sns.color_palette("RdBu_r", self.n_colors)
        palette = np.round(np.array(palette)*255).astype(int)
self.palette = [QColor(*x) for x in palette]
self.drawT = {x: self.ui.comboBox_drawType.findText(x , flags=Qt.MatchContains)
for x in ['boxes', 'traj', 'skel']}
self.showT = {x: self.ui.comboBox_showLabels.findText(x , flags=Qt.MatchContains)
for x in ['hide', 'all', 'filter']}
self.ui.comboBox_showLabels.setCurrentIndex(self.showT['all'])
self.ui.comboBox_showLabels.currentIndexChanged.connect(self.updateImage)
self.ui.comboBox_drawType.currentIndexChanged.connect(self.updateImage)
self.ui.feature_column.currentIndexChanged.connect(self.change_feature)
self.ui.feat_max_value.valueChanged.connect(self.updateImage)
self.ui.feat_min_value.valueChanged.connect(self.updateImage)
self.ui.is_color_features.stateChanged.connect(self.updateImage)
self.enable_color_feats(False)
self.ui.spinBox_step.valueChanged.connect(self.updateImage)
def updateSkelFile(self, skeletons_file):
self.ui.is_color_features.setChecked(False)
super().updateSkelFile(skeletons_file)
self.ui.feature_column.clear()
if self.timeseries_data is None:
#no feature data
self.enable_color_feats(False)
else:
self.enable_color_feats(True)
self.ui.feature_column.addItems(self.valid_features)
self._h_find_feat_limits()
def change_feature(self):
self._h_find_feat_limits()
self.updateImage()
def _h_find_feat_limits(self):
self.feat_column = str(self.ui.feature_column.currentText())
print(self.feat_column)
if self.feat_column and self.timeseries_data is not None:
f_max = self.timeseries_data[self.feat_column].max()
f_min = self.timeseries_data[self.feat_column].min()
q1, q2 = self.timeseries_data[self.feat_column].quantile([0.02, 0.98])
else:
f_min, f_max, q1, q2 = 0,0,0,0
self.ui.feat_max_value.setRange(f_min, f_max)
self.ui.feat_min_value.setRange(f_min, f_max)
self.ui.feat_min_value.setValue(q1)
self.ui.feat_max_value.setValue(q2)
def enable_color_feats(self, value):
self.ui.feature_column.setEnabled(value)
self.ui.feat_min_value.setEnabled(value)
self.ui.feat_max_value.setEnabled(value)
self.ui.is_color_features.setEnabled(value)
def _h_assign_feat_color(self, irow):
feat_val = self.timeseries_data.loc[irow, self.feat_column]
        if (feat_val != feat_val):
            # NaN check: a value compares unequal to itself only if it is NaN
            return Qt.black
#this function can and should be optimized
f_min = self.ui.feat_min_value.value()
f_max = self.ui.feat_max_value.value()
if f_min == f_max: #dummy range in case all the values are the same
f_min, f_max = -1, 1
elif f_min > f_max:
return Qt.black
nn = np.clip((feat_val - f_min)/(f_max - f_min), 0, 1)
ind = int(np.round(nn*(self.n_colors-1)))
col = self.palette[ind]
return col
def draw_worm_markers(self, image):
        '''
        Draw per-worm markers (index boxes, trajectories or skeletons) for the current frame.
        '''
if not self.worm_index_type in self.frame_data or \
self.ui.comboBox_showLabels.currentIndex() == self.showT['hide']:
return
if hasattr(self, 'current_worm_index'):
current_index = self.current_worm_index
else:
current_index = -1
painter = QPainter()
painter.begin(image)
self.fontsize = max(1, max(image.height(), image.width()) // 120)
penwidth = max(1, max(image.height(), image.width()) // 800)
self.penwidth = penwidth if penwidth % 2 == 1 else penwidth + 1
if not self.label_type in self.frame_data:
self.frame_data[self.label_type] = self.wlab['U']
for row_id, row_data in self.frame_data.iterrows():
# check if the coordinates are nan
if np.isnan(row_data['coord_x']) or np.isnan(row_data['coord_y']):
continue
#if select between showing filtered index or not
if self.ui.comboBox_showLabels.currentIndex() == self.showT['filter']:
continue
is_current_index = current_index == int(row_data[self.worm_index_type])
cb_ind = self.ui.comboBox_drawType.currentIndex()
if cb_ind == self.drawT['boxes']:
self.draw_boxes(painter, row_id, row_data, is_current_index)
elif cb_ind == self.drawT['traj']:
self.draw_trajectories(painter, row_data, is_current_index)
elif cb_ind == self.drawT['skel']:
self.draw_skeletons(painter, row_id, row_data, is_current_index)
painter.end()
def _h_get_trajectory(self, worm_index, current_frame):
worm_data = self.traj_worm_index_grouped.get_group(worm_index)
valid_index = worm_data.index[worm_data['frame_number']<= current_frame]
ini = max(0, valid_index.size - self.frame_step*self.n_points_traj)
traj_ind = valid_index.values[ini::self.frame_step]
traj_data = worm_data.loc[traj_ind]
return traj_data
def draw_trajectories(self, painter, row_data, is_current_index):
if self.traj_worm_index_grouped is None:
return
worm_index = int(row_data[self.worm_index_type])
current_frame = row_data['frame_number']
traj_data = self._h_get_trajectory(worm_index, current_frame)
traj_data = traj_data.dropna(subset=['coord_x', 'coord_y'])
x_v = traj_data['coord_x'].round()
y_v = traj_data['coord_y'].round()
points = [QPointF(*map(int, c)) for c in zip(x_v, y_v)]
if self.ui.is_color_features.isChecked():
vec_color = [self._h_assign_feat_color(x) for x in traj_data.index]
pen = QPen()
pen.setWidth(self.penwidth)
for p1, p2, c in zip(points[1:], points[:-1], vec_color):
pen.setColor(c)
painter.setPen(pen)
painter.drawLine(p1, p2)
else:
pol = QPolygonF()
for p in points:
pol.append(p)
if not worm_index in self.traj_colors:
self.traj_colors[worm_index] = QColor(*np.random.randint(50, 230, 3))
col = self.traj_colors[worm_index]
pen = QPen()
pen.setWidth(self.penwidth)
pen.setColor(col)
painter.setPen(pen)
painter.drawPolyline(pol)
def draw_boxes(self, painter, row_id, row_data, is_current_index):
        '''
        Draw an index label and bounding box for the worm.
        '''
worm_index = int(row_data[self.worm_index_type])
x = int(round(row_data['coord_x']))
y = int(round(row_data['coord_y']))
        if not self.ui.is_color_features.isChecked():
            label_color = self.wlabC[int(row_data[self.label_type])]
        else:
            label_color = self._h_assign_feat_color(row_id)
pen = QPen()
pen.setColor(label_color)
pen.setWidth(self.penwidth)
painter.setPen(pen)
painter.setFont(QFont('Decorative', self.fontsize))
painter.drawText(x, y, str(worm_index))
bb = row_data['roi_size']
painter.drawRect(x - bb / 2, y - bb / 2, bb, bb)
if is_current_index:
b_size = bb//5
offset = bb/2 - b_size
painter.fillRect(x + offset, y + offset, b_size, b_size, QBrush(label_color))
def draw_skeletons(self, painter, roi_id, row_data, is_current_index):
if self.traj_worm_index_grouped is None:
return
if self.coordinates_fields is None:
return
worm_index = int(row_data[self.worm_index_type])
skel_id = int(row_data['skeleton_id'])
if self.coordinates_fields is None or skel_id < 0:
return
skel_dat = {}
with tables.File(self.skeletons_file, 'r') as skel_file_id:
# print(self.coordinates_group)
# print(self.coordinates_fields)
for ff, tt in self.coordinates_fields.items():
field = self.coordinates_group + ff
if field in skel_file_id:
dat = skel_file_id.get_node(field)[skel_id]
dat /= self.microns_per_pixel
if self.stage_position_pix is not None and self.stage_position_pix.size > 0:
#subtract stage motion if necessary
dat -= self.stage_position_pix[self.frame_number]
#dat[:, 0] = (dat[:, 0] - roi_corner[0] + 0.5) * c_ratio_x
#dat[:, 1] = (dat[:, 1] - roi_corner[1] + 0.5) * c_ratio_y
else:
dat = np.full((1,2), np.nan)
skel_dat[tt] = dat
if 'is_good_skel' in row_data and row_data['is_good_skel'] == 0:
skel_colors = BAD_SKEL_COLOURS
else:
skel_colors = GOOD_SKEL_COLOURS
qPlg = {}
for tt, dat in skel_dat.items():
qPlg[tt] = QPolygonF()
for p in dat:
#do not add point if it is nan
if p[0] == p[0]:
qPlg[tt].append(QPointF(*p))
if not qPlg or len(qPlg['skeleton']) == 0:
#all nan skeleton nothing to do here...
return
pen = QPen()
pen.setWidth(0.5)
# pen.setColor(QColor(col))
# painter.setPen(pen)
# painter.drawPolyline(pol_v)
for k, pol_v in qPlg.items():
color = skel_colors[k]
pen.setColor(QColor(*color))
painter.setPen(pen)
painter.drawPolyline(pol_v)
pen.setColor(Qt.black)
painter.setBrush(Qt.white)
painter.setPen(pen)
radius = 2
painter.drawEllipse(qPlg['skeleton'][0], radius, radius)
painter.drawEllipse(QPointF(0,0), radius, radius)
class PlotCommunicator(FeatureReaderBase, ROIManager):
def __init__(self, ui=''):
super().__init__(ui)
self.ui.pushButton_plot.setEnabled(False)
self.ui.pushButton_plot.clicked.connect(self.show_plot)
self.plotter = None
def closePrev(self):
if self.plotter is not None:
self.plotter.close()
self.plotter = None
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
self.closePrev()
if self.timeseries_data is None:
self.ui.pushButton_plot.setEnabled(False)
else:
self.ui.pushButton_plot.setEnabled(True)
def show_plot(self):
self.closePrev()
self.plotter = PlotFeatures(self.skeletons_file,
self.timeseries_data,
self.traj_worm_index_grouped,
self.time_units,
self.xy_units,
self.fps,
parent = self)
self.plotter.setWindowFlags(self.plotter.windowFlags() | Qt.WindowStaysOnTopHint)
self.plotter.show()
self.update_plot()
def update_plot(self):
if self.plotter:
self.plotter.plot(self.current_worm_index, self.feat_column)
class MWTrackerViewer_GUI( MarkersDrawer, PlotCommunicator,
FoodContourDrawer, BlobLabeler, IntensityLabeler, TrajectoryEditor, WellsDrawer):
def __init__(self, ui='', argv=''):
if not ui:
super().__init__(Ui_MWTrackerViewer())
else:
super().__init__(ui)
self.setWindowTitle("Multi-Worm Viewer")
self.vfilename = '' if len(argv) <= 1 else argv[1]
self.videos_dir = r"/Volumes/behavgenom$/GeckoVideo/MaskedVideos/"
self.results_dir = ''
self.skeletons_file = ''
self.worm_index_type = 'worm_index_manual'
self.frame_data = None
self.ui.comboBox_labelType.currentIndexChanged.connect(self.selectWormIndexType)
self.ui.pushButton_save.clicked.connect(self.saveData)
# select worm ROI when doubleclick a worm
self.mainImage._canvas.mouseDoubleClickEvent = self.selectWorm
self.mainImage._canvas.mouseRightClickEvent = self.toggleWellStatus
self.ui.comboBox_ROI1.activated.connect(self.update_plot)
self.ui.comboBox_ROI2.activated.connect(self.update_plot)
def saveData(self):
'''save data from manual labelling. pytables saving format is more convenient than pandas'''
if os.name == 'nt':
            # On Windows the paths returned by QFileDialog use / as the file
            # separator character. We need to correct it.
for field_name in ['vfilename', 'skeletons_file']:
setattr(
self, field_name, getattr(
self, field_name).replace(
'/', os.sep))
has_skeletons_file = ((self.skeletons_file is not None)
and (self.skeletons_file != ''))
if has_skeletons_file:
save_modified_table(self.skeletons_file,
self.trajectories_data,
'trajectories_data')
if self.is_fov_tosplit:
if has_skeletons_file:
self.fovsplitter.write_fov_wells_to_file(self.skeletons_file)
else:
warnings.warn('No skeletons file. Saving wells info in masked video')
self.fid.close()
self.fovsplitter.write_fov_wells_to_file(self.vfilename)
# self.fid = tables.File(self.vfilename, 'r')
self.updateVideoFile(self.vfilename)
if has_skeletons_file:
self.updateSkelFile(self.skeletons_file)
def updateVideoFile(self, vfilename):
super().updateVideoFile(vfilename)
self.updateImage()
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
if self.trajectories_data is None:
#empty file nothing to do here
self.updateImage()
return
#correct the `worm_index_N` to the actual name `worm_index_manual`
if 'worm_index_N' in self.trajectories_data:
self.trajectories_data = self.trajectories_data.rename(
columns={'worm_index_N': 'worm_index_manual'})
        #if this is really a trajectories_data table (not a _features.hdf5) add `worm_index_manual` if it does not exist
if not 'worm_index_manual' in self.trajectories_data and not self.is_estimated_trajectories_data:
self.trajectories_data['worm_label'] = self.wlab['U']
self.trajectories_data['worm_index_manual'] = self.trajectories_data['worm_index_joined']
        #deactivate the save option if we are dealing with estimated data...
self.ui.pushButton_save.setEnabled(not self.is_estimated_trajectories_data)
#add this column if it does not exist
if not 'has_skeleton' in self.trajectories_data:
self.trajectories_data['has_skeleton'] = (
self.trajectories_data['skeleton_id'] >= 0).astype(np.uint8)
self.updateWormIndexTypeMenu()
self.updateImage()
def updateWormIndexTypeMenu(self):
possible_indexes = [x.replace('worm_index_', '') for x in self.trajectories_data.columns if x.startswith('worm_index_')]
assert len(set(possible_indexes)) == len(possible_indexes) #all indexes ending must be different
menu_names = sorted([x + ' index' for x in possible_indexes])
self.ui.comboBox_labelType.clear()
self.ui.comboBox_labelType.addItems(menu_names)
if 'manual' in possible_indexes:
dd = self.ui.comboBox_labelType.findText('manual index')
self.ui.comboBox_labelType.setCurrentIndex(dd);
self.selectWormIndexType()
def selectWormIndexType(self):
index_option = self.ui.comboBox_labelType.currentText()
if not index_option:
return
assert index_option.endswith(' index')
self.worm_index_type = 'worm_index_' + index_option.replace(' index', '')
# select between automatic and manual worm indexing and label
if self.worm_index_type == 'worm_index_manual':
self.label_type = 'worm_label'
self.enable_trajectories_buttons(True)
self.enable_label_buttons(True)
else:
self.label_type = 'auto_label'
self.enable_trajectories_buttons(False)
self.enable_label_buttons(False)
#recalculate the grouped indexes
self.traj_worm_index_grouped = self.trajectories_data.groupby(self.worm_index_type)
self.updateImage()
# update image
def updateImage(self):
if (self.image_group is None) and (self.isimgstore is False):
return
super(TrackerViewerAuxGUI, self).readCurrentFrame()
# read the data of the particles that exists in the frame
self.frame_data = self.getFrameData(self.frame_number)
#draw extra info only if the worm_index_type is valid
if self.frame_data is not None and \
self.worm_index_type in self.frame_data:
#filter any -1 index
self.frame_data = self.frame_data[self.frame_data[self.worm_index_type]>=0]
if self.frame_data.size > 0:
self.draw_worm_markers(self.frame_qimg)
self.draw_food_contour(self.frame_qimg)
self.updateROIs()
else:
self.clearROIs()
# plot wells
self.draw_wells(self.frame_qimg)
# create the pixmap for the label
self.mainImage.setPixmap(self.frame_qimg)
self.display_intensity()
def selectWorm(self, event):
x = event.pos().x()
y = event.pos().y()
print(x,y)
if self.frame_data is None or self.frame_data.size == 0:
return
R = (x - self.frame_data['coord_x'])**2 + \
(y - self.frame_data['coord_y'])**2
ind = R.idxmin()
good_row = self.frame_data.loc[ind]
if np.sqrt(R.loc[ind]) < good_row['roi_size']:
self.current_roi.worm_index = int(good_row[self.worm_index_type])
self.update_plot()
self.updateImage()
def toggleWellStatus(self, event):
# abort if not multifov
if self.is_fov_tosplit != True:
return
# event is for sure a right click or this does not get called
x = event.pos().x()
y = event.pos().y()
# this will always return something. n/a if clicking outside a well
well_name = self.fovsplitter.find_well_of_xy(x, y)[0].decode('utf-8')
idx = self.fovsplitter.wells['well_name'] == str(well_name)
self.fovsplitter.wells.loc[idx, 'is_good_well'] = \
np.mod(self.fovsplitter.wells.loc[idx, 'is_good_well']+1, 2)
# print(self.fovsplitter.wells)
self.updateImage()
def joinTraj(self):
super().joinTraj()
self.update_plot()
def splitTraj(self):
super().splitTraj()
self.update_plot()
def change_feature(self):
super().change_feature()
self.update_plot()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
main = MWTrackerViewer_GUI(argv=sys.argv)
#mask_file = '/Users/avelinojaver/OneDrive - Imperial College London/tierpsy_examples/mutliworm_example/BRC20067_worms10_food1-10_Set2_Pos5_Ch2_02062017_121709.hdf5'
#mask_file = '/Volumes/rescomp1/data/WormData/screenings/Pratheeban/First_Set/MaskedVideos/Old_Adult/16_07_22/W3_ELA_1.0_Ch1_22072016_131149.hdf5'
#mask_file = '/Users/avelinojaver/Documents/GitHub/tierpsy-tracker/tests/data/AVI_VIDEOS/MaskedVideos/AVI_VIDEOS_1.hdf5'
# mask_file = '/Users/avelinojaver/Documents/GitHub/tierpsy-tracker/tests/data/WT2/MaskedVideos/WT2.hdf5'
mask_file = '/Users/lferiani/Hackathon/multiwell_tierpsy/12_FEAT_TIERPSY/MaskedVideos/20191205/syngenta_screen_run1_bluelight_20191205_151104.22956805/metadata.hdf5'
main.updateVideoFile(mask_file)
main.show()
sys.exit(app.exec_())
```
|
{
"source": "JGoard/teensy-rs485-arm-control",
"score": 2
}
|
#### File: accelerometer/src/lsm_iic.py
```python
import board
import busio
import rospy
from adafruit_lsm6ds.lsm6dsox import LSM6DSOX
from sensor_msgs.msg import Imu
def main():
rospy.init_node('accelerometer', anonymous=False)
pub = rospy.Publisher("imu", Imu, queue_size=10)
print(board.SCL, board.SDA)
i2c = busio.I2C(board.SCL, board.SDA)
    sensor = LSM6DSOX(i2c)  # NOTE: the log message below names an ISM330DHCX, but this driver class is for the LSM6DSOX
rospy.loginfo('ISM330DHCX 6DOF Accelerometer Publishing to IMU')
imu_msg = Imu()
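    # An all-zero covariance matrix is interpreted as "covariance unknown" by the
    # sensor_msgs/Imu convention.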
imu_msg.linear_acceleration_covariance = [
0, 0, 0,
0, 0, 0,
0, 0, 0
]
imu_msg.angular_velocity_covariance = [
0, 0, 0,
0, 0, 0,
0, 0, 0
]
while not rospy.is_shutdown():
x, y, z = sensor.acceleration
u,v,w = sensor.gyro
imu_msg.angular_velocity.x = u
imu_msg.angular_velocity.y = v
imu_msg.angular_velocity.z = w
imu_msg.linear_acceleration.x = x
imu_msg.linear_acceleration.y = y
imu_msg.linear_acceleration.z = z
pub.publish(imu_msg)
rospy.sleep(1)
rospy.loginfo('ISM330DHCX Accelerometer Offline')
if __name__ == '__main__':
main()
```
#### File: accelerometer/src/mma_iic.py
```python
import board
import busio
import rospy
import adafruit_lsm6ds
from sensor_msgs.msg import Imu
def main():
rospy.init_node('accelerometer', anonymous=False)
pub = rospy.Publisher("imu", Imu, queue_size=10)
rospy.loginfo('MMA8451 3DOF Accelerometer Publishing to IMU')
i2c = busio.I2C(board.SCL, board.SDA)
    # NOTE: despite the MMA8451 log message and file name, the driver instantiated
    # here is adafruit_lsm6ds.LSM6DSOX; one of the two is likely a leftover.
    sensor = adafruit_lsm6ds.LSM6DSOX(i2c)
imu_msg = Imu()
imu_msg.linear_acceleration_covariance = [
0, 0, 0,
0, 0, 0,
0, 0, 0
]
imu_msg.angular_velocity_covariance = [
0, 0, 0,
0, 0, 0,
0, 0, 0
]
while not rospy.is_shutdown():
x, y, z = sensor.acceleration
imu_msg.linear_acceleration.x = x
imu_msg.linear_acceleration.y = y
imu_msg.linear_acceleration.z = z
pub.publish(imu_msg)
rospy.sleep(1)
rospy.logwarn('MMA8451 Accelerometer Offline')
if __name__ == '__main__':
main()
```
|
{
"source": "jgobuyan/pyrf",
"score": 2
}
|
#### File: pyrf/devices/playback.py
```python
from pyrf.devices.thinkrf_properties import wsa_properties
class Playback(object):
def __init__(self, device_class, device_identifier):
# XXX this is all we support for now
assert device_class == 'thinkrf.WSA'
self.properties = wsa_properties(device_identifier)
self.device_id = device_identifier
def async_connector(self):
return False
def disconnect(self):
pass
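# Illustrative usage sketch (the identifier string is a made-up example, not from
# the original file):
#
#   pb = Playback('thinkrf.WSA', 'WSA5000-220')
#   pb.properties    # wsa_properties(...) resolved from the identifier
#   pb.disconnect()  # no-op for playback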
```
#### File: pyrf/gui/device_controls.py
```python
from PySide import QtGui, QtCore
from pyrf.units import M
from pyrf.gui.fonts import GROUP_BOX_FONT
from pyrf.gui.util import clear_layout
from pyrf.gui.widgets import (QComboBoxPlayback, QCheckBoxPlayback,
QDoubleSpinBoxPlayback)
class DeviceControls(QtGui.QWidget):
"""
    A QWidget with a grid layout containing the widgets used to control the
    WSA4000/WSA5000.
    :param controller: the GUI controller that emits device_change/state_change signals
"""
def __init__(self, controller):
super(DeviceControls, self).__init__()
self.controller = controller
controller.device_change.connect(self.device_changed)
controller.state_change.connect(self.state_changed)
self._create_controls()
self.setLayout(QtGui.QGridLayout())
self._build_layout()
self._connect_device_controls()
def _create_controls(self):
self._dec_label = QtGui.QLabel('DDC:')
self._dec_box = QComboBoxPlayback()
self._dec_box.setToolTip("Choose Decimation Rate")
# FIXME: use values from device properties
dec_values = ['1', '4', '8', '16', '32', '64', '128', '256', '512', '1024']
for d in dec_values:
self._dec_box.addItem(d)
self._dec_values = dec_values
self._fshift_label = QtGui.QLabel("FShift:")
self._fshift_edit = QDoubleSpinBoxPlayback()
self._fshift_edit.setSuffix(' MHz')
self._fshift_edit.setToolTip("Frequency Shift")
self._fshift_edit.setWrapping(True)
self._antenna_label = QtGui.QLabel('Antenna:')
self._antenna_box = QComboBoxPlayback()
self._antenna_box.setToolTip("Choose Antenna")
self._antenna_box.quiet_update(["Antenna 1", "Antenna 2"])
self._iq_output_label = QtGui.QLabel("IQ Path:")
self._iq_output_box = QComboBoxPlayback()
self._iq_output_box.setToolTip("Choose IQ Path")
self._iq_output_box.quiet_update(["Digitizer", "Connector"])
self._gain_label = QtGui.QLabel("RF Gain:")
self._gain_box = QComboBoxPlayback()
self._gain_box.setToolTip("Choose RF Gain setting")
gain_values = ['VLow', 'Low', 'Med', 'High']
self._gain_box.quiet_update(gain_values)
self._gain_values = [g.lower() for g in gain_values]
self._ifgain_label = QtGui.QLabel("IF Gain:")
self._ifgain_box = QtGui.QSpinBox()
self._ifgain_box.setToolTip("Choose IF Gain setting")
# FIXME: use values from device properties
self._ifgain_box.setRange(-10, 25)
self._ifgain_box.setSuffix(" dB")
self._pll_label = QtGui.QLabel("PLL Ref:")
self._pll_box = QComboBoxPlayback()
self._pll_box.setToolTip("Choose PLL Reference")
self._pll_box.quiet_update(["Internal", "External"])
def _build_layout(self, dut_prop=None):
features = dut_prop.SWEEP_SETTINGS if dut_prop else []
grid = self.layout()
clear_layout(grid)
grid.addWidget(self._dec_label, 1, 0, 1, 1)
grid.addWidget(self._dec_box, 1, 1, 1, 1)
grid.addWidget(self._fshift_label, 1, 3, 1, 1)
grid.addWidget(self._fshift_edit, 1, 4, 1, 1)
# 4k features
if 'antenna' in features:
grid.addWidget(self._antenna_label, 2, 3, 1, 1)
grid.addWidget(self._antenna_box, 2, 4, 1, 1)
if 'gain' in features:
grid.addWidget(self._gain_label, 3, 0, 1, 1)
grid.addWidget(self._gain_box, 3, 1, 1, 1)
# FIXME: 'ifgain' appears in 5k list too
grid.addWidget(self._ifgain_label, 3, 3, 1, 1)
grid.addWidget(self._ifgain_box, 3, 4, 1, 1)
# 5k features
if 'attenuator' in features:
if dut_prop.IQ_OUTPUT_CONNECTOR:
grid.addWidget(self._iq_output_label, 3, 3, 1, 1)
grid.addWidget(self._iq_output_box, 3, 4, 1, 1)
grid.addWidget(self._pll_label, 3, 0, 1, 1)
grid.addWidget(self._pll_box, 3, 1, 1, 1)
grid.setColumnStretch(0, 4)
grid.setColumnStretch(1, 8)
grid.setColumnStretch(2, 1)
grid.setColumnStretch(3, 4)
grid.setColumnStretch(4, 8)
grid.setRowStretch(7, 1) # expand empty space at the bottom
self.resize_widget()
def _connect_device_controls(self):
def new_antenna():
self.controller.apply_device_settings(antenna=
int(self._antenna_box.currentText().split()[-1]))
def new_dec():
self.controller.apply_settings(decimation=int(
self._dec_box.currentText()))
def new_freq_shift():
self.controller.apply_settings(
fshift=self._fshift_edit.value() * M)
def new_gain():
self.plot_state.dev_set['gain'] = self._gain_box.currentText().split()[-1].lower().encode('ascii')
self.cap_dut.configure_device(self.plot_state.dev_set)
def new_ifgain():
self.plot_state.dev_set['ifgain'] = self._ifgain_box.value()
self.cap_dut.configure_device(self.plot_state.dev_set)
def new_pll_reference():
if self._pll_box.currentText() == 'Internal':
src = 'INT'
else:
src = 'EXT'
self.controller.apply_device_settings(pll_reference=src)
def new_iq_path():
self.controller.apply_device_settings(
iq_output_path= str(self._iq_output_box.currentText().upper()))
self._antenna_box.currentIndexChanged.connect(new_antenna)
self._gain_box.currentIndexChanged.connect(new_gain)
self._dec_box.currentIndexChanged.connect(new_dec)
self._fshift_edit.valueChanged.connect(new_freq_shift)
self._ifgain_box.valueChanged.connect(new_ifgain)
self._iq_output_box.currentIndexChanged.connect(new_iq_path)
self._pll_box.currentIndexChanged.connect(new_pll_reference)
def device_changed(self, dut):
self.dut_prop = dut.properties
self._build_layout(self.dut_prop)
def state_changed(self, state, changed):
if state.playback:
# for playback simply update everything on every state change
self._dec_box.playback_value(str(state.decimation))
self._fshift_edit.playback_value(state.fshift / M)
self._pll_box.playback_value('External'
if state.device_settings.get('pll_reference') == 'EXT' else
'Internal')
self._iq_output_box.playback_value('Digitizer')
return
if 'playback' in changed:
decimation_available = self.dut_prop.MIN_DECIMATION[
state.rfe_mode()] is not None
self._dec_box.setEnabled(decimation_available)
self._fshift_edit.setEnabled(decimation_available)
self._pll_box.quiet_update(["Internal", "External"])
self._pll_box.setEnabled(True)
self._iq_output_box.quiet_update(["Digitizer", "Connector"])
self._iq_output_box.setEnabled(True)
if 'device_settings.trigger' in changed:
if state.device_settings['trigger']['type'] == 'None':
                # NOTE: self._level_trigger (and the _trig_* widgets referenced further
                # below) are not created in this class; they appear to belong to a
                # related trigger-controls widget.
                if self._level_trigger.isChecked():
                    self._level_trigger.click()
if 'mode' in changed:
if state.sweeping():
self._dec_box.setEnabled(False)
self._fshift_edit.setEnabled(False)
else:
decimation_available = self.dut_prop.MIN_DECIMATION[
state.rfe_mode()] is not None
self._dec_box.setEnabled(decimation_available)
self._fshift_edit.setEnabled(decimation_available)
fshift_max = self.dut_prop.FULL_BW[state.rfe_mode()] / M
self._fshift_edit.setRange(-fshift_max, fshift_max)
if 'device_settings.iq_output_path' in changed:
if 'CONNECTOR' == state.device_settings['iq_output_path']:
# remove all digitizer controls
self._dec_box.setEnabled(False)
self._fshift_edit.setEnabled(False)
self._fshift_label.setEnabled(False)
self._level_trigger.setEnabled(False)
self._trig_fstart.setEnabled(False)
self._trig_fstop.setEnabled(False)
self._trig_amp.setEnabled(False)
elif 'DIGITIZER' == state.device_settings['iq_output_path']:
# enable digitizer controls
if not self.gui_state.device_settings['iq_output_path'] == 'DIGITIZER':
self._dec_box.setEnabled(True)
self._fshift_edit.setEnabled(True)
self._fshift_label.setEnabled(True)
self._trig_fstart.setEnabled(True)
self._trig_fstop.setEnabled(True)
self._trig_amp.setEnabled(True)
self._level_trigger.setEnabled(True)
self.gui_state = state
def resize_widget(self):
self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
def showEvent(self, event):
self.activateWindow()
```
#### File: pyrf/gui/measurements_widget.py
```python
from PySide import QtGui
from pyrf.units import M
from pyrf.gui import colors
from pyrf.gui.fonts import GROUP_BOX_FONT
from pyrf.gui.util import clear_layout
from pyrf.gui.widgets import QCheckBoxPlayback, QDoubleSpinBoxPlayback
class MeasurementControls(QtGui.QWidget):
def __init__(self, controller):
super(MeasurementControls, self).__init__()
self.controller = controller
controller.device_change.connect(self.device_changed)
controller.state_change.connect(self.state_changed)
controller.plot_change.connect(self.plot_changed)
self._create_controls()
self.setLayout(QtGui.QGridLayout())
self._build_layout()
self._connect_controls()
def _create_controls(self):
self._channel_power = QCheckBoxPlayback("Channel Power")
self._channel_power.setToolTip("Enable Channel Power Measurement")
self._horizontal_cursor = QCheckBoxPlayback("Horizontal Cursor")
self._horizontal_cursor.setToolTip("Enable Horizontal Cursor on reference Plot")
self._cursor_spinbox = QDoubleSpinBoxPlayback()
self._cursor_spinbox.setRange(-2000, 2000)
self._cursor_spinbox.setEnabled(False)
self._cursor_spinbox.quiet_update(value = -100)
def _build_layout(self):
grid = self.layout()
clear_layout(grid)
grid.addWidget(self._channel_power, 0, 0, 1, 1)
grid.addWidget(self._horizontal_cursor, 0, 1, 1,1)
grid.addWidget(self._cursor_spinbox, 0, 2, 1,1)
grid.setRowStretch(1, 1) # expand empty space at the bottom
self.resize_widget()
def _connect_controls(self):
def enable_channel_power():
self.controller.apply_plot_options(channel_power = self._channel_power.isChecked())
def enable_cursor():
self.controller.apply_plot_options(horizontal_cursor = self._horizontal_cursor.isChecked())
def change_cursor_value():
self.controller.apply_plot_options(horizontal_cursor_value = self._cursor_spinbox.value())
self._channel_power.clicked.connect(enable_channel_power)
self._horizontal_cursor.clicked.connect(enable_cursor)
self._cursor_spinbox.editingFinished.connect(change_cursor_value)
def device_changed(self, dut):
self.dut_prop = dut.properties
def state_changed(self, state, changed):
self.gui_state = state
if 'device_settings.iq_output_path' in changed:
if state.device_settings['iq_output_path'] == 'CONNECTOR':
self._channel_power.setEnabled(False)
self._horizontal_cursor.setEnabled(False)
self._cursor_spinbox.setEnabled(False)
elif state.device_settings['iq_output_path'] == 'DIGITIZER':
self._channel_power.setEnabled(True)
self._horizontal_cursor.setEnabled(True)
self._cursor_spinbox.setEnabled(True)
def plot_changed(self, state, changed):
self.plot_state = state
if 'horizontal_cursor_value' in changed:
self._cursor_spinbox.quiet_update(value = float(state['horizontal_cursor_value']))
if 'horizontal_cursor' in changed:
if state['horizontal_cursor']:
self._cursor_spinbox.setEnabled(True)
else:
self._cursor_spinbox.setEnabled(False)
def resize_widget(self):
self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
def showEvent(self, event):
self.activateWindow()
```
#### File: pyrf/gui/plot_tools.py
```python
import pyqtgraph as pg
import numpy as np
from PySide import QtCore
from pyrf.gui import colors
from pyrf.units import M
from pyrf.numpy_util import calculate_channel_power
LARGE_NEGATIVE_NUMBER = -900000
# minimum size allowed for auto peak
MIN_AUTO_POS_SIZE = 1000
class triggerControl(pg.ROI):
"""
Class to represent the trigger controls in the plot
"""
# sigHovering = QtCore.Signal(object)
# sigHoveringFinished = QtCore.Signal(object)
sigNewTriggerRange = QtCore.Signal(object)
def __init__(self):
super(triggerControl, self).__init__(pos=(0,0))
self.normal_pen = pg.mkPen(color = colors.WHITE_NUM, width= 2)
self.setPen(self.normal_pen)
self.hover_pen = pg.mkPen(color = colors.LIME_NUM, width= 2)
self.fstart = 0
self.fstop = 0
self.amplitude = 0
self.init_lines()
self.sigRegionChangeFinished.connect(self.new_trigger)
self.sigRegionChangeStarted.connect(self.begin_changing)
def begin_changing(self):
for l in self.lines:
l.blockSignals(True)
def new_trigger(self):
self.fstart = self.pos().x()
self.fstop = self.fstart + self.size().x()
self.amplitude = self.size().y() + self.pos().y()
self.fstart_line.setValue(self.fstart)
self.fstop_line.setValue(self.fstop)
self.amplitude_line.setValue(self.amplitude)
self.sigNewTriggerRange.emit(self)
for l in self.lines:
l.blockSignals(False)
def init_lines(self):
self.lines = []
cursor_pen = pg.mkPen((0,0,0,0), width = 50)
        # NOTE: InfiniteLine is not imported in this module; it is presumably a custom
        # subclass of pyqtgraph's InfiniteLine (it emits sigHovering below).
        self.fstart_line = InfiniteLine(pen = cursor_pen, pos = -100, angle = 90, movable = True)
self.lines.append(self.fstart_line)
self.fstop_line = InfiniteLine(pen = cursor_pen, pos = -100, angle = 90, movable = True)
self.lines.append(self.fstop_line)
self.amplitude_line = InfiniteLine(pen = cursor_pen, pos = -100, angle = 0, movable = True)
self.lines.append(self.amplitude_line)
for l in self.lines:
def hovering():
self.setPen(self.hover_pen)
# l.setPen(self.hover_pen)
def not_hovering():
self.setPen(self.normal_pen)
# l.setPen(cursor_pen)
l.setHoverPen(cursor_pen)
l.sigHovering.connect(hovering)
l.sigHoveringFinished.connect(not_hovering)
def changing_fstart():
self.setPen(self.hover_pen)
self.resize_trigger(self.fstart_line.value(),
self.fstop,
self.amplitude)
self.fstart_line.sigPositionChanged.connect(changing_fstart)
def finished_changing_fstart():
self.setPen(self.normal_pen)
self.sigNewTriggerRange.emit(self)
self.fstart_line.sigPositionChangeFinished.connect(finished_changing_fstart)
def changing_fstop():
self.setPen(self.hover_pen)
self.resize_trigger(self.fstart,
self.fstop_line.value(),
self.amplitude)
self.fstop_line.sigPositionChanged.connect(changing_fstop)
def finished_changing_fstop():
self.setPen(self.normal_pen)
self.sigNewTriggerRange.emit(self)
self.fstop_line.sigPositionChangeFinished.connect(finished_changing_fstop)
def changing_amp():
self.setPen(self.hover_pen)
self.resize_trigger(self.fstart,
self.fstop,
self.amplitude_line.value())
self.amplitude_line.sigPositionChanged.connect(changing_amp)
def finished_changing_amplitude():
self.setPen(self.normal_pen)
self.sigNewTriggerRange.emit(self)
self.amplitude_line.sigPositionChangeFinished.connect(finished_changing_amplitude)
def resize_trigger(self, start, stop, amp):
self.blockSignals(True)
self.fstart = start
self.fstop = stop
self.amplitude = amp
span = stop - start
self.setPos(((start), LARGE_NEGATIVE_NUMBER))
self.setSize((span, (-1 * LARGE_NEGATIVE_NUMBER) - np.abs(amp)))
self.fstart_line.setValue(start)
self.fstop_line.setValue(stop)
self.amplitude_line.setValue(amp)
self.blockSignals(False)
def setMouseHover(self, hover):
if self.mouseHovering == hover:
return
self.mouseHovering = hover
if hover:
self.currentPen = self.hover_pen
else:
self.currentPen = self.pen
self.update()
class Trace(object):
"""
Class to represent a trace in the plot
"""
def __init__(self,plot_area, trace_name, trace_color, blank = False, write = False):
self.name = trace_name
self.max_hold = False
self.min_hold = False
self.blank = blank
self.write = write
self.store = False
self.average = False
self.data = None
self.raw_packet = None
self.freq_range = None
self.color = trace_color
self.calc_channel_power = False
self.channel_power = 0
self.channel_power_range = []
self.curves = []
self.plot_area = plot_area
self.average_list = []
self.average_factor = 5
def clear(self):
for c in self.curves:
self.plot_area.window.removeItem(c)
self.curves = []
def clear_data(self):
self.average_list = []
self.data = None
def update_average_factor(self, factor):
self.average_factor = factor
self.average_list = []
def compute_channel_power(self):
if self.calc_channel_power and not self.blank:
if min(self.channel_power_range) > min(self.freq_range) and max(self.channel_power_range) < max(self.freq_range):
if self.data is not None:
min_bin = (np.abs(self.freq_range-min(self.channel_power_range))).argmin()
max_bin = (np.abs(self.freq_range-max(self.channel_power_range))).argmin()
self.channel_power = calculate_channel_power(self.data[min_bin:max_bin])
def update_curve(self, xdata, ydata, usable_bins, sweep_segments):
if self.store or self.blank:
return
self.freq_range = xdata
if self.max_hold:
if (self.data is None or len(self.data) != len(ydata)):
self.data = ydata
self.data = np.maximum(self.data,ydata)
elif self.min_hold:
if (self.data is None or len(self.data) != len(ydata)):
self.data = ydata
self.data = np.minimum(self.data,ydata)
elif self.write:
self.data = ydata
elif self.average:
if len(self.average_list) >= self.average_factor:
self.average_list.pop(0)
if self.average_list:
if len(ydata) != len(self.data):
self.average_list = []
self.average_list.append(ydata)
self.data = np.average(self.average_list, axis = 0)
self.clear()
self.compute_channel_power()
if usable_bins:
# plot usable and unusable curves
i = 0
edge_color = tuple([c / 3 for c in self.color])
for start_bin, run_length in usable_bins:
if start_bin > i:
c = self.plot_area.window.plot(x=xdata[i:start_bin+1],
y=self.data[i:start_bin+1], pen=edge_color)
self.curves.append(c)
i = start_bin
if run_length:
c = self.plot_area.window.plot(x=xdata[i:i+run_length],
y=self.data[i:i+run_length], pen=self.color)
self.curves.append(c)
i = i + run_length - 1
if i < len(xdata):
c = self.plot_area.window.plot(x=xdata[i:], y=self.data[i:],
pen=edge_color)
self.curves.append(c)
else:
odd = True
i = 0
alternate_color = (
max(0, self.color[0] - 60),
max(0, self.color[1] - 60),
min(255, self.color[2] + 60),)
if sweep_segments is None:
sweep_segments = [len(self.data)]
for run in sweep_segments:
c = self.plot_area.window.plot(x=xdata[i:i + run],
y=self.data[i:i + run],
pen=self.color if odd else alternate_color)
self.curves.append(c)
i = i + run
odd = not odd
class Marker(object):
"""
Class to represent a marker on the plot
"""
def __init__(self,plot_area, marker_name, color, controller):
self.name = marker_name
self.marker_plot = pg.ScatterPlotItem()
self.enabled = False
self.selected = False
self.data_index = None
self.xdata = []
self.ydata = []
self.trace_index = 0
self.color = color
self.draw_color = color
self.hovering = False
self._plot = plot_area
self.coursor_dragged = False
self.controller = controller
cursor_pen = pg.mkPen((0,0,0,0), width = 40)
self.cursor_line = InfiniteLine(pen = cursor_pen, pos = -100, angle = 90, movable = True)
self.cursor_line.setHoverPen(pg.mkPen((0,0,0, 0), width = 40))
def dragged():
# determine index of click
index = np.abs(self.xdata-int(self.cursor_line.value())).argmin()
# calculate the region around the index to check for maximum value
index_region_offset = int(0.01 * len(self.ydata))
if int(min(self.xdata)) > int(min(self._plot.view_box.viewRange()[0])) or int(max(self.xdata)) > int(max(self._plot.view_box.viewRange()[0])) or len(self.ydata) < MIN_AUTO_POS_SIZE:
self.data_index = index
else:
# position of marker is the maximum of the region surrounding the area where the user clicked
if (index - index_region_offset) > 0:
self.data_index = np.where(self.ydata == max(self.ydata[index - index_region_offset: index + index_region_offset]))[0]
self.cursor_line.setPen(cursor_pen)
self.draw_color = colors.MARKER_HOVER
self.controller.apply_plot_options(marker_dragged = True)
self.update_pos(self.xdata, self.ydata)
self.cursor_line.sigDragged.connect(dragged)
def hovering():
self.draw_color = colors.MARKER_HOVER
self.cursor_line.sigHovering.connect(hovering)
def not_hovering():
self.draw_color = color
self.update_pos(self.xdata, self.ydata)
self.cursor_line.sigHoveringFinished.connect(not_hovering)
def remove_marker(self, plot):
plot.window.removeItem(self.marker_plot)
plot.window.removeItem(self.cursor_line)
def add_marker(self, plot):
plot.window.addItem(self.marker_plot)
plot.window.addItem(self.cursor_line)
def enable(self, plot):
self.enabled = True
self.add_marker(plot)
self.controller.apply_plot_options(marker_dragged = True)
def disable(self, plot):
self.enabled = False
self.remove_marker(plot)
self.data_index = None
self.trace_index = 0
def update_pos(self, xdata, ydata):
# calculate scale offset for marker
scale = np.abs( max(self._plot.view_box.viewRange()[1]) - min(self._plot.view_box.viewRange()[1])) * 0.01
self.marker_plot.clear()
self._plot.window.removeItem(self.marker_plot)
self._plot.window.addItem(self.marker_plot)
if len(xdata) <= 0 or len(ydata) <= 0:
return
if self.data_index is None:
self.data_index = len(ydata) / 2
if not len(xdata) == len(self.xdata) and not len(self.xdata) == 0:
self.data_index = int((float(self.data_index)/float(len(self.xdata))) * len(xdata))
xpos = xdata[self.data_index]
ypos = ydata[self.data_index]
self.xdata = xdata
self.ydata = ydata
if not self.coursor_dragged:
self.cursor_line.setValue(xpos)
brush_color = self.draw_color + (20,)
self.marker_plot.addPoints(x = [xpos],
y = [ypos + scale],
symbol = 't',
size = 20, pen = pg.mkPen(self.draw_color),
brush = brush_color)
class InfiniteLine(pg.InfiniteLine):
"""
Infinite Line with controls over the hover pen (feature will be available in pyqtgraph 0.9.9)
"""
sigHovering = QtCore.Signal(object)
sigHoveringFinished = QtCore.Signal(object)
def setPen(self, *args, **kwargs):
"""Set the pen for drawing the line. Allowable arguments are any that are valid
for :func:`mkPen <pyqtgraph.mkPen>`."""
self.pen = pg.mkPen(*args, **kwargs)
if not self.mouseHovering:
self.currentPen = self.pen
self.update()
def setHoverPen(self, *args, **kwargs):
"""Set the pen for drawing the line while the mouse hovers over it.
Allowable arguments are any that are valid
for :func:`mkPen <pyqtgraph.mkPen>`.
If the line is not movable, then hovering is also disabled.
Added in version 0.9.9."""
self.hoverPen = pg.mkPen(*args, **kwargs)
if self.mouseHovering:
self.currentPen = self.hoverPen
self.update()
def boundingRect(self):
#br = UIGraphicsItem.boundingRect(self)
br = self.viewRect()
## add a 4-pixel radius around the line for mouse interaction.
px = self.pixelLength(direction=pg.Point(1,0), ortho=True) ## get pixel length orthogonal to the line
if px is None:
px = 0
w = (max(4, self.pen.width()/2, self.hoverPen.width()/2)+1) * px
br.setBottom(-w)
br.setTop(w)
return br.normalized()
def hoverEvent(self, ev):
if (not ev.isExit()) and self.movable and ev.acceptDrags(QtCore.Qt.LeftButton):
self.setMouseHover(True)
else:
self.setMouseHover(False)
def setMouseHover(self, hover):
## Inform the item that the mouse is (not) hovering over it
if self.mouseHovering == hover:
return
self.mouseHovering = hover
if hover:
self.currentPen = self.hoverPen
self.sigHovering.emit(self)
else:
self.currentPen = self.pen
self.sigHoveringFinished.emit(self)
self.update()
```
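A standalone sketch (plain numpy, no Qt, not part of pyrf) of the hold semantics that `Trace.update_curve` applies to each incoming sweep: the max-hold trace keeps the per-bin peak, while the average trace averages over the last `average_factor` sweeps. The sweep values are invented.
```python
import numpy as np

sweeps = [np.array([-60.0, -55.0, -70.0]),
          np.array([-58.0, -57.0, -65.0]),
          np.array([-62.0, -50.0, -68.0])]

AVERAGE_FACTOR = 5          # mirrors Trace.average_factor
max_hold = sweeps[0].copy()
average_list = []

for ydata in sweeps:
    max_hold = np.maximum(max_hold, ydata)                     # per-bin peak, as in the max_hold branch
    average_list = (average_list + [ydata])[-AVERAGE_FACTOR:]  # keep only the last N sweeps
    average = np.average(average_list, axis=0)                 # rolling mean, as in the average branch

print(max_hold, average)
```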
#### File: pyrf/gui/plot_widget.py
```python
import platform
import pyqtgraph as pg
import numpy as np
from PySide import QtCore
from pyrf.gui import colors
from pyrf.gui import labels
from pyrf.gui import fonts
from pyrf.gui.widgets import SpectralWidget
from pyrf.gui.amplitude_controls import PLOT_TOP, PLOT_BOTTOM
from pyrf.gui.waterfall_widget import (WaterfallModel,
ThreadedWaterfallPlotWidget)
from pyrf.gui.persistence_plot_widget import (PersistencePlotWidget,
decay_fn_EXPONENTIAL)
from pyrf.gui.plot_tools import Marker, Trace, InfiniteLine, triggerControl
from pyrf.units import M
from pyrf.vrt import (I_ONLY, VRT_IFDATA_I14Q14, VRT_IFDATA_I14,
VRT_IFDATA_I24, VRT_IFDATA_PSD8)
PLOT_YMIN = -5000
PLOT_YMAX = 5000
IQ_PLOT_YMIN = -1
IQ_PLOT_YMAX = 1
IQ_PLOT_XMIN = -1
IQ_PLOT_XMAX = 1
# FIXME: we shouldn't be calculating fft in this module
ZIF_BITS = 2**13
CONST_POINTS = 512
PERSISTENCE_RESETTING_CHANGES = set(["center",
"device_settings.attenuator",
#"rbw", <-- signal is the same area
"mode"
])
class Plot(QtCore.QObject):
"""
Class to hold the plot widget, as well as all the plot items (curves, marker arrows, etc.)
"""
user_xrange_change = QtCore.Signal(float, float)
def __init__(self, controller, layout):
super(Plot, self).__init__()
self.controller = controller
controller.device_change.connect(self.device_changed)
controller.state_change.connect(self.state_changed)
controller.plot_change.connect(self.plot_changed)
self.plot_state = {}
# initialize main fft window
self.spectral_window = SpectralWidget(controller)
self.window = self.spectral_window.window
self.window.setMenuEnabled(False)
def widget_range_changed(widget, ranges):
if hasattr(self, 'gui_state') and hasattr(self, 'plot_state'):
# HDR mode has a tuning resolution almost the same as its usable bandwidth, making mouse tuning annoying to use
if self.gui_state.mode == 'HDR' or not self.plot_state['mouse_tune']:
return
if not hasattr(ranges, '__getitem__'):
return # we're not interested in QRectF updates
self.user_xrange_change.emit(ranges[0][0], ranges[0][1])
self.window.sigRangeChanged.connect(widget_range_changed)
self.view_box = self.window.plotItem.getViewBox()
# initialize the y-axis of the plot
self.window.setYRange(PLOT_BOTTOM, PLOT_TOP)
labelStyle = fonts.AXIS_LABEL_FONT
self.window.setLabel('left', 'Power', 'dBm', **labelStyle)
self.window.setLabel('top')
self.window.setLabel('bottom', 'Frequency', 'Hz', **labelStyle)
# horizontal cursor line
cursor_pen = pg.mkPen(color = colors.YELLOW_NUM, width = 2)
self.cursor_line = pg.InfiniteLine(pos = -100, angle = 0, movable = True, pen = cursor_pen)
self.channel_power_region = pg.LinearRegionItem()
self._trig_enable = False
self.grid(True)
# IQ constellation window
self.const_window = pg.PlotWidget(name='const_plot')
self.const_plot = pg.ScatterPlotItem(pen = 'y')
self.const_window.setMenuEnabled(False)
self.const_window.addItem(self.const_plot)
self.const_window.setYRange(IQ_PLOT_YMIN, IQ_PLOT_YMAX)
self.const_window.setXRange(IQ_PLOT_XMIN, IQ_PLOT_XMAX)
# IQ time domain window
self.iq_window = pg.PlotWidget(name='const_plot')
self.iq_window.setYRange(IQ_PLOT_YMIN, IQ_PLOT_YMAX)
self.iq_window.setMenuEnabled(False)
self.update_iq_range = True
self.i_curve = self.iq_window.plot(pen = 'g')
self.q_curve = self.iq_window.plot(pen = 'r')
# add traces
self.traces = []
first_trace = labels.TRACES[0]
for trace_name, trace_color in zip(labels.TRACES, colors.TRACE_COLORS):
trace = Trace(
self,
trace_name,
trace_color,
blank=True,
write=False)
self.traces.append(trace)
self.traces[0].blank = False
self.traces[0].write = True
self.markers = []
for name in labels.MARKERS:
self.markers.append(Marker(self, name, colors.WHITE_NUM, self.controller))
self.waterfall_data = WaterfallModel(max_len=600)
self.waterfall_window = ThreadedWaterfallPlotWidget(
self.waterfall_data,
scale_limits=(PLOT_YMIN, PLOT_YMAX),
max_frame_rate_fps=30,
mouse_move_crosshair=False,
)
self.persistence_window = PersistencePlotWidget(
decay_fn=decay_fn_EXPONENTIAL,
data_model=self.waterfall_data)
self.persistence_window.getAxis('bottom').setScale(1e-9)
self.persistence_window.showGrid(True, True)
self.trigger_control = triggerControl()
self.connect_plot_controls()
self.update_waterfall_levels(PLOT_BOTTOM, PLOT_TOP)
def connect_plot_controls(self):
def new_channel_power():
self.controller.apply_plot_options(channel_power_region = self.channel_power_region.getRegion())
def new_cursor_value():
self.controller.apply_plot_options(horizontal_cursor_value = self.cursor_line.value())
def new_trigger():
self.controller.apply_device_settings(trigger = {'type': 'LEVEL',
'fstart':self.trigger_control.fstart,
'fstop': self.trigger_control.fstop,
'amplitude': self.trigger_control.amplitude})
def new_y_axis():
self.controller.apply_plot_options(y_axis = self.view_box.viewRange()[1])
# update trigger settings whenever a line is changed
self.channel_power_region.sigRegionChanged.connect(new_channel_power)
self.cursor_line.sigPositionChangeFinished.connect(new_cursor_value)
self.trigger_control.sigNewTriggerRange.connect(new_trigger)
self.window.sigYRangeChanged.connect(new_y_axis)
def device_changed(self, dut):
self.dut_prop = dut.properties
def state_changed(self, state, changed):
self.gui_state = state
if 'device_settings.trigger' in changed:
fstart = state.device_settings['trigger']['fstart']
fstop = state.device_settings['trigger']['fstop']
amplitude = state.device_settings['trigger']['amplitude']
type = state.device_settings['trigger']['type']
if type == 'NONE':
self.remove_trigger()
self.trigger_control.resize_trigger(fstart, fstop, amplitude)
elif type == 'LEVEL':
self.add_trigger(fstart,
fstop,
amplitude)
for m in self.markers:
if m.enabled:
m.remove_marker(self)
m.add_marker(self)
if 'center' in changed or 'span' in changed:
fstart = state.center - (state.span / 2)
fstop = state.center + (state.span / 2)
for trace in self.traces:
trace.clear_data()
if fstart > self.trigger_control.fstart or fstop < self.trigger_control.fstop:
self.controller.apply_device_settings(trigger = {'type': 'NONE',
'fstart':self.trigger_control.fstart,
'fstop': self.trigger_control.fstop,
'amplitude': self.trigger_control.amplitude})
self.remove_trigger()
self.persistence_window.reset_plot()
if fstart > float(min(self.channel_power_region.getRegion())) or fstop < float(max(self.channel_power_region.getRegion())):
self.move_channel_power(fstart + state.span / 4, fstop - state.span / 4)
if set(changed).intersection(PERSISTENCE_RESETTING_CHANGES):
self.persistence_window.reset_plot()
if 'mode' in changed:
if state.mode not in self.dut_prop.LEVEL_TRIGGER_RFE_MODES:
self.remove_trigger()
def plot_changed(self, state, changed):
self.plot_state = state
if 'horizontal_cursor' in changed:
if state['horizontal_cursor']:
self.window.addItem(self.cursor_line)
else:
self.window.removeItem(self.cursor_line)
if 'channel_power' in changed:
if state['channel_power']:
self.enable_channel_power()
else:
self.disable_channel_power()
if 'horizontal_cursor_value' in changed:
self.cursor_line.setValue(state['horizontal_cursor_value'])
if 'channel_power_region' in changed:
for t in self.traces:
t.channel_power_range = state['channel_power_region']
t.compute_channel_power()
if 'y_axis' in changed:
self.window.setYRange(state['y_axis'][0] , state['y_axis'][1], padding = 0)
self.persistence_window.setYRange(state['y_axis'][0] , state['y_axis'][1], padding = 0)
def enable_channel_power(self):
for t in self.traces:
t.calc_channel_power = True
fstart = self.gui_state.center - (self.gui_state.span / 4)
fstop = self.gui_state.center + (self.gui_state.span / 4)
self.move_channel_power(fstart, fstop)
self.window.addItem(self.channel_power_region)
def move_channel_power(self, fstart, fstop):
self.channel_power_region.setRegion([(fstart),float(fstop)])
def disable_channel_power(self):
for t in self.traces:
t.calc_channel_power = False
self.window.removeItem(self.channel_power_region)
def add_trigger(self,fstart, fstop, amplitude):
if not self._trig_enable:
self.window.addItem(self.trigger_control)
self.window.addItem(self.trigger_control.fstart_line)
self.window.addItem(self.trigger_control.fstop_line)
self.window.addItem(self.trigger_control.amplitude_line)
self._trig_enable = True
self.trigger_control.resize_trigger(fstart,
fstop,
amplitude)
def remove_trigger(self):
self.window.removeItem(self.trigger_control)
self.window.removeItem(self.trigger_control.fstart_line)
self.window.removeItem(self.trigger_control.fstop_line)
self.window.removeItem(self.trigger_control.amplitude_line)
self._trig_enable = False
def center_view(self, fstart, fstop):
b = self.window.blockSignals(True)
self.window.setXRange(float(fstart), float(fstop), padding=0)
self.window.blockSignals(b)
self.persistence_window.setXRange(
float(fstart),
float(fstop),
padding=0)
def update_waterfall_levels(self, min_level, ref_level):
if self.waterfall_window is not None:
self.waterfall_window.set_lookup_levels(min_level, ref_level)
self.persistence_window.reset_plot()
self.persistence_window.setYRange(min_level, ref_level)
def grid(self,state):
self.window.showGrid(state,state)
self.window.getAxis('bottom').setPen(colors.GREY_NUM)
self.window.getAxis('bottom').setGrid(200)
self.window.getAxis('left').setPen(colors.GREY_NUM)
self.window.getAxis('left').setGrid(200)
self.window.getAxis('top').setTicks([[(-200, '-200'), (-200, '-200'),
(-250, '-200'), (-250, '-200')]])
def update_markers(self):
for m in self.markers:
if m.enabled:
trace = self.traces[m.trace_index]
m.update_pos(trace.freq_range, trace.data)
def get_trigger_region(self):
print self.trigger_control.pos()
print self.trigger_control.size()
def update_iq_plots(self, data):
trace = self.traces[0]
if not (trace.write or trace.max_hold or trace.min_hold or trace.store):
return
if data.stream_id == VRT_IFDATA_I14Q14:
i_data = np.array(data.data.numpy_array()[:,0], dtype=float)/ZIF_BITS
q_data = np.array(data.data.numpy_array()[:,1], dtype=float)/ZIF_BITS
self.i_curve.setData(i_data)
self.q_curve.setData(q_data)
self.const_plot.clear()
self.const_plot.addPoints(
x = i_data[0:CONST_POINTS],
y = q_data[0:CONST_POINTS],
symbol = 'o',
size = 1, pen = 'y',
brush = 'y')
else:
i_data = np.array(data.data.numpy_array(), dtype=float)
if data.stream_id == VRT_IFDATA_I14:
i_data = i_data /ZIF_BITS
elif data.stream_id == VRT_IFDATA_I24:
i_data = i_data / (np.mean(i_data)) - 1
self.i_curve.setData(i_data)
self.q_curve.clear()
if self.update_iq_range:
self.iq_window.setXRange(0, len(i_data))
self.update_iq_range = False
def center_iq_plots(self):
self.iq_window.setYRange(IQ_PLOT_YMIN, IQ_PLOT_YMAX)
```
#### File: pyrf/gui/spectrum_analyzer.py
```python
import sys
from PySide import QtGui
from pyrf.gui.gui import MainWindow
# pyinstaller + qt4reactor workaround:
sys.modules.pop('twisted.internet.reactor', None)
import qt4reactor
import logging
def main():
dut_address = None
playback_filename = None
developer_menu = False
if '-p' in sys.argv:
f_index = sys.argv.index('-p')
playback_filename = sys.argv[f_index + 1]
del sys.argv[f_index:f_index + 2]
if '-v' in sys.argv:
sys.argv.remove('-v')
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig()
if '-d' in sys.argv:
developer_menu = True
sys.argv.remove('-d')
if len(sys.argv) > 1:
dut_address = sys.argv[1]
app = QtGui.QApplication(sys.argv)
qt4reactor.install() # requires QApplication to exist
# requires qt4reactor to be installed
ex = MainWindow(dut_address, playback_filename, developer_menu)
# late import because installReactor is being used
from twisted.internet import reactor
reactor.run()
if __name__ == "__main__":
main()
```
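A hedged launch sketch (module path and device address are placeholders); the flags are read straight from `sys.argv` in `main()` above: `-p <file>` plays back a capture, `-v` enables debug logging, `-d` exposes the developer menu, and a bare positional argument is the device address.
```python
import sys
from pyrf.gui.spectrum_analyzer import main   # module path assumed from the file header

# Equivalent to: rtsa-gui -v 10.126.110.101  (address is a placeholder)
sys.argv = ["rtsa-gui", "-v", "10.126.110.101"]
main()
```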
#### File: jgobuyan/pyrf/setup.py
```python
import os
try:
import setuptools
def setup(**kwargs):
setuptools.setup(zip_safe=False, **kwargs)
except ImportError:
from distutils.core import setup
extras = {}
try:
import py2exe
extras.update({
'windows':['rtsa-gui.py'],
})
except ImportError:
pass
exec(open(os.path.join("pyrf","version.py")).read())
release = __version__
setup(
name='pyrf',
version=release,
author='ThinkRF Corporation',
author_email='<EMAIL>',
packages=['pyrf', 'pyrf.devices', 'pyrf.connectors', 'pyrf.gui'],
url='https://github.com/pyrf/pyrf',
license='BSD',
description='API for RF receivers including ThinkRF WSA platforms',
long_description=open('README.rst').read(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: MacOS X",
"Environment :: Win32 (MS Windows)",
"Environment :: X11 Applications :: Qt",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Quality Assurance",
"Topic :: Software Development :: Testing",
"Topic :: System :: Hardware",
],
test_suite='pyrf.tests',
entry_points={
'gui_scripts': [
"rtsa-gui = pyrf.gui.spectrum_analyzer:main",
],
},
**extras
)
```
|
{
"source": "jgoclawski/async-crawler",
"score": 3
}
|
#### File: async-crawler/crawler/01_async.py
```python
import socket
from selectors import DefaultSelector, EVENT_WRITE
from crawler.logger import log
def run_sync():
log("\nRunning synchronously with busy-waiting\n")
sock = create_socket()
request = "GET xkcd.com HTTP/1.0\r\nHost: xkcd.com\r\n\r\n"
encoded = request.encode("ascii")
wait_for_socket_in_a_loop(encoded, sock)
log("Ready!")
def run_async():
log("\nRunning asynchronously with a simple event loop\n")
sock = create_socket()
wait_for_socket_async(sock)
log("Ready!")
def create_socket():
sock = socket.socket()
sock.setblocking(False)
try:
log("Connect")
sock.connect(("xkcd.com", 80))
log("After connect")
except BlockingIOError as e:
log(f"Caught: {e}")
log("After try")
return sock
def wait_for_socket_in_a_loop(encoded, sock):
while True:
try:
sock.send(encoded)
break
except OSError as e:
log(f"Error: {e}")
def wait_for_socket_async(sock):
selector = DefaultSelector()
def connected():
selector.unregister(sock.fileno())
log("Connected!")
selector.register(sock.fileno(), EVENT_WRITE, connected)
loop(selector)
def loop(selector):
log("Starting event loop")
while True:
log("Waiting for events...")
events = selector.select()
log("Got event!")
for event_key, event_mask in events:
callback = event_key.data
callback()
break
log("Exiting event loop")
if __name__ == '__main__':
run_sync()
run_async()
```
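A minimal follow-on sketch (not part of the repository) extending the same selector pattern: once the socket becomes writable, the callback sends the request and registers an `EVENT_READ` callback that accumulates the response until the server closes the connection.
```python
import socket
from selectors import DefaultSelector, EVENT_READ, EVENT_WRITE


def fetch(host="xkcd.com", port=80):
    sock = socket.socket()
    sock.setblocking(False)
    try:
        sock.connect((host, port))
    except BlockingIOError:
        pass

    selector = DefaultSelector()
    chunks = []
    done = False

    def connected():
        # Writable means the connection completed: send the request, wait for data.
        selector.unregister(sock.fileno())
        sock.send(f"GET / HTTP/1.0\r\nHost: {host}\r\n\r\n".encode("ascii"))
        selector.register(sock.fileno(), EVENT_READ, readable)

    def readable():
        nonlocal done
        chunk = sock.recv(4096)
        if chunk:
            chunks.append(chunk)
        else:                       # empty read: server closed the connection
            selector.unregister(sock.fileno())
            done = True

    selector.register(sock.fileno(), EVENT_WRITE, connected)
    while not done:
        for key, _ in selector.select():
            key.data()              # invoke the stored callback
    return b"".join(chunks)


if __name__ == "__main__":
    print(fetch()[:80])
```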
|
{
"source": "jgodara/pymycloud",
"score": 2
}
|
#### File: pymycloud/admin/owncloud_admin.py
```python
import sys
from cliff.app import App
from cliff.commandmanager import CommandManager
from admin.commands.users import AddUser, DeleteUser, ListUsers
from database.session import SessionFactoryPool
class OwnCloudAdmin(App):
def __init__(self):
super().__init__(
description="Utility to manage the OwnCloud installation",
version="0.1-beta",
command_manager=CommandManager("owncloud_admin"),
deferred_help=True
)
def prepare_to_run_command(self, cmd):
# Initialize the database session
_ = SessionFactoryPool.get_current_session()
def initialize_app(self, argv):
commands = [ListUsers, AddUser, DeleteUser]
for command in commands:
self.command_manager.add_command(
command.__name__.lower(), command)
def main(argv=sys.argv[1:]):
admin_app = OwnCloudAdmin()
return admin_app.run(argv)
if __name__ == "__main__":
sys.exit(main(argv=sys.argv[1:]))
```
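A quick illustrative invocation (import path is an assumption); the cliff command names are the lowercased class names registered in `initialize_app`, so the same commands work from the shell or from another script.
```python
from admin.owncloud_admin import main   # import path is an assumption

# Equivalent to running: owncloud_admin listusers
exit_code = main(["listusers"])
```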
#### File: pymycloud/core/sockets.py
```python
import logging
import socketserver
from core.runners.downloader import FileTransmitter
from core.socket_errors import InvalidTransmissionKeyError, SocketError, InvalidTransmissionTypeError
from database.models import Transmissions
from database.repositories import TransmissionRepository
class SocketRequestHandler(socketserver.BaseRequestHandler):
"""
Handles the traffic received over a TCP socket. The request must specify a
transmission key (generated using the REST API)
"""
def handle(self):
"""Hook that handles traffic."""
# Receive the initial 8-character Transmission Key
transmission_key = self.request.recv(16).decode()
transmission_repo = TransmissionRepository(use_new_session=True)
tran = None
try:
tran = transmission_repo.get_by_transmission_key(transmission_key)
if not tran:
raise InvalidTransmissionKeyError()
if tran.transmission_type == Transmissions.TYPE_GET:
# Run the GET FILE runner
FileTransmitter(tran.user, self.request).run()
elif tran.transmission_type == Transmissions.TYPE_UPLOAD:
# TODO Add support for uploads
pass
else:
raise InvalidTransmissionTypeError()
except SocketError as err:
logging.error(f"{err.get_error_code()}: Ran into an error white serving {self.client_address}", err)
self.request.send(f"{err.get_error_code()}")
finally:
# Always end the transmission
if tran:
transmission_repo.delete(tran)
```
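A client-side sketch of the protocol handled above, assuming `SocketRequestHandler` is served by a `socketserver.TCPServer`; host, port and the key value are placeholders.
```python
import socket

TRANSMISSION_KEY = "abcdefgh12345678"   # 16 chars, issued by the REST API in the real flow

with socket.create_connection(("localhost", 9000)) as sock:
    sock.sendall(TRANSMISSION_KEY.encode())
    payload = b""
    while True:
        chunk = sock.recv(4096)
        if not chunk:
            break
        payload += chunk
# payload now holds the transmitted file, or an error code string on failure
```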
#### File: pymycloud/owncloud_utils/strings.py
```python
import random
import string
def randstr(chars=string.ascii_lowercase + string.digits, len=16) -> str:
"""Generates a random string out of given charactes.
Parameters
----------
chars : type
An array if charaters to generate the string from.
len : type
Length of the string.
Returns
-------
str
A random string of length `len`.
"""
return ''.join(random.choices(chars, k=len))
```
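Illustrative calls (import path is an assumption); the defaults yield the kind of 16-character lowercase alphanumeric key consumed by `SocketRequestHandler`.
```python
from owncloud_utils.strings import randstr   # import path is an assumption

key = randstr()                                    # 16 chars from [a-z0-9]
token = randstr(chars="0123456789abcdef", len=32)  # custom alphabet and length
print(key, token)
```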
|
{
"source": "jgod/openapi-core",
"score": 2
}
|
#### File: openapi_core/schema/shortcuts.py
```python
from jsonschema.validators import RefResolver
from openapi_spec_validator import default_handlers
from openapi_core.schema.specs.factories import SpecFactory
def create_spec(spec_dict, spec_url='', handlers=default_handlers):
spec_resolver = RefResolver(
spec_url, spec_dict, handlers=handlers)
spec_factory = SpecFactory(spec_resolver)
return spec_factory.create(spec_dict, spec_url=spec_url)
```
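A minimal sketch of `create_spec` with an in-memory spec dict; the dict below is an invented example, not part of the repository.
```python
from openapi_core.schema.shortcuts import create_spec

spec_dict = {
    "openapi": "3.0.0",
    "info": {"title": "Minimal API", "version": "1.0.0"},
    "paths": {},
}
spec = create_spec(spec_dict)   # resolves $refs and builds the spec model
```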
|
{
"source": "jgodwin/cropland-ds-2020",
"score": 3
}
|
#### File: jgodwin/cropland-ds-2020/stats.py
```python
import pandas as pd
from sklearn.metrics import f1_score, confusion_matrix, precision_recall_fscore_support
def prf1_score_img(gt, pre):
p, r, f, s = precision_recall_fscore_support(gt.flatten(), pre.flatten(), average=None)
ndf = pd.DataFrame()
ndf['Recall'] = r
ndf['Precision'] = p
ndf['F1'] = f
ndf['Support'] = s
return ndf
```
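An illustrative call with two tiny invented label rasters (module path assumed from the file header); each row of the returned frame corresponds to one class.
```python
import numpy as np
from stats import prf1_score_img   # module path assumed from the file header

gt = np.array([[0, 1], [1, 1]])    # ground-truth mask
pre = np.array([[0, 1], [0, 1]])   # predicted mask
print(prf1_score_img(gt, pre))     # Recall/Precision/F1/Support, one row per class
```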
|
{
"source": "jgoecks/gemini",
"score": 3
}
|
#### File: gemini/gemini/gemini_subjects.py
```python
import sqlite3
import sys
from collections import defaultdict
from compiler import compile
from gemini_constants import *
import GeminiQuery
from functools import wraps
def compile_decorator(f):
"""decorator to automatically compile the eval strings returned from
the filter methods"""
@wraps(f)
def wrapper(*args, **kwargs):
query_string = f(*args, **kwargs)
if query_string == "False" or query_string == {"any": "False"}:
return None
if not isinstance(query_string, dict):
return compile(query_string, "<string>", "eval")
query_dict = query_string
for k, stmt in query_dict.iteritems():
query_dict[k] = compile(stmt, "<string>", "eval")
return query_dict
return wrapper
def get_phred_query(sample_id, gt_ll, genotype, prefix=" and ", invert=False):
"""Default is to test < where a low value phred-scale is high
confidence for that genotype
>>> get_phred_query(2, 22, "het")
' and gt_phred_ll_het[1] < 22'
>>> get_phred_query(2, 22, "het", prefix="")
'gt_phred_ll_het[1] < 22'
>>> get_phred_query(2, 22, "het", prefix="", invert=True)
'gt_phred_ll_het[1] > 22'
"""
assert genotype in ("het", "homref", "homalt")
if not gt_ll: return ""
# they passed in the subject:
if hasattr(sample_id, "sample_id"):
sample_id = sample_id.sample_id
sign = ["<", ">"][int(invert)]
s = "gt_phred_ll_{genotype}[{sample_id}] {sign} {gt_ll}"\
.format(sample_id=sample_id-1, genotype=genotype,
gt_ll=gt_ll, sign=sign)
return prefix + s
class Subject(object):
"""
Describe a single subject in the samples table.
"""
def __init__(self, row):
self._set_fields_from_row(row)
def __repr__(self):
return "\t".join(map(str, [self.name, self.paternal_id,
self.maternal_id, self.phenotype]))
def set_father(self):
self.father = True
def set_mother(self):
self.mother = True
def _set_fields_from_row(self, row):
self.__dict__.update(row)
#for k, v in zip(row.keys(), row):
# self.__dict__[k] = v
self.phenotype = int(self.phenotype) if self._has_phenotype() else None
self._set_affected_status()
def _has_phenotype(self):
if hasattr(self, 'phenotype') and self.phenotype is not None:
return True
def _set_affected_status(self):
# 1 = unaffected
# 2 = affected
# 0 or -9 is unknown.
# http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#ped
pheno = str(self.phenotype)
if pheno == "2":
self.affected = True
elif pheno == "1":
self.affected = False
# distinguish unknown from known to be unaffected.
else:
self.affected = None
class Family(object):
"""
Describe the relationships among multiple subjects in a family.
"""
def __init__(self, subjects):
self.subjects = subjects
self.father = None
self.mother = None
self.family_id = self.subjects[0].family_id
self.children = []
self.affected = []
self.unaffected = []
self.affected_children = []
self.unaffected_children = []
self.is_constructed = False
self.build_family()
def has_an_affected(self):
"""
Return True if the Family has at least one affected individual.
Otherwise return False.
"""
for subject in self.subjects:
if subject.affected:
return True
return False
def has_an_affected_child(self):
"""
Return True if the Family has at least one affected child.
Otherwise return False.
"""
if not self.is_constructed:
self.build_family()
for child in self.children:
if child.affected:
return True
return False
def build_family(self):
"""
Screen for children with parental ids so that
we can identify the parents in this family.
NOTE: assumes at most a 2 generation family.
"""
# build only if the family has not already been built.
if self.is_constructed is False:
self.father_name = None
self.mother_name = None
for subject in self.subjects:
# if mom and dad are found, we know this is the child
if subject.maternal_id != "0" and subject.paternal_id != "0":
self.father_name = str(subject.paternal_id)
self.mother_name = str(subject.maternal_id)
self.children.append(subject)
# now track the actual sampleIds for the parents
for subject in self.subjects:
if self.father_name is not None and \
subject.name == self.father_name:
self.father = subject
elif self.mother_name is not None and \
subject.name == self.mother_name:
self.mother = subject
# prevent reconstructing family every time function is called.
self.is_constructed = True
if self.father is not None and self.mother is not None:
return True
else:
return False
@compile_decorator
def get_auto_recessive_filter(self, gt_ll=False):
"""
Generate an autosomal recessive eval() filter to apply for this family.
For example:
'(gt_types[57] == HET and \ # mom
gt_types[58] == HET and \ # dad
gt_types[11] == HOM_ALT)' # affected child
"""
parents_found = self.build_family()
affected_found = self.has_an_affected()
# identify which samples are the parents in the family.
# Fail if both parents are not found
if not parents_found and not affected_found:
sys.stderr.write("WARNING: Unable to identify at least one "
"affected individual for family (%s). "
"Consequently, GEMINI will not screen for "
"variants in this family.\n"
% self.family_id)
return "False"
elif not parents_found and affected_found:
sys.stderr.write("WARNING: Unable to identify parents for family (%s). "
"Consequently, GEMINI will solely place genotype "
"requirements on subjects based on their phenotype.\n"
% self.family_id)
mask = "("
for i, subject in enumerate(self.subjects):
if subject.affected:
mask += '(gt_types[' + str(subject.sample_id - 1) + "] == " + \
str(HOM_ALT)
mask += get_phred_query(subject, gt_ll, "homalt") + ")"
else:
mask += '(gt_types[' + str(subject.sample_id - 1) + "] != " + \
str(HOM_ALT)
mask += get_phred_query(subject, gt_ll, "homalt", invert=True) + ")"
if i < (len(self.subjects) - 1):
mask += " and "
mask += ")"
return mask
elif parents_found:
# if either parent is affected, this family cannot satisfy
# a recessive model, as the parents should be carriers.
if self.father.affected is True or self.mother.affected is True:
return "False"
mask = "("
mask += 'gt_types[' + str(self.father.sample_id - 1) + "] == " + \
str(HET)
mask += get_phred_query(self.father.sample_id, gt_ll, "het")
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + \
str(HET)
mask += get_phred_query(self.mother.sample_id, gt_ll, "het")
if self.has_an_affected_child():
for i, child in enumerate(self.children):
if child.affected is True:
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + \
str(HOM_ALT)
mask += get_phred_query(child.sample_id, gt_ll, "homalt")
# only allow an unaffected if there are other affected children
elif child.affected is False and self.has_an_affected_child():
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] != " + \
str(HOM_ALT)
# TODO: # Aaron, does this seem right? We want them not
# HOMALT, so we want a high GT_LL for the HOM ALT
# allele.
mask += get_phred_query(child.sample_id, gt_ll, "homalt", invert=True)
elif child.affected is None:
# assume just testing for inheritance patterns
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + \
str(HOM_ALT)
mask += get_phred_query(child.sample_id, gt_ll, "homalt")
else:
for i, child in enumerate(self.children):
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + \
str(HOM_ALT)
mask += get_phred_query(child.sample_id, gt_ll, "homalt")
mask += ")"
return mask
@compile_decorator
def get_auto_dominant_filter(self, gt_ll=False):
"""
Generate an autosomal dominant eval() filter to apply for this family.
For example:
'(
((bool(gt_types[57] == HET) # mom
!= \
bool(gt_types[58] == HET)) and \ # dad
gt_types[11] == HET # affected child
)'
NOTE: the bool(dad) != bool(mom) is an XOR requiring that one and
only one of the parents is heterozygous
"""
parents_found = self.build_family()
affected_found = self.has_an_affected()
# identify which samples are the parents in the family.
# Fail if both parents are not found
if not parents_found and not affected_found:
sys.stderr.write("WARNING: Unable to identify at least one "
"affected individual for family (%s). "
"Consequently, GEMINI will not screen for "
"variants in this family.\n"
% self.family_id)
return "False"
elif not parents_found and affected_found:
sys.stderr.write("WARNING: Unable to identify parents for family (%s). "
"Consequently, GEMINI will solely place genotype "
"requirements on subjects based on their phenotype.\n"
% self.family_id)
mask = "("
for i, subject in enumerate(self.subjects):
if subject.affected:
mask += 'gt_types[' + str(subject.sample_id - 1) + "] == " + str(HET)
mask += get_phred_query(subject.sample_id, gt_ll, "het")
else:
mask += 'gt_types[' + str(subject.sample_id - 1) + "] == " + str(HOM_REF)
mask += get_phred_query(subject.sample_id, gt_ll, "homref")
if i < (len(self.subjects) - 1):
mask += " and "
mask += ")"
return mask
elif parents_found:
mask = ""
if self.father.affected is True and self.mother.affected is True:
# doesn't meet an auto. dominant model if both parents are affected
# [*]---(*)
# |
# (*)
return "False"
elif ((self.father.affected is False and self.mother.affected is False)
or
(self.father.affected is None and self.mother.affected is None)):
# if neither parent is affected, or the affection status is
# unknown for both, we can just screen for variants where one and
# only one of the parents is a het and the child is also a het
# []---()
# |
# (*)
mask = "((bool("
mask += '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HET)
# TODO: ask Aaron. adding these likelihoods means we are
# checking that only one of them is *confidently* a het.
# whereas without the likelihoods, it could be more stringent.
mask += get_phred_query(self.father.sample_id, gt_ll, "het") + ")"
mask += ") != "
mask += 'bool((gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HET)
mask += get_phred_query(self.mother.sample_id, gt_ll, "het") + ")"
mask += ")) and "
for i, child in enumerate(self.children):
if child.affected:
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HET)
mask += get_phred_query(child.sample_id, gt_ll, "het")
else:
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + \
str(HOM_REF)
mask += get_phred_query(child.sample_id, gt_ll, "homref")
if i < (len(self.children) - 1):
mask += " and "
mask += ")"
return mask
elif (self.father.affected is True and
self.mother.affected is not True):
# if only Dad is known to be affected, we must enforce
# that only the affected child and Dad have the
# same heterozygous genotype.
# [*]---()
# |
# (*)
mask = "(("
mask += '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HET)
mask += get_phred_query(self.father.sample_id, gt_ll, "het") + ")"
mask += " and "
mask += '(gt_types[' + str(self.mother.sample_id - 1) + "] != " + str(HET)
mask += get_phred_query(self.mother.sample_id, gt_ll, "het", invert=True) + ")"
mask += ") and "
for i, child in enumerate(self.children):
if child.affected:
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HET)
mask += get_phred_query(child.sample_id, gt_ll, "het")
else:
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HOM_REF)
mask += get_phred_query(child.sample_id, gt_ll, "homref")
if i < (len(self.children) - 1):
mask += " and "
mask += ")"
return mask
elif (self.father.affected is not True
and self.mother.affected is True):
# if only Mom is known to be affected, we must enforce
# that only the affected child and Mom have the
# same heterozygous genotype.
# []---(*)
# |
# (*)
mask = "(("
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HET)
mask += get_phred_query(self.mother.sample_id, gt_ll, "het")
mask += " and "
mask += 'gt_types[' + str(self.father.sample_id - 1) + "] != " + str(HET)
mask += get_phred_query(self.father.sample_id, gt_ll, "het", invert=True)
mask += ") and "
for i, child in enumerate(self.children):
if child.affected:
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HET)
mask += get_phred_query(child.sample_id, gt_ll, "het")
else:
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + \
str(HOM_REF)
mask += get_phred_query(child.sample_id, gt_ll, "homref")
if i < (len(self.children) - 1):
mask += " and "
mask += ")"
return mask
@compile_decorator
def get_de_novo_filter(self, only_affected=False, gt_ll=False):
"""
Generate a de novo mutation eval() filter to apply for this family.
For example:
'(gt_types[57] == HOM_REF and \ # mom
gt_types[58] == HOM_REF and \ # dad
gt_types[11] == HET)' # affected child
# [G/G]---(G/G)
# |
# (A/G)
"""
# identify which samples are the parents in the family.
# Fail if both parents are not found
if not self.build_family():
sys.stderr.write("WARNING: Unable to find parents for family (%s). "
"GEMINI is currently only able to identify candidates "
"from two generational families.\n"
% self.family_id)
return "False"
mask = "("
mask += "("
mask += 'gt_types[' + str(self.father.sample_id - 1) + "] == " + \
str(HOM_REF)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + \
str(HOM_REF)
mask += get_phred_query(self.father.sample_id, gt_ll, "homref")
mask += get_phred_query(self.mother.sample_id, gt_ll, "homref")
mask += ")"
mask += " or "
mask += "("
mask += 'gt_types[' + str(self.father.sample_id - 1) + "] == " + \
str(HOM_ALT)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + \
str(HOM_ALT)
mask += get_phred_query(self.father.sample_id, gt_ll, "homalt")
mask += get_phred_query(self.mother.sample_id, gt_ll, "homalt")
mask += ")"
mask += ")"
mask += " and ("
if len(self.children) == 1:
if only_affected == False or \
(only_affected == True and self.children[0].affected == True):
mask += '( gt_types[' + str(self.children[0].sample_id - 1) + "] == " + str(HET)
mask += get_phred_query(self.children[0].sample_id, gt_ll, "het")
mask += ")"
else:
if only_affected == False:
for i, child in enumerate(self.children):
mask += '( gt_types[' + str(child.sample_id - 1) + "] == " + str(HET)
mask += get_phred_query(child, gt_ll, "het")
mask += ")"
if i < (len(self.children) - 1):
mask += " or "
else:
# one or more of the affecteds must be HET
num_affected = sum(child.affected for child in self.children)
affected = 0
for child in self.children:
if child.affected == True:
affected += 1
mask += '(gt_types[' + str(child.sample_id - 1) + "] == " + str(HET)
mask += get_phred_query(child, gt_ll, "het") + ")"
if affected < num_affected:
mask += " or "
mask += ") and ("
# AND, none of the unaffecteds can be HET
num_unaffected = sum(not child.affected for child in self.children)
unaffected = 0
for i, child in enumerate(self.children):
if child.affected is False:
unaffected += 1
mask += '(gt_types[' + str(child.sample_id - 1) + "] != " + str(HET)
mask += get_phred_query(child, gt_ll, "het", invert=True) + ")"
if unaffected < num_unaffected:
mask += " and "
mask += ")"
return mask
@compile_decorator
def get_mendelian_violation_filter(self, gt_ll=False):
"""
Generate Mendelian violation eval() filter to apply for this family.
For example:
'(gt_types[57] == HOM_REF and \ # mom
gt_types[58] == HOM_REF and \ # dad
gt_types[11] == HET)' # affected child
# [G/G]---(G/G)
# |
# (A/G)
"""
# identify which samples are the parents in the family.
# Fail if both parents are not found
if not self.build_family():
sys.stderr.write("WARNING: Unable to find parents for family (%s). "
"GEMINI is currently only able to identify candidates "
"from two generational families.\n"
% self.family_id)
return {"any": "False"}
# outer start paren
# masks is keyed by the type of violation; values are the filters.
masks = {}
for i, child in enumerate(self.children):
##################################################
# Plausible de novos
##################################################
# DAD = HOM_REF; MOM = HOM_REF; KID = HET (De novo)
mask = '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HOM_REF)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HOM_REF)
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HET)
mask += get_phred_query(self.father, gt_ll, "homref")
mask += get_phred_query(self.mother, gt_ll, "homref")
mask += get_phred_query(child, gt_ll, "het")
mask += ")"
mask += " or "
# DAD = HOM_ALT; MOM = HOM_ALT; KID = HET (De novo)
mask += '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HOM_ALT)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HOM_ALT)
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HET)
mask += get_phred_query(self.father, gt_ll, "homalt")
mask += get_phred_query(self.mother, gt_ll, "homalt")
mask += get_phred_query(child, gt_ll, "het")
mask += ")"
masks['plausible de novo'] = mask
##################################################
# Implausible de novos
##################################################
# DAD = HOM_REF; MOM = HOM_REF; KID = HOM_ALT (Implausible de novo)
mask = '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HOM_REF)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HOM_REF)
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HOM_ALT)
mask += get_phred_query(self.father, gt_ll, "homref")
mask += get_phred_query(self.mother, gt_ll, "homref")
mask += get_phred_query(child, gt_ll, "homalt")
mask += ")"
mask += " or "
# DAD = HOM_ALT; MOM = HOM_ALT; KID = HOM_REF (Implausible de novo)
mask += '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HOM_ALT)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HOM_ALT)
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HOM_REF)
mask += get_phred_query(self.father, gt_ll, "homalt")
mask += get_phred_query(self.mother, gt_ll, "homalt")
mask += get_phred_query(child, gt_ll, "homref")
mask += ")"
masks['implausible de novo'] = mask
##################################################
# Uniparental disomies
##################################################
# DAD = HOM_REF; MOM = HOM_ALT; KID = HOM_REF (Uniparental disomy)
mask = '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HOM_REF)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HOM_ALT)
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HOM_REF)
mask += get_phred_query(self.father, gt_ll, "homref")
mask += get_phred_query(self.mother, gt_ll, "homalt")
mask += get_phred_query(child, gt_ll, "homref")
mask += ")"
mask += " or "
# DAD = HOM_REF; MOM = HOM_ALT; KID = HOM_ALT (Uniparental disomy)
mask += '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HOM_REF)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HOM_ALT)
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HOM_ALT)
mask += get_phred_query(self.father, gt_ll, "homref")
mask += get_phred_query(self.mother, gt_ll, "homalt")
mask += get_phred_query(child, gt_ll, "homalt")
mask += ")"
mask += " or "
# DAD = HOM_ALT; MOM = HOM_REF; KID = HOM_REF (Uniparental disomy)
mask += '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HOM_ALT)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HOM_REF)
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HOM_REF)
mask += get_phred_query(self.father, gt_ll, "homalt")
mask += get_phred_query(self.mother, gt_ll, "homref")
mask += get_phred_query(child, gt_ll, "homref")
mask += ")"
mask += " or "
# DAD = HOM_ALT; MOM = HOM_REF; KID = HOM_ALT (Uniparental disomy)
mask += '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HOM_ALT)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HOM_REF)
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HOM_ALT)
mask += get_phred_query(self.father, gt_ll, "homalt")
mask += get_phred_query(self.mother, gt_ll, "homref")
mask += get_phred_query(child, gt_ll, "homalt")
mask += ")"
masks['uniparental disomy'] = mask
##################################################
# Losses of heterozygosity
##################################################
# DAD = HOM_REF; MOM = HET; KID = HOM_ALT (Loss of heterozygosity)
mask = '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HOM_REF)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HET)
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HOM_ALT)
mask += get_phred_query(self.father, gt_ll, "homref")
mask += get_phred_query(self.mother, gt_ll, "het")
mask += get_phred_query(child, gt_ll, "homalt")
mask += ")"
mask += " or "
# DAD = HOM_ALT; MOM = HET; KID = HOM_REF (Loss of heterozygosity)
mask += '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HOM_ALT)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HET)
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HOM_REF)
mask += get_phred_query(self.father, gt_ll, "homalt")
mask += get_phred_query(self.mother, gt_ll, "het")
mask += get_phred_query(child, gt_ll, "homref")
mask += ")"
mask += " or "
# DAD = HET; MOM = HOM_REF; KID = HOM_ALT (Loss of heterozygosity)
mask += '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HET)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HOM_REF)
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HOM_ALT)
mask += get_phred_query(self.father, gt_ll, "het")
mask += get_phred_query(self.mother, gt_ll, "homref")
mask += get_phred_query(child, gt_ll, "homalt")
mask += ")"
mask += " or "
# DAD = HET; MOM = HOM_ALT; KID = HOM_REF (Loss of heterozygosity)
mask += '(gt_types[' + str(self.father.sample_id - 1) + "] == " + str(HET)
mask += " and "
mask += 'gt_types[' + str(self.mother.sample_id - 1) + "] == " + str(HOM_ALT)
mask += " and "
mask += 'gt_types[' + str(child.sample_id - 1) + "] == " + str(HOM_REF)
mask += get_phred_query(self.father, gt_ll, "het")
mask += get_phred_query(self.mother, gt_ll, "homalt")
mask += get_phred_query(child, gt_ll, "homref")
mask += ")"
masks['loss of heterozygosity'] = mask
# outer end paren
return masks
def get_gt_field(self, field):
columns = []
prefix = field + "["
if not self.build_family():
for subject in self.subjects:
columns.append(prefix + str(subject.sample_id - 1) + ']')
else:
columns.append(prefix + str(self.father.sample_id - 1) + ']')
columns.append(prefix + str(self.mother.sample_id - 1) + ']')
for child in self.children:
columns.append(prefix + str(child.sample_id - 1) + ']')
return columns
def get_genotype_columns(self):
return self.get_gt_field("gts")
def get_genotype_depths(self):
return self.get_gt_field("gt_depths")
def get_genotype_lls(self):
return dict(
homref=self.get_gt_field("gt_phred_ll_homref"),
het=self.get_gt_field("gt_phred_ll_het"),
homalt=self.get_gt_field("gt_phred_ll_homalt"))
def get_genotype_labels(self):
"""
Return header genotype labels for the parents and the children.
"""
labels = []
# these are just anonymous affected and unaffected
# individuals in the same family
if not self.build_family():
for subject in self.subjects:
if subject.affected is True:
labels.append(subject.name + "(affected)")
elif subject.affected is False:
labels.append(subject.name + "(unaffected)")
elif subject.affected is None:
labels.append(subject.name + "(unknown)")
else:
if self.father.affected is True:
labels.append(self.father.name + "(dad;affected)")
elif self.father.affected is False:
labels.append(self.father.name + "(dad;unaffected)")
elif self.father.affected is None:
labels.append(self.father.name + "(dad;unknown)")
if self.mother.affected is True:
labels.append(self.mother.name + "(mom;affected)")
elif self.mother.affected is False:
labels.append(self.mother.name + "(mom;unaffected)")
elif self.mother.affected is None:
labels.append(self.mother.name + "(mom;unknown)")
# handle the children
for child in self.children:
if child.affected is True:
labels.append(child.name + "(child;affected)")
elif child.affected is False:
labels.append(child.name + "(child;unaffected)")
elif child.affected is None:
labels.append(child.name + "(child;unknown)")
return labels
def get_subject_depth_labels(self):
"""
Return header depth labels for the parents and the children.
"""
subjects = []
subjects.append(self.father.name + "(depth)")
subjects.append(self.mother.name + "(depth)")
for child in self.children:
subjects.append(child.name + "(depth)")
return subjects
def get_families(db, selected_families=None):
"""
Query the samples table to return a list of Family
objects that each contain all of the Subjects in a Family.
"""
conn = sqlite3.connect(db)
conn.isolation_level = None
conn.row_factory = sqlite3.Row
c = conn.cursor()
query = "SELECT * FROM samples \
WHERE family_id is not NULL \
ORDER BY family_id"
c.execute(query)
# create a mapping of family_id to the list of
# individuals that are members of the family.
families_dict = {}
for row in c:
subject = Subject(row)
family_id = subject.family_id
if family_id in families_dict:
families_dict[family_id].append(subject)
else:
families_dict[family_id] = [subject]
# if the user has specified a set of selected families
# to which the analysis should be restricted, then
# first sanity check that the family ids they specified are valid.
if selected_families is not None:
for family in selected_families.split(','):
if family not in families_dict:
sys.exit("ERROR: family \"%s\" is not a valid family_id\n" % family)
families = []
for fam in families_dict:
if selected_families is None:
family = Family(families_dict[fam])
families.append(family)
elif fam in selected_families:
family = Family(families_dict[fam])
families.append(family)
return families
def get_family_dict(args):
families = defaultdict(list)
subjects = get_subjects(args)
for subject in subjects.values():
families[subject.family_id].append(subject)
return families
def get_subjects(args, skip_filter=False):
"""
return a dictionary of subjects, optionally using the
subjects_query argument to filter them.
"""
gq = GeminiQuery.GeminiQuery(args.db)
query = "SELECT * FROM samples"
if not skip_filter:
if hasattr(args, 'sample_filter') and args.sample_filter:
query += " WHERE " + args.sample_filter
gq.c.execute(query)
samples_dict = {}
for row in gq.c:
subject = Subject(row)
samples_dict[subject.name] = subject
return samples_dict
def get_subjects_in_family(args, family):
subjects = get_subjects(args)
family_names = [f.name for f in family]
subject_dict = {}
for subject in subjects:
if subject in family_names:
subject_dict[subject] = subjects[subject]
return subject_dict
if __name__ == "__main__":
import doctest
doctest.testmod()
```
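A hedged sketch (sample values invented, import path assumed): `Subject` copies its row into `__dict__`, so plain dicts with PED-style fields are enough to build a trio `Family` and ask for the compiled autosomal-recessive filter.
```python
from gemini.gemini_subjects import Subject, Family   # import path is an assumption

rows = [
    {"family_id": "1", "name": "dad", "paternal_id": "0", "maternal_id": "0",
     "sample_id": 1, "phenotype": "1"},
    {"family_id": "1", "name": "mom", "paternal_id": "0", "maternal_id": "0",
     "sample_id": 2, "phenotype": "1"},
    {"family_id": "1", "name": "kid", "paternal_id": "dad", "maternal_id": "mom",
     "sample_id": 3, "phenotype": "2"},
]
fam = Family([Subject(r) for r in rows])
# Compiled eval() expression over gt_types (and gt_phred_ll_* columns when gt_ll is given)
recessive_filter = fam.get_auto_recessive_filter()
```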
#### File: gemini/gemini/tool_autosomal_recessive.py
```python
import os
from gemini_inheritance_model_utils import GeminiInheritanceModelFactory
def run(parser, args):
if os.path.exists(args.db):
auto_recessive_factory = \
GeminiInheritanceModelFactory(args, model="auto_rec")
auto_recessive_factory.get_candidates()
```
#### File: gemini/gemini/tool_compound_hets.py
```python
import os
import sys
import collections
import re
from copy import copy
import GeminiQuery
from gemini_constants import *
from gemini_inheritance_model_utils import GeminiInheritanceModelFactory as Factory
import gemini_subjects as subjects
from itertools import groupby
from operator import itemgetter
class Site(object):
def __init__(self, row):
self.row = row
self.phased = None
self.gt = None
def __eq__(self, other):
return self.row['chrom'] == other.row['chrom'] and \
self.row['start'] == other.row['start']
def __repr__(self):
return ",".join([self.row['chrom'],
str(self.row['start']),
str(self.row['end'])])
def __hash__(self):
"hash the site based on chrom+start"
return sum(ord(c) for c in self.row['chrom']) + int(self.row['start'])
class CompoundHet(Factory):
def create_query(self):
"""
Construct a query to identify candidate compound heterozygotes
based on the user's columns and filters
"""
args = self.args
if args.columns is not None:
custom_columns = self._add_necessary_columns(str(args.columns))
query = "SELECT " + custom_columns + \
" FROM variants " + \
" WHERE (is_exonic = 1 or impact_severity != 'LOW') "
else:
# report the kitchen sink
query = "SELECT *" + \
", gts, gt_types, gt_phases, gt_depths, \
gt_ref_depths, gt_alt_depths, gt_quals" + \
" FROM variants " + \
" WHERE (is_exonic = 1 or impact_severity != 'LOW') "
# add any non-genotype column limits to the where clause
if args.filter:
query += " AND " + args.filter
# we need to order results by gene so that we can sweep through the results
query += " ORDER BY gene"
return query
def _add_necessary_columns(self, custom_columns):
"""
Convenience function to tack on columns that are necessary for
the functionality of the tool but yet have not been specifically
requested by the user.
"""
# we need to add the variant's gene, start, alt and variant_id if
# not already there.
self.added = []
for col in ("gene", "start", "alt", "variant_id"):
if custom_columns.find(col) < 0:
custom_columns += "," + col
if col != "variant_id":
self.added.append(col)
return custom_columns
def find_valid_het_pairs(self, sample_hets):
"""
Identify candidate heterozygote pairs.
"""
args = self.args
samples_w_hetpair = collections.defaultdict(list)
splitter = re.compile("\||/")
for sample in sample_hets:
for gene in sample_hets[sample]:
# we only care about combinations, not permutations
# (e.g. only need site1,site2, not both site1,site2 and site2,site1)
# thus we can do this in a ~ linear pass instead of a ~ N^2 pass
for idx, site1 in enumerate(sample_hets[sample][gene]):
for site2 in sample_hets[sample][gene][idx + 1:]:
# expand the genotypes for this sample at each site into
# it's composite alleles. e.g. A|G -> ['A', 'G']
alleles_site1 = []
alleles_site2 = []
if not args.ignore_phasing:
alleles_site1 = site1.gt.split('|')
alleles_site2 = site2.gt.split('|')
else:
# split on phased (|) or unphased (/) genotypes
alleles_site1 = splitter.split(site1.gt)
alleles_site2 = splitter.split(site2.gt)
# it is only a true compound heterozygote IFF
# the alternates are on opposite haplotypes.
if not args.ignore_phasing:
# return the haplotype on which the alternate allele
# was observed for this sample at each candidate het.
# site. e.g., if ALT=G and alleles_site1=['A', 'G']
# then alt_hap_1 = 1. if ALT=A, then alt_hap_1 = 0
if "," in str(site1.row['alt']) or \
"," in str(site2.row['alt']):
sys.stderr.write("WARNING: Skipping candidate for sample"
" %s b/c variants with mult. alt."
" alleles are not yet supported. The sites are:"
" %s and %s.\n" % (sample, site1, site2))
continue
alt_hap_1 = alleles_site1.index(site1.row['alt'])
alt_hap_2 = alleles_site2.index(site2.row['alt'])
# Keep as a candidate if
# 1. phasing is considered AND the alt alleles are on
# different haplotypes
# 2. the user doesn't care about phasing.
# TODO: Phase based on parental genotypes.
if (not args.ignore_phasing and alt_hap_1 != alt_hap_2) \
or args.ignore_phasing:
samples_w_hetpair[(site1,site2)].append(sample)
return samples_w_hetpair
def filter_candidates(self, samples_w_hetpair,
family_gt_labels,
family_gt_cols,
comp_het_counter=[0]):
"""
Refine candidate heterozygote pairs based on user's filters.
"""
args = self.args
# eliminate comp_hets with unaffected individuals if
# only affected individuals are required.
# once we are in here, we know that we have a single gene.
candidates = {}
if args.only_affected:
for comp_het in samples_w_hetpair:
num_affected = sum(self.subjects_dict[s].affected \
for s in samples_w_hetpair[comp_het])
if num_affected == len(samples_w_hetpair[comp_het]):
candidates[comp_het] = samples_w_hetpair[comp_het]
else:
candidates = samples_w_hetpair
# catalog the set of families that have a comp_het in this gene
family_count = collections.Counter()
for comp_het in candidates:
for s in samples_w_hetpair[comp_het]:
family_id = self.subjects_dict[s].family_id
family_count[family_id] += 1
# were there enough families with a compound het in this gene?
# keys of (variant_id, gene) vals of [row, family_gt_label, family_gt_cols,
# family_id, comp_het_id]
filtered_candidates = collections.defaultdict(list)
if len(family_count) >= args.min_kindreds:
for idx, comp_het in enumerate(candidates):
comp_het_counter[0] += 1
for s in samples_w_hetpair[comp_het]:
family_id = self.subjects_dict[s].family_id
if args.families is not None and family_id not in args.families.split(','):
continue
ch_id = str(comp_het_counter[0])
for i in (0, 1):
row = comp_het[i].row
filtered_candidates[(row['gene'], family_id)]\
.append((row,
family_gt_labels[family_id],
family_gt_cols[family_id],
row['variant_id'],
ch_id,
s))
self.report_candidates(filtered_candidates, is_comp_het=True)
def get_candidates(self):
self.get_compound_hets()
def get_compound_hets(self):
"""
Report candidate compound heterozygotes.
"""
args = self.args
gq = GeminiQuery.GeminiQuery(args.db, include_gt_cols=True)
idx_to_sample = gq.idx_to_sample
self.subjects_dict = subjects.get_subjects(args)
# run the query applying any genotype filters provided by the user.
gq.run(self.create_query())
families = subjects.get_families(args.db, args.families)
family_gt_labels, family_gt_cols = {}, {}
for family in families:
family_gt_labels[family.family_id] = family.get_genotype_labels()
family_gt_cols[family.family_id] = family.get_genotype_columns()
# output header
        print(self.get_header(gq.header, is_comp_het=True))
# Collect all of the genic heterozygotes for each sample / gene
for gene, row_list in groupby(gq, itemgetter('gene')):
sample_hets = collections.defaultdict(lambda: collections.defaultdict(list))
for row in row_list:
gt_types, gt_bases, gt_phases = row['gt_types'], row['gts'], row['gt_phases']
site = Site(row)
                # track each sample that is heterozygous at this site.
for idx, gt_type in enumerate(gt_types):
if gt_type != HET:
continue
sample = idx_to_sample[idx]
sample_site = copy(site)
sample_site.phased = gt_phases[idx]
if not sample_site.phased and not args.ignore_phasing:
continue
sample_site.gt = gt_bases[idx]
# add the site to the list of candidates for this sample/gene
sample_hets[sample][site.row['gene']].append(sample_site)
# process the last gene seen
samples_w_hetpair = self.find_valid_het_pairs(sample_hets)
self.filter_candidates(samples_w_hetpair,
family_gt_labels,
family_gt_cols)
def run(parser, args):
if os.path.exists(args.db):
CompoundHet(args, "comp_het").get_compound_hets()
```
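The phasing test inside `find_valid_het_pairs` is the core of this tool: two heterozygous sites in the same gene only form a candidate compound heterozygote when their alternate alleles sit on opposite haplotypes. A minimal, self-contained sketch of that check, using made-up phased genotype strings rather than GEMINI query rows:
```python
# Hedged sketch of the opposite-haplotype rule from find_valid_het_pairs.
# Genotype strings and ALT alleles below are illustrative, not real GEMINI output.

def on_opposite_haplotypes(gt1, alt1, gt2, alt2):
    """True if the ALT alleles of two phased genotypes lie on different haplotypes."""
    alleles_site1 = gt1.split('|')          # 'A|G' -> ['A', 'G']
    alleles_site2 = gt2.split('|')
    alt_hap_1 = alleles_site1.index(alt1)   # which haplotype carries ALT at site 1
    alt_hap_2 = alleles_site2.index(alt2)   # which haplotype carries ALT at site 2
    return alt_hap_1 != alt_hap_2

# ALT alleles on different parental chromosomes -> candidate compound het
print(on_opposite_haplotypes('A|G', 'G', 'T|C', 'T'))   # True
# Both ALT alleles on the same haplotype -> not a candidate
print(on_opposite_haplotypes('A|G', 'G', 'C|T', 'T'))   # False
```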
#### File: gemini/gemini/tool_de_novo_mutations.py
```python
import os
from gemini_inheritance_model_utils import GeminiInheritanceModelFactory
def run(parser, args):
if os.path.exists(args.db):
de_novo_factory = \
GeminiInheritanceModelFactory(args, model="de_novo")
de_novo_factory.get_candidates()
```
|
{
"source": "jgoeszoom/cuDNN-LeNet",
"score": 3
}
|
#### File: jgoeszoom/cuDNN-LeNet/lenet-keras.py
```python
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout,Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
from time import sleep
import random
import time
def index_at(arr):
count = 0;
for i in arr:
if i == 1:
return count
else:
count+=1
return None
batch_size = 128
num_classes = 10
epochs = 12
img_rows, img_cols = 28,28
(x_train,y_train),(x_test,y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0],1,img_rows,img_cols)
x_test = x_test.reshape(x_test.shape[0],1,img_rows,img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows,img_cols,1)
x_test = x_test.reshape(x_test.shape[0], img_rows,img_cols,1)
input_shape = (img_rows,img_cols,1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:',x_train.shape)
print(x_train.shape[0],'train samples')
print(x_test.shape[0],'test samples')
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test,num_classes)
model = Sequential()
model.add(Conv2D(5,(5,5),activation = 'relu',input_shape =input_shape))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(12,(5,5),activation = 'relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation = 'softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
# CPU time spent so far (data loading and model construction), before training starts
setup_time = time.process_time()
print('Time taken without training:', setup_time, 'seconds in process time')
train_start = time.process_time()
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
train_time = time.process_time() - train_start
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
print('Time taken with training:', train_time, 'seconds in process time')
#print(y_train[0])
#print(index_at(y_train[0]))
#fig = plt.figure()
#for num in range(9):
# plt.subplot(3,3,num+1)
# plt.tight_layout()
# plt.title('Number: {}'.format(index_at(y_train[num])))
# plt.imshow(x_train[num].reshape(28,28))
# #plt.show()
# plt.xticks([])
# plt.yticks([])
#plt.show()
start = time.perf_counter()
for i in range(0, len(y_test)):
    probs = model.predict(x_test[np.newaxis, i])
    prediction = probs.argmax(axis=1)
end = time.perf_counter()
elapsed = end - start
print("Elapsed time(s):", elapsed)
```
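The final loop above times one `model.predict` call per test image, which measures per-example latency rather than throughput; when total throughput matters, a single batched call is usually much faster. A hedged sketch of both measurements, reusing the `model` and `x_test` defined in the script (timings will vary by hardware):
```python
import time
import numpy as np

# Per-sample latency: one forward pass per image (mirrors online, one-at-a-time inference).
start = time.perf_counter()
for i in range(len(x_test)):
    model.predict(x_test[np.newaxis, i], verbose=0)
per_sample_s = time.perf_counter() - start

# Batched throughput: a single forward pass over the whole test set.
start = time.perf_counter()
model.predict(x_test, batch_size=128, verbose=0)
batched_s = time.perf_counter() - start

print('Per-sample loop: %.2f s, batched predict: %.2f s' % (per_sample_s, batched_s))
```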
|
{
"source": "jgoguen/ansible_playbooks",
"score": 2
}
|
#### File: ansible_playbooks/filter_plugins/direxist.py
```python
import os
from ansible.module_utils._text import to_text
def direxist(path: str) -> str:
if os.path.exists(path):
return to_text(path)
return to_text("")
class FilterModule(object):
def filters(self):
return dict(direxist=direxist)
```
|
{
"source": "jgoizueta/binomial-techniques",
"score": 3
}
|
#### File: binomial-techniques/pyhton/tie_decimal.py
```python
from decimal import *
def fact(x):
fact = 1
while x > 1 :
fact *= x
x -= 1
return fact
def prob(n) :
n = Decimal(n)
x = n/2
a = fact(n)
b = fact(x)
b *= b
c = Decimal(2)**n
return a/b/c
for n in [3030, 1000, 500, 200, 100, 50, 10] :
p = round(prob(n)*100, 2)
    print(n, " : ", p, " %")
```
#### File: binomial-techniques/pyhton/tie.py
```python
import math
from fractions import Fraction
def prob(n) :
    x = n // 2
    a = math.factorial(n)
    b = math.factorial(x)
    b *= b
    c = 2**n
    return Fraction(a, b) / c
for n in [3030, 1000, 500, 200, 100, 50, 10] :
    p = round(float(prob(n))*100, 2)
    print(n, " : ", p, " %")
```
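Both scripts compute the probability of an exact tie in n fair coin flips, P = C(n, n/2) / 2^n; the Decimal version trades exactness for fixed precision, while the Fraction version is exact. On Python 3.8+ the same quantity can be cross-checked directly with `math.comb`:
```python
import math
from fractions import Fraction

def tie_probability(n):
    """Exact probability of an exact tie in n fair coin flips: C(n, n/2) / 2**n."""
    return Fraction(math.comb(n, n // 2), 2 ** n)

for n in [10, 100, 1000]:
    print(n, ":", round(float(tie_probability(n)) * 100, 2), "%")
# 10 : 24.61 %   100 : 7.96 %   1000 : 2.52 %
```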
|
{
"source": "jgold3/data-structures-game",
"score": 3
}
|
#### File: game_board/api/api.py
```python
import json
import random
from time import sleep
import uuid
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from rest_framework.throttling import AnonRateThrottle
from rest_framework.throttling import UserRateThrottle
from rest_framework.decorators import throttle_classes
from game_board.api import utils
from game_board.avl import avl_handler as avl
from game_board.ai import ai_handler as ai
from .. import config
@api_view(['GET'])
def api_overview(request):
"""
Overview of the API calls exist.
:param request:
:return: Response, list of API URLs.
"""
api_urls = {
'Start Game': '/start_game/<str:difficulty>/<str:player_ids>/<str:data_structures>',
'Game Board': '/board/<str:id>',
'Re-balance Tree': '/rebalance/<str:game_id>/<str:user_id>/<str:token>',
'Action': '/action/<str:card>/<str:game_id>/<str:user_id>/<str:token>',
'AI-Pick': '/action/<str:card>/<str:game_id>/<str:user_id>/<str:token>',
}
return Response(api_urls)
@api_view(['GET'])
@throttle_classes([AnonRateThrottle])
@throttle_classes([UserRateThrottle])
def start_game(request, difficulty, player_ids, data_structures):
"""
Creates a new game board.
:param request:
:param difficulty: game difficulty level
    :param player_ids: string of player IDs, comma separated if more than one
    :param data_structures: string of data structures, comma separated if more than one
:return game board id:
"""
# Chosen difficulty does not exist
if difficulty not in config.DIFFICULTY_LEVELS:
return Response({'error': 'Difficulty level not found!',
'options': config.DIFFICULTY_LEVELS},
status=status.HTTP_400_BAD_REQUEST)
# Convert the string fields into list. Separate by comma if provided
player_ids_temp = player_ids.split(',')
data_structures = data_structures.split(',')
player_ids = list()
for pl_id in player_ids_temp:
pl_id = str(pl_id).strip()
# If empty player_ids is passed
if len(pl_id) == 0:
random_player = 'RedPanda_' + str(uuid.uuid1())[:5]
while random_player in player_ids:
random_player = 'RedPanda_' + str(uuid.uuid1())[:5]
player_ids.append(random_player)
else:
player_ids.append(pl_id)
# Shuffle players
#random.shuffle(player_ids)
# Check if the number of players request is valid
if len(player_ids) > config.MAX_NUM_PLAYERS:
return Response({'error': 'Too many players requested!',
'options': config.MAX_NUM_PLAYERS},
status=status.HTTP_400_BAD_REQUEST)
# Create new game board JSON (dict), and store it in the database
new_board = utils.new_board(difficulty, player_ids, data_structures)
response_status = utils.create_board_db(new_board)
if response_status['error']:
return Response({'error': response_status['reason']},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({'game_id': response_status['game_id']})
@api_view(['GET'])
def board(request, game_id):
"""
Returns the current game board state.
:param request:
:param game_id: unique identifier of the board
:return game board JSON:
"""
response_status = utils.load_board_db(game_id)
if response_status['error']:
return Response({'error': response_status['reason']},
status=status.HTTP_400_BAD_REQUEST)
# hide the UID used by data structure backend from user
del response_status['game_board']['graph']['uid']
return Response(response_status['game_board'])
@api_view(['POST'])
def rebalance(request, game_id, user_id, token):
"""
Re-balance a un-balanced AVL tree.
:param user_id: username
:param token: authentication token
:param game_id: unique identifier of the board
:return game board JSON:
"""
# Get the POST request
post_request = json.loads(request.body)
try:
adjacency_list = post_request['adjacency_list']
except Exception as err:
return Response({'error': str(err)},
status=status.HTTP_400_BAD_REQUEST)
# Load the game board from database
response_status = utils.load_board_db(game_id)
if response_status['error']:
return Response({'error': response_status['reason']},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
board = response_status['game_board']
# Check for invalid action
if board['curr_data_structure'] != 'AVL':
        return Response({'invalid_action': 'Re-balance can only be performed on an AVL tree!'})
check = utils.cheat_check(game_board=board, rebalance=True)
if check['cheat']:
return Response({'invalid_action': check['reason']},
status=status.HTTP_400_BAD_REQUEST)
# Do the re-balance action and get the new state of the graph
if board['curr_data_structure'] == 'AVL':
graph = avl.avlRebalance(board['graph'])
else:
graph = avl.avlRebalance(board['graph']) # change this if adding stack
board['graph'] = graph
# If not correct lose points
if board['graph']['adjacency_list'] != adjacency_list:
board['player_points'][board['turn']] -= config.LOSS[str(board['difficulty'])]
else:
board['player_points'][board['turn']] += config.GAIN[str(board['difficulty'])]
# Update board
response_status = utils.update_board_db(board, user_id, token)
if response_status['error']:
return Response({'error': response_status['reason']},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
board_response = response_status['game_board']
return Response(board_response)
@api_view(['GET'])
def action(request, card, game_id, user_id, token):
"""
Perform action on the Data Structure using a card
:param user_id: username
:param token: authentication token
:param card: what action to be performed
:param game_id: unique identifier of the board
:return game board JSON:
"""
# Load the game board from database
response_status = utils.load_board_db(game_id)
if response_status['error']:
return Response({'error': response_status['reason']},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
board = response_status['game_board']
# Check for invalid action
check = utils.cheat_check(game_board=board, card=card)
if check['cheat']:
return Response({'invalid_action': check['reason']},
status=status.HTTP_400_BAD_REQUEST)
# Give the points
if card.split(' ')[0] in config.GAIN_TIMES[board['curr_data_structure']]:
point = config.GAIN_TIMES_POINTS[card.split(' ')[0]]
board['player_points'][board['turn']] += point
# Perform the action on the data structure
if board['curr_data_structure'] == 'AVL':
graph = avl.avlAction(card, board['graph'], balance=False)
# Currently only AVL supported
else:
graph = avl.avlAction(card, board['graph'], balance=False)
# Update the graph with the new graph state
board['graph'] = graph
# Make sure deck is not empty
if len(board['deck']) == 0: # for now this checks deck so everyone always has 3 cards.
# Could check hand but not sure how that will affect frontend
pass
# Pick a new card
else:
board['cards'][board['turn']].remove(card)
new_card = board['deck'].pop(0)
board['cards'][board['turn']].append(new_card)
# Update the board on database
response_status = utils.update_board_db(board, user_id, token)
if response_status['error']:
return Response({'error': response_status['reason']},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
board_response = response_status['game_board']
return Response(board_response)
@api_view(['GET'])
def ai_pick(request, game_id, user_id, token):
"""
Have an AI pick a move to execute
:param user_id: username
:param token: authentication token
:param game_id: unique identifier of the board
:return card: string that represents a valid action for current player to take
"""
# Load the game board from database
response_status = utils.load_board_db(game_id)
if response_status['error']:
return Response({'error': response_status['reason']},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# Grab the board
board = response_status['game_board']
if not board['turn'].replace(" ", "").lower().startswith(config.BOT_NAME_PREFIX):
return Response({'error': 'The current player is not a BOT'},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
# tree is unbalanced,
if not board['graph']['balanced']:
# Do the re-balance action and get the new state of the graph
if board['curr_data_structure'] == 'AVL':
graph = avl.avlRebalance(board['graph'])
else:
graph = avl.avlRebalance(board['graph']) # change this if adding stack
board['graph'] = graph
# calculate the balance decision threshold
# if it is higher than the limit for the difficulty, points will be lost
balance_thresh = random.randint(1, 100)
if balance_thresh <= config.REBAL_CHANCE[str(board['difficulty'])]:
board['player_points'][board['turn']] += config.GAIN[str(board['difficulty'])]
else:
# If not correct lose points
board['player_points'][board['turn']] -= config.LOSS[str(board['difficulty'])]
# tree is balanced, can pick a move
else:
ordered_cards = utils.ai_format_hands(board)
card = ai.select_move(board['graph'],
board['curr_data_structure'],
ordered_cards,
board['deck'],
max_depth=20) # not sure what an appropriate search depth would be... 5 is pretty fast
# Give the points
if card.split(' ')[0] in config.GAIN_TIMES[board['curr_data_structure']]:
point = config.GAIN_TIMES_POINTS[card.split(' ')[0]]
board['player_points'][board['turn']] += point
# Perform the action on the data structure
if board['curr_data_structure'] == 'AVL':
graph = avl.avlAction(card, board['graph'], balance=False)
# Currently only AVL supported
else:
graph = avl.avlAction(card, board['graph'], balance=False)
# Update the graph with the new graph state
board['graph'] = graph
# Make sure deck is not empty
if len(board['deck']) == 0: # for now this checks deck so everyone always has 3 cards.
# Could check hand but not sure how that will affect frontend
pass
# Pick a new card
else:
board['cards'][board['turn']].remove(card)
new_card = board['deck'].pop(0)
board['cards'][board['turn']].append(new_card)
# Update the board on database
response_status = utils.update_board_db(board, user_id, token)
if response_status['error']:
return Response({'error': response_status['reason']},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
board_response = response_status['game_board']
sleep(config.BOT_SLEEP_TIME)
return Response(board_response)
```
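Taken together, these endpoints define a simple game loop: start a board, read its state, then submit card actions for the player whose turn it is. A hedged client-side sketch using `requests`; the base URL, difficulty string, user id and token are placeholders, and the path patterns are the ones listed in `api_overview`:
```python
import requests

BASE = "http://localhost:8000/game_board/api"  # hypothetical deployment prefix

# 1. Start a game (difficulty must be one of config.DIFFICULTY_LEVELS) and keep the id.
resp = requests.get(BASE + "/start_game/Easy/player1,player2/AVL")
game_id = resp.json()["game_id"]

# 2. Fetch the current board state.
board = requests.get(BASE + "/board/" + game_id).json()

# 3. Play the first card held by the current player (real card strings may need URL-encoding).
card = board["cards"][board["turn"]][0]
resp = requests.get(BASE + "/action/" + card + "/" + game_id + "/player1/some-token")
print(resp.json())
```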
|
{
"source": "jgoldfar/bibserver",
"score": 3
}
|
#### File: jgoldfar/bibserver/cli.py
```python
import os
import sys
import optparse
import inspect
# does setup of cfg
from bibserver import dao
def rebuild_db():
'''Rebuild the db'''
conn, db = dao.get_conn()
conn.delete_index(db)
conn.create_index(db)
def fixtures():
import test.base
for dict_ in test.base.fixtures['records']:
dao.Record.upsert(dict_)
def convert(inpath):
'''Convert from bibtex to bibjson. One argument expected: path to bibtext
file.
'''
    import json
    from bibserver.parsers import BibTexParser
    parser = BibTexParser.BibTexParser()
    bibtex = open(inpath).read()
    print(json.dumps(parser.parse(bibtex), indent=2, sort_keys=True))
def bulk_upload(colls_list):
'''Take a collections list in a JSON file and use the bulk_upload importer.
colls_list described in importer.py
'''
import bibserver.importer
return bibserver.importer.bulk_upload(colls_list)
## ==================================================
## Misc stuff for setting up a command line interface
def _module_functions(functions):
local_functions = dict(functions)
    for k, v in list(local_functions.items()):
        if not inspect.isfunction(v) or k.startswith('_'):
            del local_functions[k]
return local_functions
def _main(functions_or_object):
isobject = inspect.isclass(functions_or_object)
if isobject:
_methods = _object_methods(functions_or_object)
else:
_methods = _module_functions(functions_or_object)
usage = '''%prog {action}
Actions:
'''
usage += '\n '.join(
[ '%s: %s' % (name, m.__doc__.split('\n')[0] if m.__doc__ else '') for (name,m)
in sorted(_methods.items()) ])
parser = optparse.OptionParser(usage)
# Optional: for a config file
# parser.add_option('-c', '--config', dest='config',
# help='Config file to use.')
options, args = parser.parse_args()
if not args or not args[0] in _methods:
parser.print_help()
sys.exit(1)
method = args[0]
if isobject:
getattr(functions_or_object(), method)(*args[1:])
else:
_methods[method](*args[1:])
__all__ = [ '_main' ]
if __name__ == '__main__':
_main(locals())
```
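`_main` turns every public function of the module into a subcommand: the first positional argument selects the function and any remaining arguments are passed straight through. A stripped-down sketch of the same dispatch pattern in isolation (the `greet` function is illustrative, not part of bibserver):
```python
import inspect
import optparse
import sys

def greet(name):
    '''Print a greeting for NAME.'''
    print("hello", name)

def _module_functions(functions):
    # keep only public, module-level functions
    return {k: v for k, v in functions.items()
            if inspect.isfunction(v) and not k.startswith('_')}

if __name__ == '__main__':
    methods = _module_functions(dict(globals()))
    usage = "%prog {action}\n\nActions:\n  " + "\n  ".join(sorted(methods))
    options, args = optparse.OptionParser(usage).parse_args()
    if not args or args[0] not in methods:
        sys.exit(usage)
    methods[args[0]](*args[1:])   # e.g. `python mini_cli.py greet world`
```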
#### File: bibserver/test/test_web.py
```python
from nose.tools import assert_equal
import urllib
from base import *
from bibserver import web, ingest
import os
class TestWeb(object):
@classmethod
def setup_class(cls):
web.app.config['TESTING'] = True
cls.app = web.app.test_client()
# fixture data
recdict = fixtures['records'][0]
cls.record = dao.Record.upsert(recdict)
Fixtures.create_account()
config['download_cache_directory'] = 'test/data/downloads'
ingest.init()
@classmethod
def teardown_class(cls):
conn, db = dao.get_conn()
conn.delete_index(TESTDB)
for x in os.listdir('test/data/downloads'):
os.unlink(os.path.join('test/data/downloads', x))
os.rmdir('test/data/downloads')
def test_home(self):
res = self.app.get('/')
assert 'BibSoup' in res.data, res.data
def test_faq(self):
res = self.app.get('/faq')
assert 'This service is an example' in res.data, res.data
def test_record(self):
res = self.app.get('/' + Fixtures.account.id + '/' + self.record["collection"] + '/' + self.record["_id"] + '.json')
assert res.status == '200 OK', res.status
out = json.loads(res.data)
assert out["id"] == self.record["id"], out
def test_upload(self):
res = self.app.get('/upload')
        print(res.status)
assert res.status == '302 FOUND', res.status
res = self.app.get('/upload',
headers={'REMOTE_USER': Fixtures.account.id}
)
assert res.status == '200 OK', res.status
assert 'upload' in res.data, res.data
def test_upload_post(self):
startnum = dao.Record.query()['hits']['total']
res = self.app.post('/upload?format=bibtex&collection='+urllib.quote_plus('"My Test Collection"'),
data = {'upfile': (open('test/data/sample.bibtex'), 'sample.bibtex')},
headers={'REMOTE_USER': Fixtures.account.id}
)
assert res.status == '302 FOUND', res.status
# Now we have to trigger the ingest handling of the ticket
# which is normally done asynchronously
for state in ('new', 'downloaded', 'parsed'):
for t in ingest.get_tickets(state):
ingest.determine_action(t)
endnum = dao.Record.query()['hits']['total']
assert_equal(endnum, startnum+1)
# TODO: re-enable
# This does not work because login in the previous method appears to
# persist to this method. Not sure how to fix this ...
def _test_upload_post_401(self):
bibtex_data = open('test/data/sample.bibtex').read()
res = self.app.post('/upload',
data=dict(
format='bibtex',
collection='My Test Collection',
data=bibtex_data,
)
)
assert res.status == '401 UNAUTHORIZED', res.status
def test_query(self):
res = self.app.get('/query')
assert res.status == '200 OK', res.status
res = self.app.get('/query?q=title:non-existent')
assert res.status == '200 OK', res.status
out = json.loads(res.data)
assert out['hits']['total'] == 0, out
def test_accounts_query_inaccessible(self):
res = self.app.get('/query/account')
assert res.status == '401 UNAUTHORIZED', res.status
def test_search(self):
res = self.app.get('/search?q=tolstoy&format=json')
assert res.status == '200 OK', res.status
out = json.loads(res.data)
assert len(out) == 1, out
assert "Tolstoy" in out[0]["author"][0]["name"], out
```
|
{
"source": "jgoldford/ASR",
"score": 3
}
|
#### File: ASR/asr/lib.py
```python
import sys
import os
from Bio import SeqIO
import numpy as np
import csv
class PAMLparams:
def __init__(self, seqfile,treefile,outfile):
self.seqfile = seqfile;
self.treefile = treefile;
self.outfile = outfile;
self.noisy = 9;
self.verbose = 2;
self.runmode = 0;
self.seqtype = 2;
self.clock = 0;
self.aaDist = 0;
self.aaRatefile = '/Users/joshuagoldford/Documents/paml4.8/dat/wag.dat';
self.model = 2;
self.icode= 0;
self.Mgene= 0;
self.fix_alpha = 0;
self.alpha = 0.5;
self.Malpha = 1;
self.ncatG = 4;
self.getSE = 0;
self.RateAncestor = 1;
self.Small_Diff = 0.5e-6;
self.cleandata = 0;
self.method = 1;
def toText(self):
return '''
seqfile = %(seqfile)s * sequence data filename
treefile = %(treefile)s * tree structure file name
outfile = %(outfile)s * main result file name
noisy = %(noisy)s * 0,1,2,3,9: how much rubbish on the screen
verbose = %(verbose)s * 0: concise; 1: detailed, 2: too much
runmode = %(runmode)s * 0: user tree; 1: semi-automatic; 2: automatic
* 3: StepwiseAddition; (4,5):PerturbationNNI; -2: pairwise
seqtype = %(seqtype)s * 1:codons; 2:AAs; 3:codons-->AAs
clock = %(clock)s * 0:no clock, 1:clock; 2:local clock; 3:CombinedAnalysis
aaDist = %(aaDist)s * 0:equal, +:geometric; -:linear, 1-6:G1974,Miyata,c,p,v,a
aaRatefile = %(aaRatefile)s * only used for aa seqs with model=empirical(_F)
* dayhoff.dat, jones.dat, wag.dat, mtmam.dat, or your own
model = %(model)s
* models for codons:
* 0:one, 1:b, 2:2 or more dN/dS ratios for branches
* models for AAs or codon-translated AAs:
* 0:poisson, 1:proportional, 2:Empirical, 3:Empirical+F
* 6:FromCodon, 7:AAClasses, 8:REVaa_0, 9:REVaa(nr=189)
icode = %(icode)s * 0:universal code; 1:mammalian mt; 2-10:see below
Mgene = %(Mgene)s * codon: 0:rates, 1:separate; 2:diff pi, 3:diff kapa, 4:all diff
* AA: 0:rates, 1:separate
fix_alpha = %(fix_alpha)s * 0: estimate gamma shape parameter; 1: fix it at alpha
alpha = %(alpha)s * initial or fixed alpha, 0:infinity (constant rate)
Malpha = %(Malpha)s * different alphas for genes
ncatG = %(ncatG)s * # of categories in dG of NSsites models
getSE = %(getSE)s * 0: don't want them, 1: want S.E.s of estimates
RateAncestor = %(RateAncestor)s * (0,1,2): rates (alpha>0) or ancestral states (1 or 2)
Small_Diff = %(Small_Diff)s
cleandata = %(cleandata)s * remove sites with ambiguity data (1:yes, 0:no)?
method = %(method)s * Optimization method 0: simultaneous; 1: one branch a time''' % self.__dict__
def fasta2phylip(infile,outfile):
print("CONVERT FASTA TO PHYLIP")
sequence_dict = {}
print(infile)
for record in SeqIO.parse(open(infile, "r"), "fasta") :
tab = record.id.split(" ")
#print record.title
sequence = str(record.seq).replace(" ","")
#print sequence, len(sequence)
sequence_dict[tab[0]]= sequence
if "U" in sequence:
print(tab[0])
sys.exit()
print(len(sequence_dict))
#sys.exit()
# Test length of the alignment:
alignment_length = 0
for gene in sequence_dict:
if (alignment_length != 0) and (len(sequence_dict[gene]) != alignment_length):
print("Error in alignment length, exit on error !!!")
sys.exit()
else:
alignment_length = len(sequence_dict[gene])
number_of_seq = len(sequence_dict)
print("Number of sequences:\t"+str(number_of_seq))
print("Alignment length:\t"+str(alignment_length))
print("Ratio =\t"+str(alignment_length/3))
if alignment_length%3 != 0:
print("Warning: Hum, your alignment didn't code for nucleotides")
### Write PHYLIP file
phyfile = open(outfile,"w")
name_length = 50
if len(sys.argv) > 3:
name_length = int(sys.argv[3])
phyfile.write(str(number_of_seq)+"\t"+str(alignment_length)+"\n")
for gene in sequence_dict:
if len(gene) > name_length:
gene_name = gene[0:name_length].replace(" ","")
if gene_name[-1] == "_":
gene_name = gene_name[0:-1]
##elif gene_name[-2] == "_":
## gene_name = gene_name[0:-2]
else:
gene_name = gene
phyfile.write(gene_name+" "+sequence_dict[gene]+"\n")
phyfile.close()
def rst2fasta(inFile,outFile):
file_in = open(inFile,"r")
file_out = open(outFile,'w');
while 1:
line = file_in.readline()
if line == "":
break
line= line.rstrip() # Remove end-of-line
if line[0:4] == "node":
tab = line.split(" ");
line = ">"+tab[0].replace(" ","").replace("#","");
file_out.write(line)
file_out.write('\n');
if len(tab)>1:
line = tab[1].replace(" ","")
file_out.write(line)
file_out.write('\n');
file_in.close()
file_out.close();
def getAncesteralTree(fileName,outfile):
with open(fileName, "r") as fid:
tree = fid.readlines()[14];
tree = tree.rstrip()
with open(outfile,"w") as fout:
fout.write(tree);
return tree;
def appendAttributesToNodes(fileIn,treeFile):
    with open(fileIn, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        headers = next(reader)
        lines = list(reader)
node_dict = {}
for idx,line in enumerate(lines):
        if line[3] != '-':
attr = np.log(float(line[3])/float(line[4]));
else:
attr = 0;
node_dict[line[0]] = attr;
tree_tab = []
tree_file = open(treeFile,"r")
#tree_file = open("FinalTree.tree","r")
while 1:
line = tree_file.readline()
if line == "":
break
tab = line.split()
tree_tab = tree_tab+tab
tree_file.close()
#print tree_tab
new_tree_line = ""
#print(node_dict.keys())
for item in tree_tab:
if 'node'+item in node_dict:
print("node"+item, node_dict["node"+item])
item = node_dict['node'+item]
item = round(item,3)
item = str(item)
new_tree_line = new_tree_line+item
print(new_tree_line)
```
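`PAMLparams.toText()` renders a complete codeml control file from the object's attributes, so running an ancestral reconstruction reduces to writing that text to a `.ctl` file and invoking PAML on it. A hedged usage sketch; the import path is assumed from the repository layout, the file names are placeholders, and `codeml` itself must be installed separately:
```python
from asr.lib import PAMLparams

# Alignment, tree and output names are hypothetical.
params = PAMLparams(seqfile="aln.phy", treefile="tree.nwk", outfile="mlc.out")
params.alpha = 1.0            # any attribute can be overridden before rendering

with open("codeml.ctl", "w") as ctl:
    ctl.write(params.toText())

# The control file is then consumed by PAML, e.g.:  codeml codeml.ctl
```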
|
{
"source": "JGoldman110/airflow",
"score": 2
}
|
#### File: hooks/vertex_ai/hyperparameter_tuning_job.py
```python
from typing import Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.aiplatform import CustomJob, HyperparameterTuningJob, gapic, hyperparameter_tuning
from google.cloud.aiplatform_v1 import JobServiceClient, types
from google.cloud.aiplatform_v1.services.job_service.pagers import ListHyperparameterTuningJobsPager
from airflow import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class HyperparameterTuningJobHook(GoogleBaseHook):
"""Hook for Google Cloud Vertex AI Hyperparameter Tuning Job APIs."""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self._hyperparameter_tuning_job: Optional[HyperparameterTuningJob] = None
def get_job_service_client(self, region: Optional[str] = None) -> JobServiceClient:
"""Returns JobServiceClient."""
client_options = None
if region and region != 'global':
client_options = {'api_endpoint': f'{region}-aiplatform.googleapis.com:443'}
return JobServiceClient(
credentials=self._get_credentials(), client_info=self.client_info, client_options=client_options
)
def get_hyperparameter_tuning_job_object(
self,
display_name: str,
custom_job: CustomJob,
metric_spec: Dict[str, str],
parameter_spec: Dict[str, hyperparameter_tuning._ParameterSpec],
max_trial_count: int,
parallel_trial_count: int,
max_failed_trial_count: int = 0,
search_algorithm: Optional[str] = None,
measurement_selection: Optional[str] = "best",
project: Optional[str] = None,
location: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
encryption_spec_key_name: Optional[str] = None,
) -> HyperparameterTuningJob:
"""Returns HyperparameterTuningJob object"""
return HyperparameterTuningJob(
display_name=display_name,
custom_job=custom_job,
metric_spec=metric_spec,
parameter_spec=parameter_spec,
max_trial_count=max_trial_count,
parallel_trial_count=parallel_trial_count,
max_failed_trial_count=max_failed_trial_count,
search_algorithm=search_algorithm,
measurement_selection=measurement_selection,
project=project,
location=location,
credentials=self._get_credentials(),
labels=labels,
encryption_spec_key_name=encryption_spec_key_name,
)
def get_custom_job_object(
self,
display_name: str,
worker_pool_specs: Union[List[Dict], List[gapic.WorkerPoolSpec]],
base_output_dir: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
) -> CustomJob:
"""Returns CustomJob object"""
return CustomJob(
display_name=display_name,
worker_pool_specs=worker_pool_specs,
base_output_dir=base_output_dir,
project=project,
location=location,
            credentials=self._get_credentials(),
labels=labels,
encryption_spec_key_name=encryption_spec_key_name,
staging_bucket=staging_bucket,
)
@staticmethod
def extract_hyperparameter_tuning_job_id(obj: Dict) -> str:
"""Returns unique id of the hyperparameter_tuning_job."""
return obj["name"].rpartition("/")[-1]
def wait_for_operation(self, operation: Operation, timeout: Optional[float] = None):
"""Waits for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
def cancel_hyperparameter_tuning_job(self) -> None:
"""Cancel HyperparameterTuningJob"""
if self._hyperparameter_tuning_job:
self._hyperparameter_tuning_job.cancel()
@GoogleBaseHook.fallback_to_default_project_id
def create_hyperparameter_tuning_job(
self,
project_id: str,
region: str,
display_name: str,
metric_spec: Dict[str, str],
parameter_spec: Dict[str, hyperparameter_tuning._ParameterSpec],
max_trial_count: int,
parallel_trial_count: int,
# START: CustomJob param
worker_pool_specs: Union[List[Dict], List[gapic.WorkerPoolSpec]],
base_output_dir: Optional[str] = None,
custom_job_labels: Optional[Dict[str, str]] = None,
custom_job_encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
# END: CustomJob param
max_failed_trial_count: int = 0,
search_algorithm: Optional[str] = None,
measurement_selection: Optional[str] = "best",
hyperparameter_tuning_job_labels: Optional[Dict[str, str]] = None,
hyperparameter_tuning_job_encryption_spec_key_name: Optional[str] = None,
# START: run param
service_account: Optional[str] = None,
network: Optional[str] = None,
timeout: Optional[int] = None, # seconds
restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: Optional[str] = None,
sync: bool = True,
# END: run param
) -> HyperparameterTuningJob:
"""
Create a HyperparameterTuningJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param display_name: Required. The user-defined name of the HyperparameterTuningJob. The name can be
up to 128 characters long and can be consist of any UTF-8 characters.
:param metric_spec: Required. Dictionary representing metrics to optimize. The dictionary key is the
metric_id, which is reported by your training job, and the dictionary value is the optimization
goal of the metric('minimize' or 'maximize').
example: metric_spec = {'loss': 'minimize', 'accuracy': 'maximize'}
:param parameter_spec: Required. Dictionary representing parameters to optimize. The dictionary key
is the metric_id, which is passed into your training job as a command line key word argument, and
the dictionary value is the parameter specification of the metric.
:param max_trial_count: Required. The desired total number of Trials.
:param parallel_trial_count: Required. The desired number of Trials to run in parallel.
:param worker_pool_specs: Required. The spec of the worker pools including machine type and Docker
image. Can provided as a list of dictionaries or list of WorkerPoolSpec proto messages.
:param base_output_dir: Optional. GCS output directory of job. If not provided a timestamped
directory in the staging directory will be used.
:param custom_job_labels: Optional. The labels with user-defined metadata to organize CustomJobs.
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param custom_job_encryption_spec_key_name: Optional.Customer-managed encryption key name for a
CustomJob. If this is set, then all resources created by the CustomJob will be encrypted with the
provided encryption key.
:param staging_bucket: Optional. Bucket for produced custom job artifacts. Overrides staging_bucket
set in aiplatform.init.
:param max_failed_trial_count: Optional. The number of failed Trials that need to be seen before
failing the HyperparameterTuningJob. If set to 0, Vertex AI decides how many Trials must fail
before the whole job fails.
:param search_algorithm: The search algorithm specified for the Study. Accepts one of the following:
`None` - If you do not specify an algorithm, your job uses the default Vertex AI algorithm. The
default algorithm applies Bayesian optimization to arrive at the optimal solution with a more
effective search over the parameter space.
'grid' - A simple grid search within the feasible space. This option is particularly useful if
you want to specify a quantity of trials that is greater than the number of points in the
feasible space. In such cases, if you do not specify a grid search, the Vertex AI default
algorithm may generate duplicate suggestions. To use grid search, all parameter specs must be of
type `IntegerParameterSpec`, `CategoricalParameterSpace`, or `DiscreteParameterSpec`.
'random' - A simple random search within the feasible space.
:param measurement_selection: This indicates which measurement to use if/when the service
automatically selects the final measurement from previously reported intermediate measurements.
Accepts: 'best', 'last'
Choose this based on two considerations:
A) Do you expect your measurements to monotonically improve? If so, choose 'last'. On the other
hand, if you're in a situation where your system can "over-train" and you expect the performance
to get better for a while but then start declining, choose 'best'.
B) Are your measurements significantly noisy and/or irreproducible? If so, 'best' will tend to be
over-optimistic, and it may be better to choose 'last'.
If both or neither of (A) and (B) apply, it doesn't matter which selection type is chosen.
:param hyperparameter_tuning_job_labels: Optional. The labels with user-defined metadata to organize
HyperparameterTuningJobs. Label keys and values can be no longer than 64 characters (Unicode
codepoints), can only contain lowercase letters, numeric characters, underscores and dashes.
International characters are allowed. See https://goo.gl/xmQnxf for more information and examples
of labels.
:param hyperparameter_tuning_job_encryption_spec_key_name: Optional. Customer-managed encryption key
options for a HyperparameterTuningJob. If this is set, then all resources created by the
HyperparameterTuningJob will be encrypted with the provided encryption key.
:param service_account: Optional. Specifies the service account for workload run-as account. Users
submitting jobs must have act-as permission on this run-as account.
:param network: Optional. The full name of the Compute Engine network to which the job should be
peered. For example, projects/12345/global/networks/myVPC. Private services access must already
be configured for the network. If left unspecified, the job is not peered with any network.
:param timeout: The maximum job running time in seconds. The default is 7 days.
:param restart_job_on_worker_restart: Restarts the entire CustomJob if a worker gets restarted. This
feature can be used by distributed training jobs that are not resilient to workers leaving and
joining a job.
:param enable_web_access: Whether you want Vertex AI to enable interactive shell access to training
containers. https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell
:param tensorboard: Optional. The name of a Vertex AI
[Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] resource to which this CustomJob will
upload Tensorboard logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` The training script should
write Tensorboard to following Vertex AI environment variable: AIP_TENSORBOARD_LOG_DIR
`service_account` is required with provided `tensorboard`. For more information on configuring
your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
:param sync: Whether to execute this method synchronously. If False, this method will unblock and it
will be executed in a concurrent Future.
"""
custom_job = self.get_custom_job_object(
project=project_id,
location=region,
display_name=display_name,
worker_pool_specs=worker_pool_specs,
base_output_dir=base_output_dir,
labels=custom_job_labels,
encryption_spec_key_name=custom_job_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
self._hyperparameter_tuning_job = self.get_hyperparameter_tuning_job_object(
project=project_id,
location=region,
display_name=display_name,
custom_job=custom_job,
metric_spec=metric_spec,
parameter_spec=parameter_spec,
max_trial_count=max_trial_count,
parallel_trial_count=parallel_trial_count,
max_failed_trial_count=max_failed_trial_count,
search_algorithm=search_algorithm,
measurement_selection=measurement_selection,
labels=hyperparameter_tuning_job_labels,
encryption_spec_key_name=hyperparameter_tuning_job_encryption_spec_key_name,
)
self._hyperparameter_tuning_job.run(
service_account=service_account,
network=network,
timeout=timeout, # seconds
restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
sync=sync,
)
self._hyperparameter_tuning_job.wait()
return self._hyperparameter_tuning_job
@GoogleBaseHook.fallback_to_default_project_id
def get_hyperparameter_tuning_job(
self,
project_id: str,
region: str,
hyperparameter_tuning_job: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> types.HyperparameterTuningJob:
"""
Gets a HyperparameterTuningJob
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param hyperparameter_tuning_job: Required. The name of the HyperparameterTuningJob resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
name = client.hyperparameter_tuning_job_path(project_id, region, hyperparameter_tuning_job)
result = client.get_hyperparameter_tuning_job(
request={
'name': name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_hyperparameter_tuning_jobs(
self,
project_id: str,
region: str,
filter: Optional[str] = None,
page_size: Optional[int] = None,
page_token: Optional[str] = None,
read_mask: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ListHyperparameterTuningJobsPager:
"""
Lists HyperparameterTuningJobs in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: The standard list filter.
Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
- ``model_display_name`` supports = and !=
Some examples of using the filter are:
- ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
- ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
- ``NOT display_name="my_job"``
- ``state="JOB_STATE_FAILED"``
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_hyperparameter_tuning_jobs(
request={
'parent': parent,
'filter': filter,
'page_size': page_size,
'page_token': page_token,
'read_mask': read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_hyperparameter_tuning_job(
self,
project_id: str,
region: str,
hyperparameter_tuning_job: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Operation:
"""
Deletes a HyperparameterTuningJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param hyperparameter_tuning_job: Required. The name of the HyperparameterTuningJob resource to be
deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
name = client.hyperparameter_tuning_job_path(project_id, region, hyperparameter_tuning_job)
result = client.delete_hyperparameter_tuning_job(
request={
'name': name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
```
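A direct hook call wires the CustomJob spec and the study configuration together and blocks until all trials finish. The sketch below is hedged: it assumes a configured `google_cloud_default` Airflow connection, the project, bucket, container image and parameter range are placeholders, and `hyperparameter_tuning` is the `google.cloud.aiplatform` helper module already imported at the top of the hook:
```python
from google.cloud.aiplatform import hyperparameter_tuning as hpt

from airflow.providers.google.cloud.hooks.vertex_ai.hyperparameter_tuning_job import (
    HyperparameterTuningJobHook,
)

hook = HyperparameterTuningJobHook(gcp_conn_id="google_cloud_default")

job = hook.create_hyperparameter_tuning_job(
    project_id="my-project",                       # placeholder
    region="us-central1",
    display_name="example-hpt-job",
    metric_spec={"loss": "minimize"},
    parameter_spec={
        "learning_rate": hpt.DoubleParameterSpec(min=1e-4, max=1e-1, scale="log"),
    },
    max_trial_count=8,
    parallel_trial_count=2,
    worker_pool_specs=[
        {
            "machine_spec": {"machine_type": "n1-standard-4"},
            "replica_count": 1,
            "container_spec": {"image_uri": "gcr.io/my-project/trainer:latest"},  # placeholder
        }
    ],
    staging_bucket="gs://my-staging-bucket",       # placeholder
)
print(hook.extract_hyperparameter_tuning_job_id({"name": job.resource_name}))
```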
#### File: airflow_breeze/ci/build_params.py
```python
import os
from dataclasses import dataclass
from datetime import datetime
from typing import List, Optional
from airflow_breeze.branch_defaults import AIRFLOW_BRANCH, DEFAULT_AIRFLOW_CONSTRAINTS_BRANCH
from airflow_breeze.global_constants import get_airflow_version
from airflow_breeze.utils.run_utils import run_command
@dataclass
class BuildParams:
# To construct ci_image_name
python_version: str = "3.7"
airflow_branch: str = AIRFLOW_BRANCH
build_id: int = 0
# To construct docker cache ci directive
docker_cache: str = "pulled"
airflow_extras: str = "devel_ci"
additional_airflow_extras: str = ""
additional_python_deps: str = ""
# To construct ci_image_name
tag: str = "latest"
# To construct airflow_image_repository
github_repository: str = "apache/airflow"
constraints_github_repository: str = "apache/airflow"
# Not sure if defaultConstraintsBranch and airflow_constraints_reference are different
default_constraints_branch: str = DEFAULT_AIRFLOW_CONSTRAINTS_BRANCH
airflow_constraints: str = "constraints-source-providers"
airflow_constraints_reference: Optional[str] = "constraints-main"
airflow_constraints_location: Optional[str] = ""
airflow_pre_cached_pip_packages: str = "true"
dev_apt_command: str = ""
dev_apt_deps: str = ""
additional_dev_apt_command: str = ""
additional_dev_apt_deps: str = ""
additional_dev_apt_env: str = ""
runtime_apt_command: str = ""
runtime_apt_deps: str = ""
additional_runtime_apt_command: str = ""
additional_runtime_apt_deps: str = ""
additional_runtime_apt_env: str = ""
platform: str = f"linux/{os.uname().machine}"
debian_version: str = "bullseye"
upgrade_to_newer_dependencies: str = "true"
@property
def airflow_image_name(self):
image = f'ghcr.io/{self.github_repository.lower()}'
return image
@property
def airflow_ci_image_name(self):
"""Construct CI image link"""
image = f'{self.airflow_image_name}/{self.airflow_branch}/ci/python{self.python_version}'
return image
@property
def airflow_ci_image_name_with_cache(self):
"""Construct CI image link"""
image = f'{self.airflow_image_name}/{self.airflow_branch}/ci/python{self.python_version}:cache'
return image
@property
def airflow_ci_image_name_with_tag(self):
"""Construct CI image link"""
image = f'{self.airflow_image_name}/{self.airflow_branch}/ci/python{self.python_version}'
return image if not self.tag else image + f":{self.tag}"
@property
def airflow_image_repository(self):
return f'https://github.com/{self.github_repository}'
@property
def python_base_image(self):
"""Construct Python Base Image"""
return f'python:{self.python_version}-slim-{self.debian_version}'
@property
def airflow_ci_local_manifest_image(self):
"""Construct CI Local Manifest Image"""
return f'local-airflow-ci-manifest/{self.airflow_branch}/python{self.python_version}'
@property
def airflow_ci_remote_manifest_image(self):
"""Construct CI Remote Manifest Image"""
return f'{self.airflow_ci_image_name}/{self.airflow_branch}/ci-manifest//python:{self.python_version}'
@property
def airflow_image_date_created(self):
# 2021-12-18T15:19:25Z '%Y-%m-%dT%H:%M:%SZ'
# Set date in above format and return
        now = datetime.utcnow()
return now.strftime("%Y-%m-%dT%H:%M:%SZ")
@property
def commit_sha(self):
output = run_command(['git', 'rev-parse', 'HEAD'], capture_output=True, text=True)
return output.stdout.strip()
@property
def docker_cache_ci_directive(self) -> List:
docker_cache_ci_directive = []
if self.docker_cache == "pulled":
docker_cache_ci_directive.append("--cache-from")
docker_cache_ci_directive.append(self.airflow_ci_image_name_with_cache)
elif self.docker_cache == "disabled":
docker_cache_ci_directive.append("--no-cache")
else:
pass
return docker_cache_ci_directive
@property
def airflow_version(self):
return get_airflow_version()
```
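`docker_cache_ci_directive` translates the `docker_cache` setting into the extra flags for `docker build`: a pulled cache adds `--cache-from <ci image>:cache`, a disabled cache adds `--no-cache`, and any other value adds nothing. A small check of that behaviour (repository and Python version are just example values; the branch segment comes from `AIRFLOW_BRANCH`):
```python
from airflow_breeze.ci.build_params import BuildParams

params = BuildParams(python_version="3.7", github_repository="apache/airflow")
print(params.docker_cache_ci_directive)
# ['--cache-from', 'ghcr.io/apache/airflow/<branch>/ci/python3.7:cache']

params = BuildParams(docker_cache="disabled")
print(params.docker_cache_ci_directive)   # ['--no-cache']
```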
#### File: hooks/vertex_ai/test_batch_prediction_job.py
```python
from unittest import TestCase, mock
from airflow.providers.google.cloud.hooks.vertex_ai.batch_prediction_job import BatchPredictionJobHook
from tests.providers.google.cloud.utils.base_gcp_mock import (
mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
TEST_GCP_CONN_ID: str = "test-gcp-conn-id"
TEST_REGION: str = "test-region"
TEST_PROJECT_ID: str = "test-project-id"
TEST_BATCH_PREDICTION_JOB: dict = {}
TEST_MODEL_NAME = f"projects/{TEST_PROJECT_ID}/locations/{TEST_REGION}/models/test_model_id"
TEST_JOB_DISPLAY_NAME = "temp_create_batch_prediction_job_test"
TEST_BATCH_PREDICTION_JOB_ID = "test_batch_prediction_job_id"
TEST_UPDATE_MASK: dict = {}
BASE_STRING = "airflow.providers.google.common.hooks.base_google.{}"
BATCH_PREDICTION_JOB_STRING = "airflow.providers.google.cloud.hooks.vertex_ai.batch_prediction_job.{}"
class TestBatchPredictionJobWithDefaultProjectIdHook(TestCase):
def setUp(self):
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_default_project_id
):
self.hook = BatchPredictionJobHook(gcp_conn_id=TEST_GCP_CONN_ID)
@mock.patch(BATCH_PREDICTION_JOB_STRING.format("BatchPredictionJobHook.get_job_service_client"))
def test_delete_batch_prediction_job(self, mock_client) -> None:
self.hook.delete_batch_prediction_job(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
batch_prediction_job=TEST_BATCH_PREDICTION_JOB,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.delete_batch_prediction_job.assert_called_once_with(
request=dict(
name=mock_client.return_value.batch_prediction_job_path.return_value,
),
metadata=(),
retry=None,
timeout=None,
)
mock_client.return_value.batch_prediction_job_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_REGION,
TEST_BATCH_PREDICTION_JOB,
)
@mock.patch(BATCH_PREDICTION_JOB_STRING.format("BatchPredictionJobHook.get_job_service_client"))
def test_get_batch_prediction_job(self, mock_client) -> None:
self.hook.get_batch_prediction_job(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
batch_prediction_job=TEST_BATCH_PREDICTION_JOB,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.get_batch_prediction_job.assert_called_once_with(
request=dict(
name=mock_client.return_value.batch_prediction_job_path.return_value,
),
metadata=(),
retry=None,
timeout=None,
)
mock_client.return_value.batch_prediction_job_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_REGION,
TEST_BATCH_PREDICTION_JOB,
)
@mock.patch(BATCH_PREDICTION_JOB_STRING.format("BatchPredictionJobHook.get_job_service_client"))
def test_list_batch_prediction_jobs(self, mock_client) -> None:
self.hook.list_batch_prediction_jobs(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.list_batch_prediction_jobs.assert_called_once_with(
request=dict(
parent=mock_client.return_value.common_location_path.return_value,
filter=None,
page_size=None,
page_token=None,
read_mask=None,
),
metadata=(),
retry=None,
timeout=None,
)
mock_client.return_value.common_location_path.assert_called_once_with(TEST_PROJECT_ID, TEST_REGION)
class TestBatchPredictionJobWithoutDefaultProjectIdHook(TestCase):
def setUp(self):
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_no_default_project_id
):
self.hook = BatchPredictionJobHook(gcp_conn_id=TEST_GCP_CONN_ID)
@mock.patch(BATCH_PREDICTION_JOB_STRING.format("BatchPredictionJobHook.get_job_service_client"))
def test_delete_batch_prediction_job(self, mock_client) -> None:
self.hook.delete_batch_prediction_job(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
batch_prediction_job=TEST_BATCH_PREDICTION_JOB,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.delete_batch_prediction_job.assert_called_once_with(
request=dict(
name=mock_client.return_value.batch_prediction_job_path.return_value,
),
metadata=(),
retry=None,
timeout=None,
)
mock_client.return_value.batch_prediction_job_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_REGION,
TEST_BATCH_PREDICTION_JOB,
)
@mock.patch(BATCH_PREDICTION_JOB_STRING.format("BatchPredictionJobHook.get_job_service_client"))
def test_get_batch_prediction_job(self, mock_client) -> None:
self.hook.get_batch_prediction_job(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
batch_prediction_job=TEST_BATCH_PREDICTION_JOB,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.get_batch_prediction_job.assert_called_once_with(
request=dict(
name=mock_client.return_value.batch_prediction_job_path.return_value,
),
metadata=(),
retry=None,
timeout=None,
)
mock_client.return_value.batch_prediction_job_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_REGION,
TEST_BATCH_PREDICTION_JOB,
)
@mock.patch(BATCH_PREDICTION_JOB_STRING.format("BatchPredictionJobHook.get_job_service_client"))
def test_list_batch_prediction_jobs(self, mock_client) -> None:
self.hook.list_batch_prediction_jobs(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
)
mock_client.assert_called_once_with(TEST_REGION)
mock_client.return_value.list_batch_prediction_jobs.assert_called_once_with(
request=dict(
parent=mock_client.return_value.common_location_path.return_value,
filter=None,
page_size=None,
page_token=None,
read_mask=None,
),
metadata=(),
retry=None,
timeout=None,
)
mock_client.return_value.common_location_path.assert_called_once_with(TEST_PROJECT_ID, TEST_REGION)
```
|
{
"source": "jgoldschrafe/metricinga",
"score": 3
}
|
#### File: metricinga/tests/test_argparse.py
```python
import mock
from mock import MagicMock
from mock import patch
import os
import sys
import unittest
sys.path.append(os.path.abspath(os.path.join(
os.path.dirname(__file__), '..')))
import metricinga
class ArgumentParserTestCase(unittest.TestCase):
"""Test that arguments are parsed correctly.
"""
def test_parse_long_args(self):
args = ['--daemonize',
'--host', 'testhost',
'--pidfile', '/test/pidfile',
'--poll-interval', '60',
'--port', '62004',
'--prefix', 'test-prefix',
'--replacement-char', 't',
'--spool-dir', '/test/spool-dir',
'--verbose']
opts = metricinga.parse_arguments(args)
self.assertEqual(opts.daemonize, True)
self.assertEqual(opts.host, 'testhost')
self.assertEqual(opts.pidfile, '/test/pidfile')
self.assertEqual(opts.poll_interval, 60)
self.assertEqual(opts.port, 62004)
self.assertEqual(opts.prefix, 'test-prefix')
self.assertEqual(opts.replacement_char, 't')
self.assertEqual(opts.spool_dir, '/test/spool-dir')
self.assertEqual(opts.verbose, True)
def test_parse_short_args(self):
args = ['-d',
'-D', '/test/spool-dir',
'-H', 'testhost',
'-P', 'test-prefix',
'-p', '62004',
'-r', 't',
'-v']
opts = metricinga.parse_arguments(args)
self.assertEqual(opts.daemonize, True)
self.assertEqual(opts.host, 'testhost')
self.assertEqual(opts.port, 62004)
self.assertEqual(opts.prefix, 'test-prefix')
self.assertEqual(opts.replacement_char, 't')
self.assertEqual(opts.spool_dir, '/test/spool-dir')
self.assertEqual(opts.verbose, True)
```
|
{
"source": "JGoldstone/colour",
"score": 2
}
|
#### File: appearance/tests/test_zcam.py
```python
import numpy as np
import unittest
from itertools import permutations
from colour.appearance import (
VIEWING_CONDITIONS_ZCAM,
InductionFactors_ZCAM,
CAM_Specification_ZCAM,
XYZ_to_ZCAM,
ZCAM_to_XYZ,
)
from colour.utilities import (
as_float_array,
domain_range_scale,
ignore_numpy_errors,
tsplit,
)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestXYZ_to_ZCAM',
'TestZCAM_to_XYZ',
]
class TestXYZ_to_ZCAM(unittest.TestCase):
"""
Defines :func:`colour.appearance.zcam.XYZ_to_ZCAM` definition unit tests
methods.
"""
def test_XYZ_to_ZCAM(self):
"""
Tests :func:`colour.appearance.zcam.XYZ_to_ZCAM` definition.
"""
XYZ = np.array([185, 206, 163])
XYZ_w = np.array([256, 264, 202])
L_a = 264
Y_b = 100
surround = VIEWING_CONDITIONS_ZCAM['Average']
np.testing.assert_allclose(
XYZ_to_ZCAM(XYZ, XYZ_w, L_a, Y_b, surround),
np.array([
92.2520, 3.0216, 196.3524, 19.1314, 321.3464, 10.5252,
237.6401, np.nan, 34.7022, 25.2994, 91.6837
]),
rtol=0.025,
atol=0.025)
XYZ = np.array([89, 96, 120])
np.testing.assert_allclose(
XYZ_to_ZCAM(XYZ, XYZ_w, L_a, Y_b, surround),
np.array([
71.2071, 6.8539, 250.6422, 32.7963, 248.0394, 23.8744,
307.0595, np.nan, 18.2796, 40.4621, 70.4026
]),
rtol=0.025,
atol=0.025)
# NOTE: Hue quadrature :math:`H_z` is significantly different for this
# test, i.e. 47.748252 vs 43.8258.
# NOTE: :math:`F_L` as reported in the supplemental document has the
# same value as for :math:`L_a` = 264 instead of 150. The values seem
# to be computed for :math:`L_a` = 264 and :math:`Y_b` = 100.
XYZ = np.array([79, 81, 62])
# L_a = 150
# Y_b = 60
surround = VIEWING_CONDITIONS_ZCAM['Dim']
np.testing.assert_allclose(
XYZ_to_ZCAM(XYZ, XYZ_w, L_a, Y_b, surround),
np.array([
68.8890, 0.9774, 58.7532, 12.5916, 196.7686, 2.7918, 43.8258,
np.nan, 11.0371, 44.4143, 68.8737
]),
rtol=0.025,
atol=4)
XYZ = np.array([910, 1114, 500])
XYZ_w = np.array([2103, 2259, 1401])
L_a = 359
Y_b = 16
surround = VIEWING_CONDITIONS_ZCAM['Dark']
np.testing.assert_allclose(
XYZ_to_ZCAM(XYZ, XYZ_w, L_a, Y_b, surround),
np.array([
82.6445, 13.0838, 123.9464, 44.7277, 114.7431, 18.1655,
178.6422, np.nan, 34.4874, 26.8778, 78.2653
]),
rtol=0.025,
atol=0.025)
XYZ = np.array([96, 67, 28])
np.testing.assert_allclose(
XYZ_to_ZCAM(XYZ, XYZ_w, L_a, Y_b, surround),
np.array([
33.0139, 19.4070, 389.7720 % 360, 86.1882, 45.8363, 26.9446,
397.3301, np.nan, 43.6447, 47.9942, 30.2593
]),
rtol=0.025,
atol=0.025)
def test_n_dimensional_XYZ_to_ZCAM(self):
"""
Tests :func:`colour.appearance.zcam.XYZ_to_ZCAM` definition
n-dimensional support.
"""
XYZ = np.array([185, 206, 163])
XYZ_w = np.array([256, 264, 202])
L_a = 264
Y_b = 100
surround = VIEWING_CONDITIONS_ZCAM['Average']
specification = XYZ_to_ZCAM(XYZ, XYZ_w, L_a, Y_b, surround)
XYZ = np.tile(XYZ, (6, 1))
specification = np.tile(specification, (6, 1))
np.testing.assert_almost_equal(
XYZ_to_ZCAM(XYZ, XYZ_w, L_a, Y_b, surround),
specification,
decimal=7)
XYZ_w = np.tile(XYZ_w, (6, 1))
np.testing.assert_almost_equal(
XYZ_to_ZCAM(XYZ, XYZ_w, L_a, Y_b, surround),
specification,
decimal=7)
XYZ = np.reshape(XYZ, (2, 3, 3))
XYZ_w = np.reshape(XYZ_w, (2, 3, 3))
specification = np.reshape(specification, (2, 3, 11))
np.testing.assert_almost_equal(
XYZ_to_ZCAM(XYZ, XYZ_w, L_a, Y_b, surround),
specification,
decimal=7)
@ignore_numpy_errors
def test_domain_range_scale_XYZ_to_ZCAM(self):
"""
Tests :func:`colour.appearance.zcam.XYZ_to_ZCAM` definition
domain and range scale support.
"""
XYZ = np.array([185, 206, 163])
XYZ_w = np.array([256, 264, 202])
L_a = 264
Y_b = 100
surround = VIEWING_CONDITIONS_ZCAM['Average']
specification = XYZ_to_ZCAM(XYZ, XYZ_w, L_a, Y_b, surround)
d_r = (
('reference', 1, 1),
(1, 1, np.array([1, 1, 1 / 360, 1, 1, 1, 1 / 400, np.nan, 1, 1,
1])),
(100, 100,
np.array([
100, 100, 100 / 360, 100, 100, 100, 100 / 400, np.nan, 100,
100, 100
])),
)
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
XYZ_to_ZCAM(XYZ * factor_a, XYZ_w * factor_a, L_a, Y_b,
surround),
as_float_array(specification) * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_XYZ_to_ZCAM(self):
"""
Tests :func:`colour.appearance.zcam.XYZ_to_ZCAM` definition
nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
XYZ = np.array(case)
XYZ_w = np.array(case)
L_a = case[0]
Y_b = 100
surround = InductionFactors_ZCAM(case[0], case[0], case[0],
case[0])
XYZ_to_ZCAM(XYZ, XYZ_w, L_a, Y_b, surround)
class TestZCAM_to_XYZ(unittest.TestCase):
"""
Defines :func:`colour.appearance.zcam.ZCAM_to_XYZ` definition unit
tests methods.
"""
def test_ZCAM_to_XYZ(self):
"""
Tests :func:`colour.appearance.zcam.ZCAM_to_XYZ` definition.
"""
specification = CAM_Specification_ZCAM(
92.2520, 3.0216, 196.3524, 19.1314, 321.3464, 10.5252, 237.6401,
np.nan, 34.7022, 25.2994, 91.6837)
XYZ_w = np.array([256, 264, 202])
L_a = 264
Y_b = 100
surround = VIEWING_CONDITIONS_ZCAM['Average']
np.testing.assert_allclose(
ZCAM_to_XYZ(specification, XYZ_w, L_a, Y_b, surround),
np.array([185, 206, 163]),
atol=0.01,
rtol=0.01)
specification = CAM_Specification_ZCAM(
71.2071, 6.8539, 250.6422, 32.7963, 248.0394, 23.8744, 307.0595,
np.nan, 18.2796, 40.4621, 70.4026)
np.testing.assert_allclose(
ZCAM_to_XYZ(specification, XYZ_w, L_a, Y_b, surround),
np.array([89, 96, 120]),
atol=0.01,
rtol=0.01)
specification = CAM_Specification_ZCAM(
68.8890, 0.9774, 58.7532, 12.5916, 196.7686, 2.7918, 43.8258,
np.nan, 11.0371, 44.4143, 68.8737)
surround = VIEWING_CONDITIONS_ZCAM['Dim']
np.testing.assert_allclose(
ZCAM_to_XYZ(specification, XYZ_w, L_a, Y_b, surround),
np.array([79, 81, 62]),
atol=0.01,
rtol=0.01)
specification = CAM_Specification_ZCAM(
82.6445, 13.0838, 123.9464, 44.7277, 114.7431, 18.1655, 178.6422,
np.nan, 34.4874, 26.8778, 78.2653)
XYZ_w = np.array([2103, 2259, 1401])
L_a = 359
Y_b = 16
surround = VIEWING_CONDITIONS_ZCAM['Dark']
np.testing.assert_allclose(
ZCAM_to_XYZ(specification, XYZ_w, L_a, Y_b, surround),
np.array([910, 1114, 500]),
atol=0.01,
rtol=0.01)
specification = CAM_Specification_ZCAM(
33.0139, 19.4070, 389.7720 % 360, 86.1882, 45.8363, 26.9446,
397.3301, np.nan, 43.6447, 47.9942, 30.2593)
np.testing.assert_allclose(
ZCAM_to_XYZ(specification, XYZ_w, L_a, Y_b, surround),
np.array([96, 67, 28]),
atol=0.01,
rtol=0.01)
def test_n_dimensional_ZCAM_to_XYZ(self):
"""
Tests :func:`colour.appearance.zcam.ZCAM_to_XYZ` definition
n-dimensional support.
"""
XYZ = np.array([185, 206, 163])
XYZ_w = np.array([256, 264, 202])
L_a = 264
Y_b = 100
surround = VIEWING_CONDITIONS_ZCAM['Average']
specification = XYZ_to_ZCAM(XYZ, XYZ_w, L_a, Y_b, surround)
XYZ = ZCAM_to_XYZ(specification, XYZ_w, L_a, Y_b, surround)
specification = CAM_Specification_ZCAM(
*np.transpose(np.tile(tsplit(specification), (6, 1))).tolist())
XYZ = np.tile(XYZ, (6, 1))
np.testing.assert_almost_equal(
ZCAM_to_XYZ(specification, XYZ_w, L_a, Y_b, surround),
XYZ,
decimal=7)
XYZ_w = np.tile(XYZ_w, (6, 1))
np.testing.assert_almost_equal(
ZCAM_to_XYZ(specification, XYZ_w, L_a, Y_b, surround),
XYZ,
decimal=7)
specification = CAM_Specification_ZCAM(
*tsplit(np.reshape(specification, (2, 3, 11))).tolist())
XYZ_w = np.reshape(XYZ_w, (2, 3, 3))
XYZ = np.reshape(XYZ, (2, 3, 3))
np.testing.assert_almost_equal(
ZCAM_to_XYZ(specification, XYZ_w, L_a, Y_b, surround),
XYZ,
decimal=7)
@ignore_numpy_errors
def test_domain_range_scale_ZCAM_to_XYZ(self):
"""
Tests :func:`colour.appearance.zcam.ZCAM_to_XYZ` definition
domain and range scale support.
"""
XYZ_i = np.array([185, 206, 163])
XYZ_w = np.array([256, 264, 202])
L_a = 264
Y_b = 100
surround = VIEWING_CONDITIONS_ZCAM['Average']
specification = XYZ_to_ZCAM(XYZ_i, XYZ_w, L_a, Y_b, surround)
XYZ = ZCAM_to_XYZ(specification, XYZ_w, L_a, Y_b, surround)
d_r = (
('reference', 1, 1),
(1, np.array([1, 1, 1 / 360, 1, 1, 1, 1 / 400, np.nan, 1, 1, 1]),
1),
(100,
np.array([
100, 100, 100 / 360, 100, 100, 100, 100 / 400, np.nan, 100,
100, 100
]), 100),
)
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
ZCAM_to_XYZ(specification * factor_a, XYZ_w * factor_b,
L_a, Y_b, surround),
XYZ * factor_b,
decimal=7)
@ignore_numpy_errors
def test_raise_exception_ZCAM_to_XYZ(self):
"""
Tests :func:`colour.appearance.zcam.ZCAM_to_XYZ` definition
raised exception.
"""
self.assertRaises(
ValueError, ZCAM_to_XYZ,
CAM_Specification_ZCAM(
41.731091132513917,
None,
219.04843265831178,
), np.array([256, 264, 202]), 318.31, 20.0,
VIEWING_CONDITIONS_ZCAM['Average'])
@ignore_numpy_errors
def test_nan_ZCAM_to_XYZ(self):
"""
Tests :func:`colour.appearance.zcam.ZCAM_to_XYZ` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
J = case[0]
C = case[0]
h = case[0]
XYZ_w = np.array(case)
L_a = case[0]
Y_b = 100
surround = InductionFactors_ZCAM(case[0], case[0], case[0],
case[0])
ZCAM_to_XYZ(
CAM_Specification_ZCAM(J, C, h, M=50), XYZ_w, L_a, Y_b,
surround)
```
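Stripped of the unittest scaffolding, the forward ZCAM call used throughout the tests above reduces to a few lines. The sketch below mirrors the first test vector; the figures in the trailing comment are simply the asserted expectations from `test_XYZ_to_ZCAM`, not an independent derivation.

```python
# Standalone usage sketch mirroring the first test vector above.
import numpy as np

from colour.appearance import VIEWING_CONDITIONS_ZCAM, XYZ_to_ZCAM

XYZ = np.array([185, 206, 163])    # test sample tristimulus values
XYZ_w = np.array([256, 264, 202])  # reference white
L_a = 264                          # adapting luminance
Y_b = 100                          # relative background luminance
surround = VIEWING_CONDITIONS_ZCAM['Average']

specification = XYZ_to_ZCAM(XYZ, XYZ_w, L_a, Y_b, surround)
# Lightness ~= 92.25 and hue angle ~= 196.35 per the assertions above.
print(specification)
```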
#### File: io/luts/operator.py
```python
import numpy as np
from abc import ABC, abstractmethod
from colour.algebra import vector_dot
from colour.utilities import (
as_float_array,
attest,
is_iterable,
is_string,
ones,
zeros,
)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'AbstractLUTSequenceOperator',
'LUTOperatorMatrix',
]
class AbstractLUTSequenceOperator(ABC):
"""
Defines the base class for *LUT* sequence operators.
This is an :class:`ABCMeta` abstract class that must be inherited by
sub-classes.
Parameters
----------
name : str, optional
*LUT* sequence operator name.
comments : array_like, optional
Comments to add to the *LUT* sequence operator.
Attributes
----------
- :attr:`~colour.io.AbstractLUTSequenceOperator.name`
- :attr:`~colour.io.AbstractLUTSequenceOperator.comments`
Methods
-------
- :meth:`~colour.io.AbstractLUTSequenceOperator.apply`
"""
def __init__(self, name=None, comments=None):
self._name = 'LUT Sequence Operator {0}'.format(id(self))
self.name = name
self._comments = []
self.comments = comments
@property
def name(self):
"""
Getter and setter property for the *LUT* name.
Parameters
----------
value : str
Value to set the *LUT* name with.
Returns
-------
str
*LUT* name.
"""
return self._name
@name.setter
def name(self, value):
"""
Setter for **self.name** property.
"""
if value is not None:
attest(
is_string(value),
('"{0}" attribute: "{1}" type is not "str" or "str"!').format(
'name', value))
self._name = value
@property
def comments(self):
"""
Getter and setter property for the *LUT* comments.
Parameters
----------
value : str
Value to set the *LUT* comments with.
Returns
-------
str
*LUT* comments.
"""
return self._comments
@comments.setter
def comments(self, value):
"""
Setter for **self.comments** property.
"""
if value is not None:
attest(
is_iterable(value),
'"{0}" attribute: "{1}" must be an array like!'.format(
'comments', value))
self._comments = value
@abstractmethod
def apply(self, RGB, *args, **kwargs):
"""
Applies the *LUT* sequence operator to given *RGB* colourspace array.
Parameters
----------
RGB : array_like
*RGB* colourspace array to apply the *LUT* sequence operator onto.
Other Parameters
----------------
\\*args : list, optional
Arguments.
\\**kwargs : dict, optional
Keywords arguments.
Returns
-------
ndarray
Processed *RGB* colourspace array.
"""
pass
class LUTOperatorMatrix(AbstractLUTSequenceOperator):
"""
Defines the *LUT* operator supporting a 3x3 or 4x4 matrix and an offset
vector.
Parameters
----------
matrix : array_like, optional
3x3 or 4x4 matrix for the operator.
offset : array_like, optional
Offset for the operator.
name : str, optional
*LUT* operator name.
comments : array_like, optional
Comments to add to the *LUT* operator.
Attributes
----------
- :meth:`~colour.LUTOperatorMatrix.matrix`
- :meth:`~colour.LUTOperatorMatrix.offset`
Methods
-------
- :meth:`~colour.LUTOperatorMatrix.__str__`
- :meth:`~colour.LUTOperatorMatrix.__repr__`
- :meth:`~colour.LUTOperatorMatrix.__eq__`
- :meth:`~colour.LUTOperatorMatrix.__ne__`
- :meth:`~colour.LUTOperatorMatrix.apply`
Notes
-----
- The internal :attr:`colour.io.LUTOperatorMatrix.matrix` and
:attr:`colour.io.LUTOperatorMatrix.offset` attributes are reshaped to (4, 4)
and (4, ) respectively.
Examples
--------
Instantiating an identity matrix:
>>> print(LUTOperatorMatrix(name='Identity'))
LUTOperatorMatrix - Identity
----------------------------
<BLANKLINE>
Matrix : [[ 1. 0. 0. 0.]
[ 0. 1. 0. 0.]
[ 0. 0. 1. 0.]
[ 0. 0. 0. 1.]]
Offset : [ 0. 0. 0. 0.]
Instantiating a matrix with comments:
>>> matrix = np.array([[ 1.45143932, -0.23651075, -0.21492857],
... [-0.07655377, 1.1762297 , -0.09967593],
... [ 0.00831615, -0.00603245, 0.9977163 ]])
>>> print(LUTOperatorMatrix(
... matrix,
... name='AP0 to AP1',
... comments=['A first comment.', 'A second comment.']))
LUTOperatorMatrix - AP0 to AP1
------------------------------
<BLANKLINE>
Matrix : [[ 1.45143932 -0.23651075 -0.21492857 0. ]
[-0.07655377 1.1762297 -0.09967593 0. ]
[ 0.00831615 -0.00603245 0.9977163 0. ]
[ 0. 0. 0. 1. ]]
Offset : [ 0. 0. 0. 0.]
<BLANKLINE>
A first comment.
A second comment.
"""
def __init__(self, matrix=None, offset=None, *args, **kwargs):
super(LUTOperatorMatrix, self).__init__(*args, **kwargs)
self._matrix = np.diag(ones(4))
self.matrix = matrix
self._offset = zeros(4)
self.offset = offset
@property
def matrix(self):
"""
Getter and setter property for the *LUT* operator matrix.
Parameters
----------
value : array_like
Value to set the *LUT* operator matrix with.
Returns
-------
ndarray
Operator matrix.
"""
return self._matrix
@matrix.setter
def matrix(self, value):
"""
Setter for **self.matrix** property.
"""
if value is not None:
value = as_float_array(value)
shape_t = value.shape[-1]
value = value.reshape([shape_t, shape_t])
attest(
value.shape in [(3, 3), (4, 4)],
'"{0}" attribute: "{1}" shape is not (3, 3) or (4, 4)!'.format(
'matrix', value))
M = np.identity(4)
M[:shape_t, :shape_t] = value
self._matrix = M
@property
def offset(self):
"""
Getter and setter property for the *LUT* operator offset.
Parameters
----------
value : array_like
Value to set the *LUT* operator offset with.
Returns
-------
ndarray
Operator offset.
"""
return self._offset
@offset.setter
def offset(self, value):
"""
Setter for **self.offset** property.
"""
if value is not None:
value = as_float_array(value)
shape_t = value.shape[-1]
attest(
value.shape in [(3, ), (4, )],
'"{0}" attribute: "{1}" shape is not (3, ) or (4, )!'.format(
'offset', value))
offset = zeros(4)
offset[:shape_t] = value
self._offset = offset
def __str__(self):
"""
Returns a formatted string representation of the *LUT* operator.
Returns
-------
str
Formatted string representation.
Examples
--------
>>> print(LUTOperatorMatrix()) # doctest: +ELLIPSIS
LUTOperatorMatrix - LUT Sequence Operator ...
------------------------------------------...
<BLANKLINE>
Matrix : [[ 1. 0. 0. 0.]
[ 0. 1. 0. 0.]
[ 0. 0. 1. 0.]
[ 0. 0. 0. 1.]]
Offset : [ 0. 0. 0. 0.]
"""
def _indent_array(a):
"""
Indents given array string representation.
"""
return str(a).replace(' [', ' ' * 14 + '[')
return ('{0} - {1}\n'
'{2}\n\n'
'Matrix : {3}\n'
'Offset : {4}'
'{5}'.format(
self.__class__.__name__, self._name,
'-' * (len(self.__class__.__name__) + 3 + len(self._name)),
_indent_array(self._matrix), _indent_array(self._offset),
'\n\n{0}'.format('\n'.join(self._comments))
if self._comments else ''))
def __repr__(self):
"""
Returns an evaluable string representation of the *LUT* operator.
Returns
-------
str
Evaluable string representation.
Examples
--------
>>> LUTOperatorMatrix(
... comments=['A first comment.', 'A second comment.'])
... # doctest: +ELLIPSIS
LUTOperatorMatrix([[ 1., 0., 0., 0.],
[ 0., 1., 0., 0.],
[ 0., 0., 1., 0.],
[ 0., 0., 0., 1.]],
[ 0., 0., 0., 0.],
name='LUT Sequence Operator ...',
comments=['A first comment.', 'A second comment.'])
"""
representation = repr(self._matrix)
representation = representation.replace('array',
self.__class__.__name__)
representation = representation.replace(
' [',
'{0}['.format(' ' * (len(self.__class__.__name__) + 2)))
indentation = ' ' * (len(self.__class__.__name__) + 1)
representation = ('{0},\n'
'{1}{2},\n'
'{1}name=\'{3}\''
'{4})').format(
representation[:-1], indentation,
repr(self._offset).replace('array(', '').replace(
')', ''), self._name,
',\n{0}comments={1}'.format(
indentation, repr(self._comments))
if self._comments else '')
return representation
def __eq__(self, other):
"""
Returns whether the *LUT* operator is equal to given other object.
Parameters
----------
other : object
Object to test whether it is equal to the *LUT* operator.
Returns
-------
bool
Is given object equal to the *LUT* operator.
Examples
--------
>>> LUTOperatorMatrix() == LUTOperatorMatrix()
True
"""
if isinstance(other, LUTOperatorMatrix):
if all([
np.array_equal(self._matrix, other._matrix),
np.array_equal(self._offset, other._offset)
]):
return True
return False
def __ne__(self, other):
"""
Returns whether the *LUT* operator is not equal to given other object.
Parameters
----------
other : object
Object to test whether it is not equal to the *LUT* operator.
Returns
-------
bool
Is given object not equal to the *LUT* operator.
Examples
--------
>>> LUTOperatorMatrix() != LUTOperatorMatrix(
... np.linspace(0, 1, 16).reshape([4, 4]))
True
"""
return not (self == other)
def apply(self, RGB, apply_offset_first=False):
"""
Applies the *LUT* operator to given *RGB* array.
Parameters
----------
RGB : array_like
*RGB* array to apply the *LUT* operator transform to.
apply_offset_first : bool, optional
Whether to apply the offset first and then the matrix.
Returns
-------
ndarray
Transformed *RGB* array.
Examples
--------
>>> matrix = np.array([[ 1.45143932, -0.23651075, -0.21492857],
... [-0.07655377, 1.1762297 , -0.09967593],
... [ 0.00831615, -0.00603245, 0.9977163 ]])
>>> M = LUTOperatorMatrix(matrix)
>>> RGB = np.array([0.3, 0.4, 0.5])
>>> M.apply(RGB) # doctest: +ELLIPSIS
array([ 0.2333632..., 0.3976877..., 0.4989400...])
"""
RGB = as_float_array(RGB)
has_alpha_channel = RGB.shape[-1] == 4
M = self._matrix
offset = self._offset
if not has_alpha_channel:
M = M[:3, :3]
offset = offset[:3]
if apply_offset_first:
RGB += offset
RGB = vector_dot(M, RGB)
if not apply_offset_first:
RGB += offset
return RGB
```
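Pulling the docstring examples together, the operator above can be exercised on its own as follows, assuming `LUTOperatorMatrix` is re-exported from `colour.io` as its cross-references suggest; the offset value here is arbitrary and only illustrates `apply_offset_first`.

```python
# Usage sketch assembled from the docstring examples above.
import numpy as np

from colour.io import LUTOperatorMatrix

matrix = np.array([[1.45143932, -0.23651075, -0.21492857],
                   [-0.07655377, 1.1762297, -0.09967593],
                   [0.00831615, -0.00603245, 0.9977163]])
# The offset is arbitrary, chosen only to make the two call orders differ.
M = LUTOperatorMatrix(matrix, offset=np.array([0.01, 0.01, 0.01]),
                      name='AP0 to AP1')

RGB = np.array([0.3, 0.4, 0.5])
print(M.apply(RGB))                           # matrix first, then offset
print(M.apply(RGB, apply_offset_first=True))  # offset added before the matrix
```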
#### File: io/tests/test_tabular.py
```python
import os
import shutil
import unittest
import tempfile
from colour.colorimetry import SpectralDistribution, SpectralShape
from colour.io import (
read_spectral_data_from_csv_file,
read_sds_from_csv_file,
write_sds_to_csv_file,
)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'RESOURCES_DIRECTORY',
'COLOURCHECKER_N_OHTA_1',
'TestReadSpectralDataFromCsvFile',
'TestReadSdsFromCsvFile',
'TestWriteSdsToCsvFile',
]
RESOURCES_DIRECTORY = os.path.join(os.path.dirname(__file__), 'resources')
COLOURCHECKER_N_OHTA_1 = {
380.0: 0.048,
385.0: 0.051,
390.0: 0.055,
395.0: 0.060,
400.0: 0.065,
405.0: 0.068,
410.0: 0.068,
415.0: 0.067,
420.0: 0.064,
425.0: 0.062,
430.0: 0.059,
435.0: 0.057,
440.0: 0.055,
445.0: 0.054,
450.0: 0.053,
455.0: 0.053,
460.0: 0.052,
465.0: 0.052,
470.0: 0.052,
475.0: 0.053,
480.0: 0.054,
485.0: 0.055,
490.0: 0.057,
495.0: 0.059,
500.0: 0.061,
505.0: 0.062,
510.0: 0.065,
515.0: 0.067,
520.0: 0.070,
525.0: 0.072,
530.0: 0.074,
535.0: 0.075,
540.0: 0.076,
545.0: 0.078,
550.0: 0.079,
555.0: 0.082,
560.0: 0.087,
565.0: 0.092,
570.0: 0.100,
575.0: 0.107,
580.0: 0.115,
585.0: 0.122,
590.0: 0.129,
595.0: 0.134,
600.0: 0.138,
605.0: 0.142,
610.0: 0.146,
615.0: 0.150,
620.0: 0.154,
625.0: 0.158,
630.0: 0.163,
635.0: 0.167,
640.0: 0.173,
645.0: 0.180,
650.0: 0.188,
655.0: 0.196,
660.0: 0.204,
665.0: 0.213,
670.0: 0.222,
675.0: 0.231,
680.0: 0.242,
685.0: 0.251,
690.0: 0.261,
695.0: 0.271,
700.0: 0.282,
705.0: 0.294,
710.0: 0.305,
715.0: 0.318,
720.0: 0.334,
725.0: 0.354,
730.0: 0.372,
735.0: 0.392,
740.0: 0.409,
745.0: 0.420,
750.0: 0.436,
755.0: 0.450,
760.0: 0.462,
765.0: 0.465,
770.0: 0.448,
775.0: 0.432,
780.0: 0.421
}
class TestReadSpectralDataFromCsvFile(unittest.TestCase):
"""
Defines :func:`colour.io.tabular.read_spectral_data_from_csv_file`
definition unit tests methods.
"""
def test_read_spectral_data_from_csv_file(self):
"""
Tests :func:`colour.io.tabular.read_spectral_data_from_csv_file`
definition.
"""
colour_checker_n_ohta = os.path.join(RESOURCES_DIRECTORY,
'colorchecker_n_ohta.csv')
data = read_spectral_data_from_csv_file(colour_checker_n_ohta)
self.assertListEqual(
list(data.keys()), ['wavelength'] + [str(x) for x in range(1, 25)])
self.assertDictEqual(
dict(zip(data['wavelength'], data['1'])), COLOURCHECKER_N_OHTA_1)
colour_checker_n_ohta_transposed = os.path.join(
RESOURCES_DIRECTORY, 'colorchecker_n_ohta_transposed.csv')
data = read_spectral_data_from_csv_file(
colour_checker_n_ohta_transposed, transpose=True, delimiter='\t')
self.assertListEqual(
list(data.keys()), ['wavelength'] + [str(x) for x in range(1, 25)])
self.assertDictEqual(
dict(zip(data['wavelength'], data['1'])), COLOURCHECKER_N_OHTA_1)
linss2_10e_5 = os.path.join(RESOURCES_DIRECTORY, 'linss2_10e_5.csv')
data = read_spectral_data_from_csv_file(
linss2_10e_5,
names=['wavelength', 'l_bar', 'm_bar', 's_bar'],
filling_values=0)
self.assertListEqual(
list(data.keys()), ['wavelength', 'l_bar', 'm_bar', 's_bar'])
self.assertEqual(data['s_bar'][77], 0)
data = read_spectral_data_from_csv_file(
linss2_10e_5,
names=['wavelength', 'l_bar', 'm_bar', 's_bar'],
filling_values=-1)
self.assertEqual(data['s_bar'][77], -1)
class TestReadSdsFromCsvFile(unittest.TestCase):
"""
Defines :func:`colour.io.tabular.read_sds_from_csv_file` definition unit
tests methods.
"""
def test_read_sds_from_csv_file(self):
"""
Tests :func:`colour.io.tabular.read_sds_from_csv_file` definition.
"""
colour_checker_n_ohta = os.path.join(RESOURCES_DIRECTORY,
'colorchecker_n_ohta.csv')
sds = read_sds_from_csv_file(colour_checker_n_ohta)
for sd in sds.values():
self.assertIsInstance(sd, SpectralDistribution)
self.assertEqual(
sds['1'], SpectralDistribution(COLOURCHECKER_N_OHTA_1, name='1'))
class TestWriteSdsToCsvFile(unittest.TestCase):
"""
Defines :func:`colour.io.tabular.write_sds_to_csv_file` definition unit
tests methods.
"""
def setUp(self):
"""
Initialises common tests attributes.
"""
self._temporary_directory = tempfile.mkdtemp()
def tearDown(self):
"""
After tests actions.
"""
shutil.rmtree(self._temporary_directory)
def test_write_sds_to_csv_file(self):
"""
Tests :func:`colour.io.tabular.write_sds_to_csv_file` definition.
"""
colour_checker_n_ohta = os.path.join(RESOURCES_DIRECTORY,
'colorchecker_n_ohta.csv')
sds = read_sds_from_csv_file(colour_checker_n_ohta)
colour_checker_n_ohta_test = os.path.join(self._temporary_directory,
'colorchecker_n_ohta.csv')
write_sds_to_csv_file(sds, colour_checker_n_ohta_test)
sds_test = read_sds_from_csv_file(colour_checker_n_ohta_test)
for key, value in sds.items():
self.assertEqual(value, sds_test[key])
def test_raise_exception_write_sds_to_csv_file(self):
"""
Tests :func:`colour.io.tabular.write_sds_to_csv_file` definition
raised exception.
"""
colour_checker_n_ohta = os.path.join(RESOURCES_DIRECTORY,
'colorchecker_n_ohta.csv')
sds = read_sds_from_csv_file(colour_checker_n_ohta)
key = list(sds.keys())[0]
sds[key] = sds[key].align(SpectralShape(400, 700, 10))
self.assertRaises(RuntimeError, write_sds_to_csv_file, sds, '')
if __name__ == '__main__':
unittest.main()
```
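The same round trip performed by `test_write_sds_to_csv_file` can be reproduced outside the test harness; the CSV path below is hypothetical and stands in for the test resource of the same name.

```python
# Round-trip sketch based on the tests above; 'colorchecker_n_ohta.csv' is a
# placeholder path for the test resource of the same name.
import os
import tempfile

from colour.io import read_sds_from_csv_file, write_sds_to_csv_file

sds = read_sds_from_csv_file('colorchecker_n_ohta.csv')

with tempfile.TemporaryDirectory() as directory:
    path = os.path.join(directory, 'colorchecker_n_ohta.csv')
    write_sds_to_csv_file(sds, path)
    assert read_sds_from_csv_file(path)['1'] == sds['1']
```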
#### File: rgb/tests/test_ycocg.py
```python
import numpy as np
import unittest
from itertools import permutations
from colour.models.rgb import RGB_to_YCoCg, YCoCg_to_RGB
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Development'
__all__ = [
'TestRGB_to_YCoCg',
'TestYCoCg_to_RGB',
]
class TestRGB_to_YCoCg(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.ycocg.RGB_to_YCoCg` definition unit tests
methods.
"""
def test_RGB_to_YCoCg(self):
"""
Tests :func:`colour.models.rgb.ycocg.RGB_to_YCoCg` definition.
"""
np.testing.assert_array_equal(
RGB_to_YCoCg(np.array([0.75, 0.75, 0.0])),
np.array([0.5625, 0.375, 0.1875]))
np.testing.assert_array_equal(
RGB_to_YCoCg(np.array([0.25, 0.5, 0.75])),
np.array([0.5, -0.25, 0.0]))
np.testing.assert_array_equal(
RGB_to_YCoCg(np.array([0.0, 0.75, 0.75])),
np.array([0.5625, -0.375, 0.1875]))
def test_n_dimensional_RGB_to_YCoCg(self):
"""
Tests :func:`colour.models.rgb.ycocg.RGB_to_YCoCg` definition
n-dimensional arrays support.
"""
RGB = np.array([0.75, 0.75, 0.0])
YCoCg = RGB_to_YCoCg(RGB)
RGB = np.tile(RGB, 4)
RGB = np.reshape(RGB, (4, 3))
YCoCg = np.tile(YCoCg, 4)
YCoCg = np.reshape(YCoCg, (4, 3))
np.testing.assert_array_equal(RGB_to_YCoCg(RGB), YCoCg)
RGB = np.tile(RGB, 4)
RGB = np.reshape(RGB, (4, 4, 3))
YCoCg = np.tile(YCoCg, 4)
YCoCg = np.reshape(YCoCg, (4, 4, 3))
np.testing.assert_array_equal(RGB_to_YCoCg(RGB), YCoCg)
RGB = np.tile(RGB, 4)
RGB = np.reshape(RGB, (4, 4, 4, 3))
YCoCg = np.tile(YCoCg, 4)
YCoCg = np.reshape(YCoCg, (4, 4, 4, 3))
np.testing.assert_array_equal(RGB_to_YCoCg(RGB), YCoCg)
@ignore_numpy_errors
def test_nan_RGB_to_YCoCg(self):
"""
Tests :func:`colour.models.rgb.ycocg.RGB_to_YCoCg` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
RGB = np.array(case)
RGB_to_YCoCg(RGB)
class TestYCoCg_to_RGB(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.ycocg.YCoCg_to_RGB` definition unit tests
methods.
"""
def test_YCoCg_to_RGB(self):
"""
Tests :func:`colour.models.rgb.ycocg.YCoCg_to_RGB` definition.
"""
np.testing.assert_array_equal(
YCoCg_to_RGB(np.array([0.5625, 0.375, 0.1875])),
np.array([0.75, 0.75, 0.0]))
np.testing.assert_array_equal(
YCoCg_to_RGB(np.array([0.5, -0.25, 0.0])),
np.array([0.25, 0.5, 0.75]))
np.testing.assert_array_equal(
YCoCg_to_RGB(np.array([0.5625, -0.375, 0.1875])),
np.array([0.0, 0.75, 0.75]))
def test_n_dimensional_YCoCg_to_RGB(self):
"""
Tests :func:`colour.models.rgb.ycocg.YCoCg_to_RGB` definition
n-dimensional arrays support.
"""
YCoCg = np.array([0.5625, 0.375, 0.1875])
RGB = YCoCg_to_RGB(YCoCg)
RGB = np.tile(RGB, 4)
RGB = np.reshape(RGB, (4, 3))
YCoCg = np.tile(YCoCg, 4)
YCoCg = np.reshape(YCoCg, (4, 3))
np.testing.assert_array_equal(YCoCg_to_RGB(YCoCg), RGB)
RGB = np.tile(RGB, 4)
RGB = np.reshape(RGB, (4, 4, 3))
YCoCg = np.tile(YCoCg, 4)
YCoCg = np.reshape(YCoCg, (4, 4, 3))
np.testing.assert_array_equal(YCoCg_to_RGB(YCoCg), RGB)
RGB = np.tile(RGB, 4)
RGB = np.reshape(RGB, (4, 4, 4, 3))
YCoCg = np.tile(YCoCg, 4)
YCoCg = np.reshape(YCoCg, (4, 4, 4, 3))
np.testing.assert_array_equal(YCoCg_to_RGB(YCoCg), RGB)
@ignore_numpy_errors
def test_nan_YCoCg_to_RGB(self):
"""
Tests :func:`colour.models.rgb.ycocg.YCoCg_to_RGB` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
YCoCg = np.array(case)
YCoCg_to_RGB(YCoCg)
if __name__ == '__main__':
unittest.main()
```
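The vectors asserted above are consistent with the commonly published YCoCg matrix; the following standalone sketch reproduces them with plain NumPy and is not the library's internal implementation.

```python
# Plain NumPy reproduction of the YCoCg test vectors above (not colour's
# internal implementation).
import numpy as np

M_RGB_TO_YCOCG = np.array([[0.25, 0.5, 0.25],
                           [0.5, 0.0, -0.5],
                           [-0.25, 0.5, -0.25]])

RGB = np.array([0.75, 0.75, 0.0])
YCoCg = M_RGB_TO_YCOCG @ RGB
print(YCoCg)                                  # [0.5625 0.375 0.1875]
print(np.linalg.inv(M_RGB_TO_YCOCG) @ YCoCg)  # recovers [0.75 0.75 0.]
```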
#### File: transfer_functions/tests/test_nikon_nlog.py
```python
import numpy as np
import unittest
from colour.models.rgb.transfer_functions import (
log_encoding_NLog,
log_decoding_NLog,
)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestLogEncoding_NLog',
'TestLogDecoding_NLog',
]
class TestLogEncoding_NLog(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_encoding_NLog` definition unit tests methods.
"""
def test_log_encoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_encoding_NLog` definition.
"""
self.assertAlmostEqual(
log_encoding_NLog(0.0), 0.124372627896372, places=7)
self.assertAlmostEqual(
log_encoding_NLog(0.18), 0.363667770117139, places=7)
self.assertAlmostEqual(
log_encoding_NLog(0.18, 12), 0.363667770117139, places=7)
self.assertAlmostEqual(
log_encoding_NLog(0.18, 10, False), 0.351634850262366, places=7)
self.assertAlmostEqual(
log_encoding_NLog(0.18, 10, False, False),
0.337584957293328,
places=7)
self.assertAlmostEqual(
log_encoding_NLog(1.0), 0.605083088954056, places=7)
def test_n_dimensional_log_encoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_encoding_NLog` definition n-dimensional arrays support.
"""
L_in = 0.18
V_out = log_encoding_NLog(L_in)
L_in = np.tile(L_in, 6)
V_out = np.tile(V_out, 6)
np.testing.assert_almost_equal(
log_encoding_NLog(L_in), V_out, decimal=7)
L_in = np.reshape(L_in, (2, 3))
V_out = np.reshape(V_out, (2, 3))
np.testing.assert_almost_equal(
log_encoding_NLog(L_in), V_out, decimal=7)
L_in = np.reshape(L_in, (2, 3, 1))
V_out = np.reshape(V_out, (2, 3, 1))
np.testing.assert_almost_equal(
log_encoding_NLog(L_in), V_out, decimal=7)
def test_domain_range_scale_log_encoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_encoding_NLog` definition domain and range scale support.
"""
L_in = 0.18
V_out = log_encoding_NLog(L_in)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
log_encoding_NLog(L_in * factor),
V_out * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_log_encoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_encoding_NLog` definition nan support.
"""
log_encoding_NLog(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLogDecoding_NLog(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_decoding_NLog` definition unit tests methods.
"""
def test_log_decoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_decoding_NLog` definition.
"""
self.assertAlmostEqual(
log_decoding_NLog(0.124372627896372), 0.0, places=7)
self.assertAlmostEqual(
log_decoding_NLog(0.363667770117139), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_NLog(0.363667770117139, 12), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_NLog(0.351634850262366, 10, False), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_NLog(0.337584957293328, 10, False, False),
0.18,
places=7)
self.assertAlmostEqual(
log_decoding_NLog(0.605083088954056), 1.0, places=7)
def test_n_dimensional_log_decoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_decoding_NLog` definition n-dimensional arrays support.
"""
V_out = 0.363667770117139
L_in = log_decoding_NLog(V_out)
V_out = np.tile(V_out, 6)
L_in = np.tile(L_in, 6)
np.testing.assert_almost_equal(
log_decoding_NLog(V_out), L_in, decimal=7)
V_out = np.reshape(V_out, (2, 3))
L_in = np.reshape(L_in, (2, 3))
np.testing.assert_almost_equal(
log_decoding_NLog(V_out), L_in, decimal=7)
V_out = np.reshape(V_out, (2, 3, 1))
L_in = np.reshape(L_in, (2, 3, 1))
np.testing.assert_almost_equal(
log_decoding_NLog(V_out), L_in, decimal=7)
def test_domain_range_scale_log_decoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_decoding_NLog` definition domain and range scale support.
"""
V_out = 0.363667770117139
L_in = log_decoding_NLog(V_out)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
log_decoding_NLog(V_out * factor),
L_in * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_log_decoding_NLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.nikon_nlog.\
log_decoding_NLog` definition nan support.
"""
log_decoding_NLog(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
if __name__ == '__main__':
unittest.main()
```
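As the paired encoding and decoding assertions above imply, N-Log encoding followed by decoding is the identity to within floating-point precision; a quick standalone check:

```python
# Round-trip check using the values asserted in the tests above.
import numpy as np

from colour.models.rgb.transfer_functions import (log_decoding_NLog,
                                                  log_encoding_NLog)

linear = np.array([0.0, 0.18, 1.0])
encoded = log_encoding_NLog(linear)
np.testing.assert_almost_equal(log_decoding_NLog(encoded), linear, decimal=7)
print(encoded)  # approximately [0.1244, 0.3637, 0.6051], per the tests above
```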
#### File: transfer_functions/tests/test_panasonic_vlog.py
```python
import numpy as np
import unittest
from colour.models.rgb.transfer_functions import (
log_encoding_VLog,
log_decoding_VLog,
)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestLogEncoding_VLog',
'TestLogDecoding_VLog',
]
class TestLogEncoding_VLog(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition unit tests methods.
"""
def test_log_encoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition.
"""
self.assertAlmostEqual(log_encoding_VLog(0.0), 0.125, places=7)
self.assertAlmostEqual(
log_encoding_VLog(0.18), 0.423311448760136, places=7)
self.assertAlmostEqual(
log_encoding_VLog(0.18, 12), 0.423311448760136, places=7)
self.assertAlmostEqual(
log_encoding_VLog(0.18, 10, False), 0.421287228403675, places=7)
self.assertAlmostEqual(
log_encoding_VLog(0.18, 10, False, False),
0.409009628526078,
places=7)
self.assertAlmostEqual(
log_encoding_VLog(1.0), 0.599117700158146, places=7)
def test_n_dimensional_log_encoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition n-dimensional arrays support.
"""
L_in = 0.18
V_out = log_encoding_VLog(L_in)
L_in = np.tile(L_in, 6)
V_out = np.tile(V_out, 6)
np.testing.assert_almost_equal(
log_encoding_VLog(L_in), V_out, decimal=7)
L_in = np.reshape(L_in, (2, 3))
V_out = np.reshape(V_out, (2, 3))
np.testing.assert_almost_equal(
log_encoding_VLog(L_in), V_out, decimal=7)
L_in = np.reshape(L_in, (2, 3, 1))
V_out = np.reshape(V_out, (2, 3, 1))
np.testing.assert_almost_equal(
log_encoding_VLog(L_in), V_out, decimal=7)
def test_domain_range_scale_log_encoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition domain and range scale support.
"""
L_in = 0.18
V_out = log_encoding_VLog(L_in)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
log_encoding_VLog(L_in * factor),
V_out * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_log_encoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_encoding_VLog` definition nan support.
"""
log_encoding_VLog(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLogDecoding_VLog(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition unit tests methods.
"""
def test_log_decoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition.
"""
self.assertAlmostEqual(log_decoding_VLog(0.125), 0.0, places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.423311448760136), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.423311448760136, 12), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.421287228403675, 10, False), 0.18, places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.409009628526078, 10, False, False),
0.18,
places=7)
self.assertAlmostEqual(
log_decoding_VLog(0.599117700158146), 1.0, places=7)
def test_n_dimensional_log_decoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition n-dimensional arrays support.
"""
V_out = 0.423311448760136
L_in = log_decoding_VLog(V_out)
V_out = np.tile(V_out, 6)
L_in = np.tile(L_in, 6)
np.testing.assert_almost_equal(
log_decoding_VLog(V_out), L_in, decimal=7)
V_out = np.reshape(V_out, (2, 3))
L_in = np.reshape(L_in, (2, 3))
np.testing.assert_almost_equal(
log_decoding_VLog(V_out), L_in, decimal=7)
V_out = np.reshape(V_out, (2, 3, 1))
L_in = np.reshape(L_in, (2, 3, 1))
np.testing.assert_almost_equal(
log_decoding_VLog(V_out), L_in, decimal=7)
def test_domain_range_scale_log_decoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition domain and range scale support.
"""
V_out = 0.423311448760136
L_in = log_decoding_VLog(V_out)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
log_decoding_VLog(V_out * factor),
L_in * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_log_decoding_VLog(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.panasonic_vlog.\
log_decoding_VLog` definition nan support.
"""
log_decoding_VLog(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
if __name__ == '__main__':
unittest.main()
```
#### File: plotting/tests/test_quality.py
```python
import unittest
from matplotlib.pyplot import Axes, Figure
from colour.colorimetry import (
SDS_ILLUMINANTS,
SDS_LIGHT_SOURCES,
SpectralShape,
reshape_sd,
)
from colour.plotting import (
plot_single_sd_colour_rendering_index_bars,
plot_multi_sds_colour_rendering_indexes_bars,
plot_single_sd_colour_quality_scale_bars,
plot_multi_sds_colour_quality_scales_bars,
)
from colour.plotting.quality import plot_colour_quality_bars
from colour.quality import colour_quality_scale
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestPlotColourQualityBars',
'TestPlotSingleSdColourRenderingIndexBars',
'TestPlotMultiSdsColourRenderingIndexesBars',
'TestPlotSingleSdColourQualityScaleBars',
'TestPlotMultiSdsColourQualityScalesBars',
]
class TestPlotColourQualityBars(unittest.TestCase):
"""
Defines :func:`colour.plotting.quality.plot_colour_quality_bars` definition
unit tests methods.
"""
def test_plot_colour_quality_bars(self):
"""
Tests :func:`colour.plotting.quality.plot_colour_quality_bars`
definition.
"""
illuminant = SDS_ILLUMINANTS['FL2']
light_source = SDS_LIGHT_SOURCES['Kinoton 75P']
light_source = reshape_sd(light_source, SpectralShape(360, 830, 1))
cqs_i = colour_quality_scale(illuminant, additional_data=True)
cqs_l = colour_quality_scale(light_source, additional_data=True)
figure, axes = plot_colour_quality_bars([cqs_i, cqs_l])
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotSingleSdColourRenderingIndexBars(unittest.TestCase):
"""
Defines :func:`colour.plotting.quality.\
plot_single_sd_colour_rendering_index_bars` definition unit tests methods.
"""
def test_plot_single_sd_colour_rendering_index_bars(self):
"""
Tests :func:`colour.plotting.quality.\
plot_single_sd_colour_rendering_index_bars` definition.
"""
figure, axes = plot_single_sd_colour_rendering_index_bars(
SDS_ILLUMINANTS['FL2'])
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotMultiSdsColourRenderingIndexesBars(unittest.TestCase):
"""
Defines :func:`colour.plotting.quality.\
plot_multi_sds_colour_rendering_indexes_bars` definition unit tests methods.
"""
def test_plot_multi_sds_colour_rendering_indexes_bars(self):
"""
Tests :func:`colour.plotting.quality.\
plot_multi_sds_colour_rendering_indexes_bars` definition.
"""
figure, axes = plot_multi_sds_colour_rendering_indexes_bars(
[SDS_ILLUMINANTS['FL2'], SDS_LIGHT_SOURCES['Kinoton 75P']])
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotSingleSdColourQualityScaleBars(unittest.TestCase):
"""
Defines :func:`colour.plotting.quality.\
plot_single_sd_colour_quality_scale_bars` definition unit tests methods.
"""
def test_plot_single_sd_colour_quality_scale_bars(self):
"""
Tests :func:`colour.plotting.quality.\
plot_single_sd_colour_quality_scale_bars` definition.
"""
figure, axes = plot_single_sd_colour_quality_scale_bars(
SDS_ILLUMINANTS['FL2'])
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotMultiSdsColourQualityScalesBars(unittest.TestCase):
"""
Defines :func:`colour.plotting.quality.\
plot_multi_sds_colour_quality_scales_bars` definition unit tests methods.
"""
def test_plot_multi_sds_colour_quality_scales_bars(self):
"""
Tests :func:`colour.plotting.quality.\
plot_multi_sds_colour_quality_scales_bars` definition.
"""
figure, axes = plot_multi_sds_colour_quality_scales_bars(
[SDS_ILLUMINANTS['FL2'], SDS_LIGHT_SOURCES['Kinoton 75P']])
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
if __name__ == '__main__':
unittest.main()
```
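Outside the test harness, each of the plotting definitions exercised above returns a Matplotlib `Figure` and `Axes` pair; for example, mirroring `test_plot_single_sd_colour_rendering_index_bars`:

```python
# Minimal sketch mirroring the single-SD colour rendering index test above;
# a Matplotlib backend is required to display the result.
from colour.colorimetry import SDS_ILLUMINANTS
from colour.plotting import plot_single_sd_colour_rendering_index_bars

figure, axes = plot_single_sd_colour_rendering_index_bars(
    SDS_ILLUMINANTS['FL2'])
```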
#### File: colour/utilities/data_structures.py
```python
from collections.abc import Mapping, MutableMapping, Sequence
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'attest',
'Structure',
'Lookup',
'CaseInsensitiveMapping',
'LazyCaseInsensitiveMapping',
'Node',
]
def attest(condition, message=str()):
"""
A replacement for `assert` that is not removed by optimised Python
execution.
See :func:`colour.utilities.attest` for more information.
Notes
-----
- This definition name is duplicated to avoid import circular dependency.
"""
# Avoiding circular dependency.
import colour.utilities
colour.utilities.attest(condition, message)
class Structure(dict):
"""
Defines a dict-like object allowing to access key values using dot syntax.
Other Parameters
----------------
\\*args : list, optional
Arguments.
\\**kwargs : dict, optional
Key / Value pairs.
Methods
-------
- :meth:`~colour.utilities.Structure.__init__`
References
----------
:cite:`Mansencald`
Examples
--------
>>> person = Structure(first_name='John', last_name='Doe', gender='male')
>>> person.first_name
'John'
>>> sorted(person.keys())
['first_name', 'gender', 'last_name']
>>> person['gender']
'male'
"""
def __init__(self, *args, **kwargs):
super(Structure, self).__init__(*args, **kwargs)
self.__dict__ = self
class Lookup(dict):
"""
Extends *dict* type to provide a lookup by value(s).
Methods
-------
- :meth:`~colour.utilities.Lookup.keys_from_value`
- :meth:`~colour.utilities.Lookup.first_key_from_value`
References
----------
:cite:`Mansencalc`
Examples
--------
>>> person = Lookup(first_name='John', last_name='Doe', gender='male')
>>> person.first_key_from_value('John')
'first_name'
>>> persons = Lookup(John='Doe', Jane='Doe', Luke='Skywalker')
>>> sorted(persons.keys_from_value('Doe'))
['Jane', 'John']
"""
def keys_from_value(self, value):
"""
Gets the keys with given value.
Parameters
----------
value : object
Value.
Returns
-------
object
Keys.
"""
keys = []
for key, data in self.items():
matching = data == value
try:
matching = all(matching)
except TypeError:
matching = all((matching, ))
if matching:
keys.append(key)
return keys
def first_key_from_value(self, value):
"""
Gets the first key with given value.
Parameters
----------
value : object
Value.
Returns
-------
object
Key.
"""
return self.keys_from_value(value)[0]
class CaseInsensitiveMapping(MutableMapping):
"""
Implements a case-insensitive mutable mapping / *dict* object.
Allows values retrieving from keys while ignoring the key case.
The keys are expected to be str or string-like objects supporting the
:meth:`str.lower` method.
Parameters
----------
data : dict
*dict* of data to store into the mapping at initialisation.
Other Parameters
----------------
\\**kwargs : dict, optional
Key / Value pairs to store into the mapping at initialisation.
Attributes
----------
- :attr:`~colour.utilities.CaseInsensitiveMapping.data`
Methods
-------
- :meth:`~colour.utilities.CaseInsensitiveMapping.__init__`
- :meth:`~colour.utilities.CaseInsensitiveMapping.__repr__`
- :meth:`~colour.utilities.CaseInsensitiveMapping.__setitem__`
- :meth:`~colour.utilities.CaseInsensitiveMapping.__getitem__`
- :meth:`~colour.utilities.CaseInsensitiveMapping.__delitem__`
- :meth:`~colour.utilities.CaseInsensitiveMapping.__contains__`
- :meth:`~colour.utilities.CaseInsensitiveMapping.__iter__`
- :meth:`~colour.utilities.CaseInsensitiveMapping.__len__`
- :meth:`~colour.utilities.CaseInsensitiveMapping.__eq__`
- :meth:`~colour.utilities.CaseInsensitiveMapping.__ne__`
- :meth:`~colour.utilities.CaseInsensitiveMapping.copy`
- :meth:`~colour.utilities.CaseInsensitiveMapping.lower_items`
Warnings
--------
The keys are expected to be str or string-like objects.
References
----------
:cite:`Reitza`
Examples
--------
>>> methods = CaseInsensitiveMapping({'McCamy': 1, 'Hernandez': 2})
>>> methods['mccamy']
1
"""
def __init__(self, data=None, **kwargs):
self._data = dict()
self.update({} if data is None else data, **kwargs)
@property
def data(self):
"""
Getter property for the data.
Returns
-------
dict
Data.
"""
return self._data
def __repr__(self):
"""
Returns the mapping representation with the original item names.
Returns
-------
str
Mapping representation.
"""
return '{0}({1})'.format(self.__class__.__name__, dict(self.items()))
def __setitem__(self, item, value):
"""
Sets given item with given value.
The item is stored as lower in the mapping while the original name and
its value are stored together as the value in a *tuple*:
{"item.lower()": ("item", value)}
Parameters
----------
item : object
Attribute.
value : object
Value.
"""
self._data[item.lower()] = (item, value)
def __getitem__(self, item):
"""
Returns the value of given item.
The item value is retrieved using its lower name in the mapping.
Parameters
----------
item : str
Item name.
Returns
-------
object
Item value.
"""
return self._data[item.lower()][1]
def __delitem__(self, item):
"""
Deletes the item with given name.
The item is deleted from the mapping using its lower name.
Parameters
----------
item : str
Item name.
"""
del self._data[item.lower()]
def __contains__(self, item):
"""
Returns if the mapping contains given item.
Parameters
----------
item : str
Item name.
Returns
-------
bool
Is item in mapping.
"""
return item.lower() in self._data
def __iter__(self):
"""
Iterates over the items names in the mapping.
The item names returned are the original input ones.
Returns
-------
generator
Item names.
"""
return (item for item, value in self._data.values())
def __len__(self):
"""
Returns the items count.
Returns
-------
int
Items count.
"""
return len(self._data)
def __eq__(self, item):
"""
Returns the equality with given object.
Parameters
----------
item
Object item.
Returns
-------
bool
Equality.
"""
if isinstance(item, Mapping):
item_mapping = CaseInsensitiveMapping(item)
else:
raise ValueError(
'Impossible to test equality with "{0}" class type!'.format(
item.__class__.__name__))
return dict(self.lower_items()) == dict(item_mapping.lower_items())
def __ne__(self, item):
"""
Returns the inequality with given object.
Parameters
----------
item
Object item.
Returns
-------
bool
Inequality.
"""
return not (self == item)
def copy(self):
"""
Returns a copy of the mapping.
Returns
-------
CaseInsensitiveMapping
Mapping copy.
Notes
-----
- The :class:`colour.utilities.CaseInsensitiveMapping` class copy
returned is a simple *copy* not a *deepcopy*.
"""
return CaseInsensitiveMapping(self._data.values())
def lower_items(self):
"""
Iterates over the lower items names.
Returns
-------
generator
Lower item names.
"""
return ((item, value[1]) for (item, value) in self._data.items())
class LazyCaseInsensitiveMapping(CaseInsensitiveMapping):
"""
Implements a lazy case-insensitive mutable mapping / *dict* object by
inheriting from :class:`colour.utilities.CaseInsensitiveMapping` class.
Allows lazy values retrieving from keys while ignoring the key case.
The keys are expected to be str or string-like objects supporting the
:meth:`str.lower` method. The lazy retrieval is performed as follows:
If the value is a callable, then it is evaluated and its return value is
stored in place of the current value.
Parameters
----------
data : dict
*dict* of data to store into the mapping at initialisation.
Other Parameters
----------------
\\**kwargs : dict, optional
Key / Value pairs to store into the mapping at initialisation.
Methods
-------
- :meth:`~colour.utilities.LazyCaseInsensitiveMapping.__getitem__`
Warnings
--------
The keys are expected to be str or string-like objects.
Examples
--------
>>> def callable_a():
... print(2)
... return 2
>>> methods = LazyCaseInsensitiveMapping(
... {'McCamy': 1, 'Hernandez': callable_a})
>>> methods['mccamy']
1
>>> methods['hernandez']
2
2
"""
def __getitem__(self, item):
"""
Returns the value of given item.
The item value is retrieved using its lower name in the mapping. If
the value is a callable, then it is evaluated and its return value is
stored in place of the current value.
Parameters
----------
item : str
Item name.
Returns
-------
object
Item value.
"""
import colour
value = super(LazyCaseInsensitiveMapping, self).__getitem__(item)
if callable(value) and hasattr(colour, '__disable_lazy_load__'):
value = value()
super(LazyCaseInsensitiveMapping, self).__setitem__(item, value)
return value
class Node:
"""
Represents a basic node supporting the creation of basic node trees.
Parameters
----------
parent : Node, optional
Parent of the node.
children : Node, optional
Children of the node.
data : object
The data belonging to this node.
Attributes
----------
- :attr:`~colour.utilities.Node.name`
- :attr:`~colour.utilities.Node.parent`
- :attr:`~colour.utilities.Node.children`
- :attr:`~colour.utilities.Node.id`
- :attr:`~colour.utilities.Node.root`
- :attr:`~colour.utilities.Node.leaves`
- :attr:`~colour.utilities.Node.siblings`
- :attr:`~colour.utilities.Node.data`
Methods
-------
- :meth:`~colour.utilities.Node.__new__`
- :meth:`~colour.utilities.Node.__init__`
- :meth:`~colour.utilities.Node.__str__`
- :meth:`~colour.utilities.Node.__len__`
- :meth:`~colour.utilities.Node.is_root`
- :meth:`~colour.utilities.Node.is_inner`
- :meth:`~colour.utilities.Node.is_leaf`
- :meth:`~colour.utilities.Node.walk`
- :meth:`~colour.utilities.Node.render`
Examples
--------
>>> node_a = Node('Node A')
>>> node_b = Node('Node B', node_a)
>>> node_c = Node('Node C', node_a)
>>> node_d = Node('Node D', node_b)
>>> node_e = Node('Node E', node_b)
>>> node_f = Node('Node F', node_d)
>>> node_g = Node('Node G', node_f)
>>> node_h = Node('Node H', node_g)
>>> [node.name for node in node_a.leaves]
['Node H', 'Node E', 'Node C']
>>> print(node_h.root.name)
Node A
>>> len(node_a)
7
"""
_INSTANCE_ID = 1
"""
Node id counter.
_INSTANCE_ID : int
"""
def __new__(cls, *args, **kwargs):
"""
Constructor of the class.
Other Parameters
----------------
\\*args : list, optional
Arguments.
\\**kwargs : dict, optional
Keywords arguments.
Returns
-------
Node
Class instance.
"""
instance = super(Node, cls).__new__(cls)
instance._id = Node._INSTANCE_ID
Node._INSTANCE_ID += 1
return instance
def __init__(self, name=None, parent=None, children=None, data=None):
self._name = '{0}#{1}'.format(self.__class__.__name__, self._id)
self.name = name
self._parent = None
self.parent = parent
self._children = None
self._children = [] if children is None else children
self._data = data
@property
def name(self):
"""
Getter and setter property for the name.
Parameters
----------
value : str
Value to set the name with.
Returns
-------
str
Node name.
"""
return self._name
@name.setter
def name(self, value):
"""
Setter for the **self.name** property.
"""
if value is not None:
attest(
isinstance(value, str),
'"{0}" attribute: "{1}" is not a "string" like object!'.format(
'name', value))
self._name = value
@property
def parent(self):
"""
Getter and setter property for the node parent.
Parameters
----------
value : Node
Parent to set the node with.
Returns
-------
Node
Node parent.
"""
return self._parent
@parent.setter
def parent(self, value):
"""
Setter for the **self.parent** property.
"""
if value is not None:
attest(
issubclass(value.__class__, Node),
'"{0}" attribute: "{1}" is not a "{2}" subclass!'.format(
'parent', value, Node.__class__.__name__))
value.children.append(self)
self._parent = value
@property
def children(self):
"""
Getter and setter property for the node children.
Parameters
----------
value : list
Children to set the node with.
Returns
-------
list
Node children.
"""
return self._children
@children.setter
def children(self, value):
"""
Setter for the **self.children** property.
"""
if value is not None:
attest(
isinstance(value, Sequence) and not isinstance(value, str),
'"{0}" attribute: "{1}" type is not a "Sequence" instance!'
.format('children', value))
for element in value:
attest(
issubclass(element.__class__, Node),
'"{0}" attribute: A "{1}" element is not a "{2}" subclass!'
.format('children', element, Node.__class__.__name__))
for node in value:
node.parent = self
self._children = list(value)
@property
def id(self):
"""
Getter property for the node id.
Returns
-------
int
Node id.
"""
return self._id
@property
def root(self):
"""
Getter property for the node tree.
Returns
-------
Node
Node root.
"""
if self.is_root():
return self
else:
return list(self.walk(ascendants=True))[-1]
@property
def leaves(self):
"""
Getter property for the node leaves.
Returns
-------
generator
Node leaves.
"""
if self.is_leaf():
return (node for node in (self, ))
else:
return (node for node in self.walk() if node.is_leaf())
@property
def siblings(self):
"""
Getter property for the node siblings.
Returns
-------
list
Node siblings.
"""
if self.parent is None:
return (sibling for sibling in ())
else:
return (sibling for sibling in self.parent.children
if sibling is not self)
@property
def data(self):
"""
Getter property for the node data.
Returns
-------
Data
Node data.
"""
return self._data
@data.setter
def data(self, value):
"""
Setter for the **self.data** property.
"""
self._data = value
def __str__(self):
"""
Returns a formatted string representation of the node.
Returns
-------
str
Formatted string representation.
"""
return '{0}#{1}({2})'.format(self.__class__.__name__, self._id,
self._data)
def __len__(self):
"""
Returns the number of children of the node.
Returns
-------
int
Number of children of the node.
"""
return len(list(self.walk()))
def is_root(self):
"""
Returns whether the node is a root node.
Returns
-------
bool
Whether the node is a root node.
Examples
--------
>>> node_a = Node('Node A')
>>> node_b = Node('Node B', node_a)
>>> node_c = Node('Node C', node_b)
>>> node_a.is_root()
True
>>> node_b.is_root()
False
"""
return self.parent is None
def is_inner(self):
"""
Returns whether the node is an inner node.
Returns
-------
bool
Whether the node is an inner node.
Examples
--------
>>> node_a = Node('Node A')
>>> node_b = Node('Node B', node_a)
>>> node_c = Node('Node C', node_b)
>>> node_a.is_inner()
False
>>> node_b.is_inner()
True
"""
return all([not self.is_root(), not self.is_leaf()])
def is_leaf(self):
"""
Returns whether the node is a leaf node.
Returns
-------
bool
Whether the node is a leaf node.
Examples
--------
>>> node_a = Node('Node A')
>>> node_b = Node('Node B', node_a)
>>> node_c = Node('Node C', node_b)
>>> node_a.is_leaf()
False
>>> node_c.is_leaf()
True
"""
return len(self._children) == 0
def walk(self, ascendants=False):
"""
Returns a generator used to walk into :class:`colour.utilities.Node`
trees.
Parameters
----------
ascendants : bool, optional
Whether to walk up the node tree.
Returns
-------
generator
Node tree walker
Examples
--------
>>> node_a = Node('Node A')
>>> node_b = Node('Node B', node_a)
>>> node_c = Node('Node C', node_a)
>>> node_d = Node('Node D', node_b)
>>> node_e = Node('Node E', node_b)
>>> node_f = Node('Node F', node_d)
>>> node_g = Node('Node G', node_f)
>>> node_h = Node('Node H', node_g)
>>> for node in node_a.walk():
... print(node.name)
Node B
Node D
Node F
Node G
Node H
Node E
Node C
"""
attribute = 'children' if not ascendants else 'parent'
nodes = getattr(self, attribute)
nodes = nodes if isinstance(nodes, list) else [nodes]
for node in nodes:
yield node
if not getattr(node, attribute):
continue
for relative in node.walk(ascendants=ascendants):
yield relative
def render(self, tab_level=0):
"""
Renders the current node and its children as a string.
Parameters
----------
tab_level : int, optional
Initial indentation level
Returns
-------
str
Rendered node tree.
Examples
--------
>>> node_a = Node('Node A')
>>> node_b = Node('Node B', node_a)
>>> node_c = Node('Node C', node_a)
>>> print(node_a.render())
|----"Node A"
|----"Node B"
|----"Node C"
<BLANKLINE>
"""
output = ''
for i in range(tab_level):
output += ' '
tab_level += 1
output += '|----"{0}"\n'.format(self.name)
for child in self._children:
output += child.render(tab_level)
tab_level -= 1
return output
```
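A short combined sketch of the structures above, assuming `CaseInsensitiveMapping` and `Node` are re-exported from `colour.utilities` as the package `__all__` below lists:

```python
# Usage sketch combining the docstring examples above.
from colour.utilities import CaseInsensitiveMapping, Node

methods = CaseInsensitiveMapping({'McCamy': 1, 'Hernandez': 2})
assert methods['MCCAMY'] == methods['mccamy'] == 1
assert 'hernandez' in methods

root = Node('Root')
child = Node('Child', parent=root)
leaf = Node('Leaf', parent=child)
assert leaf.root is root
assert [node.name for node in root.leaves] == ['Leaf']
print(root.render())
```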
#### File: colour/utilities/__init__.py
```python
import sys
from .data_structures import (
Lookup,
Structure,
CaseInsensitiveMapping,
LazyCaseInsensitiveMapping,
Node,
)
from .common import (
CacheRegistry,
CACHE_REGISTRY,
handle_numpy_errors,
ignore_numpy_errors,
raise_numpy_errors,
print_numpy_errors,
warn_numpy_errors,
ignore_python_warnings,
attest,
batch,
disable_multiprocessing,
multiprocessing_pool,
is_matplotlib_installed,
is_networkx_installed,
is_opencolorio_installed,
is_openimageio_installed,
is_pandas_installed,
is_sklearn_installed,
is_tqdm_installed,
is_trimesh_installed,
required,
is_iterable,
is_string,
is_numeric,
is_integer,
is_sibling,
filter_kwargs,
filter_mapping,
first_item,
get_domain_range_scale,
set_domain_range_scale,
domain_range_scale,
to_domain_1,
to_domain_10,
to_domain_100,
to_domain_degrees,
to_domain_int,
from_range_1,
from_range_10,
from_range_100,
from_range_degrees,
from_range_int,
copy_definition,
validate_method,
)
from .verbose import (
ColourWarning,
ColourUsageWarning,
ColourRuntimeWarning,
message_box,
show_warning,
warning,
runtime_warning,
usage_warning,
filter_warnings,
suppress_warnings,
numpy_print_options,
ANCILLARY_COLOUR_SCIENCE_PACKAGES,
ANCILLARY_RUNTIME_PACKAGES,
ANCILLARY_DEVELOPMENT_PACKAGES,
ANCILLARY_EXTRAS_PACKAGES,
describe_environment,
)
from .array import (
MixinDataclassArray,
as_array,
as_int_array,
as_float_array,
as_numeric,
as_int,
as_float,
set_float_precision,
set_int_precision,
closest_indexes,
closest,
interval,
is_uniform,
in_array,
tstack,
tsplit,
row_as_diagonal,
orient,
centroid,
fill_nan,
has_only_nan,
ndarray_write,
zeros,
ones,
full,
index_along_last_axis,
)
from ..algebra.common import (
normalise_maximum,
vector_dot,
matrix_dot,
linear_conversion,
linstep_function,
)
from .metrics import metric_mse, metric_psnr
from colour.utilities.deprecation import ModuleAPI, build_API_changes
from colour.utilities.documentation import is_documentation_building
__all__ = [
'Lookup',
'Structure',
'CaseInsensitiveMapping',
'LazyCaseInsensitiveMapping',
'Node',
]
__all__ += [
'CacheRegistry',
'CACHE_REGISTRY',
'handle_numpy_errors',
'ignore_numpy_errors',
'raise_numpy_errors',
'print_numpy_errors',
'warn_numpy_errors',
'ignore_python_warnings',
'attest',
'batch',
'disable_multiprocessing',
'multiprocessing_pool',
'is_matplotlib_installed',
'is_networkx_installed',
'is_opencolorio_installed',
'is_openimageio_installed',
'is_pandas_installed',
'is_sklearn_installed',
'is_tqdm_installed',
'is_trimesh_installed',
'required',
'is_iterable',
'is_string',
'is_numeric',
'is_integer',
'is_sibling',
'filter_kwargs',
'filter_mapping',
'first_item',
'get_domain_range_scale',
'set_domain_range_scale',
'domain_range_scale',
'to_domain_1',
'to_domain_10',
'to_domain_100',
'to_domain_degrees',
'to_domain_int',
'from_range_1',
'from_range_10',
'from_range_100',
'from_range_degrees',
'from_range_int',
'copy_definition',
'validate_method',
]
__all__ += [
'ColourWarning',
'ColourUsageWarning',
'ColourRuntimeWarning',
'message_box',
'show_warning',
'warning',
'runtime_warning',
'usage_warning',
'filter_warnings',
'suppress_warnings',
'numpy_print_options',
'ANCILLARY_COLOUR_SCIENCE_PACKAGES',
'ANCILLARY_RUNTIME_PACKAGES',
'ANCILLARY_DEVELOPMENT_PACKAGES',
'ANCILLARY_EXTRAS_PACKAGES',
'describe_environment',
]
__all__ += [
'MixinDataclassArray',
'as_array',
'as_int_array',
'as_float_array',
'as_numeric',
'as_int',
'as_float',
'set_float_precision',
'set_int_precision',
'closest_indexes',
'closest',
'normalise_maximum',
'interval',
'is_uniform',
'in_array',
'tstack',
'tsplit',
'row_as_diagonal',
'vector_dot',
'matrix_dot',
'orient',
'centroid',
'linear_conversion',
'fill_nan',
'has_only_nan',
'linstep_function',
'ndarray_write',
'zeros',
'ones',
'full',
'index_along_last_axis',
]
__all__ += [
'metric_mse',
'metric_psnr',
]
# ----------------------------------------------------------------------------#
# --- API Changes and Deprecation Management ---#
# ----------------------------------------------------------------------------#
class utilities(ModuleAPI):
def __getattr__(self, attribute):
return super(utilities, self).__getattr__(attribute)
# v0.4.0
API_CHANGES = {
'ObjectFutureAccessChange': [
[
'colour.utilities.linstep_function',
'colour.algebra.linstep_function',
],
[
'colour.utilities.linear_conversion',
'colour.algebra.linear_conversion',
],
[
'colour.utilities.matrix_dot',
'colour.algebra.matrix_dot',
],
[
'colour.utilities.normalise_maximum',
'colour.algebra.normalise_maximum',
],
[
'colour.utilities.vector_dot',
'colour.algebra.vector_dot',
],
]
}
"""
Defines the *colour.utilities* sub-package API changes.
API_CHANGES : dict
"""
if not is_documentation_building():
sys.modules['colour.utilities'] = utilities(
sys.modules['colour.utilities'], build_API_changes(API_CHANGES))
del ModuleAPI, is_documentation_building, build_API_changes, sys
```
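The module is re-registered through `ModuleAPI` so that the relocations listed in `API_CHANGES` keep resolving for callers; a minimal sketch of what that wrapping implies, assuming `colour` is installed (the exact warning behaviour, if any, depends on the installed version):

```python
import colour.utilities

# "vector_dot" is listed under "ObjectFutureAccessChange": it is still
# imported into "colour.utilities" above, so the lookup succeeds, and the
# ModuleAPI wrapper may additionally emit a usage warning pointing at its
# future home in "colour.algebra".
print(colour.utilities.vector_dot)
print(colour.utilities.linear_conversion)
```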
#### File: colour/utilities/verbose.py
```python
import numpy as np
import os
import sys
import traceback
import warnings
from collections import defaultdict
from contextlib import contextmanager
from itertools import chain
from textwrap import TextWrapper
from warnings import filterwarnings, formatwarning, warn
from colour.utilities import is_string
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'ColourWarning',
'ColourUsageWarning',
'ColourRuntimeWarning',
'message_box',
'show_warning',
'warning',
'runtime_warning',
'usage_warning',
'filter_warnings',
'suppress_warnings',
'numpy_print_options',
'ANCILLARY_COLOUR_SCIENCE_PACKAGES',
'ANCILLARY_RUNTIME_PACKAGES',
'ANCILLARY_DEVELOPMENT_PACKAGES',
'ANCILLARY_EXTRAS_PACKAGES',
'describe_environment',
]
class ColourWarning(Warning):
"""
This is the base class of *Colour* warnings. It is a subclass of
    the :class:`Warning` class.
"""
class ColourUsageWarning(Warning):
"""
This is the base class of *Colour* usage warnings. It is a subclass
    of the :class:`colour.utilities.ColourWarning` class.
"""
class ColourRuntimeWarning(Warning):
"""
This is the base class of *Colour* runtime warnings. It is a subclass
    of the :class:`colour.utilities.ColourWarning` class.
"""
def message_box(message, width=79, padding=3, print_callable=print):
"""
Prints a message inside a box.
Parameters
----------
message : str
Message to print.
width : int, optional
Message box width.
    padding : int, optional
        Padding on each side of the message.
print_callable : callable, optional
Callable used to print the message box.
Returns
-------
bool
Definition success.
Examples
--------
>>> message = ('Lorem ipsum dolor sit amet, consectetur adipiscing elit, '
... 'sed do eiusmod tempor incididunt ut labore et dolore magna '
... 'aliqua.')
>>> message_box(message, width=75)
===========================================================================
* *
* Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do *
* eiusmod tempor incididunt ut labore et dolore magna aliqua. *
* *
===========================================================================
True
>>> message_box(message, width=60)
============================================================
* *
* Lorem ipsum dolor sit amet, consectetur adipiscing *
* elit, sed do eiusmod tempor incididunt ut labore et *
* dolore magna aliqua. *
* *
============================================================
True
>>> message_box(message, width=75, padding=16)
===========================================================================
* *
* Lorem ipsum dolor sit amet, consectetur *
* adipiscing elit, sed do eiusmod tempor *
* incididunt ut labore et dolore magna *
* aliqua. *
* *
===========================================================================
True
"""
ideal_width = width - padding * 2 - 2
def inner(text):
"""
Formats and pads inner text for the message box.
"""
return '*{0}{1}{2}{0}*'.format(
' ' * padding, text, (' ' * (width - len(text) - padding * 2 - 2)))
print_callable('=' * width)
print_callable(inner(''))
wrapper = TextWrapper(
width=ideal_width, break_long_words=False, replace_whitespace=False)
lines = [wrapper.wrap(line) for line in message.split("\n")]
lines = [' ' if len(line) == 0 else line for line in lines]
for line in chain(*lines):
print_callable(inner(line.expandtabs()))
print_callable(inner(''))
print_callable('=' * width)
return True
def show_warning(message,
category,
path,
line,
file_=None,
code=None,
frame_range=(1, None)):
"""
Alternative :func:`warnings.showwarning` definition that allows traceback
printing.
This definition is expected to be used by setting the
*COLOUR_SCIENCE__COLOUR__SHOW_WARNINGS_WITH_TRACEBACK* environment variable
prior to importing *colour*.
Parameters
----------
message : str
Warning message.
category : Warning
:class:`Warning` sub-class.
path : str
        File path of the module issuing the warning.
line : int
        Line number in ``path`` at which the warning was issued.
file_ : file, optional
:class:`file` object to write the warning to, defaults to
:attr:`sys.stderr` attribute.
code : str, optional
Source code to be included in the warning message.
frame_range : array_like, optional
        Traceback frame range, i.e. the first frame and the number of
        frames above it.
Notes
-----
- Setting the *COLOUR_SCIENCE__COLOUR__SHOW_WARNINGS_WITH_TRACEBACK*
environment variable will result in the :func:`warnings.showwarning`
definition to be replaced with the
:func:`colour.utilities.show_warning` definition and thus providing
complete traceback from the point where the warning occurred.
"""
if file_ is None:
file_ = sys.stderr
if file_ is None:
return
try:
# Generating a traceback to print useful warning origin.
frame_in, frame_out = frame_range
try:
raise ZeroDivisionError
except ZeroDivisionError:
frame = sys.exc_info()[2].tb_frame.f_back
while frame_in:
frame = frame.f_back
frame_in -= 1
traceback.print_stack(frame, frame_out, file_)
file_.write(formatwarning(message, category, path, line, code))
except (IOError, UnicodeError):
pass
if os.environ.get( # pragma: no cover
'COLOUR_SCIENCE__COLOUR__SHOW_WARNINGS_WITH_TRACEBACK'):
warnings.showwarning = show_warning # pragma: no cover
def warning(*args, **kwargs):
"""
Issues a warning.
Other Parameters
----------------
\\*args : list, optional
Arguments.
\\**kwargs : dict, optional
        Keyword arguments.
Returns
-------
bool
Definition success.
Examples
--------
>>> warning('This is a warning!') # doctest: +SKIP
"""
kwargs['category'] = kwargs.get('category', ColourWarning)
warn(*args, **kwargs)
return True
def runtime_warning(*args, **kwargs):
"""
Issues a runtime warning.
Other Parameters
----------------
\\*args : list, optional
Arguments.
\\**kwargs : dict, optional
        Keyword arguments.
Returns
-------
bool
Definition success.
Examples
--------
    >>> runtime_warning('This is a runtime warning!') # doctest: +SKIP
"""
kwargs['category'] = ColourRuntimeWarning
warning(*args, **kwargs)
return True
def usage_warning(*args, **kwargs):
"""
    Issues a usage warning.
Other Parameters
----------------
\\*args : list, optional
Arguments.
\\**kwargs : dict, optional
        Keyword arguments.
Returns
-------
bool
Definition success.
Examples
--------
>>> usage_warning('This is an usage warning!') # doctest: +SKIP
"""
kwargs['category'] = ColourUsageWarning
warning(*args, **kwargs)
return True
def filter_warnings(colour_runtime_warnings=None,
colour_usage_warnings=None,
colour_warnings=None,
python_warnings=None):
"""
Filters *Colour* and also optionally overall Python warnings.
The possible values for all the actions, i.e. each argument, are as
follows:
- *None* (No action is taken)
- *True* (*ignore*)
- *False* (*default*)
- *error*
- *ignore*
- *always*
- *default*
- *module*
- *once*
Parameters
----------
colour_runtime_warnings : bool or str, optional
Whether to filter *Colour* runtime warnings according to the action
value.
colour_usage_warnings : bool or str, optional
Whether to filter *Colour* usage warnings according to the action
value.
colour_warnings : bool or str, optional
Whether to filter *Colour* warnings, this also filters *Colour* usage
and runtime warnings according to the action value.
python_warnings : bool or str, optional
Whether to filter *Python* warnings according to the action value.
Examples
--------
Filtering *Colour* runtime warnings:
>>> filter_warnings(colour_runtime_warnings=True)
Filtering *Colour* usage warnings:
>>> filter_warnings(colour_usage_warnings=True)
Filtering *Colour* warnings:
>>> filter_warnings(colour_warnings=True)
Filtering all the *Colour* and also Python warnings:
>>> filter_warnings(python_warnings=True)
Enabling all the *Colour* and Python warnings:
>>> filter_warnings(*[False] * 4)
Enabling all the *Colour* and Python warnings using the *default* action:
>>> filter_warnings(*['default'] * 4)
Setting back the default state:
>>> filter_warnings(colour_runtime_warnings=True)
"""
for action, category in [
(colour_warnings, ColourWarning),
(colour_runtime_warnings, ColourRuntimeWarning),
(colour_usage_warnings, ColourUsageWarning),
(python_warnings, Warning),
]:
if action is None:
continue
if is_string(action):
action = action
else:
action = 'ignore' if action else 'default'
filterwarnings(action, category=category)
# Defaulting to filter *Colour* runtime warnings.
filter_warnings(colour_runtime_warnings=True)
@contextmanager
def suppress_warnings(colour_runtime_warnings=None,
colour_usage_warnings=None,
colour_warnings=None,
python_warnings=None):
"""
A context manager filtering *Colour* and also optionally overall Python
warnings.
The possible values for all the actions, i.e. each argument, are as
follows:
- *None* (No action is taken)
- *True* (*ignore*)
- *False* (*default*)
- *error*
- *ignore*
- *always*
- *default*
- *module*
- *once*
Parameters
----------
colour_runtime_warnings : bool or str, optional
Whether to filter *Colour* runtime warnings according to the action
value.
colour_usage_warnings : bool or str, optional
Whether to filter *Colour* usage warnings according to the action
value.
colour_warnings : bool or str, optional
Whether to filter *Colour* warnings, this also filters *Colour* usage
and runtime warnings according to the action value.
python_warnings : bool or str, optional
Whether to filter *Python* warnings according to the action value.
"""
filters = warnings.filters
show_warnings = warnings.showwarning
filter_warnings(
colour_warnings=colour_warnings,
colour_runtime_warnings=colour_runtime_warnings,
colour_usage_warnings=colour_usage_warnings,
python_warnings=python_warnings)
try:
yield
finally:
warnings.filters = filters
warnings.showwarning = show_warnings
@contextmanager
def numpy_print_options(*args, **kwargs):
"""
A context manager implementing context changes to *Numpy* print behaviour.
Other Parameters
----------------
\\*args : list, optional
Arguments.
\\**kwargs : dict, optional
        Keyword arguments.
Examples
    --------
>>> np.array([np.pi]) # doctest: +ELLIPSIS
array([ 3.1415926...])
>>> with numpy_print_options(formatter={'float': '{:0.1f}'.format}):
... np.array([np.pi])
array([3.1])
"""
options = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
try:
yield
finally:
np.set_printoptions(**options)
ANCILLARY_COLOUR_SCIENCE_PACKAGES = {}
"""
Ancillary *colour-science.org* packages to describe.
ANCILLARY_COLOUR_SCIENCE_PACKAGES : dict
"""
ANCILLARY_RUNTIME_PACKAGES = {}
"""
Ancillary runtime packages to describe.
ANCILLARY_RUNTIME_PACKAGES : dict
"""
ANCILLARY_DEVELOPMENT_PACKAGES = {}
"""
Ancillary development packages to describe.
ANCILLARY_DEVELOPMENT_PACKAGES : dict
"""
ANCILLARY_EXTRAS_PACKAGES = {}
"""
Ancillary extras packages to describe.
ANCILLARY_EXTRAS_PACKAGES : dict
"""
def describe_environment(runtime_packages=True,
development_packages=False,
extras_packages=False,
print_environment=True,
**kwargs):
"""
Describes *Colour* running environment, i.e. interpreter, runtime and
development packages.
Parameters
----------
runtime_packages : bool, optional
Whether to return the runtime packages versions.
development_packages : bool, optional
Whether to return the development packages versions.
extras_packages : bool, optional
Whether to return the extras packages versions.
print_environment : bool, optional
Whether to print the environment.
Other Parameters
----------------
    padding : int, optional
{:func:`colour.utilities.message_box`},
        Padding on each side of the message.
print_callable : callable, optional
{:func:`colour.utilities.message_box`},
Callable used to print the message box.
width : int, optional
{:func:`colour.utilities.message_box`},
Message box width.
Returns
-------
defaultdict
Environment.
Examples
--------
>>> environment = describe_environment(width=75) # doctest: +SKIP
===========================================================================
* *
* Interpreter : *
* python : 3.8.6 (default, Nov 20 2020, 18:29:40) *
* [Clang 12.0.0 (clang-1200.0.32.27)] *
* *
* colour-science.org : *
* colour : v0.3.16-3-gd8bac475 *
* *
* Runtime : *
* imageio : 2.9.0 *
* matplotlib : 3.3.3 *
* networkx : 2.5 *
* numpy : 1.19.4 *
* pandas : 0.25.3 *
* pygraphviz : 1.6 *
* scipy : 1.5.4 *
* tqdm : 4.54.0 *
* *
===========================================================================
>>> environment = describe_environment(True, True, True, width=75)
... # doctest: +SKIP
===========================================================================
* *
* Interpreter : *
* python : 3.8.6 (default, Nov 20 2020, 18:29:40) *
* [Clang 12.0.0 (clang-1200.0.32.27)] *
* *
* colour-science.org : *
* colour : v0.3.16-3-gd8bac475 *
* *
* Runtime : *
* imageio : 2.9.0 *
* matplotlib : 3.3.3 *
* networkx : 2.5 *
* numpy : 1.19.4 *
* pandas : 0.25.3 *
* pygraphviz : 1.6 *
* scipy : 1.5.4 *
* tqdm : 4.54.0 *
* *
* Development : *
* biblib-simple : 0.1.1 *
* coverage : 5.3 *
* coveralls : 2.2.0 *
* flake8 : 3.8.4 *
* invoke : 1.4.1 *
* jupyter : 1.0.0 *
* mock : 4.0.2 *
* nose : 1.3.7 *
* pre-commit : 2.1.1 *
* pytest : 6.1.2 *
* restructuredtext-lint : 1.3.2 *
* sphinx : 3.1.2 *
* sphinx_rtd_theme : 0.5.0 *
* sphinxcontrib-bibtex : 1.0.0 *
* toml : 0.10.2 *
* twine : 3.2.0 *
* yapf : 0.23.0 *
* *
* Extras : *
* ipywidgets : 7.5.1 *
* notebook : 6.1.5 *
* *
===========================================================================
"""
environment = defaultdict(dict)
environment['Interpreter']['python'] = sys.version
import subprocess # nosec
import colour
# TODO: Implement support for "pyproject.toml" file whenever "TOML" is
# supported in the standard library.
# NOTE: A few clauses are not reached and a few packages are not available
# during continuous integration and are thus ignored for coverage.
try: # pragma: no cover
version = subprocess.check_output( # nosec
['git', 'describe'],
cwd=colour.__path__[0],
stderr=subprocess.STDOUT).strip()
version = version.decode('utf-8')
except Exception: # pragma: no cover
version = colour.__version__
environment['colour-science.org']['colour'] = version
environment['colour-science.org'].update(ANCILLARY_COLOUR_SCIENCE_PACKAGES)
if runtime_packages:
for package in [
'imageio', 'matplotlib', 'networkx', 'numpy', 'pandas',
'pygraphviz', 'PyOpenColorIO', 'scipy', 'sklearn', 'tqdm',
'trimesh'
]:
try:
namespace = __import__(package)
environment['Runtime'][package] = namespace.__version__
except ImportError:
continue
# OpenImageIO
try: # pragma: no cover
namespace = __import__('OpenImageIO')
environment['Runtime']['OpenImageIO'] = namespace.VERSION_STRING
except ImportError: # pragma: no cover
pass
environment['Runtime'].update(ANCILLARY_RUNTIME_PACKAGES)
def _get_package_version(package, mapping):
"""
Returns given package version.
"""
namespace = __import__(package)
if package in mapping:
import pkg_resources
distributions = [
distribution for distribution in pkg_resources.working_set
]
for distribution in distributions:
if distribution.project_name == mapping[package]:
return distribution.version
return namespace.__version__
if development_packages:
mapping = {
'biblib.bib': 'biblib-simple',
'pre_commit': 'pre-commit',
'restructuredtext_lint': 'restructuredtext-lint',
'sphinxcontrib.bibtex': 'sphinxcontrib-bibtex'
}
for package in [
'biblib.bib', 'coverage', 'coveralls', 'flake8', 'invoke',
'jupyter', 'mock', 'nose', 'pre_commit', 'pytest',
'restructuredtext_lint', 'sphinx', 'sphinx_rtd_theme',
'sphinxcontrib.bibtex', 'toml', 'twine', 'yapf'
]:
try:
version = _get_package_version(package, mapping)
package = mapping.get(package, package)
environment['Development'][package] = version
except Exception: # pragma: no cover
# pylint: disable=B112
continue
environment['Development'].update(ANCILLARY_DEVELOPMENT_PACKAGES)
if extras_packages:
mapping = {}
for package in ['ipywidgets', 'notebook']:
try:
version = _get_package_version(package, mapping)
package = mapping.get(package, package)
environment['Extras'][package] = version
except Exception: # pragma: no cover
# pylint: disable=B112
continue
environment['Extras'].update(ANCILLARY_EXTRAS_PACKAGES)
if print_environment:
message = str()
for category in ('Interpreter', 'colour-science.org', 'Runtime',
'Development', 'Extras'):
elements = environment.get(category)
if not elements:
continue
message += '{0} :\n'.format(category)
for key, value in elements.items():
lines = value.split('\n')
message += ' {0} : {1}\n'.format(key, lines.pop(0))
indentation = len(' {0} : '.format(key))
for line in lines:
message += '{0}{1}\n'.format(' ' * indentation, line)
message += '\n'
message_box(message.strip(), **kwargs)
return environment
```
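Unlike `filter_warnings`, the `suppress_warnings` context manager above carries no doctest; a minimal usage sketch, assuming `colour` is installed:

```python
from colour.utilities import suppress_warnings, usage_warning

# Colour runtime warnings are already ignored by the module-level
# "filter_warnings(colour_runtime_warnings=True)" call; usage warnings
# are not, so this one is emitted.
usage_warning('Emitted as usual.')

# Temporarily silence usage warnings as well; the previous filters and
# "warnings.showwarning" hook are restored when the block exits.
with suppress_warnings(colour_usage_warnings=True):
    usage_warning('Silenced inside the context manager.')

usage_warning('Emitted again after the context manager exits.')
```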
|
{
"source": "JGoldstone/colour-datasets",
"score": 2
}
|
#### File: loaders/tests/test_kuopio.py
```python
import numpy as np
import os
import unittest
from colour import SpectralShape
from colour_datasets.loaders.kuopio import (
MatFileMetadata_KuopioUniversity, read_sds_from_mat_file_KuopioUniversity)
from colour_datasets.loaders.kuopio import (
DatasetLoader_MunsellColorsMattSpectrofotometerMeasured,
DatasetLoader_MunsellColorsMattAOTFMeasured,
DatasetLoader_MunsellColorsGlossySpectrofotometerMeasured,
DatasetLoader_MunsellColorsGlossyAllSpectrofotometerMeasured,
DatasetLoader_ForestColors, DatasetLoader_PaperSpectra,
DatasetLoader_LumberSpectra, DatasetLoader_AgfaIT872Set)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2019-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestReadSdsFromMatFileKuopioUniversity',
'TestDatasetLoader_KuopioUniversity'
]
class TestReadSdsFromMatFileKuopioUniversity(unittest.TestCase):
"""
Defines :func:`colour_datasets.loaders.kuopio.\
read_sds_from_mat_file_KuopioUniversity` definition unit tests methods.
"""
def test_read_sds_from_mat_file_KuopioUniversity(self):
"""
Tests :func:`colour_datasets.loaders.kuopio.\
read_sds_from_mat_file_KuopioUniversity` definition.
"""
mat_file = os.path.join(
os.path.dirname(__file__), 'resources', 'Kuopio_Typical.mat')
metadata = MatFileMetadata_KuopioUniversity(
'munsell', SpectralShape(400, 700, 10), True, 'S')
sds = read_sds_from_mat_file_KuopioUniversity(mat_file, metadata)
self.assertEqual(len(sds), 32)
np.testing.assert_array_almost_equal(
sds['5R 6/8 (3)'].values,
np.array([
17.870000000000000,
19.840000000000000,
19.530000000000000,
18.730000000000000,
18.230000000000000,
17.630000000000000,
17.040000000000000,
16.660000000000000,
15.700000000000000,
14.080000000000000,
12.810000000000000,
12.650000000000000,
12.110000000000000,
10.840000000000000,
10.890000000000000,
14.260000000000000,
23.080000000000000,
33.950000000000000,
43.500000000000000,
49.790000000000000,
52.340000000000000,
53.170000000000000,
53.300000000000000,
53.100000000000000,
52.960000000000000,
52.840000000000000,
52.700000000000000,
52.480000000000000,
52.120000000000000,
51.940000000000000,
51.890000000000000,
]),
decimal=7)
class TestDatasetLoader_KuopioUniversity(unittest.TestCase):
"""
Defines :class:`colour_datasets.loaders.kuopio.\
DatasetLoader_KuopioUniversity` class unit tests methods.
"""
def test_required_attributes(self):
"""
Tests presence of required attributes.
"""
required_attributes = ('ID', 'METADATA')
for dataset_loader in (
DatasetLoader_MunsellColorsMattSpectrofotometerMeasured,
DatasetLoader_MunsellColorsMattAOTFMeasured,
DatasetLoader_MunsellColorsGlossySpectrofotometerMeasured,
DatasetLoader_MunsellColorsGlossyAllSpectrofotometerMeasured,
DatasetLoader_ForestColors, DatasetLoader_PaperSpectra,
DatasetLoader_LumberSpectra, DatasetLoader_AgfaIT872Set):
for attribute in required_attributes:
self.assertIn(attribute, dir(dataset_loader))
def test_required_methods(self):
"""
Tests presence of required methods.
"""
required_methods = ('__init__', 'load')
for dataset_loader in (
DatasetLoader_MunsellColorsMattSpectrofotometerMeasured,
DatasetLoader_MunsellColorsMattAOTFMeasured,
DatasetLoader_MunsellColorsGlossySpectrofotometerMeasured,
DatasetLoader_MunsellColorsGlossyAllSpectrofotometerMeasured,
DatasetLoader_ForestColors, DatasetLoader_PaperSpectra,
DatasetLoader_LumberSpectra, DatasetLoader_AgfaIT872Set):
for method in required_methods:
self.assertIn(method, dir(dataset_loader))
def test_load(self):
"""
Tests :func:`colour_datasets.loaders.kuopio.\
DatasetLoader_KuopioUniversity.load` method.
"""
dataset_loaders = {
DatasetLoader_MunsellColorsMattSpectrofotometerMeasured(): (
'munsell', 1269, '2.5R 5/2',
np.array([
0.105600000000000,
0.114100000000000,
0.117000000000000,
0.119000000000000,
0.121900000000000,
0.124600000000000,
0.127100000000000,
0.135200000000000,
0.137100000000000,
0.141900000000000,
0.151300000000000,
0.155800000000000,
0.154900000000000,
0.162200000000000,
0.166000000000000,
0.168700000000000,
0.173300000000000,
0.174400000000000,
0.173900000000000,
0.179700000000000,
0.182200000000000,
0.183900000000000,
0.185500000000000,
0.184600000000000,
0.184800000000000,
0.187700000000000,
0.184600000000000,
0.183700000000000,
0.187600000000000,
0.186500000000000,
0.183200000000000,
0.185800000000000,
0.184400000000000,
0.181300000000000,
0.183800000000000,
0.181800000000000,
0.179500000000000,
0.181900000000000,
0.180200000000000,
0.178600000000000,
0.179600000000000,
0.176300000000000,
0.176300000000000,
0.178100000000000,
0.175100000000000,
0.174000000000000,
0.176300000000000,
0.173400000000000,
0.173800000000000,
0.175500000000000,
0.171100000000000,
0.170600000000000,
0.174300000000000,
0.171800000000000,
0.170600000000000,
0.172800000000000,
0.172300000000000,
0.171200000000000,
0.170900000000000,
0.169300000000000,
0.172000000000000,
0.175300000000000,
0.171500000000000,
0.169500000000000,
0.170800000000000,
0.167100000000000,
0.167700000000000,
0.170100000000000,
0.165600000000000,
0.164800000000000,
0.167500000000000,
0.164600000000000,
0.162500000000000,
0.165500000000000,
0.162500000000000,
0.163400000000000,
0.163600000000000,
0.161500000000000,
0.162400000000000,
0.164500000000000,
0.162200000000000,
0.163900000000000,
0.164900000000000,
0.162200000000000,
0.163600000000000,
0.166100000000000,
0.162900000000000,
0.164700000000000,
0.167400000000000,
0.164800000000000,
0.165200000000000,
0.165400000000000,
0.164400000000000,
0.166300000000000,
0.167600000000000,
0.163800000000000,
0.164400000000000,
0.163600000000000,
0.162100000000000,
0.163800000000000,
0.162900000000000,
0.158600000000000,
0.160300000000000,
0.159100000000000,
0.155300000000000,
0.157200000000000,
0.159000000000000,
0.154100000000000,
0.154500000000000,
0.154600000000000,
0.151100000000000,
0.153700000000000,
0.155000000000000,
0.151200000000000,
0.151100000000000,
0.151300000000000,
0.149300000000000,
0.152000000000000,
0.150700000000000,
0.148400000000000,
0.151300000000000,
0.152600000000000,
0.149000000000000,
0.152200000000000,
0.152700000000000,
0.149700000000000,
0.150800000000000,
0.151100000000000,
0.148300000000000,
0.152100000000000,
0.153100000000000,
0.150900000000000,
0.151100000000000,
0.149300000000000,
0.148500000000000,
0.150900000000000,
0.148300000000000,
0.145300000000000,
0.148100000000000,
0.147000000000000,
0.144900000000000,
0.146600000000000,
0.145300000000000,
0.143100000000000,
0.146300000000000,
0.144400000000000,
0.141000000000000,
0.141200000000000,
0.143700000000000,
0.142000000000000,
0.142900000000000,
0.142400000000000,
0.142700000000000,
0.145400000000000,
0.142900000000000,
0.141300000000000,
0.144500000000000,
0.143700000000000,
0.143400000000000,
0.148600000000000,
0.147500000000000,
0.147000000000000,
0.151300000000000,
0.153100000000000,
0.150800000000000,
0.155800000000000,
0.156400000000000,
0.158100000000000,
0.162700000000000,
0.164600000000000,
0.165200000000000,
0.171900000000000,
0.171900000000000,
0.173600000000000,
0.176300000000000,
0.176800000000000,
0.178200000000000,
0.183400000000000,
0.179800000000000,
0.177500000000000,
0.180400000000000,
0.184800000000000,
0.191700000000000,
0.194900000000000,
0.195100000000000,
0.193100000000000,
0.196400000000000,
0.196700000000000,
0.195900000000000,
0.201000000000000,
0.200100000000000,
0.197400000000000,
0.201200000000000,
0.200500000000000,
0.198200000000000,
0.203900000000000,
0.204700000000000,
0.201900000000000,
0.204000000000000,
0.203800000000000,
0.202000000000000,
0.206200000000000,
0.206600000000000,
0.204200000000000,
0.205500000000000,
0.204700000000000,
0.205100000000000,
0.208300000000000,
0.206300000000000,
0.204100000000000,
0.208100000000000,
0.206600000000000,
0.203300000000000,
0.206400000000000,
0.206400000000000,
0.203900000000000,
0.206100000000000,
0.205400000000000,
0.202700000000000,
0.204600000000000,
0.205300000000000,
0.204700000000000,
0.205800000000000,
0.204600000000000,
0.204000000000000,
0.206900000000000,
0.203500000000000,
0.202900000000000,
0.207000000000000,
0.206200000000000,
0.203400000000000,
0.206500000000000,
0.205500000000000,
0.204300000000000,
0.207100000000000,
0.205100000000000,
0.202300000000000,
0.205000000000000,
0.204100000000000,
0.203900000000000,
0.205800000000000,
0.203500000000000,
0.203800000000000,
0.205800000000000,
0.203200000000000,
0.201200000000000,
0.203700000000000,
0.200600000000000,
0.201400000000000,
0.206300000000000,
0.203100000000000,
0.201000000000000,
0.203600000000000,
0.200500000000000,
0.199300000000000,
0.202700000000000,
0.202300000000000,
0.201700000000000,
0.202900000000000,
0.199900000000000,
0.199200000000000,
0.202500000000000,
0.200400000000000,
0.199500000000000,
0.199800000000000,
0.196400000000000,
0.197200000000000,
0.200500000000000,
0.196800000000000,
0.196700000000000,
0.200100000000000,
0.197800000000000,
0.195700000000000,
0.198500000000000,
0.196500000000000,
0.197100000000000,
0.198400000000000,
0.195900000000000,
0.196400000000000,
0.200200000000000,
0.198300000000000,
0.199400000000000,
0.198500000000000,
0.193700000000000,
0.195300000000000,
0.198400000000000,
0.193300000000000,
0.193800000000000,
0.197200000000000,
0.193900000000000,
0.194700000000000,
0.197300000000000,
0.195000000000000,
0.197700000000000,
0.200700000000000,
0.197100000000000,
0.196300000000000,
0.197700000000000,
0.196800000000000,
0.197300000000000,
0.198000000000000,
0.193900000000000,
0.195300000000000,
0.196100000000000,
0.192900000000000,
0.195100000000000,
0.196100000000000,
0.188600000000000,
0.187100000000000,
0.192700000000000,
0.195800000000000,
0.194800000000000,
0.196100000000000,
0.192200000000000,
0.195100000000000,
0.196500000000000,
0.192600000000000,
0.195000000000000,
0.195500000000000,
0.193100000000000,
0.193200000000000,
0.194200000000000,
0.191800000000000,
0.194500000000000,
0.193600000000000,
0.191100000000000,
0.193100000000000,
0.192800000000000,
0.190600000000000,
0.193600000000000,
0.192400000000000,
0.189900000000000,
0.192900000000000,
0.193900000000000,
0.191700000000000,
0.194700000000000,
0.195900000000000,
0.192400000000000,
0.192900000000000,
0.193100000000000,
0.192200000000000,
0.195700000000000,
0.195500000000000,
0.192200000000000,
0.195100000000000,
0.193600000000000,
0.192300000000000,
0.196200000000000,
0.196100000000000,
0.193800000000000,
0.198400000000000,
0.196900000000000,
0.192900000000000,
0.196300000000000,
0.196800000000000,
0.194800000000000,
0.195600000000000,
0.194600000000000,
0.193700000000000,
0.196900000000000,
0.196100000000000,
0.194800000000000,
0.196700000000000,
0.193900000000000,
0.193400000000000,
0.197000000000000,
0.193000000000000,
0.192200000000000,
0.196000000000000,
0.193800000000000,
0.191800000000000,
0.194000000000000,
0.193500000000000,
0.191000000000000,
0.192300000000000,
0.190700000000000,
0.189500000000000,
0.191700000000000,
0.189900000000000,
0.191200000000000,
0.192400000000000,
0.189600000000000,
0.189500000000000,
0.193200000000000,
0.191200000000000,
0.188600000000000,
0.192800000000000,
0.191700000000000,
0.190800000000000,
0.194300000000000,
0.191600000000000,
0.190400000000000,
0.192700000000000,
0.191000000000000,
0.189300000000000,
0.192200000000000,
0.190200000000000,
0.190100000000000,
0.192900000000000,
0.191300000000000,
0.192900000000000,
0.196700000000000,
0.191600000000000,
0.192400000000000,
0.195900000000000,
0.189600000000000,
0.188700000000000,
0.192400000000000,
0.193200000000000,
0.193000000000000,
0.193400000000000,
0.190200000000000,
0.192200000000000,
0.193100000000000,
0.190000000000000,
0.191000000000000,
0.192500000000000,
0.190200000000000,
0.191300000000000,
0.192700000000000,
0.190000000000000,
])),
DatasetLoader_MunsellColorsMattAOTFMeasured(): (
'munsell', 1250, '10bV50C01.NM5',
np.array([
0.363525390625000,
0.486328125000000,
0.262451171875000,
0.270263671875000,
0.278076171875000,
0.293945312500000,
0.272705078125000,
0.253417968750000,
0.272216796875000,
0.255859375000000,
0.260498046875000,
0.253906250000000,
0.256591796875000,
0.248535156250000,
0.245849609375000,
0.243408203125000,
0.247802734375000,
0.240234375000000,
0.247314453125000,
0.243164062500000,
0.237548828125000,
0.238525390625000,
0.230957031250000,
0.227050781250000,
0.231689453125000,
0.232421875000000,
0.228027343750000,
0.223876953125000,
0.224853515625000,
0.219726562500000,
0.220703125000000,
0.218994140625000,
0.216552734375000,
0.217529296875000,
0.217041015625000,
0.213134765625000,
0.212402343750000,
0.204833984375000,
0.210205078125000,
0.205810546875000,
0.201416015625000,
0.202392578125000,
0.200439453125000,
0.198730468750000,
0.197998046875000,
0.193359375000000,
0.192871093750000,
0.193115234375000,
0.192626953125000,
0.188476562500000,
0.189208984375000,
0.185058593750000,
0.185546875000000,
0.186035156250000,
0.183349609375000,
0.183105468750000,
0.181884765625000,
0.178222656250000,
0.175292968750000,
0.169921875000000,
0.175048828125000,
])),
DatasetLoader_MunsellColorsGlossySpectrofotometerMeasured(): (
'munsell', 32, '5R 5/6',
np.array([
12.660000000000000,
13.540000000000000,
12.990000000000000,
12.260000000000000,
11.910000000000000,
11.580000000000000,
11.360000000000000,
11.430000000000000,
10.910000000000000,
9.8000000000000000,
9.1100000000000000,
9.1400000000000000,
8.5200000000000000,
7.4800000000000000,
8.1600000000000000,
11.190000000000000,
15.190000000000000,
18.460000000000000,
23.560000000000000,
29.770000000000000,
33.250000000000000,
34.400000000000000,
34.540000000000000,
34.350000000000000,
34.200000000000000,
34.050000000000000,
33.800000000000000,
33.560000000000000,
33.290000000000000,
33.080000000000000,
32.910000000000000,
])),
DatasetLoader_MunsellColorsGlossyAllSpectrofotometerMeasured(): (
'X', 1600, '5',
np.array([
0.0832583349355893,
0.0841964216140708,
0.0854254747054747,
0.0864870564212114,
0.0885143682165685,
0.0905455902475432,
0.0915811880405238,
0.0935670213290593,
0.0953374607500153,
0.0969212265220306,
0.0988861173336562,
0.1011019151764140,
0.1027070137118110,
0.1045144157706090,
0.1066298320094840,
0.1078871227364190,
0.1097310323760100,
0.1114069239380190,
0.1121451511457540,
0.1134318032825190,
0.1141553695955370,
0.1148042526315790,
0.1151973800818870,
0.1163717178232080,
0.1153836989247310,
0.1163973344056990,
0.1164192531233960,
0.1176007052049480,
0.1185813542341110,
0.1188167084135430,
0.1188947903717930,
0.1194576529747440,
0.1206333985004790,
0.1203924436437340,
0.1212710711071110,
0.1208673887423540,
0.1215377256924970,
0.1218716508912110,
0.1213794497567520,
0.1217316822846940,
0.1216057200200700,
0.1220691362725450,
0.1223934228755990,
0.1226491662040630,
0.1222738901730910,
0.1235775559991130,
0.1240115273049840,
0.1245753981184280,
0.1249519072803720,
0.1251793875497570,
0.1253437823548850,
0.1259486272019440,
0.1259670591996470,
0.1261504072273180,
0.1270547857142860,
0.1275530353200880,
0.1278131387343720,
0.1280998512642540,
0.1287212301001870,
0.1289580095830810,
0.1290085828891700,
0.1304132516826660,
0.1309290648193960,
0.1315601250826540,
0.1320659696068720,
0.1328932240677590,
0.1336453489265910,
0.1340303717553890,
0.1347657294298580,
0.1352923279986800,
0.1360370366290280,
0.1365566273001920,
0.1375466104152930,
0.1380393871162610,
0.1391758261775510,
0.1393372198783630,
0.1403947401936650,
0.1410517545489320,
0.1420981132075470,
0.1424267063197030,
0.1431591745373150,
0.1439438302804960,
0.1449724509333040,
0.1457406097108570,
0.1466319866826770,
0.1477144227624550,
0.1491561375701750,
0.1499657283479590,
0.1508730084930310,
0.1524472420812020,
0.1538901500326160,
0.1551999854276550,
0.1564189116238570,
0.1575284381833020,
0.1588692308277620,
0.1593696495517520,
0.1605326245110820,
0.1618569582133350,
0.1624176661422450,
0.1634395257586450,
0.1635596262494570,
0.1647163760720880,
0.1653961094581390,
0.1659311061379690,
0.1668263889643190,
0.1664016268098260,
0.1663602603460430,
0.1672364293227780,
0.1676109344315600,
0.1680388326738580,
0.1677260481471460,
0.1674615913396480,
0.1674423665261110,
0.1669457804244260,
0.1667212939521800,
0.1666681862479700,
0.1661996093893670,
0.1660631997190860,
0.1650462213562810,
0.1644598642563330,
0.1639480785837650,
0.1629394804605160,
0.1618968264677260,
0.1607553251918300,
0.1599774502784840,
0.1592006389084410,
0.1577751116168180,
0.1567381133546260,
0.1558041359727410,
0.1546063862270590,
0.1532839006439740,
0.1522304826541110,
0.1510174361195320,
0.1495370270065490,
0.1482986794128800,
0.1471751082251080,
0.1459533303020460,
0.1448406104887160,
0.1432260271395360,
0.1420294881655200,
0.1407796123863140,
0.1394713345247770,
0.1383847867252320,
0.1367663760554230,
0.1353930054621170,
0.1340665548764000,
0.1326094541324100,
0.1314476955556760,
0.1300619568392020,
0.1286112691620170,
0.1270600768689440,
0.1256763453237410,
0.1247108740387740,
0.1233902828348500,
0.1219225162024490,
0.1203756671729230,
0.1193858886718750,
0.1187244485879990,
0.1172117915401300,
0.1163088532870850,
0.1148534423920700,
0.1134792034486500,
0.1125721330001090,
0.1113368023192800,
0.1101989148244470,
0.1091195956961200,
0.1083813403562120,
0.1071390462089160,
0.1061137185040440,
0.1049129130387580,
0.1043954382535030,
0.1031281954323000,
0.1021073306429620,
0.1010716444082520,
0.1004949793702500,
0.0995802646626368,
0.0984846824799607,
0.0976298555319497,
0.0964366697093181,
0.0959713445121951,
0.0946097380316976,
0.0940169040274674,
0.0931408770974068,
0.0925075464007411,
0.0919924512854102,
0.0911384338532010,
0.0904112434318108,
0.0898916765781003,
0.0889631941324027,
0.0886735681284474,
0.0881560421558456,
0.0874990131233596,
0.0870141730990311,
0.0865602858079677,
0.0866091052286152,
0.0860980602739726,
0.0854415269900361,
0.0852274163424125,
0.0846683259332347,
0.0846999037966362,
0.0846302515481997,
0.0837975875576037,
0.0838024112149533,
0.0835321230735480,
0.0829661160327131,
0.0827144267149202,
0.0827143225629190,
0.0820904100032906,
0.0820583758300862,
0.0819189005552196,
0.0810632600471517,
0.0810455174001206,
0.0807908284431793,
0.0804156337410190,
0.0805326402629417,
0.0800952396585686,
0.0796956921896410,
0.0793305183644425,
0.0789345770872087,
0.0784959303128253,
0.0783585716629300,
0.0780296335618316,
0.0776355686360401,
0.0777728303000492,
0.0771084880319877,
0.0769203308138898,
0.0765511326039387,
0.0762573573616277,
0.0762127566381391,
0.0760990485894276,
0.0759584208223972,
0.0755359285636025,
0.0756633663670248,
0.0752572122010094,
0.0758166600909639,
0.0750690017513135,
0.0752405613919895,
0.0750479940841367,
0.0752528940517383,
0.0749732792022792,
0.0751002570131788,
0.0750104604924056,
0.0749880663745893,
0.0752553795596451,
0.0753496369021501,
0.0753240486895493,
0.0749273240054870,
0.0755281749629548,
0.0757077530932087,
0.0758634115061267,
0.0756506801228609,
0.0760071605101143,
0.0762060860026327,
0.0759151579640193,
0.0760791654510557,
0.0761815485996705,
0.0765150256522692,
0.0762693840004381,
0.0764163189645717,
0.0764907408057002,
0.0768342669584245,
0.0771621960440524,
0.0770743948220065,
0.0770292538916904,
0.0771631784423267,
0.0774133684557129,
0.0772509793050447,
0.0776359754048861,
0.0776684550740538,
0.0775999245903436,
0.0775543019880607,
0.0775452066523959,
0.0779931107448912,
0.0779379115287394,
0.0777371116127967,
0.0777113861657265,
0.0783069040254470,
0.0777791275336913,
0.0778322734546252,
0.0782278086575343,
0.0781885667306111,
0.0779885797133166,
0.0778922203584937,
0.0777887903693571,
0.0781322884794139,
0.0778500300990532,
0.0783473231527094,
0.0781106787355065,
0.0774791683038638,
0.0774638428430621,
0.0776397440804944,
0.0778363414820891,
0.0773739737159128,
0.0771565329105620,
0.0774208283325135,
0.0773433725061492,
0.0769061458287716,
0.0768768537704918,
0.0767942762841530,
0.0766405641193834,
0.0768223210852969,
0.0756511902310809,
0.0760848653489134,
0.0758909124746839,
0.0757557372797899,
0.0755393640350877,
0.0755921310541311,
0.0759533260309984,
0.0755523312534209,
0.0758025853417513,
0.0754538890712176,
0.0759966492343413,
0.0756392191463549,
0.0760002427745665,
0.0759172330727733,
0.0760517874821252,
0.0761247379087473,
0.0767259722054381,
0.0763790106863501,
0.0764716400109619,
0.0764261489525063,
0.0764849258345667,
0.0770762127916552,
0.0770786163439449,
0.0777177075901432,
0.0779242324199406,
0.0779871221093106,
0.0782395180299033,
0.0780202550409318,
0.0784945261194030,
0.0789988898659046,
0.0787182916666667,
0.0795837732500822,
0.0803447880449685,
0.0798549101363562,
0.0801640755957272,
0.0806020982436883,
0.0807538561632564,
0.0815723849317322,
0.0814840643355108,
0.0818510493352379,
0.0819726217696014,
0.0822825937877291,
0.0826006385614824,
0.0832230251162791,
0.0832401884518462,
0.0837584412217095,
0.0840583776960650,
0.0838307027945206,
0.0846559244351832,
0.0854320944276695,
0.0859695935852373,
0.0860562020024205,
0.0868489965268207,
0.0869247383567663,
0.0877802062760588,
0.0889851523971662,
0.0886742533164529,
0.0894202225519288,
0.0903602252401458,
0.0913718090645038,
0.0926356862097440,
0.0927020975529644,
0.0934591620557682,
0.0942531088738516,
0.0957034433521885,
0.0966463331682351,
0.0970120648886445,
0.0982979563203177,
0.0993772702256467,
0.1001024339091560,
0.1006514627853130,
0.1021924514103130,
0.1032385466651990,
0.1042875362287090,
0.1054265632733870,
0.1065878370941110,
0.1078802324765710,
0.1085841372602890,
0.1096687124910860,
0.1103224411182040,
0.1116595158900050,
0.1135477486645740,
0.1144331781621860,
0.1143250851485150,
0.1156502670851920,
0.1175013129411760,
0.1179270310695630,
0.1182087558274100,
0.1191784615553600,
0.1209157444943570,
0.1216799742574260,
0.1230600100148570,
0.1251525243466300,
0.1264191929573590,
0.1278286560939470,
0.1295155392232370,
0.1325001371944510,
0.1325402033842440,
0.1334973586771410,
0.1362069264544460,
])),
DatasetLoader_ForestColors(): ('pine', 370, '5',
np.array([
0.010262410000000,
0.009839101400000,
0.012529907000000,
0.011030105000000,
0.010073634000000,
0.011320871000000,
0.011616203000000,
0.013212691000000,
0.012491421000000,
0.011912613000000,
0.013115942000000,
0.013417573000000,
0.013631902000000,
0.013967374000000,
0.014361868000000,
0.014427279000000,
0.014636329000000,
0.014908329000000,
0.014993297000000,
0.015136227000000,
0.015386547000000,
0.015711171000000,
0.015828966000000,
0.016981529000000,
0.018321589000000,
0.019439448000000,
0.021571993000000,
0.023876195000000,
0.025659029000000,
0.026894433000000,
0.028889134000000,
0.030469200000000,
0.030692223000000,
0.031212534000000,
0.030800426000000,
0.029837495000000,
0.029041031000000,
0.027807930000000,
0.027085866000000,
0.026870222000000,
0.026034403000000,
0.025490563000000,
0.025915747000000,
0.025255465000000,
0.024883133000000,
0.024609150000000,
0.023686946000000,
0.023991298000000,
0.023958765000000,
0.023967050000000,
0.023539582000000,
0.022725872000000,
0.022347244000000,
0.022138569000000,
0.021979660000000,
0.020823906000000,
0.021076211000000,
0.021165034000000,
0.022165784000000,
0.025146573000000,
0.029714434000000,
0.039837663000000,
0.052246223000000,
0.067425578000000,
0.083176671000000,
0.097080232000000,
0.111191460000000,
0.122961630000000,
0.134962030000000,
0.143059710000000,
0.149133660000000,
0.155173970000000,
0.155457870000000,
0.159591120000000,
0.164270350000000,
0.165211360000000,
0.167401470000000,
0.167736380000000,
0.169301000000000,
0.170914620000000,
0.171809910000000,
0.172325160000000,
0.174672460000000,
0.176431510000000,
0.174736990000000,
0.177491730000000,
0.176703620000000,
0.177523560000000,
0.182620180000000,
0.182529490000000,
0.183265810000000,
0.183518600000000,
0.186661620000000,
])),
DatasetLoader_PaperSpectra(): ('newsprintsce', 36, '5',
np.array([
28.430000000000000,
37.390000000000000,
44.860000000000000,
48.860000000000000,
51.120000000000000,
52.330000000000000,
53.140000000000000,
53.930000000000000,
54.620000000000000,
55.090000000000000,
54.890000000000000,
53.670000000000000,
51.830000000000000,
50.610000000000000,
48.660000000000000,
45.180000000000000,
43.640000000000000,
48.450000000000000,
58.400000000000000,
67.180000000000000,
69.940000000000000,
69.630000000000000,
69.300000000000000,
69.340000000000000,
69.370000000000000,
69.190000000000000,
68.880000000000000,
68.610000000000000,
68.290000000000000,
68.250000000000000,
68.230000000000000,
])),
DatasetLoader_LumberSpectra(): ('birchWp', 12, '5',
np.array([
0.044233333000000,
0.045133333000000,
0.045233333000000,
0.046333333000000,
0.046833333000000,
0.047633333000000,
0.048733333000000,
0.049633333000000,
0.049933333000000,
0.051733333000000,
0.052733333000000,
0.053133333000000,
0.053833333000000,
0.054633333000000,
0.055433333000000,
0.056333333000000,
0.056833333000000,
0.058033333000000,
0.058433333000000,
0.059633333000000,
0.059933333000000,
0.060433333000000,
0.061033333000000,
0.063233333000000,
0.063833333000000,
0.064133333000000,
0.064133333000000,
0.065533333000000,
0.066533333000000,
0.067033333000000,
0.067833333000000,
0.068233333000000,
0.068633333000000,
0.069933333000000,
0.070033333000000,
0.071533333000000,
0.071933333000000,
0.072433333000000,
0.072933333000000,
0.073833333000000,
0.074433333000000,
0.074933333000000,
0.075833333000000,
0.076233333000000,
0.076833333000000,
0.077233333000000,
0.077933333000000,
0.078133333000000,
0.078133333000000,
0.079933333000000,
0.080333333000000,
0.080833333000000,
0.081333333000000,
0.081633333000000,
0.082433333000000,
0.083733333000000,
0.083833333000000,
0.084233333000000,
0.085033333000000,
0.085733333000000,
0.085733333000000,
0.086333333000000,
0.086733333000000,
0.087433333000000,
0.088133333000000,
0.089033333000000,
0.089433333000000,
0.089733333000000,
0.090033333000000,
0.090333333000000,
0.090833333000000,
0.091533333000000,
0.092233333000000,
0.092633333000000,
0.092833333000000,
0.093333333000000,
0.094133333000000,
0.094833333000000,
0.095133333000000,
0.095833333000000,
0.096233333000000,
0.097133333000000,
0.096833333000000,
0.097733333000000,
0.098133333000000,
0.098933333000000,
0.099233333000000,
0.099633333000000,
0.100333330000000,
0.101433330000000,
0.101933330000000,
0.102533330000000,
0.102933330000000,
0.103633330000000,
0.103533330000000,
0.104533330000000,
0.104833330000000,
0.105833330000000,
0.106133330000000,
0.106933330000000,
0.106733330000000,
0.107733330000000,
0.108033330000000,
0.108133330000000,
0.108533330000000,
0.109633330000000,
0.109833330000000,
0.110533330000000,
0.111133330000000,
0.111633330000000,
0.111533330000000,
0.111833330000000,
0.113033330000000,
0.112833330000000,
0.113333330000000,
0.114033330000000,
0.114333330000000,
0.115233330000000,
0.116033330000000,
0.116433330000000,
0.116933330000000,
0.117333330000000,
0.117733330000000,
0.118633330000000,
0.118933330000000,
0.119633330000000,
0.119833330000000,
0.120733330000000,
0.121233330000000,
0.121833330000000,
0.122333330000000,
0.123133330000000,
0.123633330000000,
0.124133330000000,
0.124433330000000,
0.125233330000000,
0.125533330000000,
0.126033330000000,
0.126633330000000,
0.127033330000000,
0.127533330000000,
0.128033330000000,
0.128033330000000,
0.128833330000000,
0.129233330000000,
0.129433330000000,
0.130233330000000,
0.130833330000000,
0.130933330000000,
0.131833330000000,
0.132033330000000,
0.132433330000000,
0.133233330000000,
0.134233330000000,
0.134133330000000,
0.134533330000000,
0.135033330000000,
0.135433330000000,
0.136133330000000,
0.136033330000000,
0.136933330000000,
0.137733330000000,
0.138333330000000,
0.138533330000000,
0.139133330000000,
0.139633330000000,
0.139933330000000,
0.140133330000000,
0.140633330000000,
0.141433330000000,
0.141633330000000,
0.142433330000000,
0.142733330000000,
0.143933330000000,
0.143633330000000,
0.144233330000000,
0.144533330000000,
0.145333330000000,
0.145233330000000,
0.145933330000000,
0.146233330000000,
0.147133330000000,
0.147233330000000,
0.147533330000000,
0.148133330000000,
0.148733330000000,
0.148933330000000,
0.149533330000000,
0.149933330000000,
0.150733330000000,
0.151333330000000,
0.151633330000000,
0.152133330000000,
0.152033330000000,
0.152233330000000,
0.152333330000000,
0.153233330000000,
0.153833330000000,
0.154433330000000,
0.154333330000000,
0.154633330000000,
0.155433330000000,
0.155433330000000,
0.155333330000000,
0.155833330000000,
0.156833330000000,
0.157433330000000,
0.158033330000000,
0.158533330000000,
0.158933330000000,
0.158833330000000,
0.158533330000000,
0.158533330000000,
0.160633330000000,
0.161133330000000,
0.160933330000000,
0.161633330000000,
0.162033330000000,
0.162333330000000,
0.163033330000000,
0.163333330000000,
0.163433330000000,
0.163833330000000,
0.163933330000000,
0.164333330000000,
0.165433330000000,
0.165733330000000,
0.166033330000000,
0.166333330000000,
0.166433330000000,
0.166533330000000,
0.167833330000000,
0.167933330000000,
0.167733330000000,
0.168233330000000,
0.168333330000000,
0.168533330000000,
0.169333330000000,
0.169533330000000,
0.170333330000000,
0.170033330000000,
0.171033330000000,
0.170433330000000,
0.171233330000000,
0.171533330000000,
0.172233330000000,
0.172133330000000,
0.172233330000000,
0.172733330000000,
0.173533330000000,
0.174033330000000,
0.174133330000000,
0.175033330000000,
0.175433330000000,
0.175733330000000,
0.176133330000000,
0.175733330000000,
0.175833330000000,
0.175733330000000,
0.176833330000000,
0.176733330000000,
0.177033330000000,
0.176933330000000,
0.177233330000000,
0.178233330000000,
0.178933330000000,
0.178533330000000,
0.180033330000000,
0.180233330000000,
0.180633330000000,
0.180633330000000,
0.181433330000000,
0.180433330000000,
0.180833330000000,
0.181233330000000,
0.181033330000000,
0.181233330000000,
0.182333330000000,
0.181833330000000,
0.182133330000000,
0.183333330000000,
0.182333330000000,
0.182633330000000,
0.183533330000000,
0.183833330000000,
0.183933330000000,
0.183433330000000,
0.184733330000000,
0.184633330000000,
0.185033330000000,
0.185433330000000,
0.186033330000000,
0.185833330000000,
0.186833330000000,
0.185733330000000,
0.186433330000000,
0.187033330000000,
0.187333330000000,
0.187433330000000,
0.187833330000000,
0.187433330000000,
0.186333330000000,
0.186933330000000,
0.188433330000000,
0.188433330000000,
0.188833330000000,
0.189333330000000,
0.190133330000000,
0.189633330000000,
0.190433330000000,
0.190133330000000,
0.190733330000000,
0.190033330000000,
0.189933330000000,
0.190433330000000,
0.190433330000000,
0.190933330000000,
0.191633330000000,
0.191833330000000,
0.191933330000000,
0.191733330000000,
0.191233330000000,
0.192333330000000,
0.192833330000000,
0.193233330000000,
0.193633330000000,
0.193633330000000,
0.193033330000000,
0.192933330000000,
0.192833330000000,
0.193533330000000,
0.193433330000000,
0.193733330000000,
0.193833330000000,
0.194333330000000,
0.194033330000000,
0.195133330000000,
0.195033330000000,
0.194933330000000,
0.196233330000000,
0.197033330000000,
0.196833330000000,
0.197333330000000,
0.195533330000000,
0.195733330000000,
0.197233330000000,
0.198333330000000,
0.196433330000000,
0.197233330000000,
0.196833330000000,
0.197433330000000,
0.197033330000000,
0.196833330000000,
0.198433330000000,
0.198233330000000,
0.198233330000000,
0.198533330000000,
0.198233330000000,
0.197833330000000,
0.199133330000000,
0.199233330000000,
0.199333330000000,
0.199433330000000,
0.200133330000000,
0.200133330000000,
0.200533330000000,
0.199433330000000,
0.200633330000000,
0.200633330000000,
0.200233330000000,
0.199833330000000,
0.200133330000000,
0.201433330000000,
0.202233330000000,
0.201333330000000,
0.201233330000000,
0.201433330000000,
0.201833330000000,
0.201533330000000,
0.203233330000000,
0.202333330000000,
0.201433330000000,
0.203333330000000,
0.202733330000000,
0.202533330000000,
0.202633330000000,
0.203533330000000,
0.203433330000000,
0.202633330000000,
0.203133330000000,
0.203233330000000,
0.204533330000000,
0.204533330000000,
0.203533330000000,
0.203133330000000,
0.202633330000000,
0.203133330000000,
0.204433330000000,
0.205033330000000,
0.205533330000000,
0.204733330000000,
0.206333330000000,
0.205633330000000,
0.207733330000000,
0.207133330000000,
0.207233330000000,
0.206933330000000,
0.206833330000000,
0.209133330000000,
0.207533330000000,
0.207733330000000,
0.208333330000000,
0.208333330000000,
0.206133330000000,
0.207433330000000,
0.209033330000000,
0.209233330000000,
0.208633330000000,
0.207733330000000,
0.210233330000000,
0.209633330000000,
0.208833330000000,
0.210233330000000,
0.209633330000000,
0.210133330000000,
0.211033330000000,
0.210733330000000,
0.210133330000000,
0.210533330000000,
0.208633330000000,
0.209033330000000,
0.209733330000000,
0.210533330000000,
0.210033330000000,
0.208433330000000,
0.210433330000000,
0.210933330000000,
0.209633330000000,
0.210233330000000,
0.212233330000000,
0.212433330000000,
0.211433330000000,
0.212133330000000,
0.212733330000000,
0.211533330000000,
0.212033330000000,
0.211333330000000,
0.209733330000000,
0.210433330000000,
0.211233330000000,
0.212533330000000,
0.211533330000000,
0.211733330000000,
0.210133330000000,
0.210033330000000,
0.210833330000000,
0.211333330000000,
0.211233330000000,
0.213733330000000,
0.211133330000000,
0.211533330000000,
0.214833330000000,
0.211433330000000,
0.214633330000000,
0.214433330000000,
0.214833330000000,
0.216733330000000,
0.215833330000000,
0.214833330000000,
0.219333330000000,
0.216833330000000,
0.215333330000000,
0.215433330000000,
0.217633330000000,
0.216033330000000,
0.215233330000000,
0.217533330000000,
0.216933330000000,
0.215733330000000,
0.209633330000000,
0.209633330000000,
0.216766670000000,
0.217466670000000,
0.215466670000000,
0.215566670000000,
0.214766670000000,
0.213066670000000,
0.212366670000000,
0.212866670000000,
0.213166670000000,
0.211066670000000,
0.212366670000000,
0.213066670000000,
0.211666670000000,
0.209966670000000,
0.209366670000000,
0.210766670000000,
0.210066670000000,
0.210666670000000,
0.211766670000000,
0.208966670000000,
0.208266670000000,
0.210366670000000,
0.210866670000000,
0.209366670000000,
0.208966670000000,
0.209966670000000,
0.208166670000000,
0.207166670000000,
0.208766670000000,
0.208566670000000,
0.207566670000000,
0.205666670000000,
0.206166670000000,
0.206366670000000,
0.206166670000000,
0.206166670000000,
0.205766670000000,
0.204866670000000,
0.206066670000000,
0.205466670000000,
0.205066670000000,
0.204566670000000,
0.204266670000000,
0.204366670000000,
0.203666670000000,
0.203366670000000,
0.202066670000000,
0.202266670000000,
0.203866670000000,
0.203166670000000,
0.202866670000000,
0.201966670000000,
0.201166670000000,
0.201266670000000,
0.201266670000000,
0.200966670000000,
0.200766670000000,
0.200766670000000,
0.201266670000000,
0.200766670000000,
0.200066670000000,
0.199766670000000,
0.199366670000000,
0.199366670000000,
0.199466670000000,
0.199066670000000,
0.198466670000000,
0.198366670000000,
0.198466670000000,
0.198266670000000,
0.197966670000000,
0.198066670000000,
0.197266670000000,
0.196866670000000,
0.196566670000000,
0.196666670000000,
0.196266670000000,
0.195366670000000,
0.195366670000000,
0.195166670000000,
0.194066670000000,
0.193666670000000,
0.193266670000000,
0.193066670000000,
0.192266670000000,
0.192066670000000,
0.191766670000000,
0.190966670000000,
0.190666670000000,
0.190066670000000,
0.190066670000000,
0.190266670000000,
0.190366670000000,
0.190766670000000,
0.190866670000000,
0.190866670000000,
0.190966670000000,
0.190866670000000,
0.191166670000000,
0.191266670000000,
0.191366670000000,
0.191566670000000,
0.191766670000000,
0.191466670000000,
0.191766670000000,
0.191966670000000,
0.192166670000000,
0.191766670000000,
0.192366670000000,
0.192166670000000,
0.192266670000000,
0.192266670000000,
0.191966670000000,
0.191666670000000,
0.191966670000000,
0.191666670000000,
0.191466670000000,
0.191766670000000,
0.192266670000000,
0.191866670000000,
0.191866670000000,
0.191866670000000,
0.191966670000000,
0.191666670000000,
0.191266670000000,
0.191466670000000,
0.191566670000000,
0.191866670000000,
0.192566670000000,
0.192366670000000,
0.191966670000000,
0.192066670000000,
0.192366670000000,
0.192166670000000,
0.192266670000000,
0.192566670000000,
0.192866670000000,
0.192466670000000,
0.192966670000000,
0.192966670000000,
0.192966670000000,
0.192766670000000,
0.193066670000000,
0.193266670000000,
0.193066670000000,
0.193066670000000,
0.193366670000000,
0.192866670000000,
0.193366670000000,
0.193666670000000,
0.193966670000000,
0.193866670000000,
0.193566670000000,
0.193866670000000,
0.193566670000000,
0.193666670000000,
0.193966670000000,
0.194166670000000,
0.194366670000000,
0.194266670000000,
0.194066670000000,
0.194166670000000,
0.194266670000000,
0.194466670000000,
0.194466670000000,
0.194566670000000,
0.194866670000000,
0.194966670000000,
0.194866670000000,
0.194566670000000,
0.194466670000000,
0.194866670000000,
0.195166670000000,
0.195166670000000,
0.195066670000000,
0.195366670000000,
0.195566670000000,
0.195466670000000,
0.195766670000000,
0.195466670000000,
0.195466670000000,
0.195766670000000,
0.195466670000000,
0.195266670000000,
0.195566670000000,
0.195666670000000,
0.195666670000000,
0.195666670000000,
0.196366670000000,
0.196066670000000,
0.195766670000000,
0.195666670000000,
0.195966670000000,
0.195866670000000,
0.195866670000000,
0.196066670000000,
0.196566670000000,
0.196166670000000,
0.196666670000000,
0.196366670000000,
0.196466670000000,
0.196266670000000,
0.196066670000000,
0.196066670000000,
0.196366670000000,
0.196466670000000,
0.196466670000000,
0.196766670000000,
0.196866670000000,
0.196466670000000,
0.196866670000000,
0.196666670000000,
0.196066670000000,
0.196166670000000,
0.196666670000000,
0.196666670000000,
0.196666670000000,
0.197066670000000,
0.197366670000000,
0.197066670000000,
0.197166670000000,
0.197166670000000,
0.197366670000000,
0.197166670000000,
0.197066670000000,
0.197066670000000,
0.196766670000000,
0.197166670000000,
0.197266670000000,
0.196966670000000,
0.196966670000000,
0.197466670000000,
0.197066670000000,
0.196766670000000,
0.196966670000000,
0.197666670000000,
0.197066670000000,
0.196866670000000,
0.197166670000000,
0.197166670000000,
0.197366670000000,
0.197566670000000,
0.197466670000000,
0.197366670000000,
0.197366670000000,
0.197366670000000,
0.197266670000000,
0.196566670000000,
0.197266670000000,
0.197466670000000,
0.197066670000000,
0.196866670000000,
0.197066670000000,
0.196766670000000,
0.196966670000000,
0.197166670000000,
0.197366670000000,
0.196866670000000,
0.196966670000000,
0.196766670000000,
0.196466670000000,
0.195966670000000,
0.195666670000000,
0.195966670000000,
0.196066670000000,
0.195666670000000,
0.195366670000000,
0.195066670000000,
0.194966670000000,
0.194666670000000,
0.194566670000000,
0.194766670000000,
0.194466670000000,
0.194166670000000,
0.193866670000000,
0.193566670000000,
0.193366670000000,
0.193466670000000,
0.193866670000000,
0.193066670000000,
0.192866670000000,
0.192666670000000,
0.192366670000000,
0.192066670000000,
0.191966670000000,
0.191566670000000,
0.190966670000000,
0.190666670000000,
0.190666670000000,
0.190366670000000,
0.190266670000000,
0.190266670000000,
0.189866670000000,
0.189366670000000,
0.189066670000000,
0.189066670000000,
0.188466670000000,
0.188066670000000,
0.188166670000000,
0.187966670000000,
0.187466670000000,
0.187266670000000,
0.187266670000000,
0.187066670000000,
0.186766670000000,
0.186666670000000,
0.186666670000000,
0.186166670000000,
0.186466670000000,
0.186266670000000,
0.185966670000000,
0.185766670000000,
0.185766670000000,
0.185566670000000,
0.185166670000000,
0.184866670000000,
0.184966670000000,
0.185066670000000,
0.185166670000000,
0.184966670000000,
0.184466670000000,
0.184366670000000,
0.183866670000000,
0.183666670000000,
0.183666670000000,
0.183366670000000,
0.183066670000000,
0.183066670000000,
0.182166670000000,
0.180366670000000,
0.180166670000000,
0.180066670000000,
0.179766670000000,
0.179966670000000,
0.180066670000000,
0.179766670000000,
0.179566670000000,
0.179466670000000,
0.179766670000000,
0.179566670000000,
0.179466670000000,
0.179466670000000,
0.179466670000000,
0.179666670000000,
0.179566670000000,
0.179566670000000,
0.179366670000000,
0.179766670000000,
0.180166670000000,
0.179466670000000,
0.179466670000000,
0.179566670000000,
0.179466670000000,
0.179266670000000,
0.179466670000000,
0.179466670000000,
0.179766670000000,
0.179966670000000,
0.180266670000000,
0.180466670000000,
0.179766670000000,
0.180066670000000,
0.180266670000000,
0.179966670000000,
0.180166670000000,
0.180766670000000,
0.180666670000000,
0.180766670000000,
0.181066670000000,
0.180766670000000,
0.180766670000000,
0.181066670000000,
0.181366670000000,
0.181066670000000,
0.181266670000000,
0.181566670000000,
0.181566670000000,
0.181566670000000,
0.182066670000000,
0.182166670000000,
0.182066670000000,
0.182066670000000,
0.182066670000000,
0.182366670000000,
0.182266670000000,
0.182566670000000,
0.182566670000000,
0.182466670000000,
0.182966670000000,
0.182966670000000,
0.183166670000000,
0.182966670000000,
0.182366670000000,
0.182566670000000,
0.182966670000000,
0.183366670000000,
0.183366670000000,
0.183266670000000,
0.183166670000000,
0.183166670000000,
0.183566670000000,
0.183666670000000,
0.183466670000000,
0.183566670000000,
0.183566670000000,
0.183266670000000,
0.183466670000000,
0.184166670000000,
0.184366670000000,
0.183966670000000,
0.184066670000000,
0.184266670000000,
0.183866670000000,
0.183466670000000,
0.183666670000000,
0.183766670000000,
0.183866670000000,
0.183966670000000,
0.184266670000000,
0.184066670000000,
0.184166670000000,
0.184466670000000,
0.184366670000000,
0.184366670000000,
0.184866670000000,
0.185066670000000,
0.184866670000000,
0.184666670000000,
0.185166670000000,
0.185266670000000,
0.185566670000000,
0.185466670000000,
0.185266670000000,
0.185166670000000,
0.184966670000000,
0.185066670000000,
0.185366670000000,
0.185166670000000,
0.185366670000000,
0.185766670000000,
0.185666670000000,
0.185666670000000,
0.185366670000000,
0.185466670000000,
0.185066670000000,
0.184666670000000,
0.184666670000000,
0.184766670000000,
0.185366670000000,
0.185166670000000,
0.185366670000000,
0.185166670000000,
0.184866670000000,
0.184866670000000,
0.184566670000000,
0.184466670000000,
0.184566670000000,
0.184866670000000,
0.184666670000000,
0.184466670000000,
0.184366670000000,
0.184166670000000,
0.183466670000000,
0.183666670000000,
0.183866670000000,
0.183366670000000,
0.182766670000000,
0.182866670000000,
0.183266670000000,
0.182866670000000,
0.182966670000000,
0.182766670000000,
0.181966670000000,
0.181666670000000,
0.181266670000000,
0.180866670000000,
0.180466670000000,
0.180366670000000,
0.180666670000000,
0.180266670000000,
0.179366670000000,
0.179266670000000,
0.179066670000000,
0.178666670000000,
0.178466670000000,
0.178366670000000,
0.177966670000000,
0.177566670000000,
0.177766670000000,
0.177166670000000,
0.176866670000000,
0.176266670000000,
0.175666670000000,
0.175466670000000,
0.174866670000000,
0.174466670000000,
0.174166670000000,
0.173966670000000,
0.174366670000000,
0.174266670000000,
0.173766670000000,
0.173466670000000,
0.173166670000000,
0.173266670000000,
0.172266670000000,
0.171866670000000,
0.171566670000000,
0.171266670000000,
0.170766670000000,
0.170366670000000,
0.169566670000000,
0.169466670000000,
0.169166670000000,
0.169666670000000,
0.169666670000000,
0.169366670000000,
0.169366670000000,
0.169566670000000,
0.169766670000000,
0.169566670000000,
0.169466670000000,
0.169366670000000,
0.168166670000000,
0.167566670000000,
0.166866670000000,
0.167066670000000,
0.166666670000000,
0.166066670000000,
0.166266670000000,
0.165766670000000,
0.165566670000000,
0.165566670000000,
0.165166670000000,
0.164566670000000,
0.164166670000000,
0.163566670000000,
0.162466670000000,
0.161766670000000,
0.161866670000000,
0.160966670000000,
0.160266670000000,
0.159866670000000,
0.159566670000000,
0.159166670000000,
0.158166670000000,
0.157666670000000,
0.157066670000000,
0.156266670000000,
0.155466670000000,
0.154566670000000,
0.153766670000000,
0.153066670000000,
0.152066670000000,
0.151666670000000,
0.150666670000000,
0.150066670000000,
0.149966670000000,
0.149566670000000,
0.148566670000000,
0.148066670000000,
0.147766670000000,
0.147266670000000,
0.146266670000000,
0.146266670000000,
0.145466670000000,
0.144966670000000,
0.144466670000000,
0.144366670000000,
0.144366670000000,
0.143666670000000,
0.143466670000000,
0.143366670000000,
0.142966670000000,
0.142866670000000,
0.142166670000000,
0.142066670000000,
0.142266670000000,
0.142066670000000,
0.141966670000000,
0.141666670000000,
0.141366670000000,
0.141466670000000,
0.141366670000000,
0.140866670000000,
0.140966670000000,
0.141366670000000,
0.141166670000000,
0.141166670000000,
0.141366670000000,
0.141266670000000,
0.140966670000000,
0.140866670000000,
0.141066670000000,
0.141066670000000,
0.140866670000000,
0.141166670000000,
0.140866670000000,
0.140766670000000,
0.141366670000000,
0.141266670000000,
0.140866670000000,
0.140866670000000,
0.140966670000000,
0.140766670000000,
0.140466670000000,
0.140466670000000,
0.140566670000000,
0.140566670000000,
0.140966670000000,
0.140666670000000,
0.140466670000000,
0.140266670000000,
0.140166670000000,
0.140366670000000,
0.140266670000000,
0.140466670000000,
0.140566670000000,
0.140966670000000,
0.141466670000000,
0.141066670000000,
0.141366670000000,
0.141166670000000,
0.141366670000000,
0.141766670000000,
0.141666670000000,
0.141466670000000,
0.141666670000000,
0.141966670000000,
0.142266670000000,
0.141866670000000,
0.141666670000000,
0.142066670000000,
0.142266670000000,
0.142266670000000,
0.142566670000000,
0.142666670000000,
0.142766670000000,
0.143166670000000,
0.143266670000000,
0.143266670000000,
0.143066670000000,
0.143366670000000,
0.143566670000000,
0.143666670000000,
0.143866670000000,
0.144066670000000,
0.144166670000000,
0.143866670000000,
0.144666670000000,
0.144666670000000,
0.144666670000000,
0.144666670000000,
0.144866670000000,
0.145066670000000,
0.145166670000000,
0.145266670000000,
0.145566670000000,
0.145666670000000,
0.146166670000000,
0.146266670000000,
0.145666670000000,
0.145866670000000,
0.146366670000000,
0.146366670000000,
0.146066670000000,
0.145966670000000,
0.145866670000000,
0.146066670000000,
0.146866670000000,
0.146966670000000,
0.146666670000000,
0.146666670000000,
0.146766670000000,
0.146966670000000,
0.146766670000000,
0.146666670000000,
0.146766670000000,
0.146666670000000,
0.147166670000000,
0.147166670000000,
0.147066670000000,
0.147166670000000,
0.146966670000000,
0.146866670000000,
0.147166670000000,
0.147166670000000,
0.147066670000000,
0.147266670000000,
0.147866670000000,
0.147666670000000,
0.147066670000000,
0.147566670000000,
0.147366670000000,
0.147766670000000,
0.147566670000000,
0.147466670000000,
0.147766670000000,
0.147966670000000,
0.147966670000000,
0.147666670000000,
0.147966670000000,
0.148366670000000,
0.148166670000000,
0.148166670000000,
0.148366670000000,
0.148866670000000,
0.148566670000000,
0.148666670000000,
0.148666670000000,
0.148766670000000,
0.149066670000000,
0.148866670000000,
0.148866670000000,
0.148966670000000,
0.148866670000000,
0.148866670000000,
0.149066670000000,
0.148966670000000,
0.149066670000000,
0.149366670000000,
0.149966670000000,
0.149966670000000,
0.149766670000000,
0.149966670000000,
0.149966670000000,
0.149866670000000,
0.149966670000000,
0.150166670000000,
0.150666670000000,
0.150266670000000,
0.150666670000000,
0.150866670000000,
0.151066670000000,
0.151166670000000,
0.150866670000000,
0.150866670000000,
0.151166670000000,
0.151666670000000,
0.152266670000000,
0.152066670000000,
0.151966670000000,
0.152266670000000,
0.152366670000000,
0.152666670000000,
0.152866670000000,
0.153266670000000,
0.153166670000000,
0.153166670000000,
0.153666670000000,
0.153266670000000,
0.153866670000000,
0.154266670000000,
0.154666670000000,
0.154566670000000,
0.154566670000000,
0.154766670000000,
0.154866670000000,
0.154266670000000,
0.154966670000000,
0.155266670000000,
0.155866670000000,
0.155766670000000,
0.156166670000000,
0.156266670000000,
0.156066670000000,
0.156266670000000,
0.156266670000000,
0.156266670000000,
0.156466670000000,
0.156566670000000,
0.156466670000000,
0.156166670000000,
0.156466670000000,
0.156966670000000,
0.156966670000000,
0.156966670000000,
0.157066670000000,
0.157266670000000,
0.157366670000000,
0.157366670000000,
0.157566670000000,
0.157366670000000,
0.157466670000000,
0.157766670000000,
0.157366670000000,
0.157166670000000,
0.157666670000000,
0.157366670000000,
0.157366670000000,
0.157266670000000,
0.157466670000000,
0.157166670000000,
0.156966670000000,
0.157066670000000,
0.156866670000000,
0.156766670000000,
0.156766670000000,
0.156966670000000,
0.156866670000000,
0.156766670000000,
0.156566670000000,
0.156466670000000,
0.156666670000000,
0.155966670000000,
0.155666670000000,
0.155966670000000,
0.155866670000000,
0.155566670000000,
0.155966670000000,
0.156866670000000,
0.156566670000000,
0.156466670000000,
0.156366670000000,
0.155766670000000,
0.155766670000000,
0.155666670000000,
0.155266670000000,
0.154866670000000,
0.155466670000000,
0.154866670000000,
0.154966670000000,
0.154966670000000,
0.154566670000000,
0.154566670000000,
0.153966670000000,
0.154066670000000,
0.154066670000000,
0.153966670000000,
0.154166670000000,
0.154066670000000,
0.153666670000000,
0.153666670000000,
0.153866670000000,
0.153566670000000,
0.153066670000000,
0.153066670000000,
0.153066670000000,
0.152666670000000,
0.152866670000000,
0.153066670000000,
0.153066670000000,
0.152766670000000,
0.152566670000000,
0.152466670000000,
0.152466670000000,
0.152666670000000,
0.152466670000000,
0.152266670000000,
0.152066670000000,
0.152366670000000,
0.152266670000000,
0.152166670000000,
0.151766670000000,
0.151666670000000,
0.151866670000000,
0.151966670000000,
0.151666670000000,
0.151566670000000,
0.151866670000000,
0.151366670000000,
0.151366670000000,
0.151466670000000,
0.151466670000000,
0.151466670000000,
0.151566670000000,
0.151466670000000,
0.151566670000000,
0.151266670000000,
0.151466670000000,
0.151166670000000,
0.151066670000000,
0.151566670000000,
0.151566670000000,
0.151766670000000,
0.152066670000000,
0.151866670000000,
0.151666670000000,
0.151766670000000,
0.151966670000000,
0.151766670000000,
0.151966670000000,
0.152366670000000,
0.152666670000000,
0.152566670000000,
0.152466670000000,
0.152566670000000,
0.152166670000000,
0.151766670000000,
0.152266670000000,
0.152266670000000,
0.151866670000000,
0.152066670000000,
0.152166670000000,
0.152266670000000,
0.152466670000000,
0.152166670000000,
0.152066670000000,
0.152066670000000,
0.152666670000000,
0.152666670000000,
0.152166670000000,
0.152066670000000,
0.151666670000000,
0.151566670000000,
0.150966670000000,
0.150366670000000,
0.150566670000000,
0.150366670000000,
0.150866670000000,
0.150766670000000,
0.150966670000000,
0.151266670000000,
0.150966670000000,
0.150966670000000,
0.150966670000000,
0.150766670000000,
0.151066670000000,
0.151266670000000,
0.151966670000000,
0.151966670000000,
0.151566670000000,
0.151666670000000,
0.151466670000000,
0.151966670000000,
0.152166670000000,
0.152066670000000,
0.152166670000000,
0.152266670000000,
0.152666670000000,
0.152266670000000,
0.151766670000000,
0.152166670000000,
0.152166670000000,
0.151866670000000,
0.152066670000000,
0.152166670000000,
0.152366670000000,
0.152666670000000,
0.153066670000000,
0.152766670000000,
0.152566670000000,
0.152466670000000,
0.152266670000000,
0.152366670000000,
0.152166670000000,
0.152466670000000,
0.152266670000000,
0.152066670000000,
0.153366670000000,
0.153166670000000,
0.153066670000000,
0.153166670000000,
0.152866670000000,
0.153066670000000,
0.153266670000000,
0.153166670000000,
0.153266670000000,
0.153266670000000,
0.153666670000000,
0.153566670000000,
0.154166670000000,
0.153366670000000,
0.152766670000000,
0.153166670000000,
0.153866670000000,
0.153566670000000,
0.153866670000000,
0.154166670000000,
0.154766670000000,
0.154666670000000,
0.154966670000000,
0.155166670000000,
0.155166670000000,
0.155366670000000,
0.155366670000000,
0.155466670000000,
0.155466670000000,
0.156166670000000,
0.156166670000000,
0.155866670000000,
0.155566670000000,
0.155466670000000,
0.155366670000000,
0.154966670000000,
0.154966670000000,
0.154866670000000,
0.154066670000000,
0.154366670000000,
0.155366670000000,
0.154466670000000,
0.153866670000000,
0.153866670000000,
0.153766670000000,
0.153566670000000,
0.153766670000000,
0.154266670000000,
0.154366670000000,
0.154366670000000,
0.154766670000000,
0.154966670000000,
0.154966670000000,
0.154666670000000,
0.155466670000000,
0.155666670000000,
0.156166670000000,
0.156466670000000,
0.156366670000000,
0.156166670000000,
0.156966670000000,
0.155966670000000,
0.154966670000000,
0.154466670000000,
0.152766670000000,
0.151866670000000,
0.151066670000000,
0.150066670000000,
0.148566670000000,
0.148066670000000,
0.147366670000000,
0.146166670000000,
0.145466670000000,
0.144266670000000,
0.143666670000000,
0.143766670000000,
0.143066670000000,
0.142366670000000,
0.141466670000000,
0.141666670000000,
0.141166670000000,
0.140166670000000,
0.139566670000000,
0.139266670000000,
0.138166670000000,
0.137666670000000,
0.136666670000000,
0.136166670000000,
0.134766670000000,
0.134066670000000,
0.132966670000000,
0.132166670000000,
0.131066670000000,
0.130366670000000,
0.129366670000000,
0.128366670000000,
0.127166670000000,
0.126666670000000,
0.124966670000000,
0.124066670000000,
0.123866670000000,
0.123266670000000,
0.121466670000000,
0.121966670000000,
0.121266670000000,
0.120666670000000,
0.120066670000000,
0.119766670000000,
0.118866670000000,
0.118466670000000,
0.118566670000000,
0.118966670000000,
0.118266670000000,
0.117466670000000,
0.118066670000000,
0.117666670000000,
0.117266670000000,
0.117966670000000,
0.118166670000000,
0.117666670000000,
0.117766670000000,
0.117766670000000,
0.117666670000000,
0.117466670000000,
0.117866670000000,
0.118366670000000,
0.118766670000000,
0.118366670000000,
0.118766670000000,
0.119166670000000,
0.119766670000000,
0.118866670000000,
0.118766670000000,
0.119166670000000,
0.119266670000000,
0.119366670000000,
0.119866670000000,
0.119966670000000,
0.120066670000000,
0.120566670000000,
0.120966670000000,
0.120666670000000,
0.120566670000000,
0.120566670000000,
0.120766670000000,
0.120766670000000,
0.121066670000000,
0.121066670000000,
0.120866670000000,
0.121166670000000,
0.121766670000000,
0.121466670000000,
0.121166670000000,
0.121466670000000,
0.121366670000000,
0.121566670000000,
0.121466670000000,
0.121466670000000,
0.121666670000000,
0.121766670000000,
0.122566670000000,
0.122566670000000,
0.122566670000000,
0.122966670000000,
0.123666670000000,
0.124266670000000,
0.124466670000000,
0.124866670000000,
0.125966670000000,
0.125966670000000,
0.127266670000000,
0.127666670000000,
0.128466670000000,
0.128366670000000,
0.128866670000000,
0.129066670000000,
0.129366670000000,
0.129366670000000,
0.129466670000000,
0.129766670000000,
0.130466670000000,
0.130466670000000,
0.130866670000000,
0.131066670000000,
0.131466670000000,
0.131866670000000,
0.132366670000000,
0.132266670000000,
0.132666670000000,
0.133166670000000,
0.133366670000000,
0.133166670000000,
0.133566670000000,
0.133866670000000,
0.133966670000000,
0.134166670000000,
0.134366670000000,
0.134266670000000,
0.134166670000000,
0.134266670000000,
0.135066670000000,
0.134766670000000,
0.134566670000000,
0.134466670000000,
0.134066670000000,
0.134066670000000,
0.133566670000000,
0.133266670000000,
0.133466670000000,
0.133266670000000,
0.133966670000000,
0.133666670000000,
0.133066670000000,
0.133466670000000,
0.133366670000000,
0.133266670000000,
0.133466670000000,
0.133466670000000,
0.133066670000000,
0.132866670000000,
0.132766670000000,
0.132366670000000,
0.132166670000000,
0.131966670000000,
0.131566670000000,
0.131866670000000,
0.131266670000000,
0.131066670000000,
0.130866670000000,
0.130766670000000,
0.130866670000000,
0.130466670000000,
0.129966670000000,
0.129866670000000,
0.129566670000000,
0.129666670000000,
0.129366670000000,
0.128866670000000,
0.128266670000000,
0.128366670000000,
0.128366670000000,
0.127766670000000,
0.127466670000000,
0.127166670000000,
0.126766670000000,
0.126666670000000,
0.126466670000000,
0.126466670000000,
0.126066670000000,
0.125866670000000,
0.125766670000000,
0.125366670000000,
0.125366670000000,
0.124766670000000,
0.124266670000000,
0.123866670000000,
0.123266670000000,
0.123566670000000,
0.123066670000000,
0.122766670000000,
0.122866670000000,
0.122666670000000,
0.122466670000000,
0.122366670000000,
0.122066670000000,
0.121866670000000,
0.121466670000000,
0.121566670000000,
0.121266670000000,
0.120766670000000,
0.121366670000000,
0.120966670000000,
0.120266670000000,
0.120266670000000,
0.120066670000000,
0.119766670000000,
0.120066670000000,
0.120266670000000,
0.119766670000000,
0.119366670000000,
0.119666670000000,
0.119366670000000,
0.119566670000000,
0.119266670000000,
0.118566670000000,
0.118466670000000,
0.119066670000000,
0.118766670000000,
0.118666670000000,
0.118666670000000,
0.119366670000000,
0.119166670000000,
0.119666670000000,
0.118866670000000,
0.118266670000000,
0.118666670000000,
0.119166670000000,
0.118866670000000,
0.118466670000000,
0.118566670000000,
0.119066670000000,
0.118166670000000,
0.119066670000000,
0.118866670000000,
0.118766670000000,
0.118666670000000,
0.118766670000000,
0.119466670000000,
0.118666670000000,
0.118766670000000,
0.119266670000000,
0.118566670000000,
0.118866670000000,
0.119166670000000,
0.118766670000000,
0.118866670000000,
0.118666670000000,
0.119366670000000,
0.119266670000000,
0.119166670000000,
0.119866670000000,
0.120166670000000,
0.119566670000000,
0.120166670000000,
0.120466670000000,
0.119966670000000,
0.120166670000000,
0.120166670000000,
0.120066670000000,
0.119166670000000,
0.120666670000000,
0.120466670000000,
0.120166670000000,
0.120266670000000,
0.119966670000000,
0.119866670000000,
0.120866670000000,
0.120566670000000,
0.120866670000000,
0.121366670000000,
0.121566670000000,
0.121466670000000,
0.121566670000000,
0.122166670000000,
0.123066670000000,
0.124166670000000,
0.123766670000000,
0.122766670000000,
0.123466670000000,
0.124066670000000,
0.125466670000000,
0.124666670000000,
0.124366670000000,
0.124266670000000,
0.124066670000000,
0.124366670000000,
0.124866670000000,
0.124266670000000,
0.124966670000000,
0.125366670000000,
0.125466670000000,
0.124766670000000,
0.124166670000000,
0.124366670000000,
0.124566670000000,
0.123966670000000,
0.124366670000000,
0.124166670000000,
0.124766670000000,
0.124866670000000,
0.125766670000000,
0.126066670000000,
0.125166670000000,
0.126466670000000,
0.126466670000000,
0.126266670000000,
0.127066670000000,
0.127766670000000,
0.127366670000000,
0.126366670000000,
0.128266670000000,
0.127966670000000,
0.127366670000000,
0.127666670000000,
0.128366670000000,
0.127566670000000,
0.126866670000000,
0.127266670000000,
0.128766670000000,
0.127966670000000,
0.129466670000000,
0.130066670000000,
0.129866670000000,
0.128666670000000,
0.128166670000000,
0.129366670000000,
0.128266670000000,
0.127366670000000,
0.129166670000000,
0.128166670000000,
0.130766670000000,
0.130766670000000,
0.130566670000000,
0.129566670000000,
0.128366670000000,
0.128366670000000,
0.128766670000000,
0.127366670000000,
0.127966670000000,
0.128066670000000,
0.129066670000000,
0.127766670000000,
0.127266670000000,
0.127966670000000,
0.129366670000000,
0.129166670000000,
0.128266670000000,
0.127666670000000,
0.125066670000000,
0.124566670000000,
0.126166670000000,
0.124966670000000,
0.125866670000000,
0.127566670000000,
0.125566670000000,
0.125466670000000,
0.122366670000000,
0.123766670000000,
0.121066670000000,
0.119666670000000,
0.122366670000000,
0.120966670000000,
0.119566670000000,
0.120766670000000,
0.119966670000000,
0.119666670000000,
0.118066670000000,
0.119066670000000,
0.118666670000000,
0.116166670000000,
0.117266670000000,
0.119666670000000,
0.118566670000000,
0.115766670000000,
0.115266670000000,
0.116666670000000,
0.116466670000000,
0.116066670000000,
0.112066670000000,
0.111066670000000,
0.112866670000000,
0.113366670000000,
0.114266670000000,
0.112766670000000,
0.112166670000000,
0.113766670000000,
0.110966670000000,
0.111066670000000,
0.111466670000000,
0.112766670000000,
0.112866670000000,
0.111966670000000,
0.110666670000000,
0.111066670000000,
0.113266670000000,
0.112366670000000,
0.110966670000000,
0.110166670000000,
0.110566670000000,
0.111666670000000,
0.113066670000000,
0.111166670000000,
0.112366670000000,
0.114466670000000,
0.112266670000000,
0.111066670000000,
0.111966670000000,
0.111466670000000,
0.110366670000000,
0.109466670000000,
0.114066670000000,
0.113466670000000,
0.113366670000000,
0.114566670000000,
0.113966670000000,
0.115766670000000,
0.113366670000000,
0.113366670000000,
0.111766670000000,
0.107366670000000,
0.111066670000000,
0.112666670000000,
0.110066670000000,
0.112066670000000,
0.113466670000000,
0.114266670000000,
0.113066670000000,
0.114066670000000,
0.107566670000000,
0.108066670000000,
0.116366670000000,
0.116666670000000,
0.115266670000000,
0.112266670000000,
0.114466670000000,
0.114066670000000,
0.113166670000000,
0.111466670000000,
0.109266670000000,
0.109466670000000,
0.111466670000000,
0.110066670000000,
0.111266670000000,
0.111166670000000,
0.111166670000000,
0.109866670000000,
0.110066670000000,
0.109966670000000,
0.106266670000000,
0.107566670000000,
0.111766670000000,
0.112066670000000,
0.111866670000000,
0.110366670000000,
0.107466670000000,
0.107366670000000,
0.111966670000000,
0.108066670000000,
0.108666670000000,
0.109066670000000,
0.111466670000000,
0.107166670000000,
0.104366670000000,
0.107766670000000,
0.110766670000000,
0.110666670000000,
0.110366670000000,
0.110566670000000,
0.111266670000000,
0.111266670000000,
0.113866670000000,
0.111566670000000,
0.109466670000000,
0.108666670000000,
0.110466670000000,
0.109866670000000,
0.105266670000000,
0.109966670000000,
0.108666670000000,
0.107466670000000,
0.112766670000000,
0.112366670000000,
0.111966670000000,
0.107366670000000,
0.110266670000000,
0.110666670000000,
0.109566670000000,
0.110466670000000,
0.110866670000000,
0.111566670000000,
0.109166670000000,
0.108766670000000,
0.104266670000000,
0.106766670000000,
0.107866670000000,
0.107566670000000,
0.109466670000000,
0.109366670000000,
0.106666670000000,
0.107566670000000,
0.116166670000000,
0.114266670000000,
0.114466670000000,
0.112966670000000,
0.109466670000000,
0.109566670000000,
0.107366670000000,
0.105566670000000,
0.109866670000000,
0.115766670000000,
0.117766670000000,
0.113166670000000,
0.111566670000000,
0.113766670000000,
0.112966670000000,
0.111766670000000,
0.114266670000000,
0.113666670000000,
0.108866670000000,
0.108766670000000,
0.113166670000000,
0.112966670000000,
0.111966670000000,
0.111366670000000,
0.111566670000000,
0.111466670000000,
0.110066670000000,
0.111066670000000,
0.113266670000000,
0.107466670000000,
0.114166670000000,
0.113266670000000,
0.111666670000000,
0.108766670000000,
0.105666670000000,
0.106766670000000,
0.106666670000000,
0.111266670000000,
0.109266670000000,
0.107466670000000,
0.112366670000000,
0.113366670000000,
0.110066670000000,
0.106366670000000,
0.109166670000000,
0.111166670000000,
0.105466670000000,
0.102966670000000,
0.105966670000000,
0.106266670000000,
0.112866670000000,
0.111366670000000,
0.107766670000000,
0.106366670000000,
0.104766670000000,
0.108966670000000,
0.109366670000000,
0.107966670000000,
0.106066670000000,
0.106666670000000,
0.105966670000000,
0.103066670000000,
0.102766670000000,
0.103266670000000,
0.099166667000000,
0.105166670000000,
0.105066670000000,
0.101866670000000,
0.104666670000000,
0.106366670000000,
0.105966670000000,
0.100866670000000,
0.101566670000000,
0.107166670000000,
0.105966670000000,
0.104966670000000,
0.105466670000000,
0.112866670000000,
0.106266670000000,
0.104466670000000,
0.106666670000000,
0.103566670000000,
0.103066670000000,
0.097566667000000,
0.108366670000000,
0.103966670000000,
0.102266670000000,
0.100266670000000,
0.102866670000000,
0.094066667000000,
0.104766670000000,
0.104166670000000,
0.091766667000000,
0.090566667000000,
0.094666667000000,
0.098866667000000,
0.095666667000000,
0.096666667000000,
0.094366667000000,
0.091066667000000,
0.097966667000000,
0.095066667000000,
0.099266667000000,
0.091966667000000,
0.094966667000000,
0.099266667000000,
0.094466667000000,
0.088366667000000,
0.092566667000000,
0.096466667000000,
0.094366667000000,
0.092866667000000,
0.102266670000000,
0.095266667000000,
0.089366667000000,
0.098566667000000,
0.099466667000000,
0.095866667000000,
0.085666667000000,
0.091066667000000,
0.103866670000000,
0.097166667000000,
0.102766670000000,
0.101766670000000,
0.099366667000000,
0.094266667000000,
0.091166667000000,
0.091466667000000,
0.084366667000000,
0.085066667000000,
0.100666670000000,
0.101466670000000,
0.098766667000000,
0.097666667000000,
0.097466667000000,
0.091866667000000,
0.084666667000000,
0.094666667000000,
0.096566667000000,
0.087066667000000,
0.107666670000000,
0.099666667000000,
0.093566667000000,
0.093566667000000,
0.094666667000000,
0.093066667000000,
0.086266667000000,
0.085966667000000,
0.092266667000000,
0.097966667000000,
0.099166667000000,
0.097866667000000,
0.088466667000000,
0.092166667000000,
0.096066667000000,
0.097566667000000,
0.107766670000000,
0.098166667000000,
0.092066667000000,
0.097566667000000,
0.107966670000000,
0.093366667000000,
0.102966670000000,
0.106766670000000,
0.100166670000000,
0.104166670000000,
0.099166667000000,
0.098266667000000,
0.095166667000000,
0.104766670000000,
0.098166667000000,
0.101566670000000,
0.097566667000000,
0.099966667000000,
0.085066667000000,
0.084866667000000,
0.094266667000000,
0.087966667000000,
0.094566667000000,
0.104766670000000,
0.104866670000000,
0.106666670000000,
0.104166670000000,
0.115366670000000,
0.110066670000000,
0.103766670000000,
0.104066670000000,
0.100766670000000,
0.112366670000000,
0.106266670000000,
0.116066670000000,
0.122966670000000,
0.106366670000000,
0.104566670000000,
0.114966670000000,
0.122566670000000,
0.115766670000000,
0.122266670000000,
0.112866670000000,
0.106066670000000,
0.128666670000000,
0.128066670000000,
0.120866670000000,
0.101866670000000,
0.108366670000000,
0.114366670000000,
0.114466670000000,
0.113466670000000,
0.110566670000000,
0.096666667000000,
0.118666670000000,
0.115566670000000,
0.107166670000000,
0.111266670000000,
0.117166670000000,
0.120366670000000,
0.123066670000000,
0.102666670000000,
0.098766667000000,
0.117266670000000,
0.145466670000000,
0.123366670000000,
0.123666670000000,
0.134666670000000,
0.129566670000000,
0.135366670000000,
0.120466670000000,
0.108766670000000,
0.112166670000000,
0.100266670000000,
0.128266670000000,
0.129966670000000,
0.118766670000000,
0.133766670000000,
0.129966670000000,
0.125766670000000,
0.127166670000000,
0.119066670000000,
0.116466670000000,
0.115366670000000,
0.124166670000000,
0.116166670000000,
0.109866670000000,
0.110566670000000,
0.116766670000000,
0.110366670000000,
0.111666670000000,
0.113966670000000,
0.107866670000000,
0.107066670000000,
0.118166670000000,
0.110466670000000,
0.109166670000000,
0.105866670000000,
0.095566667000000,
0.095566667000000,
0.097366667000000,
0.096366667000000,
0.092966667000000,
0.088466667000000,
0.092366667000000,
0.093266667000000,
0.095566667000000,
0.096666667000000,
0.102666670000000,
0.100966670000000,
0.092066667000000,
0.090066667000000,
0.093066667000000,
0.089966667000000,
0.095766667000000,
0.097966667000000,
0.099966667000000,
0.094166667000000,
0.092366667000000,
0.097866667000000,
0.094966667000000,
0.093866667000000,
0.094066667000000,
0.097466667000000,
0.106466670000000,
0.099966667000000,
0.102966670000000,
0.098166667000000,
0.103566670000000,
0.106166670000000,
0.103366670000000,
0.103466670000000,
0.092766667000000,
0.095466667000000,
0.114066670000000,
0.099866667000000,
0.094766667000000,
0.105166670000000,
0.092566667000000,
0.093666667000000,
0.080566667000000,
0.081866667000000,
0.080866667000000,
0.075166667000000,
0.101966670000000,
0.093266667000000,
0.074666667000000,
0.078366667000000,
0.085066667000000,
0.089066667000000,
0.087566667000000,
0.091166667000000,
0.098666667000000,
0.092466667000000,
0.139666670000000,
0.083266667000000,
0.064766667000000,
0.087166667000000,
0.156066670000000,
0.181266670000000,
0.122966670000000,
0.173566670000000,
0.207666670000000,
0.213466670000000,
0.178966670000000,
0.277466670000000,
])),
DatasetLoader_AgfaIT872Set(): ('agfa', 289, '5',
np.array([
8.6300000000000000,
12.090000000000000,
14.140000000000000,
14.020000000000000,
14.160000000000000,
14.190000000000000,
14.250000000000000,
14.530000000000000,
14.810000000000000,
14.880000000000000,
14.480000000000000,
13.610000000000000,
12.600000000000000,
11.720000000000000,
11.600000000000000,
12.690000000000000,
14.160000000000000,
16.350000000000000,
18.900000000000000,
20.870000000000000,
21.630000000000000,
21.230000000000000,
20.360000000000000,
19.520000000000000,
18.790000000000000,
18.400000000000000,
18.530000000000000,
19.180000000000000,
20.460000000000000,
22.580000000000000,
25.470000000000000,
])),
}
for dataset_loader, values in dataset_loaders.items():
self.assertEqual(len(dataset_loader.load()[values[0]]), values[1])
np.testing.assert_array_almost_equal(
dataset_loader.content[values[0]][values[2]].values,
values[3],
decimal=7)
if __name__ == '__main__':
unittest.main()
```
#### File: colour_datasets/utilities/common.py
```python
import functools
import gzip
import hashlib
import json
import os
import setuptools.archive_util
import shutil
import sys
import urllib.error
import urllib.request
from tqdm import tqdm
from cachetools import cached, TTLCache
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2019-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'suppress_stdout', 'TqdmUpTo', 'hash_md5', 'url_download', 'json_open',
'unpack_gzipfile'
]
class suppress_stdout:
"""
A context manager and decorator temporarily suppressing standard output.
"""
def __enter__(self):
"""
Called upon entering the context manager and decorator.
"""
self._stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
return self
def __exit__(self, *args):
"""
Called upon exiting the context manager and decorator.
"""
sys.stdout.close()
sys.stdout = self._stdout
def __call__(self, function):
"""
Calls the wrapped definition.
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
with self:
return function(*args, **kwargs)
return wrapper
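# Editorial sketch (added comment, not part of the original module):
# suppress_stdout works both as a context manager and as a decorator, e.g.
#     with suppress_stdout():
#         print('hidden')
# or
#     @suppress_stdout()
#     def quiet():
#         print('also hidden')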
class TqdmUpTo(tqdm):
"""
:class:`tqdm` sub-class used to report the progress of an action.
"""
def update_to(self, chunks_count=1, chunk_size=1, total_size=None):
"""
Reports the progress of an action.
Parameters
----------
chunks_count : int, optional
Number of blocks transferred.
chunk_size : int, optional
Size of each block (in tqdm units).
total_size : int, optional
Total size (in tqdm units).
"""
if total_size is not None:
self.total = total_size
self.update(chunks_count * chunk_size - self.n)
def hash_md5(filename, chunk_size=2 ** 16):
"""
Computes the *Message Digest 5 (MD5)* hash of given file.
Parameters
----------
filename : unicode
File to compute the *MD5* hash of.
chunk_size : int, optional
Chunk size to read from the file.
Returns
-------
unicode
*MD5* hash of given file.
"""
md5 = hashlib.md5()
with open(filename, 'rb') as file_object:
while True:
chunk = file_object.read(chunk_size)
if not chunk:
break
md5.update(chunk)
return md5.hexdigest()
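# Editorial note (added comment): the lower-case hex digest returned above is
# what url_download below compares against its optional ``md5`` argument to
# validate a completed download.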
def url_download(url, filename, md5=None, retries=3):
"""
Downloads given url and saves its content at given file.
Parameters
----------
url : unicode
Url to download.
filename : unicode
File to save the url content at.
md5 : unicode, optional
*Message Digest 5 (MD5)* hash of the content at given url. If provided
the saved content at given file will be hashed and compared to ``md5``.
retries : int, optional
Number of retries in case where a networking error occurs or the *MD5*
hash is not matching.
Examples
--------
>>> import os
>>> url_download(
... 'https://github.com/colour-science/colour-datasets', os.devnull)
"""
attempt = 0
while attempt != retries:
try:
with TqdmUpTo(
unit='B',
unit_scale=True,
miniters=1,
desc='Downloading "{0}" file'.format(
url.split('/')[-1])) as progress:
urllib.request.urlretrieve(
url,
filename=filename,
reporthook=progress.update_to,
data=None)
if md5 is not None:
if md5.lower() != hash_md5(filename):
raise ValueError(
'"MD5" hash of "{0}" file '
'does not match the expected hash!'.format(filename))
attempt = retries
except (urllib.error.URLError, IOError, ValueError) as error:
attempt += 1
print('An error occurred while downloading "{0}" file '
'during attempt {1}, retrying...'.format(filename, attempt))
if attempt == retries:
raise error
@cached(cache=TTLCache(maxsize=256, ttl=300))
def json_open(url, retries=3):
"""
    Opens given url and returns its content as *JSON*.
Parameters
----------
url : unicode
Url to open.
retries : int, optional
Number of retries in case where a networking error occurs.
Notes
-----
- The definition caches the request *JSON* output for 5 minutes.
Examples
--------
# Doctests skip for Python 2.x compatibility.
>>> json_open('https://zenodo.org/api/records/3245883')[:38]
... # doctest: +SKIP
'{"conceptdoi":"10.5281/zenodo.3245882"'
"""
attempt = 0
while attempt != retries:
try:
            return json.loads(urllib.request.urlopen(url).read())
except (urllib.error.URLError, IOError, ValueError) as error:
attempt += 1
print('An error occurred while opening "{0}" url '
'during attempt {1}, retrying...'.format(url, attempt))
if attempt == retries:
raise error
def unpack_gzipfile(filename, extraction_directory, *args):
"""
Unpacks given *GZIP* file to given extraction directory.
Parameters
----------
filename : unicode
*GZIP* file to extract.
extraction_directory : unicode
Directory where to extract the *GZIP* file.
Other Parameters
----------------
\\*args : list, optional
Arguments.
Returns
-------
bool
Definition success.
Notes
-----
- This definition is used as an extra driver for
      :func:`setuptools.archive_util.unpack_archive` definition.
"""
extraction_path = os.path.join(
extraction_directory,
os.path.splitext(os.path.basename(filename))[0])
if not os.path.exists(extraction_directory):
os.makedirs(extraction_directory)
try:
with gzip.open(filename) as gzip_file, open(extraction_path,
'wb') as output_file:
shutil.copyfileobj(gzip_file, output_file)
except Exception as e:
print(e)
raise setuptools.archive_util.UnrecognizedFormat(
'{0} is not a "GZIP" file!'.format(filename))
return True
setuptools.archive_util.extraction_drivers = (
setuptools.archive_util.unpack_directory,
setuptools.archive_util.unpack_zipfile,
setuptools.archive_util.unpack_tarfile,
unpack_gzipfile,
)
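# Editorial note (added comment): assigning to ``extraction_drivers`` extends
# the driver list used by ``setuptools.archive_util.unpack_archive`` so that
# plain *GZIP* files are handled by ``unpack_gzipfile`` once the built-in
# directory, zip and tar drivers have declined the file.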
```
|
{
"source": "jgolebiowski/graphAttack",
"score": 4
}
|
#### File: graphAttack/graphAttack/coreNode.py
```python
import numpy as np
class Node(object):
"""Node - a basic building block of the graph
Attributes
----------
endNode : bool
Flag stating whether this is the final node of the graph
name : str
name of the node
outputs : list
list of nodes that operate on output of this node
referenceNumber : int
reference number of this node
result : np.array
output of this node
shape : tuple
shape
"""
shape = None
name = "Node"
referenceNumber = None
def __init__(self):
self.outputs = []
self.result = None
self.endNode = True
def __repr__(self):
"""Represent as a string - usefull for printing
Returns
-------
str
description of this node
"""
output = "<%s>" % self.name
return output
def __add__(self, other):
"""Add operaition through operator overloading
Parameters
----------
other : ga.Node
            Second Node to perform the operation on
Returns
-------
ga.operation
AddOperation
Raises
------
ValueError
This can only be performed on two Node instances
"""
if not isinstance(other, Node):
raise ValueError("This can only be performed on two Node instances")
return AddOperation(self, other)
def __matmul__(self, other):
"""matmul operaition through operator overloading
Parameters
----------
other : ga.Node
            Second Node to perform the operation on
Returns
-------
ga.operation
MatMatmulOperation
Raises
------
ValueError
This can only be performed on two Node instances
"""
if not isinstance(other, Node):
raise ValueError("This can only be performed on two Node instances")
return MatMatmulOperation(self, other)
def __mul__(self, other):
"""multiply operaition through operator overloading
Parameters
----------
other : ga.Node
            Second Node to perform the operation on
Returns
-------
ga.operation
MultiplyOperation
Raises
------
ValueError
This can only be performed on two Node instances
"""
if not isinstance(other, Node):
raise ValueError("This can only be performed on two Node instances")
return MultiplyOperation(self, other)
def __truediv__(self, other):
"""Divide operaition through operator overloading
Parameters
----------
other : ga.Node
            Second Node to perform the operation on
Returns
-------
ga.operation
DivideOperation
Raises
------
ValueError
This can only be performed on two Node instances
"""
if not isinstance(other, Node):
raise ValueError("This can only be performed on two Node instances")
return DivideOperation(self, other)
def prependName(self, string):
"""Prepend name with a string
Parameters
----------
string : str
prefix
"""
self.name = str(string) + self.name
def assignReferenceNumber(self, number):
"""Assign a reference number
Parameters
----------
number : int
reference number
"""
self.referenceNumber = number
self.prependName("op" + str(number) + "-")
def setShape(self):
"""Set the shape of the output of this node"""
raise NotImplementedError("This is an abstract class, this routine should be implemented in children")
def addOutput(self, output):
"""Attach the node that is the output of this Node
Parameters
----------
output : ga.Node
attach an output node to this node
"""
self.outputs.append(output)
self.endNode = False
def resetOutputs(self):
"""Reset outputs of this node"""
self.outputs = []
self.endNode = True
def reset(self):
"""Reset the values and gradients held by this operation"""
raise NotImplemented("This is an abstract class")
def getValue(self):
"""Return a vaue of this operation"""
if (self.result is None):
raise NotImplemented("The result is not set at initialization, maybe use an operation")
return self.result
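# Editorial note (added comment): the operator overloads above build graph
# operations instead of computing values immediately, e.g. ``node_a + node_b``
# returns ``AddOperation(node_a, node_b)`` and ``node_a @ node_b`` returns
# ``MatMatmulOperation(node_a, node_b)`` (both imported at the bottom of this
# file to avoid circular imports).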
def broadcast_shape(shp1, shp2):
"""Broadcast the shape of those arrays
Parameters
----------
shp1 : tuple
shape of array 1
shp2 : tuple
shape of array 2
Returns
-------
tuple
shape resulting from broadcasting two arrays using numpy rules
Raises
------
ValueError
Arrays cannot be broadcasted
"""
try:
return np.broadcast(np.empty(shp1), np.empty(shp2)).shape
except ValueError:
raise ValueError("Arrays cannot be broadcasted - %s and %s " % (str(shp1), str(shp2)))
def reduce_shape(inputArr, targetArr):
"""Reduce the dimensions by summing the input array over necesary axis
to obtain the targetArray shape.
Parameters
----------
inputArr : np.array
array 1
targetArr : np.array
array 2
Returns
-------
np.array
Resulting array (sum over the necessary axis)
Raises
------
ValueError
The two arrays cannot be reduced properly
"""
if (inputArr.shape == targetArr.shape):
return inputArr
if (inputArr.ndim == targetArr.ndim):
axReduce = []
for dimIndex in range(inputArr.ndim):
if targetArr.shape[dimIndex] == 1:
axReduce.append(dimIndex)
axReduce = tuple(axReduce)
return np.sum(inputArr, axis=axReduce, keepdims=True)
try:
if (inputArr.shape[1] == targetArr.shape[0]):
return np.sum(inputArr, axis=0)
except (IndexError):
pass
except (TypeError):
pass
try:
if (inputArr.shape[0] == targetArr.shape[1]):
return np.sum(inputArr, axis=1)
except (IndexError):
pass
except (TypeError):
pass
raise ValueError("The two arrays cannot be reduced properly")
from .operations.twoInputOperations import AddOperation, MatMatmulOperation, MultiplyOperation, DivideOperation
```
#### File: graphAttack/graphAttack/coreOperation.py
```python
from .coreNode import Node
from .coreNode import broadcast_shape
import numpy as np
class Operation(Node):
"""Class for storing all possible operations
Attributes
----------
name : str
Name of the operation
result : np.array
Output of the operation
testing : bool
        Flag specifying if the operation is in testing (making predictions: True)
or training (optimizing parameters: False) mode
"""
name = "Operation"
def __init__(self):
super().__init__()
self.result = None
self.testing = False
def __repr__(self):
"""Represent as a string - usefull for printing"""
output = "<%s>" % self.name
return output
def getValue(self, *args, **kwargs):
"""Obtain value of the oprtation"""
raise NotImplementedError("This is not yet implemented")
def perform(self, *args, **kwargs):
"""Return the value of the operation given inputs"""
raise NotImplementedError("This is an abstract class, this routine should be implemented in children")
def reset(self, *args, **kwargs):
"""Reset the values and gradients held by this operation"""
raise NotImplementedError("This is an abstract class, this routine should be implemented in children")
def getGradient(self, *args, **kwargs):
"""Return the derevative of this operation with respect to
each input"""
raise NotImplementedError("This is an abstract class, this routine should be implemented in children")
class TwoInputOperation(Operation):
"""Operation accepting two input and returning one output
Attributes
----------
name : str
Name of the operation
result : np.array
Output of the operation
testing : bool
        Flag specifying if the operation is in testing (making predictions: True)
or training (optimizing parameters: False) mode
gradA : np.array
gradient with respect to inputA
gradB : np.array
gradient with respect to inputB
inputA : ga.Operation
Operation feeding data A into this operation
inputB : ga.Operation
Operation feeding data B into this operation
shape : tuple
shape of the output
"""
name = "TwoInputOperation"
def __init__(self, inputA=None, inputB=None):
super().__init__()
self.inputA = inputA
self.inputB = inputB
self.gradA = None
self.gradB = None
inputA.addOutput(self)
inputB.addOutput(self)
self.setShape()
def __repr__(self):
"""Represent as a string - usefull for printing"""
output = "<%s with inputs: (%s, %s) and outputs: (" % (self.name, self.inputA.name, self.inputB.name)
for op in self.outputs:
output += "%s, " % op.name
output += ")>"
return output
def setShape(self):
"""Set the output shape"""
self.shape = broadcast_shape(np.shape(self.inputA), np.shape(self.inputB))
def reset(self):
"""Reset the values and gradients held by this operation"""
self.result = None
self.gradA = None
self.gradB = None
self.setShape()
def getValue(self):
"""Return a vaue of this operation
Returns
-------
np.array
Output value
"""
if (self.result is None):
self.result = self.perform(self.inputA.getValue(), self.inputB.getValue())
return self.result
def getGradient(self, input):
"""Obtain gradient with respect ot a chosen input
Parameters
----------
input : ga.Operation
            Operation with respect to which the gradient is calculated
Returns
-------
np.array
Gradient value
Raises
------
ValueError
Must select either gradient from inputA or inputB
"""
if (input is self.inputA):
if (self.gradA is None):
self.gradA = self.performGradient(input=0)
return self.gradA
elif (input is self.inputB):
if (self.gradB is None):
self.gradB = self.performGradient(input=1)
return self.gradB
else:
raise ValueError("Must select either gradient from inputA or inputB")
class SingleInputOperation(Operation):
"""Operation accepting one input and returning one output
Attributes
----------
name : str
Name of the operation
result : np.array
Output of the operation
testing : bool
        Flag specifying if the operation is in testing (making predictions: True)
or training (optimizing parameters: False) mode
gradA : np.array
gradient with respect to inputA
inputA : ga.Operation
Operation feeding data A into this operation
shape : tuple
shape of the output
"""
name = "OneInputOperation"
def __init__(self, inputA=None):
super().__init__()
self.inputA = inputA
self.gradA = None
inputA.addOutput(self)
self.setShape()
def __repr__(self):
"""Represent as a string - usefull for printing"""
output = "<%s with input: (%s) and outputs: (" % (self.name, self.inputA.name)
for op in self.outputs:
output += "%s, " % op.name
output += ")>"
return output
def setShape(self):
"""Set the output shape"""
self.shape = np.shape(self.inputA)
def reset(self):
"""Reset the values and gradients held by this operation"""
self.result = None
self.gradA = None
self.setShape()
def getValue(self):
"""Return a vaue of this operation
Returns
-------
np.array
Output value
"""
if (self.result is None):
self.result = self.perform(self.inputA.getValue())
return self.result
def getGradient(self, input=None):
"""Obtain gradient with respect ot a chosen input
parameter input added for consistancy
Parameters
----------
input : ga.Operation
            Operation with respect to which the gradient is calculated
            Added for consistency as these operations only have one input
Returns
-------
np.array
Gradient value
Raises
------
ValueError
            Must select gradient from inputA
"""
if (input is self.inputA):
if (self.gradA is None):
self.gradA = self.performGradient()
return self.gradA
else:
raise ValueError("Must select gradient from inputA")
class MultipleInputOperation(Operation):
"""Operation accepting multiple inputs and returning one output
Attributes
----------
name : str
Name of the operation
result : np.array
Output of the operation
testing : bool
        Flag specifying if the operation is in testing (making predictions: True)
or training (optimizing parameters: False) mode
grads : list(np.array)
        gradients with respect to inputs; grads[i] is the gradient with respect to input i
inputs : list(ga.Operation)
Operations feeding data into this operation
shape : tuple
shape of the output
"""
name = "MultipleInputOperation"
def __init__(self, *args):
super().__init__()
self.result = None
self.inputs = list(args)
self.grads = [None for item in self.inputs]
for item in self.inputs:
item.addOutput(self)
self.setShape()
def __repr__(self):
"""Represent as a string - usefull for printing"""
output = "<%s with inputs: (" % (self.name)
for op in self.inputs:
output += "%s" % op.name
output += ") and outputs: ("
for op in self.outputs:
output += "%s, " % op.name
output += ")>"
return output
def setShape(self):
"""Set the output shape"""
raise NotImplementedError("This should be implemented individually")
def reset(self):
"""Reset the values and gradients held by this operation"""
self.result = None
self.grads = [None for item in self.inputs]
self.setShape()
def getValue(self):
"""Return a vaue of this operation
Returns
-------
np.array
Output value
"""
if (self.result is None):
values = [op.getValue() for op in self.inputs]
self.result = self.perform(*values)
return self.result
def getGradient(self, input):
"""Obtain gradient with respect ot a chosen input
Parameters
----------
input : ga.Operation
Operation with respect to which the graient is calculated
Returns
-------
np.array
Gradient value
Raises
------
ValueError
            Must select a gradient of one of the inputs
"""
for index in range(len(self.inputs)):
if (input is self.inputs[index]):
if (self.grads[index] is None):
self.grads[index] = self.performGradient(input=index)
return self.grads[index]
else:
raise ValueError("Must select a gradient of one of the inputs")
class CostOperation(SingleInputOperation):
"""Operation accepting one input and one label, returning the cost
Labels are to be provided as a standard numpy array, not an operation.
Attributes
----------
name : str
Name of the operation
result : np.array
Output of the operation
testing : bool
        Flag specifying if the operation is in testing (making predictions: True)
or training (optimizing parameters: False) mode
gradA : np.array
gradient with respect to inputA
inputA : ga.Operation
Operation feeding data A into this operation
labels : np.arrays
Data labels to compare with hypothesis
nExamples : int
Number of examples in current batch
shape : tuple
shape of the output
"""
name = "CostOperation"
def __init__(self, inputA, labels):
self.labels = labels
super().__init__(inputA)
self.setShape()
def setShape(self):
"""Set the output shape"""
self.shape = (1, )
if (np.ndim(self.labels) >= 2):
self.nExamples = self.labels.shape[0]
else:
self.nExamples = 1
def reset(self):
"""Reset the values and gradients held by this operation"""
self.result = None
self.gradA = None
self.setShape()
def assignLabels(self, labels):
"""Assign a new set of labels"""
if labels.shape != self.inputA.shape:
message = "Shapes of labels and input must match: " +\
str(labels.shape) + " != " + str(self.inputA.shape)
raise ValueError(message)
self.labels = labels
self.setShape()
def getValue(self):
"""Return a vaue of this operation
Returns
-------
float
Evaluated cost
"""
if (self.result is None):
self.result = self.perform(self.inputA.getValue(), self.labels)
return self.result
def makePredictions(self, choice=False):
"""Do not evaluate the cost but instead make predictions besed on input
Returns
-------
np.array
Predictions using the current hypothesis: values fed to cost evaluation operation
Parameters
----------
choice : bool
            If true, sample from the probability density based on the model's results
instead of using the maximum argument
"""
shape = self.inputA.getValue().shape
predictions = np.zeros(shape)
if np.size(shape) == 1:
if choice:
indexMax = np.random.choice(self.inputA.getValue().size, p=self.inputA.getValue())
else:
indexMax = np.argmax(self.inputA.getValue())
predictions[indexMax] = 1
else:
for i, example in enumerate(self.inputA.getValue()):
if choice:
argmax = np.random.choice(example.size, p=example)
else:
argmax = example.argmax()
indexMax = np.unravel_index(argmax, example.shape)
predictions[i, indexMax] = 1
return predictions
# class MultipleInputOperation(Operation):
# """Operation accepting two input and returning one output
# Attributes
# ----------
# name : str
# Name of the operation
# result : np.array
# Output of the operation
# testing : bool
# Flag specifying if the operation is in testing (making prefictions: True)
# or training (optimizing parameters: False) mode
# gradA : np.array
# gradient with respect to inputA
# gradB : np.array
# gradient with respect to inputB
# inputA : ga.Operation
# Operation feeding data A into this operation
# inputB : ga.Operation
# Operation feeding data B into this operation
# shape : tuple
# shape of the output
# """
# name = "MultipleInputOperation"
# def __init__(self):
# super().__init__()
# self.inputs = []
# self.grads = []
# def __repr__(self):
# """Represent as a string - usefull for printing"""
# output = "<%s with inputs: ( " % (self.name)
# for op in self.inputs:
# output += "%s, " % op.name
# output += ") and outputs: ("
# for op in self.outputs:
# output += "%s, " % op.name
# output += ")>"
# return output
# def setShape(self):
# """Set the output shape"""
# self.shape = broadcast_shape(np.shape(self.inputA), np.shape(self.inputB))
# def reset(self):
# """Reset the values and gradients held by this operation"""
# self.result = None
# self.gradA = None
# self.gradB = None
# self.setShape()
# def getValue(self):
# """Return a vaue of this operation
# Returns
# -------
# np.array
# Output value
# """
# if (self.result is None):
# self.result = self.perform(self.inputA.getValue(), self.inputB.getValue())
# return self.result
# def getGradient(self, input):
# """Obtain gradient with respect ot a chosen input
# Parameters
# ----------
# input : ga.Operation
# Operation with respect to which the graient is calculated
# Returns
# -------
# np.array
# Gradient value
# Raises
# ------
# ValueError
# Must select either gradient from inputA or inputB
# """
# if (input is self.inputA):
# if (self.gradA is None):
# self.gradA = self.performGradient(input=0)
# return self.gradA
# elif (input is self.inputB):
# if (self.gradB is None):
# self.gradB = self.performGradient(input=1)
# return self.gradB
# else:
# raise ValueError("Must select either gradient from inputA or inputB")
```
#### File: graphAttack/gaUtilities/neuralNetwork.py
```python
import numpy as np
from ..coreDataContainers import Variable
from ..operations.activationOperations import *
from ..operations.costOperations import *
from ..operations.twoInputOperations import *
from ..operations.singleInputOperations import *
from ..operations.convolutionOperation import *
from ..operations.transformationOperations import *
from ..operations.multipleInputOperations import *
from .misc import generateRandomVariable, generateZeroVariable
def addDenseLayer(mainGraph, nOutputNodes,
inputOperation=None,
activation=ReLUActivation,
dropoutRate=0,
batchNormalisation=False):
"""Append a dense layer to the graph
Parameters
----------
mainGraph : ga.Graph
computation graph to which append the dense layer
nOutputNodes : int
Number of output nodes
inputOperation : ga.Operation
operation feeding the data to the layer
activation : ga.SingleInputOperation
activation operation of choice
dropoutRate : float
dropout rate at the end of this layer
batchNormalisation: bool
Whether to use Batch normalisation
w : np.array
weights in shape (nOutputNodes, nFeatures),
randomly initialized inside this function
b : np.array
biases in shape (nOutputNodes, ),
randomly initialized inside this function
Returns
-------
ga.Operation
Last operation of the dense layer
"""
if (inputOperation is None):
inputOperation = mainGraph.operations[-1]
N, D = inputOperation.shape
w = generateRandomVariable(shape=(nOutputNodes, D),
transpose=True, nInputs=D)
b = generateRandomVariable(shape=nOutputNodes,
transpose=False, nInputs=1)
wo = mainGraph.addOperation(w, doGradient=True)
bo = mainGraph.addOperation(b, doGradient=True)
mmo = mainGraph.addOperation(MatMatmulOperation(inputOperation, wo),
doGradient=False,
finalOperation=False)
addo = mainGraph.addOperation(AddOperation(mmo, bo),
doGradient=False,
finalOperation=False)
if (dropoutRate > 0):
dpo = mainGraph.addOperation(DropoutOperation(addo, dropoutRate),
doGradient=False,
finalOperation=False)
else:
dpo = addo
if (batchNormalisation):
beta = mainGraph.addOperation(generateRandomVariable((1, nOutputNodes)), doGradient=True)
gamma = mainGraph.addOperation(generateRandomVariable((1, nOutputNodes)), doGradient=True)
bnorm = mainGraph.addOperation(BatchNormalisationOperation(dpo, beta, gamma))
else:
bnorm = dpo
acto = mainGraph.addOperation(activation(bnorm),
doGradient=False,
finalOperation=False)
return acto
def addConv2dLayer(mainGraph,
inputOperation=None,
nFilters=1,
filterHeigth=2,
filterWidth=2,
padding="SAME",
convStride=1,
activation=ReLUActivation,
batchNormalisation=False,
pooling=MaxPoolOperation,
poolHeight=2,
poolWidth=2,
poolStride=2):
"""Append a convolution2D layer with pooling
Parameters
----------
mainGraph : ga.Graph
computation graph to which append the dense layer
inputOperation : ga.Operation
operation feeding the data to the layer
nFilters : int
number of filters to be applied in the convolution
filterHeigth : int
convolution filter height
filterWidth : int
convolution filter width
padding: "SAME" or "VALID"
padding method for the convolution
convStride : int
stride for the convolution filter
activation : ga.SingleInputOperation
activation operation of choice
batchNormalisation: bool
Whether to use Batch normalisation
pooling : ga.SingleInputOperation
pooling operation of choice
poolHeight : int
height of the pooling filter
poolWidth : int
width of the pooling filter
poolStride : int
stride of the pooling operation
Returns
-------
ga.Operation
Last operation of the convolution layer (output of the pooling operation)
"""
N, C, H, W = inputOperation.shape
w = generateRandomVariable(shape=(nFilters, C, filterHeigth, filterWidth),
transpose=False, nInputs=(filterHeigth * filterWidth * C))
b = generateRandomVariable(shape=(1, nFilters, 1, 1), transpose=False, nInputs=1)
filterWop = mainGraph.addOperation(w, doGradient=True, feederOperation=False)
opConv2d = mainGraph.addOperation(Conv2dOperation(
inputOperation, filterWop, stride=convStride, paddingMethod=padding))
filterBop = mainGraph.addOperation(b, doGradient=True, feederOperation=False)
addConv2d = mainGraph.addOperation(AddOperation(opConv2d, filterBop))
if (batchNormalisation):
beta = mainGraph.addOperation(generateRandomVariable((1, *addConv2d.shape[1:])), doGradient=True)
gamma = mainGraph.addOperation(generateRandomVariable((1, *addConv2d.shape[1:])), doGradient=True)
bnorm = mainGraph.addOperation(BatchNormalisationOperation(addConv2d, beta, gamma))
else:
bnorm = addConv2d
actop = mainGraph.addOperation(activation(bnorm),
doGradient=False,
finalOperation=False)
poolOP = mainGraph.addOperation(pooling(inputA=actop,
poolHeight=poolHeight,
poolWidth=poolWidth,
stride=poolStride))
return poolOP
```
#### File: graphAttack/operations/convolutionOperation.py
```python
from ..coreOperation import *
from ..coreNode import broadcast_shape, reduce_shape
from ..gaUtilities import graphAttackFunctions as gaf
import numpy as np
class Conv2dOperation(TwoInputOperation):
"""Convolution operation
Attributes
----------
name : str
Name of the operation
result : np.array
Output of the operation
testing : bool
Flag specifying if the operation is in testing (making predictions: True)
or training (optimizing parameters: False) mode
gradA : np.array
gradient with respect to inputA (data)
gradB : np.array
gradient with respect to inputB (weights)
inputA : ga.Operation
Operation feeding data A into this operation
This should be the input of the convolution in the format of
nExamples x nChannels x Height x Width (NCHW)
inputB : ga.Operation
Operation feeding data B into this operation
This should provide the weights for this operation in the format of
nFilters x nChannels x filterHeight x filterWidth
shape : tuple
shape of the output
padding : int
how many zero rows/columns to add to an image
If provided at the initialization stage, it overrides the padding generated to match
paddingMethod
paddingMethod : str
SAME: enough padding is added so that output image dimensions match the input's
VALID: no padding is added and the image size will be reduced
stride : int
step size for scanning an image
"""
name = "Conv2dOperation"
def __init__(self, inputA=None, inputB=None, stride=1, paddingMethod="SAME", padding=None):
if (stride < 1):
raise ValueError("Stride must be at least one")
self.stride = stride
shapesMatch = (inputA.shape[1] == inputB.shape[1])
if not shapesMatch:
raise ValueError(
"""Shapes of inputs must be compatible with regard of nChannels, but %d != %d.
see docstring for explanation of the format.""" % (inputA.shape[1], inputB.shape[1]))
if (paddingMethod != "SAME") and (paddingMethod != "VALID"):
raise NotImplementedError("Only SAME and VALID paddingMethod is implemented!")
self.paddingMethod = paddingMethod
# ------ Figure out padding value
if padding is not None:
self.padding = padding
elif (self.paddingMethod == "SAME"):
if (inputB.shape[2] != inputB.shape[3]):
raise NotImplementedError("Only square filters are supported for same padding")
self.padding = int((inputB.shape[2] - 1) / 2)
elif (self.paddingMethod == "VALID"):
self.padding = 0
else:
raise NotImplementedError("Only SAME and VALID paddingMethod is implemented!")
super().__init__(inputA, inputB)
def setShape(self):
"""Set the output shape"""
# ------ Find the output shape
nExamples = self.inputA.shape[0]
nFilters = self.inputB.shape[0]
outputHeight = int(((self.inputA.shape[2] - self.inputB.shape[2] + 2 * self.padding) /
self.stride) + 1)
outputWidth = int(((self.inputA.shape[3] - self.inputB.shape[3] + 2 * self.padding) /
self.stride) + 1)
self.shape = (nExamples, nFilters, outputHeight, outputWidth)
def perform(self, a, b):
"""Perform convolution in 2D
Parameters
----------
a : np.array
Input data
b : np.array
Weights
Returns
-------
np.array
Result of the operation
"""
N, C, H, W = self.inputA.shape
NF, C, FH, FW = self.inputB.shape
N, NF, oH, oW = self.shape
# ------ Obtain a 2d representation of image and filters
aCol = im2col_indices(a, FH, FW, self.padding, self.stride)
bCol = b.reshape(NF, -1)
# ------ Store them for later gradient evaluation
self.inputACols = aCol
self.inputBCols = bCol
# ------ Obtain the 2d representation of the output
outCol = gaf.matmul(aCol, bCol.T)
# ------ Convert into appropriate shape
outMat = outCol
outMat = outCol.reshape(oH, oW, N, NF)
outMat = outMat.transpose(2, 3, 0, 1)
return outMat
def performGradient(self, input, out=None):
"""Find out the gradient with respect to the parameter
Parameters
----------
input : int
Specify an input operation with respect to which the
gradient is calculated
the key is:
inputA => 0
inputB => 1
Returns
-------
np.array
Gradient propagated through this operation
Raises
------
ValueError
input has to be either 0 or 1
"""
N, C, H, W = self.inputA.shape
NF, C, FH, FW = self.inputB.shape
N, NF, oH, oW = self.shape
# ------ Gather gradient
if (self.endNode):
grad = np.ones(self.shape)
else:
grad = np.zeros(self.shape)
for out in self.outputs:
grad += out.getGradient(self)
# ------ Reshape the gradient into the form of 2D array in the
# ------ format of outCol from forward pass
gradCols = grad.transpose(2, 3, 0, 1)
gradCols = gradCols.reshape(-1, NF)
if (input == 0):
gradCols = gaf.matmul(gradCols, self.inputBCols)
grad = col2im_indices(gradCols, self.inputA.shape, FH, FW,
padding=self.padding, stride=self.stride)
elif (input == 1):
gradCols = gaf.matmul(self.inputACols.T, gradCols)
grad = gradCols.T.reshape(self.inputB.shape)
return grad
class MaxPoolOperation(SingleInputOperation):
"""Apply max pooling using im2col, quite slow yet simple to understand
Attributes
----------
name : str
Name of the operation
result : np.array
Output of the operation
testing : bool
Flag specifying if the operation is in testing (making predictions: True)
or training (optimizing parameters: False) mode
inputA : ga.Operation
Operation feeding data A into this operation
This should be the input of the convolution in the format of
nExamples x nChannels x Height x Width (NCHW)
shape : tuple
shape of the output
poolHeight : int
Height of the pooling filter
poolWidth : int
Width of the pooling filter
stride : int
step size for scanning an image
NOTE: stride and filters should be compatible so that
(image width/height - poolWidth/poolHeight) % stride == 0
"""
name = "MaxPoolOperation"
def __init__(self, inputA=None, poolHeight=2, poolWidth=2, stride=1):
self.stride = stride
self.poolHeight = poolHeight
self.poolWidth = poolWidth
super().__init__(inputA)
def setShape(self):
"""Set the output shape"""
if (((self.inputA.shape[2] - self.poolHeight) % self.stride != 0) or
((self.inputA.shape[3] - self.poolWidth) % self.stride != 0)):
raise ValueError("""stride and padding should be compatible so that
Image weigth/heigth - poolHeigth/Width MODULO stride == 0""")
# ------ Find the output shape
nExamples = self.inputA.shape[0]
nChannels = self.inputA.shape[1]
outputHeight = int((self.inputA.shape[2] - self.poolHeight) / self.stride + 1)
outputWidth = int((self.inputA.shape[3] - self.poolWidth) / self.stride + 1)
self.shape = (nExamples, nChannels, outputHeight, outputWidth)
def perform(self, a):
"""MaxPool each of the images, use im2cols and find argmax of each row
Parameters
----------
a : np.array
Input data
Returns
-------
np.array
Output data
"""
N, C, H, W = self.inputA.shape
N, C, oH, oW = self.shape
aReshaped = a.reshape(N * C, 1, H, W)
self.aCols = im2col_indices(aReshaped, self.poolHeight, self.poolWidth, padding=0, stride=self.stride)
self.aColsArgmax = np.argmax(self.aCols, axis=1)
outCol = self.aCols[np.arange(self.aColsArgmax.size), self.aColsArgmax]
outMat = outCol.reshape(oH, oW, N, C)
outMat = outMat.transpose(2, 3, 0, 1)
return outMat
def performGradient(self, input=None):
"""Find out the gradient with respect to the parameter
Parameters
----------
input : int
placeholder variable since this operation has only one input
Returns
-------
np.array
Gradient propagated through this operation
"""
if (self.endNode):
grad = np.ones(self.shape)
else:
grad = np.zeros(self.shape)
for out in self.outputs:
grad += out.getGradient(self)
N, C, H, W = self.inputA.shape
N, C, oH, oW = self.shape
inpGradCols = grad.transpose(2, 3, 0, 1).flatten()
gradCols = np.zeros_like(self.aCols)
gradCols[np.arange(self.aColsArgmax.size), self.aColsArgmax] = inpGradCols
grad = col2im_indices(gradCols, (N * C, 1, H, W), self.poolHeight,
self.poolWidth, padding=0, stride=self.stride)
grad = grad.reshape(self.inputA.shape)
return grad
###############################################################################
# Those are im2col and col2im functions copied from the assignment for
# Stanford CS 231n class:
# http://cs231n.stanford.edu/
# https://github.com/cs231n/cs231n.github.io
#
# The only modification is that the im2col_indices returns a transposed version
# of the matrix in shape of (nelements, filterH x filterW)
# col2im_indices now accepts the transposed version of cols to be compatible with im2col
# Both functions use the same notation internally, the change is only in the interface
###############################################################################
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
# First figure out what the size of the output should be
N, C, H, W = x_shape
assert (H + 2 * padding - field_height) % stride == 0
assert (W + 2 * padding - field_width) % stride == 0
out_height = int((H + 2 * padding - field_height) / stride + 1)
out_width = int((W + 2 * padding - field_width) / stride + 1)
i0 = np.repeat(np.arange(field_height), field_width)
i0 = np.tile(i0, C)
i1 = stride * np.repeat(np.arange(out_height), out_width)
j0 = np.tile(np.arange(field_width), field_height * C)
j1 = stride * np.tile(np.arange(out_width), out_height)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)
return (k.astype(int), i.astype(int), j.astype(int))
def im2col_indices(x, field_height, field_width, padding=1, stride=1):
""" An implementation of im2col based on some fancy indexing """
# Zero-pad the input
p = padding
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
k, i, j = get_im2col_indices(x.shape, field_height, field_width, padding, stride)
cols = x_padded[:, k, i, j]
C = x.shape[1]
cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)
return cols.T
def col2im_indices(cols, x_shape, field_height=3, field_width=3, padding=1,
stride=1):
""" An implementation of col2im based on fancy indexing and np.add.at """
# ------ Change to be compatible with im2col
cols = cols.T
# ------ end of change
N, C, H, W = x_shape
H_padded, W_padded = H + 2 * padding, W + 2 * padding
x_padded = np.zeros((N, C, H_padded, W_padded), dtype=cols.dtype)
k, i, j = get_im2col_indices(x_shape, field_height, field_width, padding, stride)
cols_reshaped = cols.reshape(C * field_height * field_width, -1, N)
cols_reshaped = cols_reshaped.transpose(2, 0, 1)
np.add.at(x_padded, (slice(None), k, i, j), cols_reshaped)
if padding == 0:
return x_padded
return x_padded[:, :, padding:-padding, padding:-padding]
```
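The im2col helpers above let the convolution be computed as a single matrix multiply. Below is a short, self-contained NumPy sketch (independent of the functions above; `naive_conv2d` and `conv2d_via_im2col` are illustrative names, not part of the repository) that checks the im2col-plus-matmul formulation against a direct nested-loop convolution on a tiny input.
```python
import numpy as np

def naive_conv2d(x, w, stride=1):
    # x: (N, C, H, W), w: (F, C, FH, FW), no padding
    N, C, H, W = x.shape
    F, _, FH, FW = w.shape
    oH = (H - FH) // stride + 1
    oW = (W - FW) // stride + 1
    out = np.zeros((N, F, oH, oW))
    for n in range(N):
        for f in range(F):
            for i in range(oH):
                for j in range(oW):
                    patch = x[n, :, i*stride:i*stride+FH, j*stride:j*stride+FW]
                    out[n, f, i, j] = np.sum(patch * w[f])
    return out

def conv2d_via_im2col(x, w, stride=1):
    # gather every receptive field into a row, then convolve with one matmul
    N, C, H, W = x.shape
    F, _, FH, FW = w.shape
    oH = (H - FH) // stride + 1
    oW = (W - FW) // stride + 1
    cols = []
    for n in range(N):
        for i in range(oH):
            for j in range(oW):
                patch = x[n, :, i*stride:i*stride+FH, j*stride:j*stride+FW]
                cols.append(patch.ravel())
    cols = np.array(cols)                      # (N*oH*oW, C*FH*FW)
    out = cols @ w.reshape(F, -1).T            # (N*oH*oW, F)
    return out.reshape(N, oH, oW, F).transpose(0, 3, 1, 2)

x = np.random.randn(2, 3, 5, 5)
w = np.random.randn(4, 3, 3, 3)
assert np.allclose(naive_conv2d(x, w), conv2d_via_im2col(x, w))
```
The matmul path mirrors what Conv2dOperation.perform does above, with the patch gathering vectorised by im2col_indices instead of Python loops.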
#### File: graphAttack/operations/multipleInputOperations.py
```python
from ..coreOperation import *
from ..coreNode import broadcast_shape, reduce_shape
import numpy as np
class BatchNormalisationOperation(MultipleInputOperation):
"""Perform batch normalisation in the data
Attributes
----------
initialized with three inputs:
inputA: dataflow
inputB: beta parameter
inputC: gamma parameter
name : str
Name of the operation
result : np.array
Output of the operation
testing : bool
Flag specifying if the operation is in testing (making predictions: True)
or training (optimizing parameters: False) mode
grads : list(np.array)
gradients with respect to inputs; grads[i]: gradient with respect to input i
inputs : list(ga.Operation)
Operations feeding data into this operation
shape : tuple
shape of the output
"""
name = "BatchNormalisationOperation"
def __init__(self, inputA=None, inputB=None, inputC=None, running_param=0.1):
super().__init__(inputA, inputB, inputC)
self.setShape()
self.testing = False
self.muMean = 0
self.varMean = 1
self.lastaMean = 0
self.lastVarInv = 1
self.lastaNorm = np.zeros(inputA.shape)
self.running_param = running_param
def reset(self):
"""Reset the values and gradients held by this operation"""
self.result = None
self.grads = [None for item in self.inputs]
self.setShape()
def setShape(self):
"""Set the output shape"""
self.shape = self.inputs[0].shape
def perform(self, a, b, c):
"""Perform dropout
Parameters
----------
a : np.array
Input data
b : np.array
Input data
c : np.array
Input data
Returns
-------
np.array
Output data
"""
if self.testing:
self.lastaNorm = (a - self.muMean) / np.sqrt(self.varMean + 1e-8)
else:
mu = np.mean(a, axis=0, keepdims=True)
var = np.var(a, axis=0, keepdims=True)
self.lastaMean = (a - mu)
self.lastVarInv = 1. / np.sqrt(var + 1e-8)
self.lastaNorm = self.lastaMean * self.lastVarInv
self.muMean = self.muMean * (1 - self.running_param) + mu * self.running_param
self.varMean = self.varMean * (1 - self.running_param) + var * self.running_param
out = self.lastaNorm * c + b
# out = self.lastaNorm
return out
def performGradient(self, input):
"""Find out the gradient with respect to the parameter
Parameters
----------
input : int
Specify an input operation with respect to which the
gradient is calculated
Returns
-------
np.array
Gradient propagated through this operation
Raises
------
ValueError
input has to be from 0 to len(self.inputs)
"""
if (self.endNode):
grad = np.ones(self.inputs[input].shape)
else:
grad = np.zeros(self.inputs[0].shape)
for out in self.outputs:
grad += reduce_shape(out.getGradient(self), grad)
if (input == 0):
nExamples = self.inputs[0].shape[0]
daNorm = grad * self.inputs[2].getValue()
dVar = np.sum(daNorm * self.lastaMean, axis=0) * (-0.5) * np.power(self.lastVarInv, 3)
dMu = np.sum(daNorm * (-self.lastVarInv), axis=0) + dVar * np.mean(-2 * self.lastaMean, axis=0)
grad = (daNorm * self.lastVarInv) + (dVar * 2 * self.lastaMean / nExamples) + (dMu / nExamples)
elif (input == 1):
grad = np.sum(grad, axis=0)
elif (input == 2):
grad = np.sum(grad * self.lastaNorm, axis=0)
return grad
```
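For reference, a minimal stand-alone sketch of the forward pass implemented by BatchNormalisationOperation.perform (plain NumPy; the function and variable names here are illustrative only): per-feature normalisation over the batch axis, a learned scale and shift, and running statistics blended with the same `running_param` convention as above.
```python
import numpy as np

def batchnorm_forward(a, beta, gamma, running_mu, running_var,
                      running_param=0.1, training=True, eps=1e-8):
    """Normalise a (nExamples, nFeatures) batch, then scale by gamma and shift by beta."""
    if training:
        mu = np.mean(a, axis=0, keepdims=True)
        var = np.var(a, axis=0, keepdims=True)
        a_norm = (a - mu) / np.sqrt(var + eps)
        # blend running statistics exactly as the operation above does
        running_mu = running_mu * (1 - running_param) + mu * running_param
        running_var = running_var * (1 - running_param) + var * running_param
    else:
        a_norm = (a - running_mu) / np.sqrt(running_var + eps)
    return a_norm * gamma + beta, running_mu, running_var

a = np.random.randn(8, 3) * 5 + 2
out, mu, var = batchnorm_forward(a, beta=np.zeros((1, 3)), gamma=np.ones((1, 3)),
                                 running_mu=0.0, running_var=1.0)
print(out.mean(axis=0))  # ~0 per feature
print(out.std(axis=0))   # ~1 per feature
```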
#### File: graphAttack/operations/transformationOperations.py
```python
from ..coreOperation import *
from ..coreNode import broadcast_shape, reduce_shape
import numpy as np
class FlattenFeaturesOperation(SingleInputOperation):
"""Flatten the axis greater than 0 to turn
dim > 2 tensors into 2d arrays
Attributes
----------
name : str
Name of the operation
result : np.array
Output of the operation
gradA : np.array
gradient with respect to inputA
inputA : ga.Operation
Operation feeding data A into this operation
nExamples : int
Number of examples in current batch
shape : tuple
shape of the output
"""
name = "FlattenFeaturesOperation"
def setShape(self):
"""Set the output shape"""
inpShapeSize = len(self.inputA.shape)
if (inpShapeSize >= 2):
self.nExamples = self.inputA.shape[0]
numFeatures = 1
for index in range(inpShapeSize - 1):
numFeatures *= self.inputA.shape[index + 1]
self.shape = (self.nExamples, numFeatures)
else:
self.nExamples = 1
self.shape = (self.nExamples, self.inputA.shape[0])
def perform(self, a):
"""Perform the flattening
Parameters
----------
a : np.array
Input data
Returns
-------
np.array
Output data
"""
return a.reshape(self.shape)
def performGradient(self, input=None):
"""Find out the gradient with respect to the parameter
Parameters
----------
input : int
placeholder variable since this operation has only one input
Returns
-------
np.array
Gradient propagated through this operation
"""
if (self.endNode):
grad = np.ones(self.inputA.shape)
else:
grad = np.zeros(self.inputA.shape)
for out in self.outputs:
grad += out.getGradient(self).reshape(self.inputA.shape)
return grad
class ReshapeFeaturesOperation(SingleInputOperation):
"""Gather features and reshape them, transform a 2d array
(nExamples, nFeatures) into a multidim array of
(nExamples, shape)
Attributes
----------
name : str
Name of the operation
result : np.array
Output of the operation
gradA : np.array
gradient with respect to inputA
inputA : ga.Operation
Operation feeding data A into this operation
nExamples : int
Number of examples in current batch
shape : tuple
shape of the output
exampleShape : tuple
shape of each example; the result of this operation is an array
with shape (nExamples, ) + exampleShape
"""
name = "ReshapeFeaturesOperation"
def __init__(self, inputA=None, exampleShape=0):
self.exampleShape = exampleShape
super().__init__(inputA)
self.setShape()
def setShape(self):
"""Set the output shape"""
inpShapeSize = len(self.inputA.shape)
if (inpShapeSize >= 2):
self.nExamples = self.inputA.shape[0]
self.shape = (self.nExamples, ) + self.exampleShape
else:
self.nExamples = 1
self.shape = (self.nExamples, ) + self.exampleShape
def perform(self, a):
"""Reshape the flatend array to desired shape
Parameters
----------
a : np.array
Input data
Returns
-------
np.array
Output data
"""
return a.reshape(self.shape)
def performGradient(self, input=None):
"""Find out the gradient with respect to the parameter
Parameters
----------
input : int
placeholder variable since this operation has only one input
Returns
-------
np.array
Gradient propagated through this operation
"""
if (self.endNode):
grad = np.ones(self.inputA.shape)
else:
grad = np.zeros(self.inputA.shape)
for out in self.outputs:
grad += out.getGradient(self).reshape(self.inputA.shape)
return grad
class SliceOperation(SingleInputOperation):
"""Performs array slicing using numpy index expressions
example for index_exp:
>>> x = np.arange(4).reshape(2, 2)
>>> indexExp = np.index_exp[0, :]
>>> x[indexExp]
array([0, 1])
see https://docs.scipy.org/doc/numpy/reference/generated/numpy.s_.html#numpy-s
for more information
Attributes
----------
name : str
Name of the operation
result : np.array
Output of the operation
gradA : np.array
gradient with respect to inputA
inputA : ga.Operation
Operation feeding data A into this operation
indexExp : np.index_exp
Index expression for slicing
shape : tuple
shape of the output
"""
name = "SliceOperation"
def __init__(self, inputA=None, indexExp=None):
if indexExp is None:
raise ValueError("Must provide index Expression as numpy.index_exp!")
self.indexExp = indexExp
super().__init__(inputA)
def setShape(self):
"""Set the output shape"""
testMat = np.zeros(self.inputA.shape)
result = testMat[self.indexExp]
self.shape = result.shape
def perform(self, a):
"""Reshape the flatend array to desired shape
Parameters
----------
a : np.array
Input data
Returns
-------
np.array
Output data
"""
return a[self.indexExp]
def performGradient(self, input=None):
"""Find out the gradient with respect to the parameter
Parameters
----------
input : int
placeholder variable since this operation has only one input
Returns
-------
np.array
Gradient propagated through this operation
"""
if (self.endNode):
grad = np.ones(self.inputA.shape)
else:
gradGather = np.zeros(self.shape)
for out in self.outputs:
gradGather += out.getGradient(self)
grad = np.zeros(self.inputA.shape)
grad[self.indexExp] = gradGather
return grad
```
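The flatten/reshape operations above backpropagate by simply reshaping the upstream gradient back to the input's shape. A tiny NumPy sketch of that round trip (illustrative only):
```python
import numpy as np

x = np.random.randn(4, 3, 5, 5)              # (nExamples, channels, H, W)
flat = x.reshape(x.shape[0], -1)             # forward pass: (4, 75)

upstream_grad = np.ones_like(flat)           # gradient arriving in the flattened shape
grad_wrt_x = upstream_grad.reshape(x.shape)  # backward pass: reshape back to (4, 3, 5, 5)
assert grad_wrt_x.shape == x.shape
```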
|
{
"source": "jgolebiowski/kitkopt",
"score": 3
}
|
#### File: examples/bayesian-stateless-propose-points/control.py
```python
import numpy as np
from kitkopt.bayesian_optimizer import propose_points
from kitkopt.hyper_parameter import HyperParameter
from kitkopt.kernels import rbf
def main():
# ------ Define hyperparameters with bounds and stepsize
hyperparam_config = [
HyperParameter(0, 3, 1),
HyperParameter(0, 5, 2)
]
# ------ Define previously tested points and function values at those points
tested_points = np.array([
[0, 2],
[2, 0],
[1, 4]
], dtype=float)
values = np.array([1,
2,
3], dtype=float)
# ------ Decide the number of points to be proposed each iteration
num_points = 4
# ------ Define the GP parameters including the kernel and its parameters
gp_settings = dict(
kernel=rbf,
kernel_params=(0.1, 1),
noise=1e-6
)
# ------ Ask the optimizer for new points
new_points = propose_points(tested_points, values, hyperparam_config,
num_points=num_points, seed=123,
acquisition_function="Thompson", gp_settings=gp_settings)
# ------ print the proposed points
print("Proposed points:")
print(new_points)
if (__name__ == '__main__'):
main()
```
#### File: kitkopt/kitkopt/acquisition.py
```python
import numpy as np
from kitkopt.gaussian_process import GaussianProcessRegression
from kitkopt.hypergrid import not_in_array
from kitkopt.utilities import OptimizerError
def _get_new_unique_point_UCB(hypergrid: np.ndarray,
gp: GaussianProcessRegression,
ucb_tradeoff_parameter: float = 0.5) -> np.ndarray:
"""
Propose a new point using the confidence-bound acquisition acq(x) = mu(x) - b * sigma(x);
the grid point minimising the bound is returned.
:param hypergrid: grid with previously untested combinations
:param gp: Gaussian Process Regressor that has been fit to previous points
:param ucb_tradeoff_parameter: Parameter b, determining the tradeoff between exploration and exploitation
:return: the proposed point as a (1, num_hyperparams) array
"""
acquisition = gp.mu.squeeze() - ucb_tradeoff_parameter * gp.sigmas.squeeze()
minid = np.argmin(acquisition)
return np.expand_dims(hypergrid[minid, :], axis=0)
def _get_new_unique_points_Thompson(hypergrid: np.ndarray,
gp: GaussianProcessRegression,
num_points,
max_iter: int = 100) -> np.ndarray:
"""
Propose new points using Thompson sampling, each different from the ones proposed before
:param hypergrid: grid with previously untested combinations
:param gp: Gaussian Process Regressor that has been fit to previous points
:param num_points: number of points to be proposed
:param max_iter: Maximum number of tries for drawing new points
:return: new points, given in a (num_points, num_hyperparams) matrix
"""
maxvalue = np.max(hypergrid) + 1
new_points = np.ones((num_points, hypergrid.shape[1]), dtype=float) * maxvalue
for idx in range(num_points):
new_points[idx, :] = _get_single_point_Thompson(new_points, hypergrid, gp)
return new_points
def _get_single_point_Thompson(new_points_so_far: np.ndarray,
hypergrid: np.ndarray,
gp: GaussianProcessRegression,
max_iter: int = 100) -> np.ndarray:
"""
Propose a new point using Thompson sampling, different from the ones proposed before
:param new_points_so_far: points already proposed in shape (n_points, n_hyperparams)
:param hypergrid: grid with previously untested combinations
:param gp: Gaussian Process Regressor that has been fit to previous points
:param max_iter: Maximum number of tries for drawing new points
:return: the proposed point, different from the ones proposed before
"""
for idx in range(max_iter):
sample = gp.sample()
minid = np.argmin(sample)
proposed_point = hypergrid[minid, :]
if not_in_array(proposed_point, new_points_so_far):
return proposed_point
raise OptimizerError("Could not find a new unique point within iteration number!")
```
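As a self-contained sketch of the Thompson step above (plain NumPy; the Gaussian posterior here is a stand-in for `GaussianProcessRegression.sample`, and all names are illustrative): draw one function sample over the grid, take its argmin, and retry until the proposed point has not been seen before.
```python
import numpy as np

def thompson_propose(grid, mu, cov, already_proposed, max_iter=100, rng=None):
    """Propose one untested grid point by sampling a posterior draw and taking its argmin."""
    rng = rng or np.random.default_rng(0)
    for _ in range(max_iter):
        sample = rng.multivariate_normal(mu, cov)      # one function draw over the grid
        candidate = grid[np.argmin(sample)]
        if not any(np.allclose(candidate, p) for p in already_proposed):
            return candidate
    raise RuntimeError("Could not find a new unique point within max_iter draws")

grid = np.array([[x, y] for x in range(4) for y in range(0, 6, 2)], dtype=float)
mu = np.zeros(len(grid))
cov = np.eye(len(grid)) * 0.25
print(thompson_propose(grid, mu, cov, already_proposed=[np.array([0.0, 2.0])]))
```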
#### File: kitkopt/kitkopt/hypergrid.py
```python
import itertools
from typing import List
import numba
import numpy as np
from kitkopt.hyper_parameter import HyperParameter
@numba.jit(
[
numba.boolean(numba.float64[:], numba.float64[:, :], numba.float64),
numba.boolean(numba.float32[:], numba.float32[:, :], numba.float32)
], nopython=False
)
def _numba_not_in_array(vector: np.ndarray, array: np.ndarray, delta: float = 1e-4) -> bool:
"""
Check if a given vector is NOT a row of a given array
"""
diff = np.abs(array - vector)
for idx in range(array.shape[0]):
localdiff = np.max(diff[idx, :])
if localdiff < delta:
return False
return True
def not_in_array(vector: np.ndarray, array: np.ndarray, delta: float = 1e-4) -> bool:
"""
Check if a given vector is NOT a row of a given array
:param vector: vector in shape (dim1, )
:param array: array in shape (dim2, dim1)
:param delta: delta used to compute float equality
:return: True if a given vector is NOT a row of a given array
"""
if len(array) == 0 or len(vector) == 0:
return False
try:
return _numba_not_in_array(vector, array, delta)
except TypeError:
diff = np.min(np.max(np.abs(vector - array), axis=1))
return (diff > delta)
def get_hypergrid(hyperparams_config: List[HyperParameter]) -> np.ndarray:
"""
Return a grid with all potential hyperparameter combinations
:param hyperparams_config: List of hyperparameters
:return: grid with possible combinations
"""
hypervalues = [
np.arange(hyperparam.lower_bound, hyperparam.upper_bound + hyperparam.stepsize / 2, hyperparam.stepsize)
for hyperparam in hyperparams_config
]
potential_points = [item for item in itertools.product(*hypervalues)]
potential_points = np.array(potential_points, dtype=float)
return potential_points
def prune_hypergrid(hypergrid: np.ndarray, tested_points: np.ndarray) -> np.ndarray:
"""
Prune the grid of potential hyperparameter combinations, removing previously seen ones
:param hypergrid: Grid with potential hyperparameter combinations
:param tested_points: previously tested points with dims (n_points, n_hyperparameters)
:return: grid with previously untested combinations
"""
if len(tested_points) == 0:
return hypergrid
mask = [not_in_array(potential_point, tested_points) for potential_point in hypergrid]
return hypergrid[mask]
```
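The grid returned by get_hypergrid is just the Cartesian product of each hyperparameter's value range. A minimal sketch with plain NumPy/itertools, hard-coding the bounds and step sizes in place of HyperParameter objects (note the `+ stepsize / 2` trick above that keeps the upper bound inclusive):
```python
import itertools
import numpy as np

# two hyperparameters: one in [0, 3] with step 1, one in [0, 5] with step 2
value_ranges = [
    np.arange(0, 3 + 0.5, 1.0),   # [0, 1, 2, 3]
    np.arange(0, 5 + 1.0, 2.0),   # [0, 2, 4]
]
grid = np.array(list(itertools.product(*value_ranges)), dtype=float)
print(grid.shape)   # (12, 2) -- every combination of the two ranges
```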
#### File: kitkopt/kitkopt/kernels.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
def poly_kernel(x: np.ndarray, y: np.ndarray, degree: int=1, gamma: float=1.0, coef0: float=0.0):
"""
Compute the polynomial kernel between x and y::
K(x, y) = (gamma * <x, y> + coef0)^degree
:param x: ndarray of shape (n_samples_1, n_features)
:param y: ndarray of shape (n_samples_2, n_features)
:param degree: see formula
:param gamma: see formula
:param coef0: see formula
:return: Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
K = np.dot(x, y.T)
K *= gamma
K += coef0
K **= degree
return K
def rbf(a: np.ndarray, b: np.ndarray, length: float, sigma_f: float):
"""
Radial basis kernel: Gaussian distance kernel
:param a: design matrix of shape (nexamples, nfeatures)
:param b: design matrix of shape (nexamples, nfeatures)
:param length: Kernel parameter: (squared) length scale controlling horizontal smoothness
:param sigma_f: Kernel parameter: vertical scale (signal variance)
:return: Gram Matrix of shape (nexamples, nexamples)
"""
sqdist = np.sum(a**2, 1).reshape(-1, 1) + np.sum(b**2, 1) - 2 * np.dot(a, b.T)
return np.exp(-0.5 * (1 / length) * sqdist) * sigma_f
```
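A quick stand-alone check of the vectorised squared-distance trick used in `rbf` against an explicit double loop (illustrative sketch; `rbf_explicit` is not part of the package):
```python
import numpy as np

def rbf_explicit(a, b, length, sigma_f):
    """Same kernel as above, computed point by point for clarity."""
    K = np.zeros((a.shape[0], b.shape[0]))
    for i, x in enumerate(a):
        for j, y in enumerate(b):
            K[i, j] = sigma_f * np.exp(-0.5 * np.sum((x - y) ** 2) / length)
    return K

a = np.random.randn(5, 3)
b = np.random.randn(4, 3)
sqdist = np.sum(a**2, 1).reshape(-1, 1) + np.sum(b**2, 1) - 2 * np.dot(a, b.T)
K_vec = np.exp(-0.5 * (1 / 0.8) * sqdist) * 1.5
assert np.allclose(K_vec, rbf_explicit(a, b, length=0.8, sigma_f=1.5))
```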
#### File: jgolebiowski/kitkopt/test.py
```python
import logging
import unittest
import tst.test_kernels
import tst.test_gaussian_process
import tst.test_rescale
import tst.test_hypergrid
import tst.test_random_optimizer
import tst.test_bayesian_optimizer
import tst.test_acquisition
def main():
# initialize
suite = unittest.TestSuite()
loader = unittest.TestLoader()
# Load tests from modules
suite.addTest(loader.loadTestsFromModule(tst.test_kernels))
suite.addTest(loader.loadTestsFromModule(tst.test_gaussian_process))
suite.addTest(loader.loadTestsFromModule(tst.test_rescale))
suite.addTest(loader.loadTestsFromModule(tst.test_hypergrid))
suite.addTest(loader.loadTestsFromModule(tst.test_random_optimizer))
suite.addTest(loader.loadTestsFromModule(tst.test_bayesian_optimizer))
suite.addTest(loader.loadTestsFromModule(tst.test_acquisition))
# Run tests
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
if (__name__ == "__main__"):
main()
```
#### File: kitkopt/tst/test_acquisition.py
```python
import numpy as np
import unittest
from kitkopt.gaussian_process import GaussianProcessRegression
from kitkopt.hyper_parameter import HyperParameter
from kitkopt.kernels import rbf
from kitkopt.hypergrid import not_in_array, get_hypergrid
from kitkopt.bayesian_optimizer import propose_points, minimize_function
from kitkopt.acquisition import _get_single_point_Thompson, _get_new_unique_points_Thompson, _get_new_unique_point_UCB
from kitkopt.utilities import debugtool, OptimizerError
class AcquisitionTest(unittest.TestCase):
def test_get_new_unique_point_Thompson(self):
hyperparam_config = [
HyperParameter(0, 3, 1),
HyperParameter(0, 5, 2)
]
grid = get_hypergrid(hyperparam_config)
previous_points = np.array([
[0, 4],
[1, 0],
[3, 2],
[4, 2]
])
gp_settings = dict(
kernel=rbf,
kernel_params=(0.02, 0.25),
noise=1e-6
)
gp = GaussianProcessRegression(gp_settings["kernel"],
*gp_settings["kernel_params"],
noise=gp_settings["noise"])
gp.initialize(grid)
for idx in range(100):
self.assertTrue(not_in_array(_get_single_point_Thompson(previous_points, grid, gp, 100), previous_points))
# Check error
gp.initialize(previous_points)
with self.assertRaises(OptimizerError):
_get_single_point_Thompson(previous_points, previous_points, gp)
def test_get_new_unique_points_Thompson(self):
hyperparam_config = [
HyperParameter(0, 3, 1),
HyperParameter(0, 5, 2)
]
grid = get_hypergrid(hyperparam_config)
num_points = 5
gp_settings = dict(
kernel=rbf,
kernel_params=(0.02, 0.25),
noise=1e-6
)
gp = GaussianProcessRegression(gp_settings["kernel"],
*gp_settings["kernel_params"],
noise=gp_settings["noise"])
gp.initialize(grid)
new_points = _get_new_unique_points_Thompson(grid, gp, num_points)
for i in range(len(new_points)):
for j in range(i + 1, len(new_points)):
self.assertFalse(np.array_equal(new_points[i], new_points[j]))
def test_get_new_unique_points_UCB(self):
hyperparam_config = [
HyperParameter(0, 3, 1),
HyperParameter(0, 5, 2)
]
grid = get_hypergrid(hyperparam_config)
known_points = np.array([
[0, 0]])
values = np.array([
-2])
gp_settings = dict(
kernel=rbf,
kernel_params=(0.02, 0.25),
noise=1e-6
)
gp = GaussianProcessRegression(gp_settings["kernel"],
*gp_settings["kernel_params"],
noise=gp_settings["noise"])
gp.fit(known_points, values, grid)
ucb_tradeoff = 0.5
new_points = _get_new_unique_point_UCB(grid, gp, ucb_tradeoff)
target = np.array([[0., 0.]])
np.testing.assert_allclose(new_points, target, atol=1e-5)
if (__name__ == "__main__"):
unittest.main()
```
#### File: kitkopt/tst/test_random_optimizer.py
```python
import numpy as np
import unittest
from kitkopt.hyper_parameter import HyperParameter
from kitkopt.random_optimizer import propose_points, \
_get_new_unique_point, minimize_function
from kitkopt.hypergrid import not_in_array, get_hypergrid, prune_hypergrid
from kitkopt.utilities import debugtool, OptimizerError
class RandomOptimizerTest(unittest.TestCase):
def test_minimize(self):
def funct(x):
return np.sum(np.square(x))
hyperparam_config = [
HyperParameter(-5, 5, 1),
HyperParameter(-5, 5, 1)
]
best_point, best_value = minimize_function(funct, hyperparam_config,
extra_function_args=(),
tolerance=1e-2,
seed=123)
np.testing.assert_allclose(best_point, np.array([0, 0]), atol=1e-5)
np.testing.assert_allclose(best_value, np.array([0]), atol=1e-5)
def test_propose_points(self):
hyperparam_config = [
HyperParameter(0, 4, 1),
HyperParameter(0, 5, 2)
]
tested_points = np.array([
[0, 4],
[1, 0],
[3, 2],
[4, 2]
])
target = np.array([[1., 2.],
[2., 4.],
[0., 2.],
[1., 4.],
[4., 4.],
[4., 0.],
[0., 0.],
[2., 0.],
[3., 0.],
[3., 4.],
[2., 2.]])
result = propose_points(tested_points, None, hyperparam_config, 11, seed=123)
# print(repr(result))
np.testing.assert_almost_equal(result, target, decimal=5)
target = np.array([[1., 2.],
[2., 4.],
[0., 2.],
[1., 4.],
[4., 4.],
[4., 0.]])
result = propose_points(tested_points, None, hyperparam_config, 6, seed=123)
# print(repr(result))
np.testing.assert_almost_equal(result, target, decimal=5)
# Check error
with self.assertRaises(OptimizerError):
propose_points(tested_points, None, hyperparam_config, 20, seed=123)
def test_get_new_unique_point(self):
hyperparam_config = [
HyperParameter(0, 4, 1),
HyperParameter(0, 5, 2)
]
grid = get_hypergrid(hyperparam_config)
previous_points = np.array([
[0, 4],
[1, 0],
[3, 2],
[4, 2]
])
for idx in range(100):
self.assertTrue(not_in_array(_get_new_unique_point(previous_points, grid, 100), previous_points))
# Check error
with self.assertRaises(OptimizerError):
_get_new_unique_point(previous_points, previous_points)
```
|
{
"source": "JG-OLIVEIRA/math_repo",
"score": 4
}
|
#### File: JG-OLIVEIRA/math_repo/fat.py
```python
def fat(value):
if value > 1:
return value * fat(value - 1)
return 1
print(fat(5)/ fat(3) * fat(2))
print(fat(4))
print(fat(3))
print(fat(2))
print(fat(1))
```
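For reference, the recursive `fat` above matches the standard-library factorial, and the first printed expression evaluates to 5!/3! * 2! = 40.0 (a small stand-alone check; the redefinition below just mirrors the file above):
```python
import math

def fat(value):
    if value > 1:
        return value * fat(value - 1)
    return 1

assert all(fat(n) == math.factorial(n) for n in range(1, 10))
print(fat(5) / fat(3) * fat(2))  # 120 / 6 * 2 = 40.0
```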
|
{
"source": "jgollenz/python-lsp-server",
"score": 2
}
|
#### File: pylsp/config/pycodestyle_conf.py
```python
import pycodestyle
from pylsp._utils import find_parents
from .source import ConfigSource
CONFIG_KEY = 'pycodestyle'
USER_CONFIGS = [pycodestyle.USER_CONFIG] if pycodestyle.USER_CONFIG else []
PROJECT_CONFIGS = ['pycodestyle.cfg', 'setup.cfg', 'tox.ini']
OPTIONS = [
('exclude', 'plugins.pycodestyle.exclude', list),
('filename', 'plugins.pycodestyle.filename', list),
('hang-closing', 'plugins.pycodestyle.hangClosing', bool),
('ignore', 'plugins.pycodestyle.ignore', list),
('max-line-length', 'plugins.pycodestyle.maxLineLength', int),
('indent-size', 'plugins.pycodestyle.indentSize', int),
('select', 'plugins.pycodestyle.select', list),
('aggressive', 'plugins.pycodestyle.aggressive', int),
]
class PyCodeStyleConfig(ConfigSource):
def user_config(self):
config = self.read_config_from_files(USER_CONFIGS)
return self.parse_config(config, CONFIG_KEY, OPTIONS)
def project_config(self, document_path):
files = find_parents(self.root_path, document_path, PROJECT_CONFIGS)
config = self.read_config_from_files(files)
return self.parse_config(config, CONFIG_KEY, OPTIONS)
```
|
{
"source": "jgollub1/tennis_match_prediction",
"score": 3
}
|
#### File: tennis_match_prediction/src/data_classes.py
```python
import numpy as np
from collections import defaultdict
'''
tracking object for player's year-long performance over time
accepts dates in (year,month)
last_year contains last 12 months stats, most recent to least
'''
class stats_52():
def __init__(self,date):
self.most_recent = date
self.last_year = np.zeros([12,4])
def time_diff(self,new_date,old_date):
return 12*(new_date[0]-old_date[0])+(new_date[1]-old_date[1])
def set_month(self,match_date):
diff = self.time_diff(match_date,self.most_recent)
if diff>=12:
self.last_year = np.zeros([12,4])
elif diff>0:
self.last_year[diff:] = self.last_year[:12-diff]; self.last_year[:diff] = 0 ## Doing this is dangerous!!!
self.most_recent = match_date
def update(self,match_date,match_stats):
self.set_month(match_date)
self.last_year[0] = self.last_year[0]+match_stats
'''
tracking object for opponent-adjusted ratings
stores opponent ability at time of match to compare performance against
'''
class adj_stats_52():
def __init__(self,date):
self.most_recent = date
self.last_year = np.zeros([12,6])
self.adj_sr = [0,0]
def time_diff(self,new_date,old_date):
return 12*(new_date[0]-old_date[0])+(new_date[1]-old_date[1])
def set_month(self,match_date):
diff = self.time_diff(match_date,self.most_recent)
if diff>=12:
self.last_year = np.zeros([12,6])
elif diff>0:
self.last_year[diff:] = self.last_year[:12-diff].copy(); self.last_year[:diff] = 0
self.most_recent = match_date
self.update_adj_sr()
def update(self,match_date,match_stats):
self.set_month(match_date)
self.last_year[0] = self.last_year[0]+match_stats
self.update_adj_sr()
# update the player's adjust serve/return ability, based on last twelve months
def update_adj_sr(self):
s_pt, r_pt = np.sum(self.last_year[:,1]), np.sum(self.last_year[:,3])
if s_pt==0 or r_pt==0:
self.adj_sr = [0,0]
return
with np.errstate(divide='ignore', invalid='ignore'):
f_i = np.sum(self.last_year[:,0])/s_pt
f_adj = 1 - np.sum(self.last_year[:,4])/s_pt
g_i = np.sum(self.last_year[:,2])/r_pt
g_adj = 1 - np.sum(self.last_year[:,5])/r_pt
self.adj_sr[0] = f_i - f_adj
self.adj_sr[1] = g_i - g_adj
'''
tracking object for common-opponent ratings
stores all historical performance against opponents
'''
class commop_stats():
def __init__(self):
self.history = defaultdict(lambda: np.zeros(4))
def update(self, match_stats, opponent_name):
self.history[opponent_name] += match_stats
'''
tracking object for common-opponent ratings
stores past year of performance against opponents
'''
# class commop_stats_52():
# def __init__(self, date):
# self.last_year = defaultdict(lambda: np.zeros([12, 4]))
# self.most_recent = date
# def time_diff(self, new_date, old_date):
# return 12*(new_date[0]-old_date[0])+(new_date[1]-old_date[1])
# def update_player_stats(self, match_date, opponent_name):
# diff = self.time_diff(match_date, self.most_recent)
# if diff>=12:
# self.last_year[opponent_name] = np.zeros([12,4])
# elif diff>0:
# self.last_year[opponent_name][diff:] = self.last_year[opponent_name][:12-diff]
# self.last_year[opponent_name][:diff] = 0
# def update_player_histories(self, match_date, opponent_name):
# for opp_name in np.union1d(opponent_name, self.last_year.keys()):
# self.update_player_stats(match_date, opp_name)
# self.most_recent = match_date
# def update(self, match_date, match_stats, opponent_name):
# self.update_player_histories(match_date, opponent_name)
# self.last_year[opponent_name][0] = self.last_year[opponent_name][0]+match_stats
'''
tracking object for yearly tournament averages
'''
class tny_52():
def __init__(self,date):
self.most_recent = date
self.tny_stats = np.zeros([2,2])
self.historical_avgs = {}
def update(self,match_year,match_stats):
diff = match_year-self.most_recent
if diff>=2:
self.tny_stats = np.zeros([2,2])
elif diff==1:
self.tny_stats[1] = self.tny_stats[0]; self.tny_stats[0]=0
self.tny_stats[0] = self.tny_stats[0]+match_stats
self.most_recent = match_year
self.historical_avgs[match_year] = (self.tny_stats[0][0],self.tny_stats[0][1])
return 0 if self.tny_stats[1][1]==0 else self.tny_stats[1][0]/float(self.tny_stats[1][1])
```
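A short usage sketch of the rolling twelve-month tracker defined above (the import path is assumed; adjust it to wherever data_classes.py lives). Rows of `last_year` hold (serve points won, serve points, return points won, return points), most recent month first, and the window shifts whenever a later (year, month) arrives.
```python
import numpy as np
from data_classes import stats_52  # assumed import path

player = stats_52((2015, 6))
player.update((2015, 6), np.array([30, 45, 12, 40]))   # one match in June 2015
player.update((2015, 9), np.array([25, 38, 15, 42]))   # next match three months later

# June's totals have shifted to row 3; September's sit in row 0
print(np.sum(player.last_year, axis=0))   # [55. 83. 27. 82.]
```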
#### File: tennis_match_prediction/src/data_functions.py
```python
import os
import sys
sys.path.insert(0, './sackmann')
import re
import datetime
import numpy as np
import pandas as pd
import elo_538 as elo
from tennisMatchProbability import matchProb
from processing_util import normalize_name
from data_classes import stats_52, adj_stats_52, tny_52, commop_stats
from collections import defaultdict
from globals import COMMOP_START_YEAR, EPSILON
pd.options.mode.chained_assignment = None
'''
concatenate original match dataframes from years
(start_y, end_y)
'''
def concat_data(start_y, end_y, tour):
match_year_list = []
for i in range(start_y, end_y+1):
f_name = "../match_data_formatted/{}_matches_{}.csv".format(tour, i)
try:
match_year_list.append(pd.read_csv(f_name))
except:
print('could not find file for year: ', i)
full_match_df = pd.concat(match_year_list, ignore_index = True)
return full_match_df.sort_values(by=['tny_date','tny_name','match_num'], ascending=True).reset_index(drop=True)
'''
match data preprocessing
'''
def format_match_df(df,tour,ret_strings=[],abd_strings=[]):
cols = [u'tourney_id', u'tourney_name', u'surface', u'draw_size', u'tourney_date',
u'match_num', u'winner_name', u'loser_name', u'score', u'best_of', u'w_svpt',
u'w_1stWon', u'w_2ndWon', u'l_svpt', u'l_1stWon', u'l_2ndWon']
df = df[cols]
df = df.rename(columns={'winner_name':'w_name','loser_name':'l_name','tourney_id':'tny_id',\
'tourney_name':'tny_name','tourney_date':'tny_date'})
df['w_name'] = [normalize_name(x,tour) for x in df['w_name']]
df['l_name'] = [normalize_name(x,tour) for x in df['l_name']]
df['tny_name'] = ['<NAME>' if '<NAME>' in s else s for s in df['tny_name']]
df['tny_name'] = [s.replace('Australian Chps.','Australian Open').replace('Australian Open-2',\
'Australian Open').replace('U.S. National Chps.','US Open') for s in df['tny_name']]
df['is_gs'] = (df['tny_name'] == 'Australian Open') | (df['tny_name'] == '<NAME>') |\
(df['tny_name'] == 'Wimbledon') | (df['tny_name'] == 'US Open')
# format dates
df['tny_date'] = [datetime.datetime.strptime(str(x), "%Y%m%d").date() for x in df['tny_date']]
df['match_year'] = [x.year for x in df['tny_date']]
df['match_month'] = [x.month for x in df['tny_date']]
df['match_year'] = df['match_year'] + (df['match_month'] == 12) # correct december start dates
df['match_month'] = [1 if month==12 else month for month in df['match_month']] # to following year
df['score'] = [re.sub(r"[\(\[].*?[\)\]]", "", str(s)) for s in df['score']] # str(s) fixes any nans
df['score'] = ['RET' if 'RET' in s else s for s in df['score']]
df['w_swon'], df['l_swon'] = df['w_1stWon']+df['w_2ndWon'], df['l_1stWon']+df['l_2ndWon']
df['w_rwon'], df['l_rwon'] = df['l_svpt']-df['l_swon'], df['w_svpt']-df['w_swon']
df['w_rpt'], df['l_rpt'] = df['l_svpt'], df['w_svpt']
df.drop(['w_1stWon','w_2ndWon','l_1stWon','l_2ndWon'], axis=1, inplace=True)
# remove matches involving a retirement
abd_d, ret_d = set(abd_strings), set(ret_strings)
df['score'] = ['ABN' if score.split(' ')[-1] in abd_d else score for score in df['score']]
df['score'] = ['RET' if score in ret_d else score for score in df['score']]
return df.loc[(df['score'] != 'ABN') & (df['score'] != 'RET')].reset_index(drop=True)
'''
original dataset labels columns by 'w_'/'l_'
change 'w'/'l' to 'p0','p1' (where p0 is the higher ranked player, according to Elo ratings)
'''
# TODO: refactor this into two functions
def change_labels(df, cols):
# change w,l TO p0,p1
for col in cols:
df['p0'+col] = [df['l'+col][i] if df['winner'][i] else df['w'+col][i] for i in range(len(df))]
df['p1'+col] = [df['w'+col][i] if df['winner'][i] else df['l'+col][i] for i in range(len(df))]
# add s/r pct columns
p_hat = np.sum([df['p0_52_swon'],df['p1_52_swon']])/np.sum([df['p0_52_svpt'],df['p1_52_svpt']])
for label in ['p0','p1']:
df[label+'_s_pct'] = [p_hat if x==0 else x for x in np.nan_to_num(df[label+'_52_swon']/df[label+'_52_svpt'])]
df[label+'_r_pct'] = [1-p_hat if x==0 else x for x in np.nan_to_num(df[label+'_52_rwon']/df[label+'_52_rpt'])]
df[label+'_sf_s_pct'] = [p_hat if x==0 else x for x in np.nan_to_num(df[label+'_sf_52_swon']/df[label+'_sf_52_svpt'])]
df[label+'_sf_r_pct'] = [1-p_hat if x==0 else x for x in np.nan_to_num(df[label+'_sf_52_rwon']/df[label+'_sf_52_rpt'])]
for label in ['w', 'l']:
df.drop([label + col for col in cols], axis=1, inplace=True)
df['tny_name'] = [s if s==s else '<NAME>' for s in df['tny_name']]
return df
'''
original dataset labels columns by 'w_'/'l_'
change 'w'/'l' to 'p0','p1' (where p0 is the higher ranked player, according to Elo ratings)
(without extra formatting)
'''
def change_labels_v2(df, cols):
# change w,l TO p0,p1
for col in cols:
df['p0'+col] = [df['l'+col][i] if df['winner'][i] else df['w'+col][i] for i in range(len(df))]
df['p1'+col] = [df['w'+col][i] if df['winner'][i] else df['l'+col][i] for i in range(len(df))]
for label in ['w', 'l']:
df.drop([label + col for col in cols], axis=1, inplace=True)
return df
'''
confirm that match serve/return stats are not null
'''
def validate(row, label):
return row[label+'_swon']==row[label+'_swon'] and row[label+'_svpt']==row[label+'_svpt'] \
and row[label+'_rwon']==row[label+'_rwon'] and row[label+'_rpt']==row[label+'_rpt']
'''
from start_ind (a year before start_year), collect cumulative
12-month s/r stats prior to each match
'''
def get_current_52_stats(df, start_ind):
players_stats = {}
active_players = {}
w_l = ['p0', 'p1']
start_date = (df['match_year'][start_ind],df['match_month'][start_ind])
avg_stats = stats_52(start_date)
avg_stats.update(start_date,(6.4,10,3.6,10)) # set as prior so first row is not nan
for i, row in df[start_ind:].iterrows():
date = row['match_year'],row['match_month']
avg_stats.set_month(date)
for k,label in enumerate(w_l):
if row[label+'_name'] not in players_stats:
players_stats[row[label+'_name']] = stats_52(date)
# store serving stats prior to match, update current month
players_stats[row[label+'_name']].set_month(date)
if validate(row, label):
match_stats = (row[label+'_swon'],row[label+'_svpt'],row[w_l[1-k]+'_svpt']-\
row[w_l[1-k]+'_swon'],row[w_l[1-k]+'_svpt'])
players_stats[row[label+'_name']].update(date,match_stats)
avg_stats.update(date,match_stats)
active_players[row[label+'_name']] = 1 # log active player
# update every player to current month
for player in active_players.keys():
players_stats[player].set_month(date)
players = active_players.keys()
current_52_stats = [[player] + list(np.sum(players_stats[player].last_year,axis=0)) \
for player in players]
# avg_52_stats = np.sum(avg_stats.last_year,axis=0)
cols = ['player','52_swon','52_svpt','52_rwon','52_rpt']
current_stats_df = pd.DataFrame(current_52_stats, columns=cols)
current_stats_df['52_s_pct'] = current_stats_df['52_swon']/current_stats_df['52_svpt']
current_stats_df['52_r_pct'] = current_stats_df['52_rwon']/current_stats_df['52_rpt']
return current_stats_df[current_stats_df['52_svpt']>0] # return players active in past 12 months
'''
generate 12-month stats for Barnett-Clarke model
as well as variations (adjusted, EM-normalized)
'''
def generate_stats(df, start_ind):
df = generate_52_stats(df,start_ind)
print('generated 52 stats...')
df = generate_52_adj_stats(df,start_ind)
print('generated 52 adj stats...')
df = generate_tny_stats(df,start_ind)
print('generated tny stats...')
df = generate_commop_stats(df, start_ind)
print('generated commop stats...')
cols = ['_name','_elo_538','_sf_elo_538', #'_elo','_sf_elo'
'_swon', '_svpt', '_rwon', '_rpt',
'_52_swon', '_52_svpt','_52_rwon','_52_rpt',
'_sf_52_swon','_sf_52_svpt','_sf_52_rwon','_sf_52_rpt',
'_52_s_adj','_52_r_adj']
# of players p0, p1, p0 will always be the player with the first name alphabetically (since this is deterministic)
# the 'winner' will be 1 when p0's name comes alphabetically last and 0 otherwise
df['winner'] = df['w_name'] > df['l_name']
df = change_labels(df, cols)
df = change_labels_v2(df, ['_commop_s_pct', '_commop_r_pct'])
df['elo_diff'] = df['p0_elo_538'] - df['p1_elo_538']
df['sf_elo_diff'] = df['p0_sf_elo_538'] - df['p1_sf_elo_538']
# # dataframe with only official matches
# df = df[df['winner']!='None']
# df = df.reset_index(drop=True)
# cols = ['52_s_adj','52_r_adj']
em_cols = ['s_pct', 'r_pct', 'sf_s_pct', 'sf_r_pct', '52_s_adj', '52_r_adj']
df = generate_sr_pct(df)
# FIX for correct em stat sample sizes
df = df.loc[start_ind:].reset_index(drop=True)
df = generate_em_stats(df, em_cols)
return df
'''
add s/r pct columns, replacing nan with overall avg
'''
def generate_sr_pct(df):
p_hat = np.sum([df['p0_52_swon'],df['p1_52_swon']])
p_hat = p_hat/np.sum([df['p0_52_svpt'],df['p1_52_svpt']])
for label in ['p0','p1']:
# divide with np.nan_to_num and use p_hat as a placeholder when n=0
df[label+'_s_pct'] = np.nan_to_num(df[label+'_52_swon']/df[label+'_52_svpt'])
df[label+'_s_pct'] = df[label+'_s_pct'] + (p_hat) * (df[label+'_s_pct'] == 0)
df[label+'_r_pct'] = np.nan_to_num(df[label+'_52_rwon']/df[label+'_52_rpt'])
df[label+'_r_pct'] = df[label+'_r_pct'] + (1-p_hat)*(df[label+'_r_pct'] == 0)
df[label+'_sf_s_pct'] = np.nan_to_num(df[label+'_sf_52_swon']/df[label+'_sf_52_svpt'])
df[label+'_sf_s_pct'] = df[label+'_sf_s_pct'] + (p_hat) * (df[label+'_sf_s_pct'] == 0)
df[label+'_sf_r_pct'] = np.nan_to_num(df[label+'_sf_52_rwon']/df[label+'_sf_52_rpt'])
df[label+'_sf_r_pct'] = df[label+'_sf_r_pct'] + (1-p_hat)*(df[label+'_sf_r_pct'] == 0)
# finally, generate the observed service percentages in each match
df[label+'_s_pct_obsv'] = np.nan_to_num(df[label+'_swon']/df[label+'_svpt'])
return df
def finalize_df(df):
# generate serving probabilities for Barnett-Clarke model
df['match_id'] = range(len(df))
df['tny_stats'] = [df['avg_52_s'][i] if df['tny_stats'][i]==0 else df['tny_stats'][i] for i in range(len(df))]
df['p0_s_kls'] = df['tny_stats']+(df['p0_s_pct']-df['avg_52_s']) - (df['p1_r_pct']-df['avg_52_r'])
df['p1_s_kls'] = df['tny_stats']+(df['p1_s_pct']-df['avg_52_s']) - (df['p0_r_pct']-df['avg_52_r'])
df['p0_s_kls_EM'] = df['tny_stats']+(df['p0_s_pct_EM']-df['avg_52_s']) - (df['p1_r_pct_EM']-df['avg_52_r'])
df['p1_s_kls_EM'] = df['tny_stats']+(df['p1_s_pct_EM']-df['avg_52_s']) - (df['p0_r_pct_EM']-df['avg_52_r'])
df['p0_s_sf_kls'] = df['tny_stats']+(df['p0_sf_s_pct']-df['sf_avg_52_s']) - (df['p1_sf_r_pct']-df['sf_avg_52_r'])
df['p1_s_sf_kls'] = df['tny_stats']+(df['p1_sf_s_pct']-df['sf_avg_52_s']) - (df['p0_sf_r_pct']-df['sf_avg_52_r'])
df['p0_s_sf_kls_EM'] = df['tny_stats']+(df['p0_sf_s_pct_EM']-df['sf_avg_52_s']) - (df['p1_sf_r_pct_EM']-df['sf_avg_52_r'])
df['p1_s_sf_kls_EM'] = df['tny_stats']+(df['p1_sf_s_pct_EM']-df['sf_avg_52_s']) - (df['p0_sf_r_pct_EM']-df['sf_avg_52_r'])
df['p0_s_adj_kls'] = df['tny_stats']+(df['p0_52_s_adj']) - (df['p1_52_r_adj'])
df['p1_s_adj_kls'] = df['tny_stats']+(df['p1_52_s_adj']) - (df['p0_52_r_adj'])
df['p0_s_adj_kls_EM'] = df['tny_stats']+(df['p0_52_s_adj_EM']) - (df['p1_52_r_adj_EM'])
df['p1_s_adj_kls_EM'] = df['tny_stats']+(df['p1_52_s_adj_EM']) - (df['p0_52_r_adj_EM'])
df['p0_s_commop_kls'] = df['tny_stats']+(df['p0_commop_s_pct'] - df['avg_52_s']) - (df['p1_commop_r_pct'] - df['avg_52_r'])
df['p1_s_commop_kls'] = df['tny_stats']+(df['p1_commop_s_pct'] - df['avg_52_s']) - (df['p0_commop_r_pct'] - df['avg_52_r'])
p_hat = np.sum([df['p0_52_swon'],df['p1_52_swon']])/np.sum([df['p0_52_svpt'],df['p1_52_svpt']])
df['p0_s_baseline'] = p_hat
df['p1_s_baseline'] = p_hat
# generate match probabilities for Barnett-Clarke method, with or w/o EM estimators
df['match_prob_kls'] = [matchProb(row['p0_s_kls'],1-row['p1_s_kls']) for i,row in df.iterrows()]
df['match_prob_kls_EM'] = [matchProb(row['p0_s_kls_EM'],1-row['p1_s_kls_EM']) for i,row in df.iterrows()]
df['match_prob_sf_kls'] = [matchProb(row['p0_s_sf_kls'],1-row['p1_s_sf_kls']) for i,row in df.iterrows()]
df['match_prob_sf_kls_EM'] = [matchProb(row['p0_s_sf_kls_EM'],1-row['p1_s_sf_kls_EM']) for i,row in df.iterrows()]
df['match_prob_adj_kls'] = [matchProb(row['p0_s_adj_kls'],1-row['p1_s_adj_kls']) for i,row in df.iterrows()]
df['match_prob_adj_kls_EM'] = [matchProb(row['p0_s_adj_kls_EM'],1-row['p1_s_adj_kls_EM']) for i,row in df.iterrows()]
df['match_prob_commop_kls'] = [matchProb(row['p0_s_commop_kls'],1-row['p1_s_commop_kls']) for i,row in df.iterrows()]
df['match_prob_commop'] = [1 - df['w_commop_match_prob'][i] if df['winner'][i] else df['w_commop_match_prob'][i] for i in range(len(df))]
# generate win probabilities from elo differences
df['elo_prob'] = (1+10**(df['elo_diff']/-400.))**-1
df['sf_elo_prob'] = [(1+10**(diff/-400.))**-1 for diff in df['sf_elo_diff']]
# elo-induced serve percentages
df = generate_bc_stats_elo_induced(df, 'elo',start_ind=0)
return df
def get_start_ind(match_df, start_year):
return match_df[match_df['match_year']>=start_year-1].index[0]
'''
returns dataframe with up-to-date player stats through date of most recent match
'''
def generate_df(tour, start_year, end_year, ret_strings, abd_strings, counts_538):
print('start_year: ', start_year)
print('end_year: ', end_year)
match_df = concat_data(start_year, end_year, tour)
print('match_df.shape before: ', match_df.shape)
start_ind = match_df[match_df['match_year']>=start_year-1].index[0]
print('match_df.shape: ', match_df.shape)
match_df = generate_elo(match_df, counts_538)
print('generated elo on match dataset...')
match_df = generate_stats(match_df, start_ind) # 52, adj, tny, etc.
match_df = finalize_df(match_df)
match_df = match_df.reset_index(drop=True)
print('finalized df...')
return match_df
'''
returns a single dataframe containing every match with elo/serve/return/etc. stats
'''
def generate_test_dfs(tour, start_year, end_year, ret_strings, abd_strings, counts_538):
match_df = concat_data(start_year, end_year, tour)
start_ind = match_df[match_df['match_year']>=start_year-1].index[0]
match_df = generate_elo(match_df, counts_538)
match_df = generate_52_stats(match_df, start_ind)
match_df = generate_52_adj_stats(match_df, start_ind)
match_df = generate_tny_stats(match_df, start_ind)
match_df = generate_commop_stats(match_df, start_ind)
# TODO: add generate_em_stats() right here
return match_df
'''
receives n x 2 array with columns 'w_name', 'l_name', 'is_gs'
'''
def generate_elo_columns(arr, counts_538):
# flatten winner/loser names; map NaN entries to '' so they can be used as dict keys
player_names = arr[:, :2].flatten()
players_set = np.where(player_names!=player_names, '', player_names).tolist()
players_elo = {}
for player in players_set:
players_elo[player] = elo.Rating()
match_elos = np.zeros([arr.shape[0], 2])
elo_obj = elo.Elo_Rater()
# update player elo from every recorded match
for i in range(arr.shape[0]):
w_name, l_name = arr[i][:2]
if w_name != w_name or l_name != l_name:
match_elos[i] = np.nan, np.nan
continue
match_elos[i] = players_elo[w_name].value, players_elo[l_name].value
elo_obj.rate_1vs1(players_elo[w_name], players_elo[l_name], arr[i][2], counts_538)
return match_elos[:,0], match_elos[:,1]
def generate_surface_elo_columns(df, surfaces, counts_538):
df['w_sf_elo_538'], df['l_sf_elo_538'] = df['w_elo_538'], df['l_elo_538']
for surface in surfaces:
surface_df = df[(df['surface'] == surface) & (df['w_name'] == df['w_name']) & (df['l_name'] == df['l_name'])]
w_elo_columns, l_elo_columns = generate_elo_columns(np.array(surface_df[['w_name', 'l_name', 'is_gs']]), True)
df.loc[df['surface'] == surface, 'w_sf_elo_538'] = w_elo_columns
df.loc[df['surface'] == surface, 'l_sf_elo_538'] = l_elo_columns
return df['w_sf_elo_538'], df['l_sf_elo_538']
'''
receives n x 4 array with columns 'w_name', 'l_name', 'is_gs', 'Date'
'''
def generateEloColumnsWithHistory(arr, counts_538):
playerEloHistory = defaultdict(list)
players_set = np.unique(arr[:, :2])
players_elo = dict(zip(
players_set,
[elo.Rating() for __ in range(len(players_set))]
)) # can use default dict here?
match_elos = np.zeros([arr.shape[0], 2])
elo_obj = elo.Elo_Rater()
# update player elo from every recorded match
for i in range(arr.shape[0]):
w_name, l_name = arr[i][:2]
isGrandSlam = arr[i][2]
date = datetime.datetime.strptime(arr[i][3], '%Y-%m-%d')
match_elos[i] = players_elo[w_name].value, players_elo[l_name].value
elo_obj.rate_1vs1(players_elo[w_name], players_elo[l_name], 0, counts_538)
playerEloHistory[w_name].append({ 'date': date, 'newElo': players_elo[w_name].value, 'won': 1 })
playerEloHistory[l_name].append({ 'date': date, 'newElo': players_elo[l_name].value, 'won': 0 })
return match_elos[:,0], match_elos[:,1], playerEloHistory, players_elo
'''
return match dataframe with each player's pre-match elo ratings
'''
def generate_elo(df, counts_538=True):
df['w_elo_538'], df['l_elo_538'] = generate_elo_columns(np.array(df[['w_name', 'l_name', 'is_gs']]), True)
df['w_sf_elo_538'], df['l_sf_elo_538'] = generate_surface_elo_columns(df, ['Hard', 'Clay', 'Grass'], counts_538)
return df
# df['w_sf_elo_538'], df['l_sf_elo_538'] = df['w_elo_538'], df['l_elo_538']
# for surface in ['Hard', 'Clay', 'Grass']:
# surface_df = df[df['surface'] == surface]
# w_elo_columns, l_elo_columns = generate_elo_columns(np.array(surface_df[['w_name', 'l_name', 'is_gs']]), True)
# df.loc[df['surface'] == surface, 'w_sf_elo_538'] = w_elo_columns
# df.loc[df['surface'] == surface, 'l_sf_elo_538'] = l_elo_columns
# return df
'''
replace nan values with overall average array value
'''
def fill_nan_with_mean(arr):
mean = np.nanmean(arr)
arr[np.isnan(arr)] = mean
return arr
'''
collect 12-month s/r average performance by player
'''
def generate_52_stats(df,start_ind):
players_stats = {}
start_date = (df['match_year'][start_ind],df['match_month'][start_ind])
avg_stats = stats_52(start_date)
# set as prior so first row is not nan
avg_stats.update(start_date,(6.4,10,3.6,10))
# array w/ 2x1 arrays for each player's 12-month serve/return performance
match_52_stats = np.zeros([2,len(df),4])
avg_52_stats = np.zeros([len(df),4]) # avg tour-wide stats for serve, return
s_players_stats = {}
s_avg_stats = {}
for surface in ('Hard','Clay','Grass'):
s_players_stats[surface] = {}
s_avg_stats[surface] = stats_52((df['match_year'][0],df['match_month'][0]))
s_avg_stats[surface].update(start_date,(6.4,10,3.6,10))
s_match_52_stats = np.zeros([2,len(df),4])
s_avg_52_stats = np.zeros([len(df),4])
w_l = ['w','l']
for i, row in df.loc[start_ind:].iterrows():
surface = row['surface']
date = row['match_year'],row['match_month']
avg_stats.set_month(date)
avg_52_stats[i] = np.sum(avg_stats.last_year,axis=0)
for k,label in enumerate(w_l):
if row[label+'_name'] not in players_stats:
players_stats[row[label+'_name']] = stats_52(date)
# store serving stats prior to match, update current month
players_stats[row[label+'_name']].set_month(date)
match_52_stats[k][i] = np.sum(players_stats[row[label+'_name']].last_year,axis=0) # all four stats per player
# update serving stats if not null
if validate(row, label):
match_stats = (row[label+'_swon'],row[label+'_svpt'],row[w_l[1-k]+'_svpt']-\
row[w_l[1-k]+'_swon'],row[w_l[1-k]+'_svpt'])
players_stats[row[label+'_name']].update(date,match_stats)
avg_stats.update(date,match_stats)
# repeat above process for surface-specific stats
if surface not in ('Hard','Clay','Grass'):
continue
s_avg_stats[surface].set_month(date)
s_avg_52_stats[i] = np.sum(s_avg_stats[surface].last_year,axis=0)
for k,label in enumerate(w_l):
if row[label+'_name'] not in s_players_stats[surface]:
s_players_stats[surface][row[label+'_name']] = stats_52(date)
# store serving stats prior to match, from current month
s_players_stats[surface][row[label+'_name']].set_month(date)
s_match_52_stats[k][i] = np.sum(s_players_stats[surface][row[label+'_name']].last_year,axis=0)
# update serving stats if not null
if validate(row, label):
match_stats = (row[label+'_swon'],row[label+'_svpt'],row[w_l[1-k]+'_svpt']-\
row[w_l[1-k]+'_swon'],row[w_l[1-k]+'_svpt'])
s_players_stats[surface][row[label+'_name']].update(date,match_stats)
s_avg_stats[surface].update(date,match_stats)
for k,label in enumerate(w_l):
df[label+'_52_swon'] = match_52_stats[k][:,0]
df[label+'_52_svpt'] = match_52_stats[k][:,1]
df[label+'_52_rwon'] = match_52_stats[k][:,2]
df[label+'_52_rpt'] = match_52_stats[k][:,3]
df[label+'_sf_52_swon'] = s_match_52_stats[k][:,0]
df[label+'_sf_52_svpt'] = s_match_52_stats[k][:,1]
df[label+'_sf_52_rwon'] = s_match_52_stats[k][:,2]
df[label+'_sf_52_rpt'] = s_match_52_stats[k][:,3]
with np.errstate(divide='ignore', invalid='ignore'):
df['avg_52_s'] = fill_nan_with_mean(np.divide(avg_52_stats[:,0],avg_52_stats[:,1]))
df['avg_52_r'] = fill_nan_with_mean(np.divide(avg_52_stats[:,2],avg_52_stats[:,3]))
df['sf_avg_52_s'] = fill_nan_with_mean(np.divide(s_avg_52_stats[:,0],s_avg_52_stats[:,1]))
df['sf_avg_52_r'] = fill_nan_with_mean(np.divide(s_avg_52_stats[:,2],s_avg_52_stats[:,3]))
return df
'''
Efron-Morris estimators for 52-week serve and return percentages
Calculates B_i coefficients in terms of service points
Feed any existing col such that 'p0_'+col and 'p1_'+col are within df.columns
# TODO: you should be passing in the full column suffix after 'p0_'/'p1_'
'''
def generate_em_stats(df,cols):
for col in cols:
stat_history = np.concatenate([df['p0_'+col],df['p1_'+col]],axis=0)
n = int(len(stat_history)/2)
prefix = 'sf_52_' if 'sf' in col else '52_'
suffix = 'svpt' if '_s_' in col else 'rpt'
num_points = np.concatenate([df['p0_'+prefix+suffix],df['p1_'+prefix+suffix]])
p_hat = np.mean(stat_history)
sigma2_i = fill_nan_with_mean(np.divide(p_hat*(1-p_hat),num_points,where=num_points>0))
tau2_hat = np.nanvar(stat_history)
B_i = sigma2_i/(tau2_hat+sigma2_i)
stat_history[stat_history!=stat_history] = p_hat
df['p0_' + col + '_EM'] = df['p0_' + col]+B_i[:n] * (p_hat - df['p0_' + col])
df['p1_' + col + '_EM'] = df['p1_' + col]+B_i[n:] * (p_hat - df['p1_' + col])
print(col, p_hat)
return df # ok if p_hats don't add up because they're avg of averages
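# Added note (not from the original source): the shrinkage above follows the
# Efron-Morris / James-Stein form theta_i_EM = x_i + B_i*(p_hat - x_i) with
# B_i = sigma_i^2/(tau^2 + sigma_i^2), so players with few service points
# (large sigma_i^2, B_i close to 1) are pulled strongly toward the tour
# average p_hat, while well-sampled players keep roughly their raw percentage.
# Rough numeric sketch with assumed values: p_hat = 0.64, tau^2 = 0.004,
# sigma_i^2 = 0.016 gives B_i = 0.8, so a raw 0.70 serve percentage shrinks
# to 0.70 + 0.8*(0.64 - 0.70) = 0.652.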
'''
Efron-Morris estimators for 52-week serve and return percentages
Calculates B_i coefficients in terms of service points
Feed any existing col within df.columns
'''
def generate_em_stats_current(df,cols):
for col in cols:
stat_history = df[col]
num_points = df['52_svpt'] if col=='52_swon' else df['52_rpt']
p_hat = np.mean(stat_history)
sigma2_i = fill_nan_with_mean(np.divide(p_hat*(1-p_hat),num_points,where=num_points>0))
tau2_hat = np.nanvar(stat_history)
B_i = sigma2_i/(tau2_hat+sigma2_i)
stat_history[stat_history!=stat_history] = p_hat
df[col+'_EM'] = df[col]+B_i*(p_hat-df[col])
print(col, p_hat)
return df # ok if p_hats don't add up because they're avg of averages
'''
use to validate stats before calling the stats class's update() method
'''
def is_valid(arr):
return not np.isnan(arr).any()
'''
collects 12-month s/r stats relative to historical opponents
columns '52_s_adj','52_r_adj' represent how well a player
performs above average
'''
def generate_52_adj_stats(df,start_ind=0):
players_stats = {}
match_52_stats = np.zeros([2,len(df),2]) # 2x1 arrays for x_i, x_j's 12-month s/r performance
w_l = ['w','l']
for i, row in df.loc[start_ind:].iterrows():
surface = row['surface']
date = row['match_year'],row['match_month']
avg_52_s, avg_52_r = row['avg_52_s'],row['avg_52_r']
match_stats = [[],[]]
# add new players to the dictionary
for k,label in enumerate(w_l):
if row[label+'_name'] not in players_stats:
players_stats[row[label+'_name']] = adj_stats_52(date)
# store pre-match adj stats
for k,label in enumerate(w_l):
players_stats[row[label+'_name']].set_month(date)
# fill in player's adjusted stats prior to start of match
match_52_stats[k][i] = players_stats[row[label+'_name']].adj_sr
# update serving stats if not null
if validate(row, label):
sv_stats = (row[label+'_swon'],row[label+'_svpt'],row[label+'_rwon'],row[label+'_rpt'])
# TODO: this is the troublesome line... could be extracting nan value from opponent
# TODO: also rewrite this so it's readable (with bare array indexing the intent is not obvious at a glance)
opp_r_ablty = players_stats[row[w_l[1-k]+'_name']].adj_sr[1] + avg_52_r
opp_s_ablty = players_stats[row[w_l[1-k]+'_name']].adj_sr[0] + avg_52_s
opp_stats = (opp_r_ablty * row[label + '_svpt'], opp_s_ablty * row[label + '_rpt'])
match_stats[k] = sv_stats + opp_stats
# update players' adjusted scores based on pre-match adjusted ratings
for k,label in enumerate(w_l):
# if is_valid(match_stats):
if validate(row, label) and is_valid(match_stats):
players_stats[row[label+'_name']].update(date,match_stats[k])
for k,label in enumerate(w_l):
df[label+'_52_s_adj'] = match_52_stats[k][:,0]
df[label+'_52_r_adj'] = match_52_stats[k][:,1]
return df
'''
generate delta between two players relative to shared opponent
delta_i^AB = (spw(A, C_i) - (1 - rpw(A, C_i))) - (spw(B, C_i) - (1 - rpw(B, C_i)))
'''
def generate_delta(p1_stats, p2_stats):
p1_s_pct, p1_r_pct = p1_stats[0]/float(p1_stats[1]), p1_stats[2]/float(p1_stats[3])
p2_s_pct, p2_r_pct = p2_stats[0]/float(p2_stats[1]), p2_stats[2]/float(p2_stats[3])
return (p1_s_pct - (1 - p1_r_pct)) - (p2_s_pct - (1 - p2_r_pct))
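# Illustrative example (assumed numbers, not from the original source): if player A
# won 65/100 serve points and 38/100 return points against common opponent C, and
# player B won 60/100 and 35/100 against the same opponent, then
# generate_delta((65, 100, 38, 100), (60, 100, 35, 100)) returns
# (0.65 - (1 - 0.38)) - (0.60 - (1 - 0.35)) = 0.03 - (-0.05) = 0.08,
# i.e. A rates eight percentage points better than B through this shared opponent.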
'''
return true if total service/return points both greater than zero
'''
def has_stats(last_year_stats):
return last_year_stats[1] > 0 and last_year_stats[3] > 0
'''
get opponents who have played a match in the past 12 months (more than 0 points)
'''
def get_opponents(player_d, player_name):
historical_opponents = player_d[player_name].history.keys()
return [opp for opp in historical_opponents if has_stats(player_d[player_name].history[opp])]
'''
compute serve/return parameters, given their common opponent history
'''
def generate_commop_params(player_d, player1, player2):
p1_opponents, p2_opponents = get_opponents(player_d, player1), get_opponents(player_d, player2)
common_opponents = np.intersect1d(p1_opponents, p2_opponents)
if len(common_opponents) == 0:
return [0]
match_deltas = np.zeros(len(common_opponents))
for i, comm_op in enumerate(common_opponents):
p1_match_stats = player_d[player1].history[comm_op]
p2_match_stats = player_d[player2].history[comm_op]
comm_op_delta = generate_delta(p1_match_stats, p2_match_stats)
match_deltas[i] = comm_op_delta
if np.isnan(comm_op_delta):
print('nan here: ', p1_match_stats, p2_match_stats, comm_op)
overall_delta = np.mean(match_deltas)
if np.isnan(overall_delta):
print('nan, match_deltas: ', match_deltas)
return match_deltas
'''
collect historical s/r common-opponent performance by player
'''
def generate_commop_stats(df, start_ind):
player_d = {}
match_52_stats = np.zeros([2,len(df), 2])
match_probs = np.zeros([len(df)])
w_l = ['w','l']
for i, row in df.loc[start_ind:].iterrows():
for k, label in enumerate(w_l):
opponent_name = row[w_l[1-k]+'_name']
if row[label+'_name'] not in player_d:
player_d[row[label+'_name']] = commop_stats()
if validate(row, label):
match_stats = (row[label+'_swon'],row[label+'_svpt'],row[w_l[1-k]+'_svpt']-\
row[w_l[1-k]+'_swon'],row[w_l[1-k]+'_svpt'])
player_d[row[label+'_name']].update(match_stats, opponent_name)
# can compute common-opponent stats after current match stats inputted
if row['match_year'] >= COMMOP_START_YEAR: # start at COMMOP_START_YEAR, computationally intensive
match_deltas = generate_commop_params(player_d, row['w_name'], row['l_name'])
overall_delta = np.mean(match_deltas)
w_s_pct, w_r_pct = (.6 + overall_delta/2), (.4 + overall_delta/2)
match_52_stats[0][i] = [w_s_pct, w_r_pct]
match_52_stats[1][i] = [1 - w_r_pct, 1 - w_s_pct]
iterated_match_probs = [
np.mean([
matchProb(.6 + match_delta, .4),
matchProb(.6, .4 + match_delta)
])
for match_delta in match_deltas
]
match_probs[i] = np.mean(iterated_match_probs)
for k,label in enumerate(w_l):
df[label+'_commop_s_pct'] = match_52_stats[k][:,0]
df[label+'_commop_r_pct'] = match_52_stats[k][:,1]
df['w_commop_match_prob'] = match_probs
return df
'''
collect yearly tournament serve averages for 'f_av'
in the Barnett-Clarke equation
'''
def generate_tny_stats(df,start_ind=0):
tny_stats = {}
tny_52_stats = np.zeros(len(df))
for i, row in df.loc[start_ind:].iterrows():
if row['tny_name']=='<NAME>':
continue
year,t_id = row['tny_id'].split('-')
year = int(year)
match_stats = (row['w_swon']+row['l_swon'],row['w_svpt']+row['l_svpt'])
# handle nan cases, provide tny_stats if possible
if row['w_swon']!=row['w_swon']:
if t_id in tny_stats:
if year-1 in tny_stats[t_id].historical_avgs:
swon,svpt = tny_stats[t_id].historical_avgs[year-1]
tny_52_stats[i] = swon/float(svpt)
continue
# create new object if needed, then update
elif t_id not in tny_stats:
tny_stats[t_id] = tny_52(year)
tny_52_stats[i] = tny_stats[t_id].update(year,match_stats)
df['tny_stats'] = tny_52_stats
return df
'''
approximate inverse elo-->s_pct calculator
'''
def elo_induced_s(prob, s_total):
s0 = s_total/2
diff = s_total/4
current_prob = .5
while abs(current_prob-prob) > EPSILON:
if current_prob < prob:
s0 += diff
else:
s0 -= diff
diff /= 2
current_prob = matchProb(s0,1-(s_total-s0))
return s0, s_total-s0
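# Usage sketch (hypothetical values): given an elo-implied win probability of 0.70
# and a combined serve percentage s_total = 1.28, elo_induced_s(0.70, 1.28) bisects
# on matchProb until the split (s0, s_total - s0) reproduces a match-win probability
# within EPSILON of 0.70; s0 goes to the favourite and the remainder to the opponent.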
'''
important: s_total is set with EM-normalized percentages
'''
def generate_bc_stats_elo_induced(df,col,start_ind=0):
df['s_total'] = df['p0_s_kls_EM'] + df['p1_s_kls_EM']
induced_s = np.zeros([len(df),2])
for i, row in df.loc[start_ind:].iterrows():
induced_s[i] = elo_induced_s(row[col+'_prob'],row['s_total'])
df['p0_s_kls_' + col] = induced_s[:,0]
df['p1_s_kls_' + col] = induced_s[:,1]
del df['s_total']
return df
def format_pbp_df(df,tour='atp'):
df['w_name'] = np.where(df['winner'] == 0, df['server1'], df['server2'])
df['l_name'] = np.where(df['winner'] == 0, df['server2'], df['server1'])
df['w_name'] = [normalize_name(x,tour=tour) for x in df['w_name']]
df['l_name'] = [normalize_name(x,tour=tour) for x in df['l_name']]
df['date'] = pd.to_datetime(df['date'])
df['match_year'] = [x.year for x in df['date']]
df['match_month'] = [x.month for x in df['date']]
df['date'] = [x.date() for x in df['date']]
df['score'] = [re.sub(r"[\(\[].*?[\)\]]", "", s) for s in df['score']]
return df
def connect_match_and_pbp_dfs(match_df,pbp_df,col_d,player_cols,start_year=2009):
pbp_dict = {}; winner_dict = {}
for i in range(len(pbp_df)):
key = pbp_df['w_name'][i] +' ' + pbp_df['l_name'][i] + ' ' \
+ str(pbp_df['match_year'][i]) + ' ' + pbp_df['score'][i]
key = key+' '+str(pbp_df['match_month'][i]) if key in col_d else key
if key in pbp_dict:
continue
pbp_dict[key] = pbp_df['pbp'][i]
winner_dict[key] = pbp_df['winner'][i]
# in case of a collision (about 10 cases), I only take the first match with that key
c = 0
pbps,winners = [],[]
info = {}
match_df = match_df[match_df['match_year']>=start_year]
for i in match_df.index:
key = match_df['w_name'][i] +' ' + match_df['l_name'][i] + ' ' \
+str(match_df['match_year'][i])+' '+match_df['score'][i]
key = key+' '+str(match_df['match_month'][i]) if key in col_d else key
if key in pbp_dict:
c += 1
pbps.append(pbp_dict[key])
winners.append(winner_dict[key])
if key in info:
pbps[-1] = 'None'; winners[-1] = 'None'
print('collision');
print(key + ' ' + str(match_df['match_month'][i]))
info[key] = 1
else:
pbps.append('None')
# we'll just make 'winner' a random 0 or 1 for now
winners.append(np.random.choice([0,1]))
print(c)
match_df['pbp'] = pbps
match_df['winner'] = winners
#df = match_df[match_df['pbp']!='NA']
#cols = df.columns.drop(['loser_id','winner_id'])
df = match_df[match_df.columns.drop(['loser_id','winner_id'])]
df = df.reset_index(drop=True)
# # change w,l TO p0,p1
# for col in player_cols:
# df['p0'+col] = [df['l'+col][i] if df['winner'][i] else df['w'+col][i] for i in xrange(len(df))]
# df['p1'+col] = [df['w'+col][i] if df['winner'][i] else df['l'+col][i] for i in xrange(len(df))]
# # add s/r pct columns
# p_hat = np.sum([df['p0_52_swon'],df['p1_52_swon']])/np.sum([df['p0_52_svpt'],df['p1_52_svpt']])
# for label in ['p0','p1']:
# df[label+'_s_pct'] = [p_hat if x==0 else x for x in np.nan_to_num(df[label+'_52_swon']/df[label+'_52_svpt'])]
# df[label+'_r_pct'] = [1-p_hat if x==0 else x for x in np.nan_to_num(df[label+'_52_rwon']/df[label+'_52_rpt'])]
# df[label+'_sf_s_pct'] = [p_hat if x==0 else x for x in np.nan_to_num(df[label+'_sf_52_swon']/df[label+'_sf_52_svpt'])]
# df[label+'_sf_r_pct'] = [1-p_hat if x==0 else x for x in np.nan_to_num(df[label+'_sf_52_rwon']/df[label+'_sf_52_rpt'])]
# df['elo_diff'] = [df['p0_elo'][i] - df['p1_elo'][i] for i in xrange(len(df))]
# df['sf_elo_diff'] = [df['p0_sf_elo'][i] - df['p1_sf_elo'][i] for i in xrange(len(df))]
# df['tny_name'] = [s if s==s else '<NAME>' for s in df['tny_name']]
return df
```
#### File: src/sackmann/reseeder.py
```python
import random
from operator import itemgetter
## todo: reseeder() treats, e.g. seeds 17-32 as equivalent, though grand slam draws assign 17-24 to
## potential matchups with 9-16 and 25-32 to potential matchups with 1-8. add function parameter for that.
def insertSeedOrNonseed(ndraw, seeds, nonseeds, sj, nj):
if sj < len(seeds):
ndraw.append(seeds[sj])
sj += 1
else:
ndraw.append(nonseeds[nj])
nj += 1
return ndraw, sj, nj
byerow = [129, 'bye', 'bye', '', 'BYE']
def reseeder(draw, seedloc=3):
## reseeder takes a list of players (perhaps an existing draw) and generates
## a (new) draw according to non-strict seeding rules. 1st and 2nd seeds are
## placed in separate halves; 3 and 4 are randomly placed in the remaining
## quarters; 5 through 8 are randomly placed in the remaining 8ths,
## and so on. If there are byes, they are given to the top seeds.
##
## parameter "draw" is a list of player entries between 24 and 128 players long, e.g.
## [['1', 'nadalra15', '(1)Nadal', '1', 'ESP'],
## ['2', 'gruelsi15', 'Greul', '', 'GER']
## ...]
## The column contents don't matter, except that seeds must all be in the same column,
## the index of which is the optional 'seedloc' parameter.
## The player rows will be returned in the new order.
##
## As many seeds can be specified as you wish, but only one-quarter of the field
## (defined as a power of two, e.g. 24 players implies a 32 draw, so up to 8 seeds)
## will be 'seeded' in the sense that they will avoid other seeds until later
## rounds. There must be at least two seeds.
## Byes are ok, too. There cannot be more byes than seeds, and byes cannot
## exceed one-quarter of the draw. reseeder inserts those with a row defined
## by the global variable byerow
##
## A basic test is included, testReseeder(). It generates sample draws within
## the above parameters, with draw sizes from 24 to 128 and numbers of seeds
## between 2 and one-quarter of the field, then runs reseeder on each.
drawlen = len(draw)
if drawlen > 64: drawbin = 128
elif drawlen > 32: drawbin = 64
else: drawbin = 32
numbyes = drawbin - drawlen
## extract and sort seeds
seedlist = []
for p in draw:
if str(p[seedloc]).isdigit():
newrow = p[:seedloc] + [int(p[seedloc])] + p[(seedloc+1):]
seedlist.append(newrow)
if len(seedlist) < 2:
raise ValueError('reseeder requires at least two seeds')
elif numbyes > len(seedlist):
raise ValueError('reseeder cannot handle more byes than seeds')
seedlist = sorted(seedlist, key=itemgetter(seedloc))
## place seeds in groups: 1, 2, 3-4, 5-8, etc.
s1, s2, s3, s5, s9, s17, s33 = [], [], [], [], [], [], []
for i in range(len(seedlist)):
s = seedlist[i]
if i == 0: s1.append(s)
elif i == 1: s2.append(s)
elif i < 4: s3.append(s)
elif i < 8: s5.append(s)
elif i < 16: s9.append(s)
elif i < 32: s17.append(s)
else: s33.append(s)
## next few lines place 'extra' seeds with unseeded players
if drawbin == 128: unseeds = s33
elif drawbin == 64: unseeds = s17
else: unseeds = s9
for r in draw:
if str(r[seedloc]).isdigit(): pass
else: unseeds.append(r)
random.shuffle(s3)
random.shuffle(s5)
random.shuffle(s9)
random.shuffle(s17)
random.shuffle(unseeds)
## generate new draw according to non-strict seeding logic
ndraw = []
i = 0
j, k, m, n, p = 0, 0, 0, 0, 0 ## counters to loop through lists of seeds, e.g. k through s5 for seeds 5-8
while True:
if i == drawbin: break
if i == 0: ndraw.append(s1[0])
elif i == (drawbin/2): ndraw.append(s2[0])
elif i % (drawbin/4) == 0:
ndraw, j, p = insertSeedOrNonseed(ndraw, s3, unseeds, j, p)
elif i % (drawbin/8) == 0:
ndraw, k, p = insertSeedOrNonseed(ndraw, s5, unseeds, k, p)
elif drawbin >= 64 and i % (drawbin/16) == 0:
ndraw, m, p = insertSeedOrNonseed(ndraw, s9, unseeds, m, p)
elif drawbin == 128 and i % (drawbin/32) == 0:
ndraw, n, p = insertSeedOrNonseed(ndraw, s17, unseeds, n, p)
elif numbyes and type(ndraw[-1][seedloc]) is int:
if int(ndraw[-1][seedloc]) <= numbyes:
ndraw.append(byerow)
else:
ndraw.append(unseeds[p])
p += 1
else:
ndraw.append(unseeds[p])
p += 1
i += 1
return ndraw
def generateSampleDraw(fieldsize, numseeds):
fakedraw = []
for i in range(1, fieldsize+1):
if i <= numseeds:
fakerow = [i, 'playerid', 'Player', i, 'XXX']
else:
fakerow = [i, 'playerid', 'Player', '', 'XXX']
fakedraw.append(fakerow)
return fakedraw
def testReseeder():
## generates sample draws for every acceptable combination of
## field size and number of seeds
for fieldsize in range(24, 129):
if fieldsize > 64: maxseeds = 32
elif fieldsize > 32: maxseeds = 16
else: maxseeds = 8
for numseeds in range(2, (maxseeds+1)):
byes = maxseeds*4 - fieldsize
if byes > numseeds: continue
print(fieldsize, numseeds)
fakedraw = generateSampleDraw(fieldsize, numseeds)
ndraw = reseeder(fakedraw)
def printSampleReseed(fieldsize, numseeds):
## generates sample draw and sends it to reseeder to get a
## visual check that it is behaving as predicted
sdraw = generateSampleDraw(fieldsize, numseeds)
ndraw = reseeder(sdraw)
for player in ndraw: print(player)
```
#### File: tennis_match_prediction/test/data_tests.py
```python
import pandas as pd
import numpy as np
from globals import COUNTS_538, TOUR, RET_STRINGS, ABD_STRINGS
from data_functions import *
from tqdm import tqdm
START_YEAR_TEST = 2012
END_YEAR_TEST = 2015
CURRENT_DF_TEST_PATH = 'test_data/test_df_current.csv'
MATCH_DF_TEST_PATH = 'test_data/test_df_match.csv'
TEST_COLUMNS_ELO = ['w_elo_538', 'l_elo_538', 'w_sf_elo_538', 'l_sf_elo_538']
TEST_COLUMNS_52 = [
'w_52_swon', 'w_52_svpt', 'w_52_rwon', 'w_52_rpt',
'w_sf_52_swon', 'w_sf_52_svpt', 'w_sf_52_rwon', 'w_sf_52_rpt',
'l_52_swon', 'l_52_svpt', 'l_52_rwon', 'l_52_rpt',
'l_sf_52_swon', 'l_sf_52_svpt', 'l_sf_52_rwon', 'l_sf_52_rpt',
'avg_52_s', 'avg_52_r'
]
TEST_COLUMNS_52_ADJ = [
'w_52_s_adj', 'w_52_r_adj', 'l_52_s_adj', 'l_52_r_adj'
]
TEST_COLUMNS_COMMOP = [
'w_commop_s_pct', 'w_commop_r_pct', 'l_commop_s_pct', 'l_commop_r_pct'
]
def compare_cols(df, test_df, col_name):
try:
assert(np.isclose(df[col_name], test_df[col_name]).all())
except AssertionError:
print('failed at col: ', col_name)
def test_cols(df, test_df, cols):
for col in tqdm(cols):
compare_cols(df, test_df, col)
# TODO: update tests to use match_df not active df
def test_elo(df, test_df):
print('### testing elo ###')
elo_df = generate_elo(df, COUNTS_538)
print('generated: ', elo_df.shape)
test_cols(elo_df, test_df, TEST_COLUMNS_ELO)
print('--- elo passed ---')
def test_52_stats(df, test_df):
print('### testing 52 stats ###')
df = generate_52_stats(df, 0)
test_cols(df, test_df, TEST_COLUMNS_52)
print('--- 52 stats passed ---')
def test_52_adj_stats(df, test_df):
print('### testing 52 adj stats ###')
df = generate_52_adj_stats(df, 0)
test_cols(df, test_df, TEST_COLUMNS_52_ADJ)
print('--- 52 adj stats passed ---')
def test_commop_stats(df, test_df):
print('### testing commop stats ###')
df = generate_commop_stats(df, 0)
test_cols(df, test_df, TEST_COLUMNS_COMMOP)
assert(np.isclose(df['w_commop_s_pct'] + df['l_commop_s_pct'], 1.2).all())
assert(np.isclose(df['w_commop_r_pct'] + df['l_commop_r_pct'], .8).all())
print('--- commop stats passed ---')
# def test_em_stats(df, test_df):
# print '### testing commop stats ###'
# df = generate_(df, 0)
# test_cols(df, test_df, TEST_COLUMNS_COMMOP)
# print '--- EM stats passed ---'
def validate_data_pipeline(df, test_df):
test_elo(df, test_df)
test_52_stats(df, test_df)
test_52_adj_stats(df, test_df)
test_commop_stats(df, test_df)
def validate_test_df(test_df):
return
if __name__=='__main__':
df = concat_data(START_YEAR_TEST, END_YEAR_TEST, TOUR)
test_df = pd.read_csv(MATCH_DF_TEST_PATH)
validate_test_df(test_df)
validate_data_pipeline(df, test_df)
# # only run this once
# def build_test_df():
# match_df = generate_test_dfs(TOUR, 2012, 2015, RET_STRINGS, ABD_STRINGS, COUNTS_538)
# match_file_path = 'test_data/test_df_match.csv'
# match_df.to_csv(match_file_path, index=False)
# print '{} constructed'.format(match_file_path)
```
|
{
"source": "jgome047/KGI-hand-tracking-research",
"score": 3
}
|
#### File: jgome047/KGI-hand-tracking-research/hand angle 3d.py
```python
import mediapipe as mp
import cv2
import numpy as np
import uuid
import os
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
from matplotlib import pyplot as plt
#define the joints of interest (pretty much all of them)
joint_list = [[4,3,2], [8,7,6], [12,11,10], [16,15,14], [20,19,18]]
joint_list[3]
#function for calculating finger angles (for now, in 2D)
def draw_finger_angles(image, results, joint_list):
# Loop through hands
for hand in results.multi_hand_landmarks:
#Loop through joint sets
for joint in joint_list:
a = np.array([hand.landmark[joint[0]].x, hand.landmark[joint[0]].y, hand.landmark[joint[0]].z]) # First coord
b = np.array([hand.landmark[joint[1]].x, hand.landmark[joint[1]].y, hand.landmark[joint[1]].z]) # Second coord
c = np.array([hand.landmark[joint[2]].x, hand.landmark[joint[2]].y, hand.landmark[joint[2]].z]) # Third coord
#length of finger (might need to add some calcs distance formula sqrt((x2-x1)**2)+(y2-y1)**2))
#len_ab = np.sqrt[(((b[0])-(a[0]))**2)+(((b[1])-(a[1]))**2)]
#len_bc = np.sqrt[(((c[0])-(b[0]))**2)+(((c[1])-(b[1]))**2)]
len_ab = 2.5
len_bc = 2.5
# assign easy to "read" coordinates based on arrays
xa = a[0]
ya = a[1]
za = a[2]
xb = b[0]
yb = b[1]
zb = b[2]
# calculate the z position of point b
zb = np.sqrt(len_ab**2 - (xb-xa)**2 - (yb-ya)**2)
# assign easy to "read" coordinates based on array
xc = c[0]
yc = c[1]
zc = c[2]
# calculate the z position of point c
zc = np.sqrt(len_bc**2 - (xc-xb)**2 - (yc-yb)**2)
# calculate the length of segment ac
len_ac = np.sqrt(xc**2 + yc**2 + zc**2)
# compute the angle between segments ab and bc
theta = np.arccos( (len_ab**2+len_bc**2-len_ac**2) / (2*len_ab*len_bc))
angle = np.abs(theta*180.0/np.pi)
if angle > 180.0:
angle = 360-angle
cv2.putText(image, str(round(angle,2)), tuple(np.multiply([b[0],b[1]], [640, 480]).astype(int)),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
return image
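# Added note (not part of the original script): the angle above is the law of cosines
# at joint b, theta = arccos((|ab|^2 + |bc|^2 - |ac|^2) / (2*|ab|*|bc|)), using the
# assumed segment lengths len_ab = len_bc = 2.5. Quick sanity check with made-up
# lengths |ab| = |bc| = |ac| = 2.5: arccos(6.25/12.5) = 60 degrees, as expected for
# an equilateral configuration.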
#labelling hands as L and R, angles of each finger, and confidence values
def get_label(index, hand, results):
output = None
for idx, classification in enumerate(results.multi_handedness):
if classification.classification[0].index == index:
# Process results
label = classification.classification[0].label
score = classification.classification[0].score
text = '{} {}'.format(label, round(score, 2))
# Extract Coordinates
coords = tuple(np.multiply(
np.array((hand.landmark[mp_hands.HandLandmark.WRIST].x, hand.landmark[mp_hands.HandLandmark.WRIST].y)),
[640,480]).astype(int))
output = text, coords
return output
#camera capture and drawing of finger segments
cap = cv2.VideoCapture(0)
with mp_hands.Hands(min_detection_confidence=0.8, min_tracking_confidence=0.5) as hands:
while cap.isOpened():
ret, frame = cap.read()
# BGR 2 RGB
image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Flip on horizontal
image = cv2.flip(image, 1)
# Set flag
image.flags.writeable = False
# Detections
results = hands.process(image)
# Set flag to true
image.flags.writeable = True
# RGB 2 BGR
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# Detections
print(results)
# Rendering results
if results.multi_hand_landmarks:
for num, hand in enumerate(results.multi_hand_landmarks):
mp_drawing.draw_landmarks(image, hand, mp_hands.HAND_CONNECTIONS,
mp_drawing.DrawingSpec(color=(121, 22, 76), thickness=2, circle_radius=4),
mp_drawing.DrawingSpec(color=(250, 44, 250), thickness=2, circle_radius=2),
)
# Render left or right detection
if get_label(num, hand, results):
text, coord = get_label(num, hand, results)
cv2.putText(image, text, coord, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
# Draw angles to image from joint list
draw_finger_angles(image, results, joint_list)
# Save our image
#cv2.imwrite(os.path.join('Output Images', '{}.jpg'.format(uuid.uuid1())), image)
cv2.imshow('Hand Tracking', image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
cv2.waitKey(1)
```
|
{
"source": "Jgomez95/Data-Mining-Mini-Project",
"score": 3
}
|
#### File: Data-Mining-Mini-Project/part2/p2.py
```python
from sklearn import datasets
from sklearn import tree
from sklearn import svm
from sklearn.svm import SVC
import os
trainPath = '/Users/Juan/Projects/Data-Mining-Mini-Project/train'
testPath = '/Users/Juan/Projects/Data-Mining-Mini-Project/test'
def main():
svm_classifier(trainPath, testPath)
def svm_classifier(file_path_training, file_path_test):
# declaration of all lists needed in this section
allElements = []
allWords = []
labels = []
allWordsTest = []
allElementsTest = []
countTest = []
i = 0
j = 0
#********************************************
#read in the train data
for filename in os.listdir(file_path_training):
if filename.startswith("spm"):
#adds 1 for spam to labels list
labels.append(1)
else:
labels.append(0)
#reads all files and saves every word to allWords list
with open((trainPath+"/"+filename), 'r') as f:
content = f.read()
allElements.append(content)
for word in content.split():
allWords.append(word)
# change allWords list to a set (then back to a list) to purge duplicate words
allWords = list(set(allWords))
# build the document-term count matrix for the training emails
count = []
for email in allElements:
count.append([])
for word in allWords:
count[i].append(email.count(word))
i += 1
#********************************************
#read in the test data
for filename in os.listdir(file_path_test):
with open((testPath+"/"+filename), 'r') as f:
content = f.read()
allElementsTest.append(content)
for word in content.split():
allWordsTest.append(word)
# change allWordsTest list to a set to purge duplicate words
allWordsTest = set(allWordsTest)
# iterates through words and gets a count of all the words in the emails
for email in allElementsTest:
countTest.append([])
for word in allWordsTest:
countTest[j].append(email.count(word))
j += 1
#prints if successful
print("successful reading in test data & train data")
#SVM classifer training set
clf = svm.SVC(gamma='auto', C=1.0, class_weight=None, kernel='linear', verbose=False, degree=3)
clf.fit(count, labels)
# run the svm prediction method with our test data
#print( clf.predict(countTest))
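# Optional sketch (not part of the original assignment): the manual counting above
# could also be done with scikit-learn's CountVectorizer, which builds the
# document-term matrix with a single shared vocabulary for both sets, e.g.:
# from sklearn.feature_extraction.text import CountVectorizer
# vectorizer = CountVectorizer()
# X_train = vectorizer.fit_transform(allElements) # email strings from the train folder
# X_test = vectorizer.transform(allElementsTest) # email strings from the test folder
# clf = svm.SVC(kernel='linear')
# clf.fit(X_train, labels)
# print(clf.predict(X_test))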
main()
```
|
{
"source": "jgomezc1/PY_series",
"score": 3
}
|
#### File: PY_series/pyseries/fourier.py
```python
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
def ricker(nt, Tt, tc, fc):
"""
Computes a Ricker pulse of:
Central frequency fc
Time centered at tc
Time windowed at Tt
Samped at nt points
"""
time = np.linspace(0, Tt, nt)
tau = np.pi*fc*(time - tc)
Rick = (2.0*tau**2 - 1.0) * np.exp(-tau**2)
return Rick, time
def grafsignalG(A, dt , Ngra):
"""
Plots the generalized time signal A[ndats] into Ngra
"""
ndats = len(A)
x = np.arange(ndats)*dt
plt.figure(Ngra)
plt.plot(x,A)
plt.grid()
#
return
def Ftrans(datos, ndats, dt, fs):
"""
Compute the Fourier spectra of datos[] of
length ndats and sampled at dt.
Returns the result in Samag after smoothing by the
smoothing factor fs.
"""
nfr = int(ndats/2)
df = 1.0/(ndats*dt)
x = np.arange(df,nfr*df, df)
A = np.fft.fft(datos)
Aa = np.abs(A)
# Smooth the spectrum.
Sa = Aa[1:nfr]
Samag = smooth(Sa , x , fs)
nfs = nfr-1
return x , Samag, A , nfs
def smooth(Sa, Freq, fftfs):
"""
Parameters
----------
Sa : ndarray
Original spectrum.
Freq : float
Frequency.
fftfs : float
Smoothing factor.
"""
Sas = np.zeros([len(Sa)],dtype=float)
fia = 1
fma = 1
suma = Sa[0] * Sa[0]
pot = 1./2./fftfs
fsexpi = 2**(-pot)
fsexpm = 2**( pot)
Sas[0] = Sa[0]
NNfft = len(Sa)
for i in range(1, NNfft):
#
fi = int((i + 1) * fsexpi)
fm = int((i + 1) * fsexpm)
if fi < 1:
fi = 1
if fm > NNfft:
fm = NNfft
for Num in range(fia - 1, fi - 1):
#
suma = suma - Sa[Num] * Sa[Num]
for Num in range(fma, fm):
#
suma = suma + Sa[Num]*Sa[Num]
Nf = fm - fi + 1
fia = fi
fma = fm
Sas[i]=np.sqrt(suma/Nf)
return (Sas)
def grafFourier(Sas , x , nfr , Nfig):
"""
Plots the Fourier spectral amplitude Sas into Nfig.
Sas : Spectrum
x : frecuency
xmin,xmax,ymin,ymax
"""
#
plt.figure(Nfig)
plt.plot(x,Sas)
plt.grid()
plt.xlabel('Frecuencia (Hz)')
plt.ylabel('Amplitud')
#
return
#
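# Minimal usage sketch (added; parameter values are assumptions, not from the original
# module): build a Ricker pulse, compute its smoothed Fourier spectrum and plot it.
# nt, Tt, tc, fc = 1024, 4.0, 1.0, 2.0
# pulse, time = ricker(nt, Tt, tc, fc)
# dt = time[1] - time[0]
# freq, Samag, A, nfs = Ftrans(pulse, nt, dt, 4.0)
# grafFourier(Samag, freq, nfs, 1)
# plt.show()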
```
|
{
"source": "jgomezc1/SOLIDSPy_DYN",
"score": 3
}
|
#### File: SOLIDSPy_DYN/solidspydyn/assemutil.py
```python
from __future__ import division, print_function
import numpy as np
from scipy.sparse import coo_matrix
import uelutil as ue
import femutil as fem
def eqcounter(nodes):
"""Counts active equations and creates BCs array IBC
Parameters
----------
nodes : ndarray
Array with nodes coordinates and boundary conditions.
Returns
-------
neq : int
Number of equations in the system after removing the nodes
with imposed displacements.
IBC : ndarray (int)
Array that maps the nodes with number of equations.
"""
nnodes = nodes.shape[0]
IBC = np.zeros([nnodes, 2], dtype=np.integer)
for i in range(nnodes):
for k in range(2):
IBC[i , k] = int(nodes[i , k+3])
neq = 0
for i in range(nnodes):
for j in range(2):
if IBC[i, j] == 0:
IBC[i, j] = neq
neq = neq + 1
return neq, IBC
def DME(nodes, elements):
"""Counts active equations, creates BCs array IBC[]
and the assembly operator DME[]
Parameters
----------
nodes : ndarray.
Array with the nodal numbers and coordinates.
elements : ndarray
Array with the number for the nodes in each element.
Returns
-------
DME : ndarray (int)
Assembly operator.
IBC : ndarray (int)
Boundary conditions array.
neq : int
Number of active equations in the system.
"""
nels = elements.shape[0]
IELCON = np.zeros([nels, 9], dtype=np.integer)
DME = np.zeros([nels, 18], dtype=np.integer)
neq, IBC = eqcounter(nodes)
for i in range(nels):
iet = elements[i, 1]
ndof, nnodes, ngpts = fem.eletype(iet)
for j in range(nnodes):
IELCON[i, j] = elements[i, j+3]
kk = IELCON[i, j]
for l in range(2):
DME[i, 2*j+l] = IBC[kk, l]
return DME , IBC , neq
def retriever(elements , mats , nodes , i, uel=None):
"""Computes the elemental stiffness matrix of element i
Parameters
----------
elements : ndarray
Array with the number for the nodes in each element.
mats : ndarray.
Array with the material profiles.
nodes : ndarray.
Array with the nodal numbers and coordinates.
i : int.
Identifier of the element to be assembled.
Returns
-------
kloc : ndarray (float)
Array with the local stiffness matrix.
ndof : int.
Number of degrees of fredom of the current element.
"""
par = np.zeros([5], dtype=np.float)
IELCON = np.zeros([9], dtype=np.integer)
iet = elements[i , 1]
ndof, nnodes, ngpts = fem.eletype(iet)
elcoor = np.zeros([nnodes, 2])
im = np.int(elements[i, 2])
par[:] = mats[im, :]
for j in range(nnodes):
IELCON[j] = elements[i, j+3]
elcoor[j, 0] = nodes[IELCON[j], 1]
elcoor[j, 1] = nodes[IELCON[j], 2]
if uel is None:
if iet == 1:
kloc , mloc , cloc = ue.uel4nquad(elcoor , par)
elif iet == 2:
kloc , mloc , cloc = ue.uel6ntrian(elcoor, par)
elif iet == 3:
kloc , mloc , cloc = ue.uel3ntrian(elcoor, par)
elif iet == 5:
kloc , mloc , cloc = ue.uelspring(elcoor, par)
elif iet == 6:
kloc , mloc , cloc = ue.ueltruss2D(elcoor, par)
elif iet == 7:
kloc , mloc , cloc = ue.ueldashpot(elcoor, par)
elif iet == 8:
kloc , mloc , cloc = ue.uel9nquad(elcoor , par)
elif iet == 9:
kloc , mloc , cloc = ue.uel3dash(elcoor , par)
else:
kloc, ndof, iet = uel(elcoor, par)
return kloc , mloc , cloc , ndof , iet
def assembler(elements, mats, nodes, neq, DME, sparse=True, uel=None):
"""Assembles the global stiffness matrix
Parameters
----------
elements : ndarray (int)
Array with the number for the nodes in each element.
mats : ndarray (float)
Array with the material profiles.
nodes : ndarray (float)
Array with the nodal numbers and coordinates.
DME : ndarray (int)
Assembly operator.
neq : int
Number of active equations in the system.
sparse : boolean (optional)
Boolean variable to pick sparse assembler. It is True
by default.
uel : callable function (optional)
Python function that returns the local stiffness matrix.
Returns
-------
KG : ndarray (float)
Array with the global stiffness matrix. It might be
dense or sparse, depending on the value of _sparse_
MG : ndarray (float)
Array with the global mass matrix.
CG : ndarray (float)
Array with the global damping matrix.
"""
if sparse:
KG , MG , CG = sparse_assem(elements, mats, nodes, neq, DME, uel=uel)
else:
KG , MG , CG = dense_assem(elements, mats, nodes, neq, DME, uel=uel)
return KG , MG , CG
def effective(KG , MG , CG , ac ):
KE = ac[0]*MG+ac[1]*CG+KG
return KE
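# Added note (not part of the original source): KE = ac[0]*MG + ac[1]*CG + KG is the
# effective stiffness of the implicit (Wilson-theta) scheme; ac[0] and ac[1] are
# presumably the first two entries of the "ass" array built in
# preprocesor.intparams(), i.e. 6/(theta*dt)**2 and 3/(theta*dt).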
def dense_assem(elements, mats, nodes, neq, DME, uel=None):
"""
Assembles the global stiffness matrix _KG_
using a dense storing scheme
Parameters
----------
elements : ndarray (int)
Array with the number for the nodes in each element.
mats : ndarray (float)
Array with the material profiles.
nodes : ndarray (float)
Array with the nodal numbers and coordinates.
DME : ndarray (int)
Assembly operator.
neq : int
Number of active equations in the system.
uel : callable function (optional)
Python function that returns the local stiffness matrix.
Returns
-------
KG : ndarray (float)
Array with the global stiffness matrix in a dense numpy
array.
MG : ndarray (float)
Array with the global mass matrix in a dense numpy
array.
CG : ndarray (float)
Array with the global damping matrix in a dense numpy
array.
"""
KG = np.zeros((neq, neq))
MG = np.zeros((neq, neq))
CG = np.zeros((neq, neq))
nels = elements.shape[0]
for el in range(nels):
kloc , mloc , cloc , ndof , iet = retriever(elements , mats , nodes , el, uel=uel)
if iet == 6:
dme = np.zeros([ndof], dtype=np.integer)
dme[0] = DME[el, 0]
dme[1] = DME[el, 1]
dme[2] = DME[el, 3]
dme[3] = DME[el, 4]
else:
dme = DME[el, :ndof]
for row in range(ndof):
glob_row = dme[row]
if glob_row != -1:
for col in range(ndof):
glob_col = dme[col]
if glob_col != -1:
KG[glob_row, glob_col] = KG[glob_row, glob_col] +\
kloc[row, col]
MG[glob_row, glob_col] = MG[glob_row, glob_col] +\
mloc[row, col]
CG[glob_row, glob_col] = CG[glob_row, glob_col] +\
cloc[row, col]
return KG , MG , CG
def sparse_assem(elements, mats, nodes, neq, DME, uel=None):
"""
Assembles the global stiffness matrix _KG_
using a sparse storing scheme
The scheme used to assemble is COOrdinate list (COO), and
it converted to Compressed Sparse Row (CSR) afterward
for the solution phase [1]_.
Parameters
----------
elements : ndarray (int)
Array with the number for the nodes in each element.
mats : ndarray (float)
Array with the material profiles.
nodes : ndarray (float)
Array with the nodal numbers and coordinates.
DME : ndarray (int)
Assembly operator.
neq : int
Number of active equations in the system.
uel : callable function (optional)
Python function that returns the local stiffness matrix.
Returns
-------
KG : ndarray (float)
Array with the global stiffness matrix in a sparse
Compressed Sparse Row (CSR) format.
MG : ndarray (float)
Array with the global mass matrix in CSR format.
CG : ndarray (float)
Array with the global damping matrix in CSR format.
References
----------
.. [1] Sparse matrix. (2017, March 8). In Wikipedia,
The Free Encyclopedia.
https://en.wikipedia.org/wiki/Sparse_matrix
"""
rows = []
cols = []
kvals = []
mvals = []
cvals = []
nels = elements.shape[0]
for el in range(nels):
kloc , mloc , cloc , ndof , iet = retriever(elements , mats , nodes , el, uel=uel)
if iet == 6:
dme = np.zeros([ndof], dtype=np.integer)
dme[0] = DME[el, 0]
dme[1] = DME[el, 1]
dme[2] = DME[el, 3]
dme[3] = DME[el, 4]
else:
dme = DME[el, :ndof]
for row in range(ndof):
glob_row = dme[row]
if glob_row != -1:
for col in range(ndof):
glob_col = dme[col]
if glob_col != -1:
rows.append(glob_row)
cols.append(glob_col)
kvals.append(kloc[row, col])
mvals.append(mloc[row, col])
cvals.append(cloc[row, col])
stiff = coo_matrix((kvals, (rows, cols)),
shape=(neq, neq)).tocsr()
mass = coo_matrix((mvals, (rows, cols)),
shape=(neq, neq)).tocsr()
damp = coo_matrix((cvals, (rows, cols)),
shape=(neq, neq)).tocsr()
return stiff, mass , damp
def loadasem(loads, IBC, neq , ninc , T , Tc , fc):
"""Assembles the global Right Hand Side Vector RHSG
Parameters
----------
loads : ndarray
Array with the loads imposed in the system.
IBC : ndarray (int)
Array that maps the nodes with number of equations.
neq : int
Number of equations in the system after removing the nodes
with imposed displacements.
Returns
-------
RHSG : ndarray
Array with the right hand side vector.
"""
nloads = loads.shape[0]
RHSG = np.zeros((neq, ninc))
#
Tt= T
Nt=ninc
Rick, T=ricker(Nt, Tt, Tc, fc)
#
for i in range(nloads):
il = int(loads[i, 0])
ilx = IBC[il, 0]
ily = IBC[il, 1]
if ilx != -1:
for k in range(ninc):
RHSG[ilx , k] = loads[i, 1]*Rick[k]
if ily != -1:
for k in range(ninc):
RHSG[ily , k] = loads[i, 2]*Rick[k]
return RHSG
def ricker(nt, Tt, tc, fc):
Rick = np.zeros(nt)
T = np.zeros(nt)
dt = Tt/(nt-1)
for i in range(nt):
tao=np.pi*fc*(dt*i-tc)
Rick[i]=(2.*tao**2-1.)*np.exp(-tao**2)
T[i]= i*dt
return Rick, T
```
#### File: SOLIDSPy_DYN/solidspydyn/femutil.py
```python
from __future__ import division, print_function
import gaussutil as gau
import numpy as np
def eletype(iet):
"""Assigns number to degrees of freedom
According to iet assigns number of degrees of freedom, number of
nodes and minimum required number of integration points.
Parameters
----------
iet : int
Type of element. These are:
1. 4 node bilinear quadrilateral.
2. 6 node quadratic triangle.
3. 3 node linear triangle.
5. 2 node spring.
6. 2 node truss element.
7. 2D dashpot.
8. 9 noded quadratic element.
9. Lumped discrete dashpot.
Returns
-------
ndof : int
Number of degrees of freedom for the selected element.
nnodes : int
Number of nodes for the selected element.
ngpts : int
Number of Gauss points for the selected element.
"""
if iet == 1:
ndof = 8
nnodes = 4
ngpts = 4
if iet == 2:
ndof = 12
nnodes = 6
ngpts = 7
if iet == 3:
ndof = 6
nnodes = 3
ngpts = 3
if iet == 5:
ndof = 4
nnodes = 2
ngpts = 3
if iet == 6:
ndof = 4
nnodes = 2
ngpts = 3
if iet == 7:
ndof = 6
nnodes = 3
ngpts = 6
if iet == 8:
ndof = 18
nnodes = 9
ngpts = 9
if iet == 9:
ndof = 6
nnodes = 3
ngpts = 3
return ndof, nnodes, ngpts
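# Quick usage example (added for clarity): eletype(1) returns (8, 4, 4), i.e. the
# bilinear quad carries 8 degrees of freedom, 4 nodes and 4 Gauss points, while
# eletype(8) returns (18, 9, 9) for the 9-noded quadratic quad.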
#%% Shape functions and derivatives
def sha4(x, y):
"""Shape functions for a 4-noded quad element
Parameters
----------
x : float
x coordinate for a point within the element.
y : float
y coordinate for a point within the element.
Returns
-------
N : Numpy array
Array of interpolation functions.
Examples
--------
We can check evaluating at two different points, namely (0, 0) and
(1, 1). Thus
>>> N = sha4(0, 0)
>>> N_ex = np.array([
... [1/4, 0, 1/4, 0, 1/4, 0, 1/4, 0],
... [0, 1/4, 0, 1/4, 0, 1/4, 0, 1/4]])
>>> np.allclose(N, N_ex)
True
and
>>> N = sha4(1, 1)
>>> N_ex = np.array([
... [0, 0, 0, 0, 1, 0, 0, 0],
... [0, 0, 0, 0, 0, 1, 0, 0]])
>>> np.allclose(N, N_ex)
True
"""
N = np.zeros((2, 8))
H = 0.25*np.array(
[(1 - x)*(1 - y),
(1 + x)*(1 - y),
(1 + x)*(1 + y),
(1 - x)*(1 + y)])
N[0, ::2] = H
N[1, 1::2] = H
return N
def sha6(x, y):
"""Shape functions for a 6-noded triangular element
Parameters
----------
x : float
x coordinate for a point within the element.
y : float
y coordinate for a point within the element.
Returns
-------
N : Numpy array
Array of interpolation functions.
Examples
--------
We can check evaluating at two different points, namely (0, 0) and
(0.5, 0.5). Thus
>>> N = sha6(0, 0)
>>> N_ex = np.array([
... [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
... [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> np.allclose(N, N_ex)
True
and
>>> N = sha6(1/2, 1/2)
>>> N_ex = np.array([
... [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]])
>>> np.allclose(N, N_ex)
True
"""
N = np.zeros((2, 12))
H = np.array(
[(1 - x - y) - 2*x*(1 - x - y) - 2*y*(1 - x - y),
x - 2*x*(1 - x - y) - 2*x*y,
y - 2*x*y - 2*y*(1-x-y),
4*x*(1 - x - y),
4*x*y,
4*y*(1 - x - y)])
N[0, ::2] = H
N[1, 1::2] = H
return N
def sha3(x, y):
"""Shape functions for a 3-noded triangular element
Parameters
----------
x : float
x coordinate for a point within the element.
y : float
y coordinate for a point within the element.
Returns
-------
N : Numpy array
Array of interpolation functions.
Examples
--------
We can check evaluating at two different points, namely (0, 0) and
(0, 0.5). Thus
>>> N = sha3(0, 0)
>>> N_ex = np.array([
... [1, 0, 0, 0, 0, 0],
... [0, 1, 0, 0, 0, 0]])
>>> np.allclose(N, N_ex)
True
and
>>> N = sha3(1/2, 1/2)
>>> N_ex = np.array([
... [0, 0, 1/2, 0, 1/2, 0],
... [0, 0, 0, 1/2, 0, 1/2]])
>>> np.allclose(N, N_ex)
True
"""
N = np.zeros((2, 6))
H = np.array([
(1 - x - y),
x,
y])
N[0, ::2] = H
N[1, 1::2] = H
return N
def shape1D(x):
"""Shape functions for a 3-noded linear element
Parameters
----------
x : float
x coordinate for a point within the element.
Returns
-------
N : Numpy array
Array of interpolation functions.
"""
N = np.zeros((2 , 6))
H = np.zeros((3))
H[0] = 0.5*(1.0 - x) - 0.5*(1-x*x)
H[1] = 0.5*(1.0 + x) - 0.5*(1-x*x)
H[2] = (1-x*x)
N[ 0 , 0] = H[0]
N[ 0 , 2] = H[1]
N[ 0 , 4] = H[2]
N[ 1 , 1] = H[0]
N[ 1 , 3] = H[1]
N[ 1 , 5] = H[2]
return N
def sha9(R, S):
"""Shape functions for a 4-noded quad element
Parameters
----------
x : float
x coordinate for a point within the element.
y : float
y coordinate for a point within the element.
Returns
-------
N : Numpy array
Array of interpolation functions.
Examples
--------
We can check evaluating at two different points, namely (0, 0), where
only the center node (node 8) is active, and (1, 1), where only node 2
is active. Thus
>>> N = sha9(0, 0)
>>> N_ex = np.zeros((2, 18))
>>> N_ex[0, 16] = 1
>>> N_ex[1, 17] = 1
>>> np.allclose(N, N_ex)
True
and
>>> N = sha9(1, 1)
>>> N_ex = np.zeros((2, 18))
>>> N_ex[0, 4] = 1
>>> N_ex[1, 5] = 1
>>> np.allclose(N, N_ex)
True
"""
N = np.zeros((2, 18))
SN = np.zeros((9))
ONE = 1.0
QUART = 0.25
HALF = 0.5
RP =ONE+R
RM =ONE-R
RMS=ONE-R*R
SP =ONE+S
SM =ONE-S
SMS=ONE-S*S
#
SN[8]=RMS*SMS
SN[7]=HALF*SMS*RM-HALF*SN[8]
SN[6]=HALF*RMS*SP-HALF*SN[8]
SN[5]=HALF*SMS*RP-HALF*SN[8]
SN[4]=HALF*RMS*SM-HALF*SN[8]
SN[0]=QUART*RM*SM-HALF*SN[7]-HALF*SN[4]-QUART*SN[8]
SN[1]=QUART*RP*SM-HALF*SN[5]-HALF*SN[4]-QUART*SN[8]
SN[2]=QUART*RP*SP-HALF*SN[5]-HALF*SN[6]-QUART*SN[8]
SN[3]=QUART*RM*SP-HALF*SN[7]-HALF*SN[6]-QUART*SN[8]
#
N[0, ::2] = SN
N[1, 1::2] = SN
#
return N
def stdm4NQ(r, s, coord):
"""Strain-displacement interpolator B for a 4-noded quad element
Parameters
----------
r : float
r component in the natural space.
s : float
s component in the natural space.
coord : ndarray
Coordinates of the nodes of the element (4, 2).
Returns
-------
ddet : float
Determinant evaluated at `(r, s)`.
B : ndarray
Strain-displacement interpolator evaluated at `(r, s)`.
"""
nn = 4
B = np.zeros((3, 2*nn))
dhdx = 0.25*np.array([
[s - 1, -s + 1, s + 1, -s - 1],
[r - 1, -r - 1, r + 1, -r + 1]])
det, jaco_inv = jacoper(dhdx, coord)
dhdx = np.dot(jaco_inv, dhdx)
B[0, ::2] = dhdx[0, :]
B[1, 1::2] = dhdx[1, :]
B[2, ::2] = dhdx[1, :]
B[2, 1::2] = dhdx[0, :]
return det, B
def stdm9NQ(r, s, coord):
"""Strain-displacement interpolator B for a 9-noded quad element
Parameters
----------
r : float
r component in the natural space.
s : float
s component in the natural space.
coord : ndarray
Coordinates of the nodes of the element (9, 2).
Returns
-------
ddet : float
Determinant evaluated at `(r, s)`.
B : ndarray
Strain-displacement interpolator evaluated at `(r, s)`.
"""
####
P = np.zeros((2, 9))
ONE = 1.0
TWO = 2.0
QUARTER = 0.25
HALF = 0.5
RP= ONE+r
SP= ONE+s
RM= ONE-r
SM= ONE-s
RMS=ONE-r**TWO
SMS=ONE-s**TWO
#
# 9-NODED ELEMENT
# Derivatives w.r.t the natural coordinates
# w.r.t.r
#
P[0,8]=-TWO*r*SMS
P[0,7]=-HALF*SMS-HALF*P[0,8]
P[0,6]=-r*SP-HALF*P[0,8]
P[0,5]= HALF*SMS-HALF*P[0,8]
P[0,4]=-r*SM-HALF*P[0,8]
P[0,3]=-QUARTER*SP-HALF*P[0,6]-HALF*P[0,7]-QUARTER*P[0,8]
P[0,2]= QUARTER*SP-HALF*P[0,6]-HALF*P[0,5]-QUARTER*P[0,8]
P[0,1]= QUARTER*SM-HALF*P[0,4]-HALF*P[0,5]-QUARTER*P[0,8]
P[0,0]=-QUARTER*SM-HALF*P[0,7]-HALF*P[0,4]-QUARTER*P[0,8]
#
# w.r.t.s
#
P[1,8]=-TWO*s*RMS
P[1,7]=-s*RM-HALF*P[1,8]
P[1,6]= HALF*RMS-HALF*P[1,8]
P[1,5]=-s*RP-HALF*P[1,8]
P[1,4]=-HALF*RMS-HALF*P[1,8]
P[1,3]= QUARTER*RM-HALF*P[1,6]-HALF*P[1,7]-QUARTER*P[1,8]
P[1,2]= QUARTER*RP-HALF*P[1,6]-HALF*P[1,5]-QUARTER*P[1,8]
P[1,1]=-QUARTER*RP-HALF*P[1,4]-HALF*P[1,5]-QUARTER*P[1,8]
P[1,0]=-QUARTER*RM-HALF*P[1,7]-HALF*P[1,4]-QUARTER*P[1,8]
#
nn = 9
B = np.zeros((3, 2*nn))
det, jaco_inv = jacoper(P, coord)
dhdx = np.dot(jaco_inv, P)
B[0, ::2] = dhdx[0, :]
B[1, 1::2] = dhdx[1, :]
B[2, ::2] = dhdx[1, :]
B[2, 1::2] = dhdx[0, :]
return det, B
def stdm6NT(r, s, coord):
"""Strain-displacement interpolator B for a 6-noded triang element
Parameters
----------
r : float
r component in the natural space.
s : float
s component in the natural space.
coord : ndarray
Coordinates of the nodes of the element (6, 2).
Returns
-------
ddet : float
Determinant evaluated at `(r, s)`.
B : ndarray
Strain-displacement interpolator evaluated at `(r, s)`.
"""
nn = 6
B = np.zeros((3, 2*nn))
dhdx = np.array([
[4*r + 4*s - 3, 4*r - 1, 0, -8*r - 4*s + 4, 4*s, -4*s],
[4*r + 4*s - 3, 0, 4*s - 1, -4*r, 4*r, -4*r - 8*s + 4]])
det, jaco_inv = jacoper(dhdx, coord)
dhdx = np.dot(jaco_inv, dhdx)
B[0, ::2] = dhdx[0, :]
B[1, 1::2] = dhdx[1, :]
B[2, ::2] = dhdx[1, :]
B[2, 1::2] = dhdx[0, :]
return det, B
def stdm3NT(r, s, coord):
"""Strain-displacement interpolator B for a 3-noded triang element
Parameters
----------
r : float
r component in the natural space.
s : float
s component in the natural space.
coord : ndarray
Coordinates of the nodes of the element (3, 2).
Returns
-------
det : float
Determinant evaluated at `(r, s)`.
B : ndarray
Strain-displacement interpolator evaluated at `(r, s)`.
"""
nn = 3
B = np.zeros((3, 2*nn))
dhdx = np.array([
[-1, 1, 0],
[-1, 0, 1]])
det, jaco_inv = jacoper(dhdx, coord)
dhdx = np.dot(jaco_inv, dhdx)
B[0, ::2] = dhdx[0, :]
B[1, 1::2] = dhdx[1, :]
B[2, ::2] = dhdx[1, :]
B[2, 1::2] = dhdx[0, :]
return det, B
def jacoper(dhdx, coord):
"""
Parameters
----------
dhdx : ndarray
Derivatives of the interpolation function with respect to the
natural coordinates.
coord : ndarray
Coordinates of the nodes of the element (nn, 2).
Returns
-------
det : float
Determinant of the Jacobian evaluated at `(r, s)`.
jaco_inv : ndarray (2, 2)
Inverse of the Jacobian of the transformation evaluated at `(r, s)`.
"""
jaco = dhdx.dot(coord)
det = np.linalg.det(jaco)
jaco_inv = np.linalg.inv(jaco)
return det, jaco_inv
#%% Material routines
def umat(nu, E , rho):
"""2D Elasticity consitutive matrix in plane stress
For plane strain use effective properties.
Parameters
----------
nu : float
Poisson coefficient (-1, 0.5).
E : float
Young modulus (>0).
Returns
-------
C : ndarray
Constitutive tensor in Voigt notation.
Examples
--------
>>> C = umat(1/3, 8/3)
>>> C_ex = np.array([
... [3, 1, 0],
... [1, 3, 0],
... [0, 0, 1]])
>>> np.allclose(C, C_ex)
True
"""
EN = E*(1.0-nu)
ED = (1.0+nu)*(1.0-2.0*nu)
EB = 2.0*(1.0+nu)
C = np.zeros((3, 3))
C[0, 0] = EN/ED
C[0, 1] = E*nu/ED
C[1, 0] = E*nu/ED
C[1, 1] = EN/ED
C[2, 2] = E/EB
return C
#def umat(beta , alpha , rho):
# """2D Elasticity consitutive matrix in plane stress
#
# For plane strain use effective properties.
#
# Parameters
# ----------
# nu : float
# Poisson coefficient (-1, 0.5).
# E : float
# Young modulus (>0).
#
# Returns
# -------
# C : ndarray
# Constitutive tensor in Voigt notation.
#
# Examples
# --------
#
# >>> C = umat(1/3, 8/3)
# >>> C_ex = np.array([
# ... [3, 1, 0],
# ... [1, 3, 0],
# ... [0, 0, 1]])
# >>> np.allclose(C, C_ex)
# True
#
# """
# c1 = rho*beta**2
# c2 = 3.0*alpha**2-4.0*beta**2
# c3 = alpha**2-beta**2
# E = c1*c2/c3
# print(E)
# c1 = 2.0*rho*beta**2
# nu = E/c1-1.0
# print(nu)
#
# EN = E*(1.0-nu)
# ED = (1.0+nu)*(1.0-2.0*nu)
# EB = 2.0*(1.0+nu)
#
# C = np.zeros((3, 3))
# C[0, 0] = EN/ED
# C[0, 1] = E*nu/ED
# C[1, 0] = E*nu/ED
# C[1, 1] = EN/ED
# C[2, 2] = E/EB
#
# return C
#def umat(Vp , Vs , rho):
# """
#
# """
# C = np.zeros((3, 3))
#
# VSS=Vs*Vs
# VPS=Vp*Vp
#
# C[0, 0] = rho*VPS
# C[0, 1] = rho*(VPS-2.0*VSS)
# C[1, 0] = rho*(VPS-2.0*VSS)
# C[1, 1] = rho*VPS
# C[2, 2] = rho*VSS
#
# return C
#%% Elemental strains
def str_el4(coord, ul):
"""Compute the strains at each element integration point
This one is used for 4-noded quadrilateral elements.
Parameters
----------
coord : ndarray
Coordinates of the nodes of the element (4, 2).
ul : ndarray
Array with displacements for the element.
Returns
-------
epsGT : ndarray
Strain components for the Gauss points.
xl : ndarray
Configuration of the Gauss points after deformation.
"""
epsl = np.zeros([3])
epsG = np.zeros([3, 4])
xl = np.zeros([4, 2])
XW, XP = gau.gpoints2x2()
for i in range(4):
ri = XP[i, 0]
si = XP[i, 1]
ddet, B = stdm4NQ(ri, si, coord)
epsl = np.dot(B, ul)
epsG[:, i] = epsl[:]
N = sha4(ri, si)
xl[i, 0] = sum(N[0, 2*i]*coord[i, 0] for i in range(4))
xl[i, 1] = sum(N[0, 2*i]*coord[i, 1] for i in range(4))
return epsG.T, xl
def str_el6(coord, ul):
"""Compute the strains at each element integration point
This one is used for 6-noded triangular elements.
Parameters
----------
coord : ndarray
Coordinates of the nodes of the element (6, 2).
ul : ndarray
Array with displacements for the element.
Returns
-------
epsGT : ndarray
Strain components for the Gauss points.
xl : ndarray
Configuration of the Gauss points after deformation.
"""
epsl = np.zeros([3])
epsG = np.zeros([3, 7])
xl = np.zeros([7, 2])
XW, XP = gau.gpoints7()
for i in range(7):
ri = XP[i, 0]
si = XP[i, 1]
ddet, B = stdm6NT(ri, si, coord)
epsl = np.dot(B, ul)
epsG[:, i] = epsl[:]
N = sha6(ri, si)
xl[i, 0] = sum(N[0, 2*i]*coord[i, 0] for i in range(6))
xl[i, 1] = sum(N[0, 2*i]*coord[i, 1] for i in range(6))
return epsG.T, xl
def str_el3(coord, ul):
"""Compute the strains at each element integration point
This one is used for 3-noded triangular elements.
Parameters
----------
coord : ndarray
Coordinates of the nodes of the element (nn, 2).
ul : ndarray
Array with displacements for the element.
Returns
-------
epsGT : ndarray
Strain components for the Gauss points.
xl : ndarray
Configuration of the Gauss points after deformation.
"""
epsl = np.zeros([3])
epsG = np.zeros([3, 3])
xl = np.zeros([3, 2])
XW, XP = gau.gpoints3()
for i in range(3):
ri = XP[i, 0]
si = XP[i, 1]
ddet, B = stdm3NT(ri, si, coord)
epsl = np.dot(B, ul)
epsG[:, i] = epsl
N = sha3(ri, si)
xl[i, 0] = sum(N[0, 2*i]*coord[i, 0] for i in range(3))
xl[i, 1] = sum(N[0, 2*i]*coord[i, 1] for i in range(3))
return epsG.T, xl
if __name__ == "__main__":
import doctest
doctest.testmod()
```
#### File: SOLIDSPy_DYN/solidspydyn/preprocesor.py
```python
from __future__ import division, print_function
import sys
import numpy as np
def readin(folder=""):
"""Read the input files"""
nodes = np.loadtxt(folder + 'nodes.txt' , ndmin=2)
mats = np.loadtxt(folder + 'mater.txt' , ndmin=2)
elements = np.loadtxt(folder + 'eles.txt' , ndmin=2, dtype=np.int)
loads = np.loadtxt(folder + 'loads.txt' , ndmin=2)
inipar = np.loadtxt(folder + 'inipar.txt', ndmin=2)
return inipar , nodes, mats, elements, loads
def intparams(inipar):
#
ass = np.zeros(9, dtype = float)
dt = inipar[0 , 0]
T = inipar[0 , 1]
Tc = inipar[0 , 2]
fc = inipar[0 , 3]
#
m=int(T/dt)
theta = 1.40
#
ass[0] = 6.0/(theta*theta*dt*dt)
ass[1] = 3.0/(theta*dt)
ass[2] = 2.0*ass[1]
ass[3] = theta*dt/2.0
ass[4] = ass[0]/theta
ass[5] = -ass[2]/theta
ass[6] =1.0 - (3.0/theta)
ass[7] = dt/2.0
ass[8] = dt*dt/6.0
#
return m , T , Tc , fc , dt , ass , theta
def echomod(nodes, mats, elements, loads, folder=""):
"""Create echoes of the model input files"""
np.savetxt(folder + "KNODES.txt", nodes, fmt='%5.2f', delimiter=' ')
np.savetxt(folder + "KMATES.txt", mats, fmt='%5.2f', delimiter=' ')
np.savetxt(folder + "KELEMS.txt", elements, fmt='%d', delimiter=' ')
np.savetxt(folder + "KLOADS.txt", loads, fmt='%5.2f', delimiter=' ')
def initial_params():
"""Read initial parameters for the simulation
The parameters to be read are:
- folder: location of the input files.
- name: name for the output files (if echo is True).
- echo: echo output files.
"""
# Check Python version
version = sys.version_info.major
if version == 3:
raw_input = input
elif version == 2:
pass
else:
raise ValueError("You should use Python 2.x at least!")
# Try to run with easygui
try:
import easygui
folder = easygui.diropenbox(title="Folder for the job") + "/"
name = easygui.enterbox("Enter the job name")
# echo = easygui.buttonbox("Do you want to echo files?",
# choices=["Yes", "No"])
except:
folder = raw_input('Enter folder (empty for the current one): ')
name = raw_input('Enter the job name: ')
# echo = raw_input('Do you want to echo files? (y/N):')
# if echo.upper() in ["YES", "Y"]:
# echo = True
# else:
# echo = False
return folder, name
def ele_writer(cells, cell_data ,ele_tag , phy_sur, ele_type, mat_tag, nini):
"""
Extracts a subset of elements from a complete mesh according to the
physical surface phy_sur and writes down the proper fields into an
elements array.
Parameters
----------
cell : dictionary
Dictionary created by meshio with cells information.
cell_data: dictionary
Dictionary created by meshio with cells data information.
ele_tag : string
Element type according to meshio convention,
e.g., quad9 or line3.
phy_sur : int
Physical surface for the subset.
ele_type: int
Element type.
mat_tag : int
Material profile for the subset.
ndof : int
Number of degrees of freedom for the elements.
nnode : int
Number of nodes for the element.
nini : int
Element id for the first element in the set.
Returns
-------
nf : int
Element id for the last element in the set
els_array : int
Elemental data.
"""
eles = cells[ele_tag]
dict_nnode = {'triangle': 3 , 'triangle6':6 , 'quad':4 }
nnode = dict_nnode[ele_tag]
phy_surface = cell_data[ele_tag]['physical']
ele_id = [cont for cont, _ in enumerate(phy_surface[:])
if phy_surface[cont] == phy_sur]
els_array = np.zeros([len(ele_id) , 3 + nnode], dtype=int)
els_array[: , 0] = range(nini , len(ele_id) + nini )
els_array[: , 1] = ele_type
els_array[: , 2] = mat_tag
els_array[: , 3::] = eles[ele_id, :]
nf = nini + len(ele_id)
return nf , els_array
def node_writer(points , point_data):
"""Write nodal data as required by SolidsPy
Parameters
----------
points : dictionary
Nodal points
point_data : dictionary
Physical data associatted to the nodes.
Returns
-------
nodes_array : ndarray (int)
Array with the nodal data according to SolidsPy.
"""
nodes_array = np.zeros([points.shape[0], 5])
nodes_array[:, 0] = range(points.shape[0])
nodes_array[:, 1:3] = points[:, :2]
return nodes_array
def boundary_conditions(cells, cell_data, phy_lin, nodes_array, bc_x, bc_y):
"""Impose nodal point boundary conditions as required by SolidsPy
Parameters
----------
cell : dictionary
Dictionary created by meshio with cells information.
cell_data: dictionary
Dictionary created by meshio with cells data information.
phy_lin : int
Physical line where BCs are to be imposed.
nodes_array : int
Array with the nodal data and to be modified by BCs.
bc_x, bc_y : int
Boundary condition flag along the x and y direction:
* -1: restrained
* 0: free
Returns
-------
nodes_array : int
Array with the nodal data after imposing BCs according
to SolidsPy.
"""
lines = cells["line"]
# Bounds contains data corresponding to the physical line.
phy_line = cell_data["line"]["physical"]
id_frontera = [cont for cont in range(len(phy_line))
if phy_line[cont] == phy_lin]
nodes_frontera = lines[id_frontera]
nodes_frontera = nodes_frontera.flatten()
nodes_frontera = list(set(nodes_frontera))
nodes_array[nodes_frontera, 3] = bc_x
nodes_array[nodes_frontera, 4] = bc_y
return nodes_array
def loading(cells, cell_data, phy_lin, P_x, P_y):
"""Impose nodal boundary conditions as required by SolidsPy
Parameters
----------
cell : dictionary
Dictionary created by meshio with cells information.
cell_data: dictionary
Dictionary created by meshio with cells data information.
phy_lin : int
Physical line where BCs are to be imposed.
nodes_array : int
Array with the nodal data and to be modified by BCs.
P_x, P_y : float
Load components in x and y directions.
Returns
-------
nodes_array : int
Array with the nodal data after imposing BCs according
to SolidsPy.
"""
lines = cells["line"]
# Bounds contains data corresponding to the physical line.
phy_line = cell_data["line"]["physical"]
id_carga = [cont for cont in range(len(phy_line))
if phy_line[cont] == phy_lin]
nodes_carga = lines[id_carga]
nodes_carga = nodes_carga.flatten()
nodes_carga = list(set(nodes_carga))
ncargas = len(nodes_carga)
cargas = np.zeros((ncargas, 3))
cargas[:, 0] = nodes_carga
cargas[:, 1] = P_x/ncargas
cargas[:, 2] = P_y/ncargas
return cargas
```
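A short sketch of the time-integration setup performed by `intparams`, using a made-up `inipar` row; the numbers are illustrative only and stand in for the values normally read from `inipar.txt` by `readin`.
```python
# Illustrative solver parameters: time step dt, total time T, and the remaining
# two entries Tc and fc, in the order intparams unpacks them.
import numpy as np
import preprocesor as pre

inipar = np.array([[0.001, 2.0, 0.2, 4.0]])
m, T, Tc, fc, dt, ass, theta = pre.intparams(inipar)

print(m, dt, theta)  # number of time steps, step size and the Wilson-theta factor
print(ass)           # the nine integration coefficients used by the time stepper
```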
#### File: SOLIDSPy_DYN/solidspydyn/uelutil.py
```python
from __future__ import division, print_function
import numpy as np
import femutil as fem
import gaussutil as gau
def uel4nquad(coord, par):
"""Quadrilateral element with 4 nodes
Parameters
----------
coord : ndarray
Coordinates for the nodes of the element (4, 2).
enu : float
Poisson coefficient (-1, 0.5).
Emod : float
Young modulus (>0).
Returns
-------
kl : ndarray
Local stiffness matrix for the element (8, 8).
Examples
--------
>>> coord = np.array([[-1, -1], [1, -1], [1, 1], [-1, 1]])
>>> stiff = uel4nquad(coord, 1/3, 8/3)
>>> stiff_ex = 1/6 * np.array([
... [ 8, 3, -5, 0, -4, -3, 1, 0],
... [ 3, 8, 0, 1, -3, -4, 0, -5],
... [-5, 0, 8, -3, 1, 0, -4, 3],
... [ 0, 1, -3, 8, 0, -5, 3, -4],
... [-4, -3, 1, 0, 8, 3, -5, 0],
... [-3, -4, 0, -5, 3, 8, 0, 1],
... [ 1, 0, -4, 3, -5, 0, 8, -3],
... [ 0, -5, 3, -4, 0, 1, -3, 8]])
>>> np.allclose(stiff, stiff_ex)
True
"""
Emod = par[0]
enu = par[1]
rho = par[2]
calpha = par[3]
cbeta = par[4]
kl = np.zeros([8, 8])
ml = np.zeros([8, 8])
cl = np.zeros([8, 8])
C = fem.umat(enu, Emod , rho)
XW, XP = gau.gpoints2x2()
ngpts = 4
for i in range(0, ngpts):
ri = XP[i, 0]
si = XP[i, 1]
alf = XW[i]
ddet, B = fem.stdm4NQ(ri, si, coord)
N = fem.sha4(ri , si )
kl = kl + np.dot(np.dot(B.T,C), B)*alf*ddet
ml = ml + rho*np.dot(N.T , N)*alf*ddet
cl = calpha*kl + cbeta*ml
return kl , ml , cl
def uel9nquad(coord, par):
"""Quadrilateral element with 4 nodes
Parameters
----------
coord : ndarray
Coordinates for the nodes of the element (4, 2).
enu : float
Poisson coefficient (-1, 0.5).
Emod : float
Young modulus (>0).
Returns
-------
kl : ndarray
Local stiffness matrix for the element (8, 8).
Examples
--------
>>> coord = np.array([[-1, -1], [1, -1], [1, 1], [-1, 1]])
>>> stiff = uel4nquad(coord, 1/3, 8/3)
>>> stiff_ex = 1/6 * np.array([
... [ 8, 3, -5, 0, -4, -3, 1, 0],
... [ 3, 8, 0, 1, -3, -4, 0, -5],
... [-5, 0, 8, -3, 1, 0, -4, 3],
... [ 0, 1, -3, 8, 0, -5, 3, -4],
... [-4, -3, 1, 0, 8, 3, -5, 0],
... [-3, -4, 0, -5, 3, 8, 0, 1],
... [ 1, 0, -4, 3, -5, 0, 8, -3],
... [ 0, -5, 3, -4, 0, 1, -3, 8]])
>>> np.allclose(stiff, stiff_ex)
True
"""
Emod = par[0]
enu = par[1]
rho = par[2]
calpha = par[3]
cbeta = par[4]
kl = np.zeros([18, 18])
ml = np.zeros([18, 18])
cl = np.zeros([18, 18])
C = fem.umat(enu, Emod , rho)
XW, XP = gau.gpoints3x3()
ngpts = 9
for i in range(0, ngpts):
ri = XP[i, 0]
si = XP[i, 1]
alf = XW[i]
ddet, B = fem.stdm9NQ(ri, si, coord)
N = fem.sha9(ri , si )
kl = kl + np.dot(np.dot(B.T,C), B)*alf*ddet
ml = ml + rho*np.dot(N.T , N)*alf*ddet
cl = calpha*kl + cbeta*ml
return kl , ml , cl
def uel6ntrian(coord, par):
"""Triangular element with 6 nodes
Parameters
----------
coord : ndarray
Coordinates for the nodes of the element (6, 2).
enu : float
Poisson coefficient (-1, 0.5).
Emod : float
Young modulus (>0).
Returns
-------
kl : ndarray
Local stiffness matrix for the element (12, 12).
Examples
--------
>>> coord = np.array([
... [0, 0],
... [1, 0],
... [0, 1],
... [0.5, 0],
... [0.5, 0.5],
... [0, 0.5]])
>>> stiff = uel6ntrian(coord,1/3, 8/3)
>>> stiff_ex = 1/6 * np.array([
... [12, 6, 3, 1, 1, 1, -12, -4, 0, 0, -4, -4],
... [6, 12, 1, 1, 1, 3, -4, -4, 0, 0, -4, -12],
... [3, 1, 9, 0, 0, -1, -12, -4, 0, 4, 0, 0],
... [1, 1, 0, 3, -1, 0, -4, -4, 4, 0, 0, 0],
... [1, 1, 0, -1, 3, 0, 0, 0, 0, 4, -4, -4],
... [1, 3, -1, 0, 0, 9, 0, 0, 4, 0, -4, -12],
... [-12, -4, -12, -4, 0, 0, 32, 8, -8, -8, 0, 8],
... [-4, -4, -4, -4, 0, 0, 8, 32, -8, -24, 8, 0],
... [0, 0, 0, 4, 0, 4, -8, -8, 32, 8, -24, -8],
... [0, 0, 4, 0, 4, 0, -8, -24, 8, 32, -8, -8],
... [-4, -4, 0, 0, -4, -4, 0, 8, -24, -8, 32, 8],
... [-4, -12, 0, 0, -4, -12, 8, 0, -8, -8, 8, 32]])
>>> np.allclose(stiff, stiff_ex)
True
"""
Emod = par[0]
enu = par[1]
rho = par[2]
calpha = par[3]
cbeta = par[4]
kl = np.zeros([12, 12])
ml = np.zeros([12, 12])
cl = np.zeros([12, 12])
C = fem.umat(enu, Emod , rho)
XW, XP = gau.gpoints7()
ngpts = 7
for i in range(ngpts):
ri = XP[i, 0]
si = XP[i, 1]
alf = XW[i]
ddet, B = fem.stdm6NT(ri, si, coord)
N = fem.sha6(ri , si)
kl = kl + 0.5*np.dot(np.dot(B.T,C), B)*alf*ddet
ml = ml + 0.5*rho*np.dot(N.T , N)*alf*ddet
cl = calpha*kl + cbeta*ml
return kl , ml , cl
def uel3ntrian(coord, par):
"""Triangular element with 3 nodes
Parameters
----------
coord : ndarray
Coordinates for the nodes of the element (3, 2).
enu : float
Poisson coefficient (-1, 0.5).
Emod : float
Young modulus (>0).
Returns
-------
kl : ndarray
Local stiffness matrix for the element (6, 6).
Examples
--------
>>> coord = np.array([
... [0, 0],
... [1, 0],
... [0, 1]])
>>> stiff = uel3ntrian(coord, 1/3, 8/3)
>>> stiff_ex = 1/2 * np.array([
... [4, 2, -3, -1, -1, -1],
... [2, 4, -1, -1, -1, -3],
... [-3, -1, 3, 0, 0, 1],
... [-1, -1, 0, 1, 1, 0],
... [-1, -1, 0, 1, 1, 0],
... [-1, -3, 1, 0, 0, 3]])
>>> np.allclose(stiff, stiff_ex)
True
"""
Emod = par[0]
enu = par[1]
rho = par[2]
calpha = par[3]
cbeta = par[4]
kl = np.zeros([6, 6])
ml = np.zeros([6, 6])
cl = np.zeros([6, 6])
C = fem.umat(enu, Emod , rho)
XW, XP = gau.gpoints3()
ngpts = 3
for i in range(ngpts):
ri = XP[i, 0]
si = XP[i, 1]
alf = XW[i]
ddet, B = fem.stdm3NT(ri, si, coord)
N = fem.sha3(ri, si)
kl = kl + 0.5*np.dot(np.dot(B.T,C), B)*alf*ddet
ml = ml + 0.5*rho*np.dot(N.T , N)*alf*ddet
cl = calpha*kl + cbeta*ml
return kl , ml , cl
def uel3dash(coord, par):
"""Triangular element with 3 nodes
Parameters
----------
coord : ndarray
Coordinates for the nodes of the element (3, 2).
enu : float
Poisson coefficient (-1, 0.5).
Emod : float
Young modulus (>0).
Returns
-------
kl : ndarray
Local stiffness matrix for the element (6, 6).
Examples
--------
>>> coord = np.array([
... [0, 0],
... [1, 0],
... [0, 1]])
>>> stiff = uel3ntrian(coord, 1/3, 8/3)
>>> stiff_ex = 1/2 * np.array([
... [4, 2, -3, -1, -1, -1],
... [2, 4, -1, -1, -1, -3],
... [-3, -1, 3, 0, 0, 1],
... [-1, -1, 0, 1, 1, 0],
... [-1, -1, 0, 1, 1, 0],
... [-1, -3, 1, 0, 0, 3]])
>>> np.allclose(stiff, stiff_ex)
True
"""
Cmod = par[0]*par[2]
kl = np.zeros([6, 6])
ml = np.zeros([6, 6])
cl = np.zeros([6, 6])
cl[0 , 0] = Cmod
cl[1 , 1] = Cmod
return kl , ml , cl
def ueldashpot(coord, par):
"""
3 noded dashpot element
"""
Emod = par[0]
enu = par[1]
rho = par[2]
calpha = par[3]
cbeta = par[4]
kl = np.zeros([12, 12])
ml = np.zeros([12, 12])
cl = np.zeros([6, 6])
cll = np.zeros([12, 12])
C = np.zeros([2, 2])
AALFA = 2000.0
ABETA = 1000.0
C[0,1]=0.0
C[1,0]=0.0
C[0,0]=rho*AALFA
C[1,1]=rho*ABETA
vec = coord[1, :] - coord[0, :]
nx = vec[0]/np.linalg.norm(vec)
ny = vec[1]/np.linalg.norm(vec)
Q = np.array([
[ nx , ny ],
[-ny , nx]])
CT = np.dot(np.dot(Q.T, C), Q)
ele = np.linalg.norm(vec)
ddet = ele /2.0
XW, XP = gau.gpoints6()
ngpts = 6
for i in range(ngpts):
ri = XP[i]
alf = XW[i]
N = fem.shape1D(ri)
cl = cl + (np.dot(np.dot(N.T, CT), N))*alf*ddet
for i in range(6):
for j in range(6):
cll[i,j] = cl[i,j]
return kl , ml , cll
def uelspring(coord, par):
"""1D-2-noded Spring element
Parameters
----------
coord : ndarray
Coordinates for the nodes of the element (2, 2).
enu : float
Fictitious parameter.
Emod : float
Stiffness coefficient (>0).
Returns
-------
kl : ndarray
Local stiffness matrix for the element (4, 4).
Examples
--------
>>> coord = np.array([
... [0, 0],
... [1, 0]])
>>> stiff = uelspring(coord, 1/3, 8/3)
>>> stiff_ex = 8/3 * np.array([
... [-1, 0, 1, 0],
... [0, 0, 0, 0],
... [1, 0, -1, 0],
... [0, 0, 0, 0]])
>>> np.allclose(stiff, stiff_ex)
True
"""
Emod = par[0]
enu = par[1]
rho = par[2]
calpha = par[3]
cbeta = par[4]
vec = coord[1, :] - coord[0, :]
nx = vec[0]/np.linalg.norm(vec)
ny = vec[1]/np.linalg.norm(vec)
Q = np.array([
[nx, ny , 0 , 0],
[0, 0, nx , ny]])
kl = Emod * np.array([
[1, -1],
[-1, 1]])
kG = np.dot(np.dot(Q.T, kl), Q)
mG = np.zeros([2, 2])
cG = calpha*kG + cbeta*mG
return kG , mG , cG
def ueltruss2D(coord, par):
"""2D-2-noded truss element
Parameters
----------
coord : ndarray
Coordinates for the nodes of the element (2, 2).
A : float
Cross section area.
Emod : float
Young modulus (>0).
Returns
-------
kl : ndarray
Local stiffness matrix for the element (4, 4).
Examples
--------
>>> coord = np.array([
... [0, 0],
... [1, 0]])
>>> stiff = ueltruss2D(coord, 1.0 , 1000.0)
>>> stiff_ex = 8/3 * np.array([
... [-1, 0, 1, 0],
... [0, 0, 0, 0],
... [1, 0, -1, 0],
... [0, 0, 0, 0]])
>>> np.allclose(stiff, stiff_ex)
True
"""
Emod = par[0]
enu = par[1]
rho = par[2]
calpha = par[3]
cbeta = par[4]
vec = coord[1, :] - coord[0, :]
nx = vec[0]/np.linalg.norm(vec)
ny = vec[1]/np.linalg.norm(vec)
length = np.linalg.norm(vec)
Q = np.array([
[nx, ny , 0 , 0],
[0, 0, nx , ny]])
kl =(A*Emod/length) * np.array([
[1, -1],
[-1, 1]])
kG = np.dot(np.dot(Q.T, kl), Q)
mG = np.zeros([2, 2])
cG = calpha*kG + cbeta*mG
return kG , mG , cG
if __name__ == "__main__":
import doctest
doctest.testmod()
```
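As a rough usage sketch of the element routines above: each one unpacks `par` as [Emod, enu, rho, calpha, cbeta], so a single 3-noded triangle could be assembled as below. The material numbers are invented, and `femutil`/`gaussutil` must be importable for the call to work.
```python
# Invented material data in the order the element routines unpack it:
# Young modulus, Poisson ratio, density, and the two damping coefficients.
import numpy as np
import uelutil as ue

coord = np.array([[0.0, 0.0],
                  [1.0, 0.0],
                  [0.0, 1.0]])
par = [200e9, 0.30, 7800.0, 0.01, 0.01]

kl, ml, cl = ue.uel3ntrian(coord, par)
print(kl.shape, ml.shape, cl.shape)  # (6, 6) stiffness, mass and damping matrices
```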
|
{
"source": "jgomezdans/GEOG0133",
"score": 3
}
|
#### File: docs/python/da.py
```python
import numpy as np
sd_0 = 0.3
sd_1 = 0.2
rho = -0.5
# Form B
B = np.matrix([[sd_0**2,rho*sd_0*sd_1],[rho*sd_0*sd_1,sd_1**2]])
# inverse
BI = B.I
# check:
print ('B x B-1 = I')
print (B,'x')
print (BI,'=')
print (BI * B)
import numpy as np
mean_0 = 0.2
mean_1 = 0.5
sd_0 = 0.3
sd_1 = 0.2
# case 1: with correlation
rho = -0.5
test = [0.,0.]
dx0 = test[0] - mean_0
dx1 = test[1] - mean_1
B00 = sd_0**2
B11 = sd_1**2
B01 = sd_0 * sd_1 * rho
Z2 = (dx0*B00+dx1*B01)*dx0 + (dx0*B01+dx1*B11)*dx1
detB = B00*B11 - B01**2
scale = (2.*np.pi) * np.sqrt(detB)
p0 = (1./scale) * np.exp(-0.5 * Z2)
print ('p0: rho = -0.5: p(0,0) =',p0)
# case 1: without correlation
rho = -0.0
test = [0.,0.]
dx0 = test[0] - mean_0
dx1 = test[1] - mean_1
B00 = sd_0**2
B11 = sd_1**2
B01 = sd_0 * sd_1 * rho
Z2 = (dx0*B00+dx1*B01)*dx0 + (dx0*B01+dx1*B11)*dx1
detB = B00*B11 - B01**2
scale = (2.*np.pi) * np.sqrt(detB)
p1 = (1./scale) * np.exp(-0.5 * Z2)
print ('p1: rho = 0.0: p(0,0) =',p1)
print ('p1/p0 =',p1/p0)
import numpy as np
import scipy.optimize
# prior
xb = np.array([0.1,0.5])
B = np.matrix([[0.2**2,0.5*0.2*0.3],[0.5*0.2*0.3,0.3**2]])
# a direct observation: sd = 0.1
xr = np.array([0.15,0.4])
R = np.matrix([[0.1**2,0.0],[0.0,0.1**2]])
BI = B.I
RI = R.I
# starting guess
x = np.array([0.,0.])
def cost(x,xb,BI,xr,RI):
'''
Return J and J' at x
'''
Jb = np.dot(np.array(0.5*(xb-x) * BI),(xb-x))[0]
Jr = np.dot(np.array(0.5*(xr-x) * RI),(xr-x))[0]
JbPrime = -(xb-x)*BI
JrPrime = -(xr-x)*RI
return Jr+Jb,np.array(JrPrime+JbPrime)[0]
def uncertainty(x,xb,BI,xr,RI):
# inverse of Hessian
return (BI + RI).I
retval = scipy.optimize.fmin_l_bfgs_b(cost,x,args=(xb,BI,xr,RI))
# x new
x = retval[0]
# uncertainty
Cpost = uncertainty(x,xb,BI,xr,RI)
# print prior
psigma0 = np.sqrt(B[0,0])
psigma1 = np.sqrt(B[1,1])
prho12 = B[0,1]/(psigma0*psigma1)
print ('prior: x0,x1 :',xb[0],xb[1])
print ('prior: sd0,sd1,rho:',psigma0,psigma1,prho12)
# print observation
rsigma0 = np.sqrt(R[0,0])
rsigma1 = np.sqrt(R[1,1])
rrho12 = R[0,1]/(rsigma0*rsigma1)
print ('observation: x0,x1 :',xr[0],xr[1])
print ('observation: sd0,sd1,rho:',rsigma0,rsigma1,rrho12)
sigma0 = np.sqrt(Cpost[0,0])
sigma1 = np.sqrt(Cpost[1,1])
rho12 = Cpost[0,1]/(sigma0*sigma1)
print ('posterior: x0,x1 :',x[0],x[1])
print ('posterior: sd0,sd1,rho:',sigma0,sigma1,rho12)
from plotGauss import *
plotGauss(xb[0],xb[1],psigma0,psigma1,prho12,\
title='prior',file='figures/Tprior.png')
plotGauss(xr[0],xr[1],rsigma0,rsigma1,rrho12,\
title='observation',file='figures/Tobs.png')
plotGauss(x[0],x[1],sigma0,sigma1,rho12,\
title='posterior',file='figures/Tpost.png')
import os
files = 'figures/Tprior.png figures/Tobs.png figures/Tpost.png'
os.system('convert -delay 50 -loop 0 %s figures/Tanim.gif'%files)
# just remind ourselves of the values above
Cprior = np.matrix(B)
Cpost = np.matrix(Cpost)
xpre = xb
xpost = x
D = 0.5*(np.log(np.linalg.det(Cprior)/np.linalg.det(Cpost)) + \
(Cpost * Cprior.I).trace()[0,0] - Cpost.shape[0])
print ('Dispersion =',D)
S = 0.5*np.dot((xpost-xpre).T * Cprior.I,xpost-xpre)[0,0]
print ('Signal =',S)
print ('relative entropy =',(D+S)/np.log(2.), 'bits')
```
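Because the posterior covariance in the script above is just the inverse Hessian (B^-1 + R^-1)^-1, the BFGS result can be cross-checked against the closed-form Gaussian update; this sketch reuses the same prior and observation.
```python
# Closed-form check of the variational analysis: for a Gaussian prior and
# observation the posterior mean is x = (B^-1 + R^-1)^-1 (B^-1 xb + R^-1 xr).
import numpy as np

xb = np.array([0.1, 0.5])
B = np.matrix([[0.2**2, 0.5*0.2*0.3], [0.5*0.2*0.3, 0.3**2]])
xr = np.array([0.15, 0.4])
R = np.matrix([[0.1**2, 0.0], [0.0, 0.1**2]])

Cpost = (B.I + R.I).I
x_post = np.array(Cpost * (B.I*np.matrix(xb).T + R.I*np.matrix(xr).T)).ravel()
print('analytic posterior mean:', x_post)
print('analytic posterior cov :\n', Cpost)
```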
#### File: docs/python/photosynthesis.py
```python
import numpy as np
import bethy_fapar as fapar
class photosynthesis():
def __init__(self):
'''
Class initialisation and setup of parameters
'''
# zero C in K
self.zeroC = 273.15
# gas constant J mol-1 K-1
self.R_gas = 8.314
# Minimum of maximum carboxylation rate [10^(-6) mol/(m^2 s)]
self.minOfMaxCarboxrate = 1e-12
# Minimum stomatal conductance [mol H2O /(m^2 s)]
self.minStomaConductance = 0.0
# oxygen concentration
self.Ox = 0.21 # mol(O2)mol(air)-1
# energy content of PAR quanta
self.EPAR = 220. # kJmol-1
# photon capture efficiency
self.alpha = 0.28
# maximum Michaelis-Menton values for CO2
self.KC0 = 460.e-6 # mol(CO2)mol(air)-1
# maximum Michaelis-Menton values for O2
self.KO0 = 330.e-3 # mol(O2)mol(air)-1
# activation energy for KC
self.EC = 59396. # J mol-1
# activation energy for KO
self.EO = 35948. # J mol-1
# activation energy for VCMAX
self.EV = 58520. # J mol-1
# activation energy for dark respiration
self.ER = 45000. # J mol-1
# Q10=2 (Collatz et al. 1992)
self.EK = 50967.
# ratio of dark respiration to PVM at 25 C
self.FRDC3 = 0.011
self.FRDC4 = 0.042
# scaling for GammaStar
self.GammaStarScale = 1.7e-6
# Effective quantum efficiency C4
self.ALC4 = 0.04
# Curvature parameter (C4)
self.Theta = 0.83
self.molarMassAir_kg = 28.97e-3
self.molarMassCO2_kg = 44.011e-3
# LAI limit used in N scaling
self.LaiLimit = 3.
def calc_nitrogen_scaling_factors(self,zlai,layer_bounds,declination,latitude):
'''
'''
factors = np.ones((layer_bounds.size,zlai.size))
cos_zenith_noon = np.cos(declination)*np.cos(latitude) \
+ np.sin(declination)*np.sin(latitude)
ww = np.where(cos_zenith_noon < 1e-3)
cos_zenith_noon[ww] = 1e-3
# Extinction factor
k12 = 0.5 / cos_zenith_noon
# Condition: LAI>LaiLimit
ww = np.where(zlai >= self.LaiLimit)
for i in range(layer_bounds.size):
factors[i,:] = np.exp(-k12 * layer_bounds[i] * zlai.flatten())
return factors
def assimilate(self,delta_time,mask,cos_zenith,declination,latitude,\
swdown, par, frac_par_direct, pressure,\
canopy_temp, soil_albedo, CO2_concentration_air,\
canopy_conductance, lai, waterLimitationFlag):
'''
'''
# Express radiation in mol(photons) / (m^2 s)
swdown_mol = swdown/self.EPAR
# soil reflectivity is set to soil albedo of the visible range
soil_reflectivity_par = soil_albedo
# canopy_boundaries_lai
canopy_boundaries_lai = np.arange(ncanopy)/float(ncanopy)
# calculate nitrogen scaling factors
nitrogen_scaling_factors = self.calc_nitrogen_scaling_factors(lai,\
canopy_boundaries_lai,\
declination,\
latitude)
(laiPerLayer,fAPAR) = fapar.faparl(mask,ncanopy,lai,soil_reflectivity_par,cos_zenith,frac_par_direct,\
canopy_boundaries_lai)
# Compute absorbed PAR per leaf area in canopy layer [units: (absorbed photons) / (m^2(leaf area) s)] from
# par and fraction of absorbed PAR (Epar is needed to convert radiation intensity from W/m^2 to mol/(m^2 s))
apar_acc = np.zeros_like(fAPAR)
lai_ = laiPerLayer*1.
ww = np.where(lai_ < 1.e-10)
lai_[ww] = 1.e-10
for icanopy in range(ncanopy):
apar_layer = (par/self.EPAR)*fAPAR[icanopy]/lai_[icanopy]
apar_acc += (par/self.EPAR)*fAPAR[icanopy]*delta_time
# Convert CO2 mass mixing ratio [kg/kg] to volume mixing ratio [mol/mol]
CO2_concentration_mol = CO2_concentration_air * self.molarMassAir_kg / self.molarMassCO2_kg
# estimate CO2 leaf conc
CO2_conc_leaf = self.FCI1C3*CO2_concentration_mol
self.photosynthesis(C3Flag,waterLimitationFlag,PAR,PIRRIN,P,T,CO2_concentration_mol,\
NSCL,ETransport,CarboxRate,Ci,Gs)
def photosynthesis(self,C3Flag,waterLimited,PAR,PIRRIN,P,T,Atm_co2_conc,\
NSCL,ETransport,CarboxRate,\
Ci,Gs):
'''
Farquhar et al. 1980 C3 photosynthesis
args:
C3Flag : True if C3, False for C4
waterLimited : flags to indicate water limited or not
PAR : Absorbed PAR mol(photons) m-2 s-1
PIRRIN : Total irradiance at the surface mol m-2 s-1
P : air pressure (Pa)
T : vegetation (leaf) temperature (K)
Atm_co2_conc : Atmospheric CO2 conc.
NSCL : Nitrogen scaling factor at maximum
carboxylation rate and maximum
electron transport rate
ETransport
: The maximum rate of electron transport
at 25 C for each PFT (mol(CO2) m-2 s-1)
CarboxRate
: The maximum carboxylation rate at 25 C
(micro mol(CO2) m-2 s-1)
Ci : CO2 concentration inside leaf mol(CO2) mol(air)-1
Gs : Stomatal conductance (use for water-limited)
Returns:
(A,Diagnostics) : A = gross assimilation
'''
# return None if no data
if C3Flag.size == 0:
return None
# work out which are C3 and C4
wC3 = np.where(C3Flag)
wC4 = np.where(~C3Flag)
# process C3
if wC3[0].size:
(A3,C3diagnostics) = self.photosynthesisC3(PAR[wC3],PIRRIN[wC3],P[wC3],T[wC3],Atm_co2_conc[wC3],\
NSCL[wC3],ETransport[wC3],CarboxRate[wC3],\
Ci[wC3],Gs[wC3],waterLimited[wC3])
else:
A3 = np.array([])
C3diagnostics = {}
# process C4
if wC4[0].size:
(A4,C4diagnostics) = self.photosynthesisC4(PAR[wC4],PIRRIN[wC4],P[wC4],T[wC4],Atm_co2_conc[wC4],\
NSCL[wC4],ETransport[wC4],CarboxRate[wC4],\
Ci[wC4],Gs[wC4],waterLimited[wC4])
else:
A4 = np.array([])
C4diagnostics = {}
# combine
A = np.zeros_like(C3Flag).astype(float)
A[C3Flag] = A3
A[~C3Flag] = A4
self.Diagnostics = {}
keys = np.unique(list(C3diagnostics.keys()) + list(C4diagnostics.keys()))
for k in keys:
self.Diagnostics[k] = np.zeros_like(A)
try:
self.Diagnostics[k][wC3] = C3diagnostics[k]
except:
pass
try:
self.Diagnostics[k][wC4] = C4diagnostics[k]
except:
pass
self.Diagnostics['C3Flag'] = C3Flag
self.Diagnostics['waterLimited'] = waterLimited
return (A,self.Diagnostics)
def photosynthesisC4(self,PAR,PIRRIN,P,T,Atm_co2_conc,\
NSCL,ETransport,CarboxRate,\
Ci,Gs,waterLimited):
'''
Similar to C3 case, but
For C4 plants the Farquhar equations are replaced by the set of equations of
Collatz et al. 1992:
args:
PAR : Absorbed PAR mol(photons) m-2 s-1
PIRRIN : Total irradiance at the surface mol m-2 s-1
P : air pressure (Pa)
T : vegetation (leaf) temperature (K)
Atm_co2_conc : Atmospheric CO2 conc.
NSCL : Nitrogen scaling factor at maximum
carboxylation rate and maximum
electron transport rate
ETransport
: The maximum rate of electron transport
at 25 C for each PFT (mol(CO2) m-2 s-1)
CarboxRate
: The maximum carboxylation rate at 25 C
(micro mol(CO2) m-2 s-1)
Ci : CO2 concentration inside leaf mol(CO2) mol(air)-1
Gs : Stomatal conductance
waterLimited : flags for water limited or not
Returns:
(A,Diagnostics) : A = gross assimilation
'''
# T1 = 25 C in K
T1 = 25. + self.zeroC
# T0 is veg temperature relative to 25 C
T0 = T - T1
# TC is the temperature in C
TC = T - self.zeroC
# K is the PEPcase CO2 specificity instead of the electron transport capacity
# within C3 plants
K = ETransport * 1.e3 * NSCL \
* np.exp(self.EK * T0 / T1 / self.R_gas / T)
# VCMAX : : assume N content, therefore Rubisco is placed
# where most incoming light is
# NB .. this is a structural consideration
VCMAX = CarboxRate * NSCL * np.exp(self.EV * T0 / T1 / self.R_gas / T)
# dark respiration (mol(CO2)m-2s-1)
Rd = self.FRDC4 * CarboxRate * NSCL \
* np.exp(self.ER * T0 / T1 / self.R_gas / T) \
* self.highTInhibit(TC) \
* self.darkInhibit(PIRRIN)
# C4 gross photosynthesis at given Ci
J0 = (self.ALC4 * PAR + VCMAX) / 2. / self.Theta
Je = J0 - np.sqrt(J0*J0 - VCMAX * self.ALC4 * PAR / self.Theta)
Jc = np.zeros_like(Rd)
A = np.zeros_like(Rd)
waterLimit = np.where(waterLimited)
notWaterLimit = np.where(~waterLimited)
if notWaterLimit[0].size > 0:
Ci_ = Ci[notWaterLimit]
TC_ = TC[notWaterLimit]
Rd_ = Rd[notWaterLimit]
Atm_co2_conc_ = Atm_co2_conc[notWaterLimit]
P_ = P[notWaterLimit]
T_ = T[notWaterLimit]
K_ = K[notWaterLimit]
Je_ = Je[notWaterLimit]
Jc_ = K_ * Ci_
# assimilation is the minimum of Je and Jc
# with a high temperature inhibition
# mol(CO2)m-2s-1
A_ = Je_
ww = np.where(Jc_ < Je_)
A_[ww] = Jc_[ww]
A_ = A_ * self.highTInhibit(TC_)
# stomatal conductance
Gs_ = 1.6 * (A_-Rd_) * self.R_gas * T_/ (Atm_co2_conc_ - Ci_) / P_
ww = np.where(Gs_ < self.minStomaConductance)
Gs_[ww] = self.minStomaConductance
Gs[notWaterLimit] = Gs_
Jc[notWaterLimit] = Jc_
A[notWaterLimit] = A_
else:
# water limted, so Gs is defined and Ci must be calculated
Gs_ = Gs[waterLimit]
TC_ = TC[waterLimit]
Rd_ = Rd[waterLimit]
Atm_co2_conc_ = Atm_co2_conc[waterLimit]
P_ = P[waterLimit]
T_ = T[waterLimit]
K_ = K[waterLimit]
Je_ = Je[waterLimit]
G0 = Gs_ / 1.6 / self.R_gas / T_ * P_
Jc_ = (G0 * Atm_co2_conc_ + Rd_)/(1. + G0/K_)
ww = np.where(Jc_ < 0)
Jc_[ww] = 0.
# assimilation is the minimum of Je and Jc
# with a high temperature inhibition
# mol(CO2)m-2s-1
A_ = Je_
ww = np.where(Jc_ < Je_)
A_[ww] = Jc_[ww]
A_ = A_ * self.highTInhibit(TC_)
maxer1 = A_ - Rd_
maxer2 = G0
ww = np.where(G0<1e-6)
maxer2[ww] = 1e-6
maxer = maxer1/maxer2
ww = np.where(maxer < 0)
maxer[ww] = 0.
Ci_ = Atm_co2_conc_ - maxer
Ci[waterLimit] = Ci_
Jc[waterLimit] = Jc_
A[waterLimit] = A_
Diagnostics = {'max carboxylation rate':VCMAX,\
'internal leaf CO2':Ci,\
'gross assimilation':A,\
'dark respiration':Rd,\
'stomatal conductance':Gs,\
'max e-transport rate':Jmax,\
'carboxylation rate':Jc,\
'e-transport rate':Je}
return (A,Diagnostics)
def photosynthesisC3(self,PAR,PIRRIN,P,T,Atm_co2_conc,\
NSCL,ETransport,CarboxRate,\
Ci,Gs,waterLimited):
'''
Farquhar et al. 1980 C3 photosynthesis
args:
PAR : Absorbed PAR mol(photons) m-2 s-1
PIRRIN : Total irradiance at the surface mol m-2 s-1
P : air pressure (Pa)
T : vegetation (leaf) temperature (K)
Atm_co2_conc : Atmospheric CO2 conc.
NSCL : Nitrogen scaling factor at maximum
carboxylation rate and maximum
electron transport rate
ETransport
: The maximum rate of electron transport
at 25 C for each PFT (mol(CO2) m-2 s-1)
CarboxRate
: The maximum carboxylation rate at 25 C
(micro mol(CO2) m-2 s-1)
Ci : CO2 concentration inside leaf mol(CO2) mol(air)-1
Gs : Stomatal conductance
waterLimited : flags to indicate water limited or not
Returns:
(A,Diagnostics) : A = gross assimilation
'''
# T1 = 25 C in K
T1 = 25. + self.zeroC
# T0 is veg temperature relative to 25 C
T0 = T - T1
# TC is the temperature in C
TC = T - self.zeroC
# Temperature dependent rates and compensation point
KC = self.KC0 * np.exp(self.EC * T0 / T1 / self.R_gas / T)
KO = self.KO0 * np.exp(self.EO * T0 / T1 / self.R_gas / T)
# CO2 compensation point without leaf respiration
# assumed in JSBACH to be a linear fn of temperature (C)
GammaStar = self.GammaStarScale * TC
ww = np.where(GammaStar < 0)
GammaStar[ww] = 0.
# VCMAX : assume N content, therefore Rubisco is placed
# where most incoming light is
# NB .. this is a structural consideration
VCMAX = CarboxRate * NSCL * np.exp(self.EV * T0 / T1 / self.R_gas / T)
# Jmax maximum electron transport rate mol(CO2)m-2s-1
Jmax = ETransport * NSCL * TC/25.
ww = np.where(Jmax <= self.minOfMaxCarboxrate)
Jmax[ww] = self.minOfMaxCarboxrate
# electron transport rate:
ww = np.where(Jmax <= self.minOfMaxCarboxrate)
J = self.alpha * PAR * Jmax \
/ np.sqrt(Jmax * Jmax + self.alpha * self.alpha * PAR * PAR)
J[ww] = 0.
# dark respiration (mol(CO2)m-2s-1)
Rd = self.FRDC3 * CarboxRate * NSCL \
* np.exp(self.ER * T0 / T1 / self.R_gas / T) \
* self.highTInhibit(TC) \
* self.darkInhibit(PIRRIN)
Jc = np.zeros_like(Rd)
Je = np.zeros_like(Rd)
A = np.zeros_like(Rd)
waterLimit = np.where(waterLimited)
notWaterLimit = np.where(~waterLimited)
if notWaterLimit[0].size > 0:
VCMAX_ = VCMAX[notWaterLimit]
Ci_ = Ci[notWaterLimit]
GammaStar_ = GammaStar[notWaterLimit]
Kc_ = KC[notWaterLimit]
KO_ = KO[notWaterLimit]
J_ = J[notWaterLimit]
TC_ = TC[notWaterLimit]
Rd_ = Rd[notWaterLimit]
Atm_co2_conc_ = Atm_co2_conc[notWaterLimit]
P_ = P[notWaterLimit]
T_ = T[notWaterLimit]
# no water limiting
# so Ci is defined and Gs is calculated
# electron transport limited rate Je and
# carboxylating rate Jc (mol(CO2)m-2s-1
Jc_ = VCMAX_ * (Ci_ - GammaStar_)/\
(Ci_ + Kc_ * (1 + (self.Ox/KO_)))
Je_ = J_ * (Ci_ - GammaStar_)/\
(4. * (Ci_ + 2. * GammaStar_))
# assimilation is the minimum of Je and Jc
# with a high temperature inhibition
# mol(CO2)m-2s-1
A_ = Je_
ww = np.where(Jc_ < Je_)
A_[ww] = Jc_[ww]
A_ = A_ * self.highTInhibit(TC_)
# stomatal conductance
Gs_ = 1.6 * (A_-Rd_) * self.R_gas * T_/ (Atm_co2_conc_ - Ci_) / P_
ww = np.where(Gs_ < self.minStomaConductance)
Gs_[ww] = self.minStomaConductance
Gs[notWaterLimit] = Gs_
A[notWaterLimit] = A_
Jc[notWaterLimit] = Jc_
Je[notWaterLimit] = Je_
if waterLimit[0].size > 0:
VCMAX_ = VCMAX[waterLimit]
Gs_ = Gs[waterLimit]
GammaStar_ = GammaStar[waterLimit]
Kc_ = KC[waterLimit]
KO_ = KO[waterLimit]
J_ = J[waterLimit]
TC_ = TC[waterLimit]
Rd_ = Rd[waterLimit]
Atm_co2_conc_ = Atm_co2_conc[waterLimit]
P_ = P[waterLimit]
T_ = T[waterLimit]
# water limted, so Gs is defined and Ci must be calculated
K1 = 2. * GammaStar_
W1 = J_ / 4.
W2 = VCMAX_
K2 = Kc_ * (1 + self.Ox/KO_)
G0 = Gs_ / 1.6 / self.R_gas / T_ * P_
B = Rd_ + W1 + G0 * (Atm_co2_conc_ + K1)
C = W1 * G0 * (Atm_co2_conc_ - GammaStar_) + W1 * Rd_
sqrter = (B*B / 4.) - C
ww = np.where(sqrter < 0)
sqrter[ww] = 0.
Je_ = B / 2. - np.sqrt(sqrter)
ww = np.where(Je_ < 0)
Je_[ww] = 0.
B = Rd_ + W2 + G0 * (Atm_co2_conc_ + K2)
C = W2 * G0 * (Atm_co2_conc_ - GammaStar_) + W2 * Rd_
sqrter = (B*B / 4.) - C
ww = np.where(sqrter < 0)
sqrter[ww] = 0.
Jc_ = B / 2. - np.sqrt(sqrter)
ww = np.where(Jc_ < 0)
Jc_[ww] = 0.
# assimilation is the minimum of Je and Jc
# with a high temperature inhibition
# mol(CO2)m-2s-1
A_ = Je_
ww = np.where(Jc_ < Je_)
A_[ww] = Jc_[ww]
A_ = A_ * self.highTInhibit(TC_)
maxer1 = A_ - Rd_
maxer2 = G0
ww = np.where(G0<1e-6)
maxer2[ww] = 1e-6
maxer = maxer1/maxer2
ww = np.where(maxer < 0)
maxer[ww] = 0.
Ci_ = Atm_co2_conc_ - maxer
Ci[waterLimit] = Ci_
A[waterLimit] = A_
Jc[waterLimit] = Jc_
Je[waterLimit] = Je_
Diagnostics = {'max carboxylation rate':VCMAX,\
'internal leaf CO2':Ci,\
'gross assimilation':A,\
'dark respiration':Rd,\
'stomatal conductance':Gs,\
'max e-transport rate':Jmax,\
'carboxylation rate':Jc,\
'e-transport rate':Je}
return (A,Diagnostics)
def highTInhibit(self,Tc):
'''
Inhibit assimilation and respiration at temperatures
above 55 C
From:
Collatz et al., Physiological and environmental regulation
of stomatal conductance, photosynthesis and transpiration: a model that
includes a laminar boundary layer,
Agricultural and Forest Meteorology, 54, pp. 107-136, 1991
Args:
Tc (np.array or similar) : Leaf temperature in C
Kwargs:
None
Returns:
A scaling term to reduce assimilation/respiration
(same type as input)
'''
out = 1./(1. + np.exp(1.3 * (Tc - 55.)))
ww = np.where(out > 1.)
out[ww] = 1.
return out
def darkInhibit(self,IRR):
'''
Inhibit dark respiration
Brooks and Farquhar, Effect of temperature on the CO2/O2 specificity of RuBisCO
and the rate of respiration in the light, Planta 165, 397-406, 1985
inhibit the dark-respiration to 50% of it's uninhibited value
up from 50 umol/m^2s
From Bethy model (JSBACH)
Args:
IRR (np.array or similar) : Total irradiance at the surface [mol/(m^2 s)]
Kwargs:
None
Returns:
A scaling term to reduce dark respiration
(same type as input)
'''
out = 0.5 + 0.5*np.exp(-IRR * 1e6 / 10.)
ww = np.where(IRR == 0.)
out[ww] = 0.
ww = np.where(out > 1.)
out[ww] = 1.
return out
def error(self,msg):
'''
Print error msg and store in self.lastErrorMsg
'''
from sys import stderr
stderr.write(msg + '\n')
self.lastErrorMsg = msg
```
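A small sketch exercising the two inhibition helpers at the bottom of the class above, assuming the module and its `bethy_fapar` dependency are importable; the temperature and irradiance values are arbitrary.
```python
# Arbitrary inputs for the high-temperature and dark-respiration inhibition
# curves defined in the photosynthesis class above.
import numpy as np
from photosynthesis import photosynthesis

p = photosynthesis()
leaf_temp_C = np.array([20.0, 40.0, 55.0, 60.0])    # leaf temperature in C
irradiance = np.array([0.0, 10e-6, 50e-6, 500e-6])  # mol(photons) m-2 s-1

print(p.highTInhibit(leaf_temp_C))  # ~1 below 55 C, falling towards 0 above it
print(p.darkInhibit(irradiance))    # 0 in darkness, otherwise decaying from ~1 towards 0.5
```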
#### File: docs/python/plotGauss.py
```python
def plotGauss(mean_0,mean_1,sd_0,sd_1,rho,vminmax=None,\
file=None,title=None):
# Import some libraries, in case you haven't yet imported them
import matplotlib.pyplot as plt
import numpy as np
# size of grid
N = 1000
# generate grid (NB -1 to 1 here)
coords = 2. * (np.arange(N+1)/float(N) - 0.5)
x0, x1 = np.meshgrid(coords,coords)
x = np.array([x0, x1])
dx = np.array([x0[0,1] - x0[0,0], x1[1,0] - x1[0,0]])
grid = dx[0] * dx[1]
# set B
b01 = b10 = rho * sd_0 * sd_1
b00 = sd_0**2
b11 = sd_1**2
B = np.matrix([[b00,b01],[b10,b11]])
# set xb: the mean
xb = np.array([mean_0,mean_1])
xxb = np.zeros_like(x)
for i in range(xb.shape[0]): xxb[i,...] = xb[i]
e = x - xxb
n = np.shape(B)[0]
# inverse of B
BI = B.I
# scaling term
scale_1 = (2.*np.pi)**(n/2) * np.sqrt(np.linalg.det(B))
gauss = np.exp(-0.5 * ((e[0,...] * BI[0,0] + e[1,...] * BI[0,1])* e[0,...] \
+ (e[0,...] * BI[1,0] + e[1,...] * BI[1,1])* e[1,...])) \
/ scale_1
# check integral
print ('integral of Gaussian:',gauss.sum() * grid)
# plot
plt.clf()
if title:
plt.title(title)
#if len(list(vminmax)):
# plt.imshow(gauss,origin='lower',interpolation='nearest', \
# vmin=vminmax[0],vmax=vminmax[1],\
# extent=[x0.min(),x0.max(),x1.min(),x1.max()])
#else:
plt.imshow(gauss,origin='lower',interpolation='nearest', \
extent=[x0.min(),x0.max(),x1.min(),x1.max()])
plt.colorbar()
if file == None:
plt.show()
else:
plt.savefig(file)
```
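A minimal call of `plotGauss`, here with the prior statistics used in `da.py` above; the output path is arbitrary and its directory is assumed to exist.
```python
# Plots the prior Gaussian from the da.py example; the figures/ directory is
# assumed to exist, since plotGauss saves to the given path when file is set.
from plotGauss import plotGauss

plotGauss(0.1, 0.5, 0.2, 0.3, 0.5,
          title='prior', file='figures/prior_check.png')
```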
|
{
"source": "JGomezST/algofi-amm-py-sdk",
"score": 3
}
|
#### File: algofi_amm/v0/balance_delta.py
```python
class BalanceDelta():
def __init__(self, pool, asset1_delta, asset2_delta, lp_delta, num_iter=0):
"""Constructor method for :class:`BalanceDelta`
:param pool: a :class:`Pool` object for querying pool data
:type pool: :class:`Pool`
:param asset1_delta: change in the asset 1 balance of the pool
:type asset1_delta: int
:param asset2_delta: change in the asset 2 balance of the pool
:type asset2_delta: int
:param lp_delta: change in the lp balance of the pool
:type lp_delta: int
:param num_iter: optional, denotes the estimated number of stableswap loop iterations used to compute expected txn cost
:type num_iter: int
"""
self.asset1_delta = asset1_delta
self.asset2_delta = asset2_delta
self.lp_delta = lp_delta
self.extra_compute_fee = int(num_iter / (700 / 400)) * 1000
if (lp_delta != 0):
self.price_delta = 0
elif (pool.lp_circulation == 0):
self.price_delta = 0
else:
starting_price_ratio = pool.asset1_balance / pool.asset2_balance
final_price_ratio = (pool.asset1_balance + asset1_delta) / (pool.asset2_balance + asset2_delta)
self.price_delta = abs((starting_price_ratio / final_price_ratio) - 1)
```
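A rough usage sketch: the `Pool` class is not shown here, and `BalanceDelta` only reads `asset1_balance`, `asset2_balance` and `lp_circulation` from it, so a simple stand-in object is used; all numbers are invented.
```python
# Stand-in for the real algofi Pool object, exposing just the attributes that
# BalanceDelta actually reads; the balances and deltas are invented.
from collections import namedtuple
from algofi_amm.v0.balance_delta import BalanceDelta

FakePool = namedtuple("FakePool", ["asset1_balance", "asset2_balance", "lp_circulation"])
pool = FakePool(asset1_balance=1_000_000, asset2_balance=2_000_000, lp_circulation=500_000)

# A hypothetical swap that adds asset 1 to the pool and removes asset 2.
delta = BalanceDelta(pool, asset1_delta=10_000, asset2_delta=-19_800, lp_delta=0, num_iter=5)
print(delta.price_delta)        # relative change of the pool price ratio
print(delta.extra_compute_fee)  # extra fee budgeted for stableswap iterations
```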
|
{
"source": "jgompis/kdabtv",
"score": 2
}
|
#### File: ex-model-from-cpp/pyqt/mymodel.py
```python
import random
from PyQt5.QtCore import QAbstractListModel, QObject, QMetaEnum, QTimer, pyqtSlot as Slot, QModelIndex, QVariant
class Data:
def __init__(self, name, flag, population):
super(Data, self).__init__()
self.name = name
self.flag = flag
self.population = population
class MyModel(QAbstractListModel, QObject):
def __init__(self):
super(MyModel, self).__init__()
self._m_data = [Data("Denmark", "images/denmark.jpg", 5.6),
Data("Sweden", "images/sweden.jpg", 9.6),
Data("Iceland", "images/iceland.jpg", 0.3),
Data("Norway", "images/norway.jpg", 5.1),
Data("Finland", "images/finland.jpg", 5.4)]
growthTimer = QTimer(self)
growthTimer.timeout.connect(self.growPopulation)
growthTimer.start(2000)
self.RAND_MAX = 2147483647
class Roles(QMetaEnum):
NameRole = 0
FlagRole = 1
PopulationRole = 2
def rowCount(self, parent):
if parent.isValid():
return 0
return len(self._m_data)
def data(self, index, role):
if not index.isValid():
return QVariant()
if role == self.Roles.NameRole:
return self._m_data[index.row()].name
elif role == self.Roles.FlagRole:
return self._m_data[index.row()].flag
elif role == self.Roles.PopulationRole:
return self._m_data[index.row()].population
else:
return QVariant()
def roleNames(self):
_name = "name".encode('utf-8')
_flag = "flag".encode('utf-8')
_population = "population".encode('utf-8')
mapping = {self.Roles.NameRole: _name,
self.Roles.FlagRole: _flag,
self.Roles.PopulationRole: _population}
return mapping
@Slot(int)
def duplicateData(self, row):
if row < 0 or row >= len(self._m_data):
return
data = self._m_data[row]
rowOfInsert = row + 1
QAbstractListModel.beginInsertRows(self, QModelIndex(), rowOfInsert, rowOfInsert)
self._m_data.insert(rowOfInsert, data)
QAbstractListModel.endInsertRows(self)
@Slot(int)
def removeData(self, row):
if row < 0 or row >= len(self._m_data):
return
data = self._m_data[row]
QAbstractListModel.beginRemoveRows(self, QModelIndex(), row, row)
self._m_data.remove(data)
QAbstractListModel.endRemoveRows(self)
@Slot()
def growPopulation(self):
growthFactor = 0.01 / self.RAND_MAX
count = len(self._m_data)
for i in range(0, count):
self._m_data[i].population += self._m_data[i].population * random.randint(1, self.RAND_MAX) * growthFactor
startIndex = QAbstractListModel.index(self, 0, 0)
endIndex = QAbstractListModel.index(self, count - 1, 0)
self.dataChanged.emit(startIndex, endIndex, [self.Roles.PopulationRole])
```
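One plausible way to expose `MyModel` to QML with PyQt5; the QML file name is an assumption, and any `ListView` bound to `myModel` using the `name`, `flag` and `population` roles defined above would work.
```python
# Hypothetical launcher: exposes MyModel as a QML context property.
import sys
from PyQt5.QtCore import QUrl
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtQml import QQmlApplicationEngine
from mymodel import MyModel

app = QGuiApplication(sys.argv)
engine = QQmlApplicationEngine()

model = MyModel()
engine.rootContext().setContextProperty("myModel", model)
engine.load(QUrl.fromLocalFile("main.qml"))  # assumed QML file with a ListView

sys.exit(app.exec_())
```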
#### File: ex_timer_export/pyqt/randomtimer.py
```python
import random
from PyQt5.QtCore import QObject, QTimer, pyqtSignal as Signal, pyqtSlot as Slot, pyqtProperty
class RandomTimer(QObject):
timeout = Signal()
intervalChanged = Signal()
activeChanged = Signal()
def __init__(self, parent=None):
super(RandomTimer, self).__init__()
self.timer = QTimer()
self.timer.timeout.connect(self.timeout)
@Slot()
def start(self):
print("timer start")
if not self.timer.isActive():
self.timer.start()
self.activeChanged.emit()
@Slot()
def stop(self):
print("timer stop")
if self.timer.isActive():
self.timer.stop()
self.activeChanged.emit()
@Slot(int, int, result=int)
def randomInterval(self, min, max):
range = max - min
msec = min + random.randint(0, range)
return msec
@pyqtProperty(int, notify=intervalChanged)
def interval(self):
return self.timer.interval()
@interval.setter
def interval(self, msec):
if self.timer.interval() != msec:
self.timer.setInterval(msec)
self.intervalChanged.emit()
print("interval = {}".format(self.timer.interval()))
@pyqtProperty(bool, notify=activeChanged)
def active(self):
return self.timer.isActive()
```
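Given that this file lives in an `ex_timer_export` example, the usual companion step is registering the type for QML; a hedged sketch follows, where the import URI and QML file name are assumptions.
```python
# Hypothetical registration script: after qmlRegisterType, QML can use
#   import Timer.Example 1.0
#   RandomTimer { interval: 500; onTimeout: console.log("tick") }
import sys
from PyQt5.QtCore import QUrl
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtQml import QQmlApplicationEngine, qmlRegisterType
from randomtimer import RandomTimer

qmlRegisterType(RandomTimer, "Timer.Example", 1, 0, "RandomTimer")

app = QGuiApplication(sys.argv)
engine = QQmlApplicationEngine()
engine.load(QUrl.fromLocalFile("main.qml"))  # assumed QML file instantiating RandomTimer
sys.exit(app.exec_())
```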
#### File: Qt-Widgets-and-more/compileTime/time-compile.py
```python
import subprocess, re, sys,os, os.path, shutil, time, glob
ROOT="/home/blackie/dump/KDABViewer"
BUILDROOT=ROOT+"/build"
ITERATIONS=5
FOREAL=1
CCACHE="/usr/lib/ccache"
def runCommand(cmd):
print(" ".join(cmd))
if FOREAL:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
res = process.communicate()[0]
#print(res)
def nukeBuildDir():
if FOREAL:
shutil.rmtree(BUILDROOT)
os.mkdir(BUILDROOT)
def clearCCache():
runCommand(["/usr/bin/ccache", "-C"])
def runCMake(clang, ninja, define=None):
command=["cmake"]
if clang:
command = command + ["-DCMAKE_CXX_COMPILER=clang++"]
if ninja:
command = command + ["-G", "Ninja"]
if define:
command = command + ["-DCMAKE_CXX_FLAGS=-D" + define]
command = command + [".."]
runCommand(command)
def compile(ninja):
os.environ["MAKEFLAGS"]="-j 16"
command = ["make", "-j", "16"]
if ninja:
command = ["ninja"]
runCommand(command)
def setOutputFile(filename):
global TIMINGFILE
TIMINGFILE="/home/blackie/profiling/"+filename
writeHeader()
def writeHeader():
FILE = open(TIMINGFILE, "w")
FILE.write("compiler,build system,cclang on,stage,time\n")
def addOutputLine(clang,ninja,step,time):
FILE = open(TIMINGFILE, "a+")
ccacheon = "ccache" in os.environ["PATH"]
FILE.write("%s,%s,%s,%s,%s\n" % ("clang" if clang else "gcc", "ninja" if ninja else "make", "yes" if ccacheon else "no", step, int(round(time))))
def makeClean(ninja):
runCommand(["ninja" if ninja else "make", "clean"])
def timeAndWrite(clang,ninja,step):
start=time.time()
compile(ninja)
end = time.time()
addOutputLine(clang,ninja, step, end-start)
def setOrUnsetCCacheInPath(doInclude):
path = os.environ["PATH"].split(":")
path = [item for item in path if "ccache" not in item]
if doInclude:
path = [CCACHE] + path
os.environ["PATH"] = ":".join(path)
# ---------------------------- Test funcitons
def oneGoWithCompilterAndBuildSystem(clang,ninja):
clearCCache()
nukeBuildDir()
os.chdir(BUILDROOT)
runCMake(clang=clang, ninja=ninja)
timeAndWrite(clang, ninja, "full build")
# rebuild after make clean
clearCCache()
makeClean(ninja)
timeAndWrite(clang, ninja, "clean build")
def compileAndBuildSystemTest():
setOutputFile("compilerAndBuild.csv")
setOrUnsetCCacheInPath(0)
for round in range(ITERATIONS):
print("compileAndBuildSystemTest------> Round %d of %d" % (round+1, ITERATIONS))
for ninja in [0, 1]:
for clang in [0,1]:
oneGoWithCompilterAndBuildSystem(clang=clang,ninja=ninja)
def ccacheTest():
setOutputFile("ccache.csv")
for useccache in [0,1]:
setOrUnsetCCacheInPath(useccache)
for round in range(ITERATIONS):
print("ccacheTest------> Round %d of %d (using CCache=%s)" % (useccache*ITERATIONS + round+1, ITERATIONS*2,"yes" if useccache else "no"))
oneGoWithCompilterAndBuildSystem(clang=1, ninja=1)
def runPCHMutation(headers):
for index in range(len(headers)+1):
subList = headers[:index]
if FOREAL:
FILE = open(ROOT + "/KDABViewer_pch.h","w")
for x in subList:
FILE.write("#include <%s>\n" % x)
FILE.close()
nukeBuildDir()
os.chdir(BUILDROOT)
runCMake(clang=1,ninja=1)
compile(ninja=1)
for round in range(ITERATIONS):
print("pchTest------> Round %d of %d" % (index*ITERATIONS + round+1, ITERATIONS*len(headers)+1))
clearCCache()
makeClean(ninja=1)
timeAndWrite(clang=1, ninja=1, step="/" + "/".join(subList))
def pchTest():
setOutputFile("PCH.csv")
setOrUnsetCCacheInPath(0)
runPCHMutation(["QtWidgets", "QtGui", "QtCore", "KDChart", "memory", "functional"]) # "chrono", "cmath", "optional", "mutex", "array", "vector", "numeric", "algorithm"
runPCHMutation(["QtCore", "QtGui", "QtWidgets"])
# -------- pchMostUsedTest
REGEXP = re.compile("^#include *<(Q.*/)?([a-zA-Z_]+)>")
def countIncludes(filename, map):
with open(filename) as fp:
for line in fp.readlines():
match = REGEXP.match(line)
if match:
str = match.group(2)
if str in map:
map[str]= map[str]+1
else:
map[str] = 1
def findSystemIncludes():
map = {}
for filename in glob.glob(ROOT + "/**/*.cpp", recursive=1)+ glob.glob(ROOT + "/**/*.h",recursive=1) :
if "3rdparty" in filename or "prefix" in filename or "xternal" in filename:
continue
countIncludes(filename, map)
list = sorted(map.items(), key=lambda x: x[1])
list.reverse()
print(list)
return [key for (key,count) in list]
def pchMostUsedTest():
setOutputFile("PCH-most-used.csv")
setOrUnsetCCacheInPath(0)
nukeBuildDir()
os.chdir(BUILDROOT)
# We need to build it all first, so we get all the ui_* files into existance
runCMake(clang=1,ninja=1)
compile(ninja=1)
list = findSystemIncludes()
steps=len(list)
for stage in range(steps):
with open(ROOT + "/KDABViewer_pch.h","w") as FILE:
for i in range(stage):
FILE.write("#include<%s>\n" % list[i])
runCMake(clang=1,ninja=1)
compile(ninja=1)
for round in range(ITERATIONS):
print("pchMostUsedTest------> Round %d of %d" % (stage*ITERATIONS + round+1, ITERATIONS*steps))
makeClean(ninja=1)
timeAndWrite(clang=1, ninja=1, step="%d" % stage)
#compileAndBuildSystemTest()
#ccacheTest()
#pchTest()
#pchMostUsedTest()
```
#### File: Qt-Widgets-and-more/debuggingHelper/QWAMTypes.py
```python
from dumper import Children, SubItem, UnnamedSubItem, DumperBase
from utils import DisplayFormat, TypeCode
from qttypes import *
import struct
####################### Your code below #######################
### Part 1
def qdump__Foo(d, value):
i = value["i"].integer()
j = value["j"].integer()
d.putValue("[%d,%d]" % (i,j))
d.putExpandable()
if d.isExpanded():
with Children(d):
d.putSubItem('j', value["j"])
# Don't try this at home :-)
# and the "i" (that is the one in quotes stand for type integer...
d.putSubItem('i', d.createValue(struct.pack("i",i), d.intType()))
with SubItem(d, "sum"):
d.putValue(i+j)
d.putType(d.intType()) # not really needed though
### Part 2
def qdump__MyNameSpace__Foo(d, value):
d.putValue("Secret!")
d.putPlainChildren(value)
### Part 3
#def qdump__Money(d, value):
# amount = value["m_amount"].floatingPoint()
# currency = value["m_currency"].integer()
# d.putValue("%s %s" % (("EUR" if (currency == 0) else "USD"), amount))
# d.putPlainChildren(value)
### Part 4
def qdump__Money(d, value):
str = d.call("@QString", value, "toString")
d.putStringValue(str)
d.putPlainChildren(value)
### Part 5
def qdump__FooOrBar(d, value):
str=d.parseAndEvaluate("fooOrBarToString(*((FooOrBar*)%s))" % value.laddress)
d.putStringValue(str)
d.putPlainChildren(value)
#### Part 6
def qdump__UserID(d, value):
employeeID = value.integer()
str=d.parseAndEvaluate("EmployeeDatabase::instance().lookup(%d)" % employeeID)
d.putStringValue(str)
def qdump__UserIDList(d, value):
d.createTypedefedType(d.lookupType("int"), "UserID");
d.formats[d.currentIName] = DisplayFormat.DirectQListStorage
d.putItem(value.cast("QList<UserID>"))
```
|
{
"source": "jgoney/api-test",
"score": 3
}
|
#### File: jgoney/api-test/server_test.py
```python
import json
import os
import unittest
from bson import objectid
from pymongo import MongoClient, TEXT
import config
import errors
import server
class ApiBaseTestCase(unittest.TestCase):
def setUp(self):
"""Initialize test app"""
self.app = server.app.test_client()
class ApiMongoTestCase(ApiBaseTestCase):
def setUp(self):
"""Setup Mongo connection"""
super().setUp()
client = MongoClient()
self.collection = client.api_test.songs
def tearDown(self):
super().tearDown()
self.collection.drop()
class ApiEmptyDBTestCase(ApiMongoTestCase):
def test_get_songs_empty_db(self):
"""
A GET on /songs with an empty database should return 200 and an empty JSON.
"""
rv = self.app.get('/songs')
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.data, b'[]')
def test_get_avg_difficulty_empty_db(self):
"""
A GET on /songs with an empty database should return 200 and an empty JSON.
"""
rv = self.app.get('/songs/avg/difficulty')
self.assertEqual(rv.status_code, 200)
self.assertEqual(rv.data, b'[]')
class ApiFixtureDBTestCase(ApiMongoTestCase):
def setUp(self):
"""Insert test data into db"""
super().setUp()
with open('songs.json') as f:
result = self.collection.insert_many([json.loads(line) for line in f.readlines()])
self.assertTrue(result.acknowledged)
self.fixture_ids = result.inserted_ids
self.collection.create_index([("artist", TEXT), ("title", TEXT)])
def test_get_songs(self):
rv = self.app.get('/songs')
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(len(j), 10)
def test_get_songs_with_n(self):
rv = self.app.get('/songs?n=3')
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(len(j), 3)
self.assertEqual(j[2]['artist'], 'Mr Fastfinger')
def test_get_songs_with_p(self):
rv = self.app.get('/songs?p=1')
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(len(j), 1)
self.assertEqual(j[0]['title'], 'Babysitting')
def test_get_songs_with_n_and_p(self):
rv = self.app.get('/songs?p=2&n=3')
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(len(j), 3)
self.assertEqual(j[0]['title'], 'Greasy Fingers - boss level')
def test_get_songs_with_p_too_big(self):
rv = self.app.get('/songs?p=3')
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(len(j), 0)
self.assertEqual(j, [])
def test_get_songs_with_invalid_n_and_p(self):
rv = self.app.get('/songs?p=fake')
self.assertEqual(rv.status_code, 500)
self.assertRaises(errors.InvalidArgumentError)
rv = self.app.get('/songs?n=fake')
self.assertEqual(rv.status_code, 500)
self.assertRaises(errors.InvalidArgumentError)
rv = self.app.get('/songs?p=fake&n=fake')
self.assertEqual(rv.status_code, 500)
self.assertRaises(errors.InvalidArgumentError)
rv = self.app.get('/songs?n=-1')
self.assertEqual(rv.status_code, 500)
self.assertRaises(errors.InvalidArgumentError)
def test_get_avg_difficulty(self):
rv = self.app.get('/songs/avg/difficulty')
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(len(j), 4)
def test_get_avg_difficulty_with_level(self):
rv = self.app.get('/songs/avg/difficulty?level=6')
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(len(j), 1)
def test_get_avg_difficulty_with_level_not_found(self):
rv = self.app.get('/songs/avg/difficulty?level=11')
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(len(j), 0)
self.assertEqual(j, [])
def test_get_avg_difficulty_with_invalid_level(self):
rv = self.app.get('/songs/avg/difficulty?level=fake')
self.assertEqual(rv.status_code, 500)
self.assertRaises(errors.InvalidArgumentError)
j = json.loads(rv.data)
self.assertEqual(j['message'], '"fake" is not a valid argument for parameter "level"')
def test_songs_search_no_message(self):
rv = self.app.get('/songs/search')
self.assertEqual(rv.status_code, 500)
self.assertRaises(errors.MissingRequireArgumentError)
j = json.loads(rv.data)
self.assertEqual(j['message'], 'argument "message" is required for endpoint "/songs/search"')
def test_songs_search_valid_message(self):
def _assert_valid_message(rv):
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(len(j), 1)
self.assertEqual(j[0]['artist'], '<NAME>')
# Basic test
rv = self.app.get('/songs/search?message=Fastfinger')
_assert_valid_message(rv)
# Mixed cases
rv = self.app.get('/songs/search?message=fAsTfInGeR')
_assert_valid_message(rv)
# Search on title
rv = self.app.get('/songs/search?message=Awaki-Waki')
_assert_valid_message(rv)
def test_songs_search_valid_message_return_multi(self):
rv = self.app.get('/songs/search?message=Yousicians')
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(len(j), 10)
def test_songs_search_valid_message_edge_cases(self):
# MongoDB $text search ignores stop words such as 'the'
rv = self.app.get('/songs/search?message=the')
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(len(j), 0)
# MongoDB $text search ignores diacritics by default
rv = self.app.get('/songs/search?message=gréåsy')
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(len(j), 1)
self.assertEqual(j[0]['title'], 'Greasy Fingers - boss level')
# Spaces in the message are okay
rv = self.app.get('/songs/search?message=greasy fingers')
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(len(j), 1)
self.assertEqual(j[0]['title'], 'Greasy Fingers - boss level')
def test_songs_rating_null_body(self):
rv = self.app.post('/songs/rating')
self.assertEqual(rv.status_code, 500)
self.assertRaises(errors.MissingRequireArgumentError)
j = json.loads(rv.data)
self.assertEqual(j['message'], 'argument "song_id" is required for endpoint "/songs/rating"')
def test_songs_rating_invalid_id(self):
rv = self.app.post('/songs/rating', data={'song_id': 'hdhhd'})
self.assertEqual(rv.status_code, 500)
self.assertRaises(errors.InvalidArgumentError)
j = json.loads(rv.data)
self.assertEqual(j['message'], '"hdhhd" is not a valid argument for parameter "song_id"')
def test_songs_rating_invalid_rating(self):
# Missing 'rating' param should throw an error
rv = self.app.post('/songs/rating', data={'song_id': objectid.ObjectId()})
self.assertEqual(rv.status_code, 500)
self.assertRaises(errors.InvalidArgumentError)
j = json.loads(rv.data)
self.assertEqual(j['message'], '"None" is not a valid argument for parameter "rating"')
# Invalid 'rating' param should throw an error
rv = self.app.post('/songs/rating', data={'song_id': objectid.ObjectId(), 'rating': 'invalid'})
self.assertEqual(rv.status_code, 500)
self.assertRaises(errors.InvalidArgumentError)
j = json.loads(rv.data)
self.assertEqual(j['message'], '"invalid" is not a valid argument for parameter "rating"')
# Out of bounds 'rating' param should throw an error
rv = self.app.post('/songs/rating', data={'song_id': objectid.ObjectId(), 'rating': 10})
self.assertEqual(rv.status_code, 500)
self.assertRaises(errors.InvalidArgumentError)
j = json.loads(rv.data)
self.assertEqual(j['message'], '"10" is not a valid argument for parameter "rating"')
def test_songs_rating_not_found(self):
oid = objectid.ObjectId()
rv = self.app.post('/songs/rating', data={'song_id': oid, 'rating': 4})
self.assertEqual(rv.status_code, 404)
self.assertRaises(errors.ObjectNotFoundError)
j = json.loads(rv.data)
self.assertEqual(j['message'], 'song_id "{}" not found'.format(oid))
def test_songs_rating_success(self):
oid = self.fixture_ids[0]
rv = self.app.post('/songs/rating', data={'song_id': oid, 'rating': 4})
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(j['status'], 'OK')
# Assert that rating was in fact incremented correctly
rv = self.app.get('/songs')
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(j[0]['rating'], {'4': 1})
def test_songs_avg_rating_success(self):
oid = self.fixture_ids[0]
rv = self.app.post('/songs/rating', data={'song_id': oid, 'rating': 4})
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(j['status'], 'OK')
# Assert that rating was in fact incremented correctly
rv = self.app.get('/songs/avg/rating/{}'.format(oid))
self.assertEqual(rv.status_code, 200)
j = json.loads(rv.data)
self.assertEqual(j['min_rating'], 4)
self.assertEqual(j['max_rating'], 4)
self.assertEqual(j['avg_rating'], 4)
def test_songs_avg_rating_invalid_oid(self):
oid = 'fake'
rv = self.app.get('/songs/avg/rating/{}'.format(oid))
self.assertEqual(rv.status_code, 500)
j = json.loads(rv.data)
self.assertEqual(j['message'], '"fake" is not a valid argument for parameter "song_id"')
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jgoney/patchjam",
"score": 3
}
|
#### File: patchjam/scripts/parse_sysex.py
```python
import os
import sys
import psycopg2
def split_sysex_str(s):
limit = bytes.fromhex("F7")
parts = s.split(limit)
for i, p in enumerate(parts):
p += limit
f_name = "part_{}.syx".format(i + 1,)
print(f_name)
with open(f_name, "wb") as f_out:
f_out.write(p)
def parse_sysex_voices(f_name):
voices = []
with open(f_name, "rb") as f:
sysex = f.read()
check_byte = sysex[-2:-1].hex()
# distinguishes where the voice data starts in sysex
header = bytes.fromhex("F0 43 00 09 20 00")
f.seek(0)
index = sysex.index(header)
offset = len(header) + index
f.seek(offset) # seek to beginning of voice data
byte_sum = sum(f.read(4096))
checksum = ((128 - byte_sum) & 127) % 128
checksum = "{0:02x}".format(
checksum
) # convert checksum to 0-padded hex string for comparison
if checksum != check_byte:
with open("checksum_errors.txt", "a") as errors:
# log checksum error, but continue since the voices may still work anyway
errors.write(f_name + "\n")
f.seek(offset)
try:
while f:
voice = f.read(128)
# Check last byte for f7 ("end of sysex" byte)
if voice[1:2].hex() == "f7":
break
name = voice[-10:]
name = name.decode("utf-8", "strict")
name = name.replace("\x00", "")
print(name)
d = {
"name": name,
"voice_data": voice[:-10],
"full_voice": voice,
"original_sysex": os.path.basename(f_name),
}
yield (d)
except ValueError:
raise
def walk_and_parse(path):
print("Walking {}".format(path))
unique = set([])
voices = []
total = 0
author = ""
for root, dirs, files in os.walk(path):
print("########")
print(root, dirs, files)
print("########")
if "!author.txt" in files:
with open(os.path.join(root, "!author.txt"), "r") as author_file:
temp_author = author_file.read().strip()
if temp_author:
author = temp_author
else:
author = os.path.basename(root)
print("author:", author)
for fn in files:
if fn.lower().endswith("syx"):
try:
for v in parse_sysex_voices(os.path.join(root, fn)):
total += 1
if v["voice_data"] not in unique:
unique.add(v["voice_data"])
v["author"] = author
voices.append(v)
except ValueError as e:
print(e)
with open("errors.txt", "a") as errors:
errors.write(os.path.join(root, fn) + "\n")
print(len(voices))
print(len(unique))
print(total)
return voices
if __name__ == "__main__":
patch_path = os.getenv("PATCH_PATH")
if not patch_path:
print("PATCH_PATH invalid, exiting...")
sys.exit(1)
print(patch_path)
conn = psycopg2.connect(
database=os.getenv("POSTGRES_DB"),
user=os.getenv("POSTGRES_USER"),
password=os.getenv("POSTGRES_PASSWORD"),
host="127.0.0.1",
port="5432",
)
cur = conn.cursor()
cur.execute("DROP TABLE IF EXISTS patches;")
cur.execute("DROP TABLE IF EXISTS patches_favorites;")
cur.execute(
"""
CREATE TABLE IF NOT EXISTS patches
(id serial PRIMARY KEY NOT NULL,
name varchar NOT NULL,
author varchar,
original_sysex varchar,
synth varchar,
data BYTEA NOT NULL);
"""
)
conn.commit()
voices = walk_and_parse(patch_path)
print("Inserting {} voices...".format(len(voices)),)
for v in voices:
cur.execute(
"INSERT INTO patches (name, author, original_sysex, synth, data) VALUES (%s, %s, %s, %s, %s)",
(v["name"], v["author"], v["original_sysex"], "DX7", v["full_voice"]),
)
conn.commit()
conn.close()
```
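The checksum comparison above follows the usual Yamaha DX7 bulk-dump convention: the low seven bits of the two's complement of the summed voice bytes must match the byte stored just before the terminating F7. A minimal standalone sketch of that rule, using fabricated data rather than a real dump:

```python
def dx7_checksum(voice_bytes: bytes) -> int:
    """Return the 7-bit checksum for a block of DX7 voice data."""
    # Same arithmetic as above: ((128 - sum) & 127) % 128 == (-sum) & 127
    return (128 - sum(voice_bytes)) & 127


if __name__ == "__main__":
    fake_voice_data = bytes(range(128)) * 32  # 4096 fabricated bytes
    check = dx7_checksum(fake_voice_data)
    # parse_sysex_voices compares this value, as a zero-padded hex string,
    # against the byte preceding the final F7 in the sysex file.
    print("{0:02x}".format(check))
```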
|
{
"source": "jgonggrijp/cookiecutter-webapp-deluxe",
"score": 2
}
|
#### File: backend/{{cookiecutter.slug}}/placeholder_test.py
```python
def test_something(db):
assert 3 < 4
```
|
{
"source": "jgongo/shaka-player-embedded",
"score": 3
}
|
#### File: shaka/tools/make_license_file.py
```python
output. The paths should be relative to the file itself. For example,
Title: path/to/LICENSE
The extra licenses file is expected to contain input similar to the output;
a series of licenses, each with a title on a separate line prefixed by '@'.
This file is used in the demo to display the various licenses for the
dependencies of the project.
"""
import argparse
import os
import sys
import json
def _GenLicensesFile(out, paths, extras, base_path):
"""Reads the input files, and writes a licenses.txt file to the given output.
Args:
out: A file object for the output.
paths: A file object for the paths file.
extras: A file object for the extra licenses file.
base_path: The URL base used to resolve the relative URLs in the paths file.
"""
licenses = []
for line in paths:
name, path = line.split(': ', 1)
path = os.path.join(base_path, path.rstrip('\n'))
with open(path, 'r') as file:
licenses.append({'name': name, 'text': file.read()})
while True:
name = extras.readline()
if not name: break
text = extras.readline().replace('\\n', '\n')
licenses.append({'name': name, 'text': text})
out.write(json.dumps(licenses))
def main(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--paths-file', required=True,
help='A file that contains paths to licenses.')
parser.add_argument('--extras-file', required=True,
help='A file that contains extra license text, ' +
'copied verbatim.')
parser.add_argument('--output', required=True,
help='The path to the file to generate.')
parsed_args = parser.parse_args(argv)
output_dir = os.path.dirname(parsed_args.output)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(parsed_args.output, 'w') as out:
with open(parsed_args.paths_file, 'r') as paths:
with open(parsed_args.extras_file, 'r') as extras:
base_path = os.path.dirname(parsed_args.paths_file)
_GenLicensesFile(out, paths, extras, base_path)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
```
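The extras-file loop in `_GenLicensesFile` simply consumes alternating name/text lines and unescapes `\n` sequences. A small self-contained sketch of that behaviour (the sample license text below is made up):

```python
import io
import json

# Two fabricated extra licenses, each a "@Title" line followed by one line
# of escaped license text, mirroring what _GenLicensesFile expects.
extras = io.StringIO(
    "@zlib\nPermission is hereby granted...\\nNo warranty.\n"
    "@example-lib\nCopyright (c) 2020 Example.\\nAll rights reserved.\n"
)

licenses = []
while True:
    name = extras.readline()
    if not name:
        break
    text = extras.readline().replace('\\n', '\n')
    licenses.append({'name': name.strip(), 'text': text})

print(json.dumps(licenses, indent=2))
```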
#### File: shaka/tools/run_configure.py
```python
import argparse
import os
import subprocess
import sys
_TOOLS_DIR = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
_THIRD_PARTY_DIR = os.path.join(_TOOLS_DIR, '..', '..', 'third_party')
def FlagsForCpu(cpu, target_os, sysroot):
"""Returns the flags to build for the given CPU."""
FLAGS = {
'x64': '-m64',
'x86': '-m32',
'arm': '-arch armv7',
'arm64': '-arch arm64',
}
assert ' ' not in sysroot, "sysroot path can't contain spaces"
assert cpu in FLAGS, 'Unsupported CPU architecture'
extra = ' -isysroot ' + sysroot if sysroot else ''
if target_os == 'ios':
if cpu == 'x86' or cpu == 'x64':
extra += ' -mios-simulator-version-min=9.0'
else:
extra += ' -miphoneos-version-min=9.0'
return FLAGS[cpu] + extra
def _GetHost(cpu, target_os):
"""Returns the host triple for the given OS and CPU."""
if cpu == 'x64':
cpu = 'x86_64'
elif cpu == 'arm64':
cpu = 'aarch64'
if target_os == 'linux':
return cpu + '-unknown-linux'
elif target_os == 'mac':
return cpu + '-apple-darwin'
elif target_os == 'ios':
return cpu + '-ios-darwin'
else:
raise RuntimeError('Unsupported host')
def MakeClean(src_dir):
"""Runs |make distclean| to clean up old configurations."""
# Ignore bad return code (e.g. errors for missing Makefile).
with open(os.devnull, 'w') as null:
subprocess.call(['make', 'clean'], cwd=src_dir, stdout=null, stderr=null)
subprocess.call(['make', 'distclean'], cwd=src_dir, stdout=null,
stderr=null)
def CrossConfigure(src_dir, out_dir, cpu, target_os, sysroot, extra_flags):
"""Runs a configure script for cross-compiling with the given options.
Arguments:
src_dir: The path of the source code.
out_dir: The path to put the generated files.
cpu: The CPU to build for.
target_os: The OS to build for.
sysroot: The path to the compiler sysroot (optional).
extra_flags: A list of extra flags to pass.
Returns:
The configure process' return code.
"""
# Create a relative path so the absolute path can contain spaces.
third_party_rel = os.path.relpath(_THIRD_PARTY_DIR, os.path.abspath(out_dir))
prefix = os.path.join(third_party_rel, 'llvm-build', 'Release+Asserts', 'bin')
assert ' ' not in prefix, "Path to compiler can't contain spaces"
clang_flags = FlagsForCpu(cpu, target_os, sysroot)
flags = extra_flags
flags += [
'--srcdir=' + os.path.relpath(src_dir, os.path.abspath(out_dir)),
'--host=' + _GetHost(cpu, target_os),
'CC=%s/clang %s' % (prefix, clang_flags),
'CPP=%s/clang -dM -E %s' % (prefix, clang_flags),
'CXX=%s/clang++ %s' % (prefix, clang_flags),
'CXXCPP=%s/clang++ -dM -E %s' % (prefix, clang_flags),
]
if not os.path.exists(out_dir):
os.makedirs(out_dir)
exe = os.path.join(os.path.abspath(src_dir), 'configure')
proc = subprocess.Popen([exe] + flags,
cwd=out_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, _ = proc.communicate()
if proc.returncode != 0:
print(stdout, file=sys.stderr)
return proc.returncode
```
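A brief usage sketch of the helpers above (the sysroot path is a placeholder); it shows the compiler flags and host triple that `CrossConfigure` would hand to a `./configure` script:

```python
flags = FlagsForCpu('arm64', 'ios', '/path/to/iPhoneOS.sdk')
host = _GetHost('arm64', 'ios')
print(flags)  # -arch arm64 -isysroot /path/to/iPhoneOS.sdk -miphoneos-version-min=9.0
print(host)   # aarch64-ios-darwin
```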
|
{
"source": "JGONGSQ/controlM-rest",
"score": 2
}
|
#### File: controlm_rest_api/library/core.py
```python
import requests
import json
from ..settings.local import CTM, TEST_FOLDER
class CoreAPIs(object):
def __init__(self, username, password, base_url):
self.username = username
self.password = password
self.base_url = base_url
self.headers = {'content-type': 'application/json'}
self.token = self.login().json()['token']
self.auth_headers = {"Authorization": "Bearer " + self.token}
def login(self):
# define the login url
login_url = self.base_url + '/session/login'
body = {
"username": self.username,
"password": self.password
}
response = requests.post(login_url, headers=self.headers, json=body, verify=False)
return response
def deploy_jobs(self, jobfile):
# define the deploy job url
deploy_job_url = self.base_url + '/deploy'
# forming the file_data
file_data = {'definitionsFile': open(jobfile, 'rb')}
response = requests.post(deploy_job_url, headers=self.auth_headers, files=file_data, verify=False)
return response
def run_order_jobs(self, ctm=CTM, folder=TEST_FOLDER, **kwargs):
# define the run order url
run_order_job_url = self.base_url + '/run/order'
body = {
"ctm": ctm,
"folder": folder
}
if 'kwargs' in kwargs:
# updates the body dict
for k, v in kwargs['kwargs'].items():
body.update({k: v})
response = requests.post(run_order_job_url, headers=self.auth_headers, json=body, verify=False)
return response
def run_jobs_get_status(self, **kwargs):
# define the run jobs get status url
run_jobs_get_status_url = self.base_url + '/run/jobs/status'
body = dict()
if 'kwargs' in kwargs:
# update the body dict
for k, v in kwargs['kwargs'].items():
body.update({k: v})
response = requests.get(run_jobs_get_status_url, headers=self.auth_headers, params=body, verify=False)
return response
def run_job_status(self, job_id):
run_job_status_url = self.base_url + '/run/job/{job_id}/status'.format(job_id=job_id)
# make the get call
response = requests.get(run_job_status_url, headers=self.auth_headers, verify=False)
return response
def rerun_job(self, job_id):
# define the re-run jobs url
rerun_job_url = self.base_url + '/run/job/{job_id}/rerun'.format(job_id=job_id)
# make the post call
response = requests.post(rerun_job_url, headers=self.auth_headers, verify=False)
return response
```
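A minimal usage sketch of `CoreAPIs`; the credentials, base URL, file name and extra parameters are placeholders, and the calls simply chain the endpoints wrapped above:

```python
client = CoreAPIs(username="user", password="secret",
                  base_url="https://controlm.example.com/automation-api")

# Deploy a job definition file, order the configured test folder, then poll.
print(client.deploy_jobs("jobs.json").status_code)
order = client.run_order_jobs(kwargs={"hold": True}).json()
print(client.run_jobs_get_status(kwargs={"folder": "MyFolder"}).json())
```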
|
{
"source": "jgonsior/hackzurich",
"score": 2
}
|
#### File: hackzurich/hackzurich/app.py
```python
import babel
import logging
import sys
import datetime as dt
from datetime import timedelta
from flask import Flask, render_template
from flask_admin.contrib.sqla import ModelView
from hackzurich import commands, public, user, challenge, chat, company
from hackzurich.user.models import User
from hackzurich.challenge.models import (
Challenge,
Category,
User_Challenge_Association,
Company,
)
from hackzurich.chat.models import ChatRoom, ChatMessage
from hackzurich.extensions import (
bcrypt,
cache,
csrf_protect,
db,
debug_toolbar,
flask_static_digest,
login_manager,
migrate,
admin,
socketio,
)
def create_dummy_data():
User.query.delete()
admin = User(
username="JoinIN Bot",
email="<EMAIL>",
password="<PASSWORD>",
active=True,
country="Kenia",
is_admin=True,
)
db.session.add(admin)
normal_user = User(
username="<NAME>",
email="<EMAIL>",
password="<PASSWORD>",
active=True,
country="Switzerland",
)
db.session.add(normal_user)
normal_user2 = User(
username="<NAME>",
email="<EMAIL>",
password="<PASSWORD>",
active=True,
country="USA",
)
db.session.add(normal_user2)
normal_user3 = User(
username="testuser3",
email="<EMAIL>",
password="<PASSWORD>",
active=True,
country="Lebanon",
)
db.session.add(normal_user3)
category1 = Category(name="Food", parent_id=None)
id1 = db.session.add(category1)
db.session.flush()
category2 = Category(name="Energy", parent_id=category1.id)
id2 = db.session.add(category2)
category3 = Category(name="Transport", parent_id=category1.id)
id2 = db.session.add(category3)
category4 = Category(name="Health", parent_id=category1.id)
id2 = db.session.add(category4)
category5 = Category(name="Social", parent_id=category1.id)
id2 = db.session.add(category5)
db.session.flush()
company1 = Company(name="Accenture", description="Description")
db.session.add(company1)
company2 = Company(name="McKinsey", description="McKinsey description")
db.session.add(company2)
company3 = Company(name="SmartSolation", description="McKinsey description")
db.session.add(company3)
db.session.flush()
chat_room = ChatRoom.create(
name="Ein schoener Raum", room_id="The cold and amazing shower!"
)
challenge = Challenge(
challengename="The cold and amazing shower!",
description="""
Thousands of people from all over the world already shower cold. Not only will you save energy, CO<sub>2</sub> and water, but there are also many positive health effects connected with showering cold. Scientists found that cold showers not only relieve stress and prevent depression, but also help to develop a more robust immune response.
Find out more:
https://www.wimhofmethod.com/benefits-of-cold-showers
https://www.healthline.com/health/cold-shower-benefits#improved-metabolism
You will save:
0.5kg of CO<sub>2</sub> per shower (based on gas boilers)
Equiv. 3.3 km with an average car
Company supporting you:
3X Carbon offsets in addition
""",
active=True,
category_id=category1.id,
co2offset=0.005,
company_id=company1.id,
chat_room=chat_room,
)
db.session.add(challenge)
chat_message = ChatMessage.create(
user=admin, text="Welcome to the challenge!", room=chat_room
)
chat_room = ChatRoom.create(name="Ein schoener Raum", room_id="Obvious Outdoor")
challenge1 = Challenge(
challengename="Obvious Outdoor",
description="""
The world is calling. Get out and enjoy your surroundings today. You can choose between running or cycling.
Still undecided?
Learn more: https://www.livestrong.com/article/372790-cycling-vs-running-calories/
Pick:
Run 4/8/12km
Cycle 15/30/45km
Company supporting you: Accenture!
Run: 20/20/30 kg Carbon offset
Equiv. 66km/124km/200km with an average car
""",
active=True,
co2offset=0.03,
category_id=category2.id,
company_id=company1.id,
chat_room=chat_room,
)
db.session.add(challenge1)
chat_message = ChatMessage.create(
user=admin, text="Welcome to the challenge!", room=chat_room
)
chat_room = ChatRoom.create(
name="Ein schoener Raum", room_id="Just breathe and let us care about the rest!"
)
challenge2 = Challenge(
challengename="Just breathe and let us care about the rest!",
description="""
It sounds easy, but yet it can have a great impact on your life. Today, try to find three or more moments to stop and focus on your breath for two minutes.
Why does it matter to us? We want to give something back to society and support you in relieving stress and balancing your mental health. We are sure it will empower you to take better care of our planet too.
Find out more: link to instruction
Challenge:
3*2 min breathing!
Company supporting you: McKinsey
20 kg of Carbon offset
Equiv. 66km with an average car
""",
active=True,
co2offset=0.02,
category_id=category3.id,
company_id=company2.id,
chat_room=chat_room,
)
db.session.add(challenge2)
chat_message = ChatMessage.create(
user=admin, text="Welcome to the challenge!", room=chat_room
)
chat_room = ChatRoom.create(
name="Ein schoener Raum", room_id="Lower your thermostat by 1° C"
)
challenge3 = Challenge(
challengename="Lower your thermostat by 1° C",
description="""
Average Swiss household (44m^2 per person): 0.4 kg of CO<sub>2</sub> per heating day
Company supporting you: SmartSolation
10 kg of Carbon offset
""",
active=True,
co2offset=0.1,
category_id=category4.id,
company_id=company3.id,
chat_room=chat_room,
)
db.session.add(challenge3)
chat_message = ChatMessage.create(
user=admin, text="Welcome to the challenge!", room=chat_room
)
chat_room = ChatRoom.create(
name="Ein schoener Raum", room_id="Love your clothesline!"
)
challenge4 = Challenge(
challengename="Love your clothesline!",
description="""
Wash a load of laundry washed and dry it on a clothesline.
Find out more: https://www.theguardian.com/environment/ethicallivingblog/2008/may/02/treadlightlyswitchofftumbl
CO<sub>2</sub> savings:
1.8 kg of CO<sub>2</sub>
""",
active=True,
co2offset=0.018,
category_id=category5.id,
company_id=company3.id,
chat_room=chat_room,
)
db.session.add(challenge4)
chat_message = ChatMessage.create(
user=admin, text="Welcome to the challenge!", room=chat_room
)
chat_room = ChatRoom.create(
name="Ein schoener Raum", room_id="Food For Thought!"
)
challenge5 = Challenge(
challengename="Food For Thought!",
description="""
Thanks to the magic of data we have found a large number of sustainable recipes.
These were picked thanks their low CO<sub>2</sub> emissions per serving.
Today we suggest you cook:<br><br>
""",
active=True,
co2offset=0.018,
category_id=category1.id,
company_id=company3.id,
chat_room=chat_room,
)
db.session.add(challenge5)
chat_message = ChatMessage.create(
user=admin, text="Welcome to the challenge!", room=chat_room
)
db.session.flush()
user_challenge_association11 = User_Challenge_Association(
normal_user.id,
challenge1.id,
succeeded=True,
done_at=dt.datetime.now() - timedelta(days=13),
commited_to_at=dt.datetime.now() - timedelta(days=13, hours=1),
)
db.session.add(user_challenge_association11)
user_challenge_association12 = User_Challenge_Association(
normal_user.id,
challenge1.id,
succeeded=True,
done_at=dt.datetime.now() - timedelta(days=13),
commited_to_at=dt.datetime.now() - timedelta(days=13, hours=1),
)
db.session.add(user_challenge_association12)
user_challenge_association12 = User_Challenge_Association(
normal_user.id,
challenge1.id,
succeeded=True,
done_at=dt.datetime.now() - timedelta(days=12),
commited_to_at=dt.datetime.now() - timedelta(days=12, hours=1),
)
db.session.add(user_challenge_association12)
for i in range(1, 8):
user_challenge_association12 = User_Challenge_Association(
normal_user.id,
challenge1.id,
succeeded=True,
done_at=dt.datetime.now() - timedelta(days=i),
commited_to_at=dt.datetime.now() - timedelta(days=i, hours=1),
)
db.session.add(user_challenge_association12)
def create_app(config_object="hackzurich.settings"):
"""Create application factory, as explained here: http://flask.pocoo.org/docs/patterns/appfactories/.
:param config_object: The configuration object to use.
"""
app = Flask(__name__.split(".")[0])
app.config.from_object(config_object)
register_admin(app)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_shellcontext(app)
register_commands(app)
configure_logger(app)
with app.app_context():
if not User.query.count():
app.logger.info("Creating dummy db data")
create_dummy_data()
db.session.commit()
@app.template_filter("datetime")
def format_datetime(value, format="medium"):
if format == "full":
format = "EEEE, d. MMMM y 'at' HH:mm"
elif format == "medium":
format = "EE dd.MM.y HH:mm"
return babel.dates.format_datetime(value, format)
return app
def register_extensions(app):
"""Register Flask extensions."""
bcrypt.init_app(app)
cache.init_app(app)
db.init_app(app)
csrf_protect.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
flask_static_digest.init_app(app)
admin.init_app(app)
socketio.init_app(app)
return None
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(user.views.blueprint)
app.register_blueprint(challenge.views.blueprint)
app.register_blueprint(company.views.blueprint)
return None
def register_errorhandlers(app):
"""Register error handlers."""
def render_error(error):
"""Render error template."""
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, "code", 500)
return render_template(f"{error_code}.html"), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {"db": db, "User": user.models.User}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.test)
app.cli.add_command(commands.lint)
def register_admin(app):
"""Register admin interface."""
from hackzurich.user.models import User
from hackzurich.challenge.models import (
Challenge,
Category,
Company,
User_Challenge_Association,
)
from hackzurich.chat.models import ChatMessage, ChatRoom
admin.add_view(ModelView(User, db.session))
admin.add_view(ModelView(Challenge, db.session))
admin.add_view(ModelView(Category, db.session))
admin.add_view(ModelView(ChatRoom, db.session))
admin.add_view(ModelView(ChatMessage, db.session))
admin.add_view(ModelView(Company, db.session))
admin.add_view(ModelView(User_Challenge_Association, db.session))
app.config["FLASK_ADMIN_SWATCH"] = "cerulean"
def configure_logger(app):
"""Configure loggers."""
handler = logging.StreamHandler(sys.stdout)
if not app.logger.handlers:
app.logger.addHandler(handler)
```
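A hedged sketch of driving the factory above in development; the host and port values are arbitrary, and the SocketIO runner is used because the app registers a Flask-SocketIO instance:

```python
from hackzurich.app import create_app
from hackzurich.extensions import socketio

app = create_app("hackzurich.settings")
# Use the SocketIO runner so the websocket chat routes work in development too.
socketio.run(app, host="127.0.0.1", port=5000, debug=True)
```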
#### File: hackzurich/challenge/forms.py
```python
from flask_wtf import FlaskForm
from wtforms import PasswordField, StringField, BooleanField, TextAreaField
from wtforms.validators import DataRequired, Email, EqualTo, Length
from .models import Challenge
class ChallengeForm(FlaskForm):
challengename = StringField(
"Challenge Name", validators=[DataRequired(), Length(min=3, max=25)]
)
description = TextAreaField(
"Description", validators=[DataRequired(), Length(min=6)]
)
active = BooleanField("Active?")
def __init__(self, *args, **kwargs):
"""Create instance."""
super(ChallengeForm, self).__init__(*args, **kwargs)
self.challenge = None
def validate(self):
"""Validate the form."""
initial_validation = super(ChallengeForm, self).validate()
if not initial_validation:
return False
challenge = Challenge.query.filter_by(
challengename=self.challengename.data
).first()
if challenge:
self.challengename.errors.append("Challengename already registered")
return False
return True
```
#### File: hackzurich/user/views.py
```python
from flask import Blueprint, render_template
from flask_login import login_required, current_user
from hackzurich.challenge.models import Challenge, User_Challenge_Association
import csv
blueprint = Blueprint(
"user_blueprint", __name__, url_prefix="/users", static_folder="../static"
)
@blueprint.route("/")
@login_required
def members():
"""List members."""
active_challenges = Challenge.query.filter_by(active=True).all()
done_user_challenges = User_Challenge_Association.query.filter_by(
user_id=current_user.id, succeeded=True
).all()
done_challenges = []
total_saved_co2 = 0
for done_user_challenge in done_user_challenges:
done_user_challenge.challenge = Challenge.query.filter_by(
id=done_user_challenge.challenge_id
).first()
done_challenges.append(done_user_challenge)
total_saved_co2 += done_user_challenge.challenge.co2offset
for active_challenge in active_challenges:
user_challenge_association = (
User_Challenge_Association.query.filter_by(
user_id=current_user.id, challenge_id=active_challenge.id
)
.order_by(User_Challenge_Association.commited_to_at.desc())
.first()
)
active_challenge.user_challenge_association = user_challenge_association
active_challenge.total_participants = (
User_Challenge_Association.query.distinct(
User_Challenge_Association.user_id
)
.filter_by(challenge_id=active_challenge.id)
.count()
)
with open("co2data/co2clean.csv") as csvfile:
reader = csv.reader(csvfile)
country_co2_csv = {rows[0]: rows[1] for rows in reader}
country_total_co2 = float(country_co2_csv[current_user.country])
return render_template(
"users/members.html",
active_challenges=active_challenges,
done_challenges=done_challenges,
total_saved_co2=total_saved_co2,
country_total_co2=country_total_co2,
)
```
|
{
"source": "jgonsior/reddit-web-crawler",
"score": 3
}
|
#### File: jgonsior/reddit-web-crawler/likeUpdater.py
```python
from selenium import webdriver
from selenium.webdriver.support import ui
from selenium.webdriver.common.keys import Keys
def page_is_loaded(driver):
return driver.find_element_by_tag_name("body") != None
driver = webdriver.Firefox()
driver.get("https://www.facebook.com/")
wait = ui.WebDriverWait(driver, 10)
wait.until(page_is_loaded)
email_field = driver.find_element_by_id("email")
email_field.send_keys("<EMAIL>")
password_field = driver.find_element_by_id("pass")
password_field.send_keys("password")
password_field.send_keys(Keys.RETURN)
```
#### File: jgonsior/reddit-web-crawler/Post.py
```python
__author__ = 'julius'
class Post:
""" Facebook Post """
def __init__(self):
self.id = None
self.fb_id = None
self.content = None
self.author = None
self.nLikes = None
self.nComments = 0
self.timeOfPublication = None
self.original_features = None
self.features = None
self.representativeFor = 0
self.daysSinceBegin = None
self.distances = {}
```
#### File: jgonsior/reddit-web-crawler/User.py
```python
__author__ = 'julius'
class User:
""" Facebook User """
def __init__(self):
self.id = None
self.fb_id = None
self.name = None
self.url = None
self.info = None
self.friends = None
```
|
{
"source": "jgonsior/reef",
"score": 3
}
|
#### File: reef/program_synthesis/heuristic_generator.py
```python
from pprint import pprint
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, f1_score
from program_synthesis.functions import get_labels_cutoff, marginals_to_labels
from program_synthesis.synthesizer import Synthesizer
from program_synthesis.verifier import Verifier
class HeuristicGenerator(object):
"""
A class to go through the synthesizer-verifier loop
"""
def __init__(self,
X_train,
X_val,
Y_val,
Y_train=None,
n_classes=2,
n_jobs=4,
b=0.5):
"""
Initialize HeuristicGenerator object
b: class prior of most likely class (TODO: use somewhere)
beta: threshold to decide whether to abstain or label for heuristics
gamma: threshold to decide whether to call a point vague or not
"""
self.X_train = X_train
self.X_val = X_val
self.Y_val = Y_val
self.Y_train = Y_train
self.b = b
self.vf = None
self.syn = None
self.hf = []
self.n_classes = n_classes
self.n_jobs = n_jobs
self.feat_combos = []
def apply_heuristics(self,
heuristics,
primitive_matrix,
feat_combos,
beta_opt,
debug=False):
"""
Apply given heuristics to given feature matrix X and abstain by beta
heuristics: list of pre-trained logistic regression models
feat_combos: primitive indices to apply heuristics to
beta: best beta value for associated heuristics
"""
L = np.zeros((np.shape(primitive_matrix)[0], len(heuristics)))
for i, hf in enumerate(heuristics):
# if debug:
# print(i, ": \t",
# primitive_matrix.iloc[:, list(feat_combos[i])])
L[:, i] = marginals_to_labels(
hf,
primitive_matrix.iloc[:, list(feat_combos[i])],
beta_opt[i],
self.n_classes,
debug=debug)
return L
def prune_heuristics(self, heuristics, feat_combos, keep=1):
"""
Selects the best heuristic based on Jaccard Distance and Reliability Metric
keep: number of heuristics to keep from all generated heuristics
"""
# -> check against the example below whether this calculates the right thing
# -> the trick is that sometimes the sum of abs(-1, 1) is taken
def calculate_jaccard_distance(num_labeled_total, num_labeled_L):
scores = np.zeros(np.shape(num_labeled_L)[1])
for i in range(np.shape(num_labeled_L)[1]):
scores[i] = np.sum(
np.minimum(
num_labeled_L[:, i], num_labeled_total)) / np.sum(
np.maximum(num_labeled_L[:, i], num_labeled_total))
return 1 - scores
L_vals = np.array([])
L_trains = np.array([])
beta_opts = np.array([])
max_cardinality = len(heuristics)
for i in range(max_cardinality):
#Note that the LFs are being applied to the entire val set though they were developed on a subset...
beta_opt_temp = self.syn.find_optimal_beta(heuristics[i],
self.X_val,
feat_combos[i],
self.Y_val)
# print(i, "beta ", beta_opt_temp)
L_val_temp = self.apply_heuristics(heuristics[i],
self.X_val,
feat_combos[i],
beta_opt_temp,
debug=False)
L_train_temp = self.apply_heuristics(heuristics[i],
self.X_train,
feat_combos[i],
beta_opt_temp,
debug=False)
# print(beta_opt_temp)
beta_opts = np.append(beta_opts, beta_opt_temp)
if i == 0:
L_vals = np.append(
L_vals, L_val_temp) #converts to 1D array automatically
L_vals = np.reshape(L_vals, np.shape(L_val_temp))
L_trains = np.append(
L_trains,
L_train_temp) #converts to 1D array automatically
L_trains = np.reshape(L_trains, np.shape(L_train_temp))
else:
pprint("UIUIUIU" * 10000)
L_vals = np.concatenate((L_vals, L_val_temp), axis=1)
L_trains = np.concatenate((L_trains, L_train_temp), axis=1)
# print("L_val", L_vals)
#Use F1 trade-off for reliability
acc_cov_scores = [
f1_score(
self.Y_val,
L_vals[:, i],
average='micro',
) for i in range(np.shape(L_vals)[1])
]
acc_cov_scores = np.nan_to_num(acc_cov_scores)
# -> vc calculation is not right -> then gradually enlarge the imdb_small dataset until the results no longer agree with each other
# print("\n" * 5)
# for i in range(np.shape(L_vals)[1]):
# print(i, L_vals[:, i])
# print("acc_cov_scores", np.sort(acc_cov_scores))
# print("\n" * 5)
if self.vf != None:
#Calculate Jaccard score for diversity
# @todo is this correct?!
# better to use the formulas from below for accuracy and coverage?!
# It looks as if the accuracies stay the same, whereas the coverages keep getting larger
train_num_labeled = np.sum(self.vf.L_train >= 0, axis=1)
jaccard_scores = calculate_jaccard_distance(
train_num_labeled, np.abs(L_trains))
else:
jaccard_scores = np.ones(np.shape(acc_cov_scores))
# print("accs", acc_cov_scores)
# print("jaccs", jaccard_scores)
#Weighting the two scores to find best heuristic
combined_scores = 0.5 * acc_cov_scores + 0.5 * jaccard_scores
sort_idx = np.argsort(combined_scores)[::-1][0:keep]
return sort_idx
def run_synthesizer(self, max_cardinality=1, idx=None, keep=1, model='lr'):
"""
Generates Synthesizer object and saves all generated heuristics
max_cardinality: max number of features candidate programs take as input
idx: indices of validation set to fit programs over
keep: number of heuristics to pass to verifier
model: train logistic regression ('lr') or decision tree ('dt')
"""
if idx == None:
# first run, use the whole dataset
X_val = self.X_val
Y_val = self.Y_val
else:
# only use the points from the validation dataset for finding heuristics which had low confidence before!
X_val = self.X_val.iloc[idx, :]
Y_val = np.array(self.Y_val)[idx]
#Generate all possible heuristics
self.syn = Synthesizer(X_val,
Y_val,
n_classes=self.n_classes,
b=self.b,
n_jobs=self.n_jobs)
#Un-flatten indices
def index(a, inp):
i = 0
remainder = 0
while inp >= 0:
remainder = inp
inp -= len(a[i])
i += 1
try:
return a[i - 1][
remainder] #TODO: CHECK THIS REMAINDER THING WTF IS HAPPENING
except:
import pdb
pdb.set_trace()
#Select keep best heuristics from generated heuristics
hf, feat_combos = self.syn.generate_heuristics(model, max_cardinality)
sort_idx = self.prune_heuristics(hf, feat_combos, keep)
for i in sort_idx:
self.hf.append(index(hf, i))
self.feat_combos.append(index(feat_combos, i))
#create appended L matrices for validation and train set
beta_opt = self.syn.find_optimal_beta(self.hf, self.X_val,
self.feat_combos, self.Y_val)
self.L_val = self.apply_heuristics(self.hf,
self.X_val,
self.feat_combos,
beta_opt,
debug=False)
self.L_train = self.apply_heuristics(self.hf, self.X_train,
self.feat_combos, beta_opt)
def run_verifier(self):
"""
Generates Verifier object and saves marginals
"""
self.vf = Verifier(self.L_train,
self.L_val,
self.Y_val,
self.n_classes,
has_snorkel=False)
self.vf.train_gen_model()
self.vf.assign_marginals()
def gamma_optimizer(self, marginals):
"""
Returns the best gamma parameter for abstain threshold given marginals
marginals: confidences for data from a single heuristic
"""
m = len(self.hf)
gamma = 0.5 - (1 / (m**(3 / 2.)))
return gamma
def find_feedback(self):
"""
Finds vague points according to gamma parameter
self.gamma: confidence past 0.5 that relates to a vague or incorrect point
"""
#TODO: flag for re-classifying incorrect points
#incorrect_idx = self.vf.find_incorrect_points(b=self.b)
gamma_opt = self.gamma_optimizer(self.vf.val_marginals)
#gamma_opt = self.gamma
vague_idx = self.vf.find_vague_points(b=self.b, gamma=gamma_opt)
# incorrect_idx = vague_idx
# @todo: no concatenation but union!
# self.feedback_idx = list(set(list(np.concatenate(
# (vague_idx))))) #, incorrect_idx)))))
self.feedback_idx = list(vague_idx)
def calculate_accuracy(self, marginals, b, Y_true):
# are the marginals computed without b here? i.e. are only those points counted as abstain that are exactly b?
# how does this look in the feedback determination?
Y_pred = np.argmax(marginals, axis=1)
# abstain for labels where the prediction isn't clear
# print("marginals", marginals)
indices_with_abstain = np.where(np.amax(marginals, axis=1) == b)
# print("indic_w_abst", list(indices_with_abstain))
for i in indices_with_abstain[0]:
# print(i)
# if len(i) == 0:
# continue
i = int(i)
Y_pred[i] = Y_true[i]
return accuracy_score(Y_true, Y_pred)
def calculate_coverage(self, marginals, b, Y_true):
# print("marg", marginals)
# print("b", b)
highest_probabilities = np.amax(marginals, axis=1)
# print("high", highest_probabilities)
total_labels = np.shape(highest_probabilities)[0]
amount_of_labels_not_abstain = total_labels - (
highest_probabilities == b).sum()
# print("amount_of_labels_not_abstain", amount_of_labels_not_abstain)
# print("total_labels", total_labels)
# print("\n")
return amount_of_labels_not_abstain / total_labels
def evaluate(self):
"""
Calculate the accuracy and coverage for train and validation sets
"""
# why? :crying_emoji:
self.val_marginals = self.vf.val_marginals
self.train_marginals = self.vf.train_marginals
self.val_accuracy = self.calculate_accuracy(self.val_marginals, self.b,
self.Y_val)
self.train_accuracy = self.calculate_accuracy(self.train_marginals,
self.b, self.Y_train)
self.val_coverage = self.calculate_coverage(self.val_marginals, self.b,
self.Y_val)
# exit(-1)
self.train_coverage = self.calculate_coverage(self.train_marginals,
self.b, self.Y_train)
return self.val_accuracy, self.train_accuracy, self.val_coverage, self.train_coverage
def heuristic_stats(self):
'''For each heuristic, we want the following:
- idx of the features it relies on
- if dt, then the thresholds?
'''
stats_table = np.zeros((len(self.hf), 6))
for i in range(len(self.hf)):
stats_table[i, 0] = int(self.feat_combos[i][0])
try:
stats_table[i, 1] = int(self.feat_combos[i][1])
except:
stats_table[i, 1] = -1.
stats_table[i, 2] = self.calculate_accuracy(
self.L_val[:, i], self.b, self.Y_val)
stats_table[i, 3] = self.calculate_accuracy(
self.L_train[:, i], self.b, self.Y_train)
stats_table[i, 4] = self.calculate_coverage(
self.L_val[:, i], self.b, self.Y_val)
stats_table[i, 5] = self.calculate_coverage(
self.L_train[:, i], self.b, self.Y_train)
#Make table
column_headers = [
'Feat 1', 'Feat 2', 'Val Acc', 'Train Acc', 'Val Cov', 'Train Cov'
]
pandas_stats_table = pd.DataFrame(stats_table, columns=column_headers)
return pandas_stats_table
```
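A sketch of the synthesizer-verifier loop this class is meant to drive; the random toy data, the number of iterations and the hyperparameters are made up for illustration:

```python
import numpy as np
import pandas as pd

# Fabricated primitive matrices and binary labels.
rng = np.random.RandomState(0)
X_train = pd.DataFrame(rng.rand(100, 5))
X_val = pd.DataFrame(rng.rand(40, 5))
Y_train = rng.randint(0, 2, size=100)
Y_val = rng.randint(0, 2, size=40)

hg = HeuristicGenerator(X_train, X_val, Y_val, Y_train, n_classes=2, b=0.5)
feedback_idx = None
for it in range(3):
    hg.run_synthesizer(max_cardinality=1, idx=feedback_idx, keep=3, model='dt')
    hg.run_verifier()
    print(hg.evaluate())         # val/train accuracy and coverage
    hg.find_feedback()           # low-confidence points to refit on
    feedback_idx = hg.feedback_idx
print(hg.heuristic_stats())
```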
#### File: reef/program_synthesis/multi_label_aggregator.py
```python
from pprint import pprint
from program_synthesis.functions import count_abstains
import numpy as np
from scipy import sparse
from .label_aggregator import LabelAggregator, odds_to_prob
"""
Problem: after the one-vs-all approach the marginals are far too similar for both classes, compared to the unambiguous results before
"""
class MultiLabelAggregator(object):
"""LabelAggregator Object that learns the accuracies for the heuristics.
Copied from Snorkel v0.4 NaiveBayes Model with minor changes for simplicity"""
def __init__(self, n_classes):
self.w = [None for c in range(n_classes)]
self.n_classes = n_classes
# gets as input L_train
def train(self, X, n_iter=1000, w0=None, rate=0.01, alpha=0.5, mu=1e-6, \
sample=False, n_samples=100, evidence=None, warm_starts=False, tol=1e-6, verbose=False):
# print("X", X)
# print("count abstains", count_abstains(X))
# exit(-1)
# create one vs all matrix
for i in range(self.n_classes):
one_vs_all_X = self._one_vs_all(
X, i) # <- does this make sense for multilabel?!
one_vs_all_label_aggregator = LabelAggregator()
one_vs_all_label_aggregator.train(one_vs_all_X,
rate=1e-3,
mu=1e-6,
verbose=False)
self.w[i] = one_vs_all_label_aggregator.w
def _one_vs_all(self, X, label):
# input: -1 abstain, 0,1,2,... labels
# output: -1 other labels, 0 abstain, 1 this label
X_new = np.full(X.shape, -1)
X_new[X == -1] = 0
X_new[X == label] = 1
return X_new
def marginals(self, X):
# x is L_val -> i.e. -1 abstain, 0 label A, 1 label B, 2 label C etc.
marginals = [None] * self.n_classes
# print("w", self.w)
for i, w in enumerate(self.w):
# before computing X.dot(w), X first has to be transformed back
X_new = sparse.csr_matrix(self._one_vs_all(X, i))
marginals[i] = odds_to_prob(X_new.dot(w))
# -> they don't add up to 1! is it because of the interference of abstain?
marginals = np.transpose(marginals)
return np.array(marginals)
```
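A small illustration (fabricated label matrix) of the re-encoding done by `_one_vs_all` above: in the input, `-1` means abstain and `0..n_classes-1` are labels, while in the output `1` marks the chosen class, `-1` the other classes, and `0` abstain:

```python
import numpy as np

L = np.array([[ 0,  1, -1],
              [ 2, -1,  0]])
agg = MultiLabelAggregator(n_classes=3)
print(agg._one_vs_all(L, 0))
# [[ 1 -1  0]
#  [-1  0  1]]
```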
#### File: reef/program_synthesis/verifier.py
```python
from pprint import pprint
import numpy as np
from scipy import sparse
from .label_aggregator import LabelAggregator
from .multi_label_aggregator import MultiLabelAggregator
def odds_to_prob(l):
"""
This is the inverse logit function logit^{-1}:
l = \log\frac{p}{1-p}
\exp(l) = \frac{p}{1-p}
p = \frac{\exp(l)}{1 + \exp(l)}
"""
return np.exp(l) / (1.0 + np.exp(l))
class Verifier(object):
"""
A class for the Snorkel Model Verifier
"""
def __init__(self, L_train, L_val, Y_val, n_classes, has_snorkel=True):
self.L_train = L_train.astype(int)
self.L_val = L_val.astype(int)
self.Y_val = Y_val
self.n_classes = n_classes
def train_gen_model(self, deps=False, grid_search=False):
"""
Calls appropriate generative model
"""
# print(self.L_train)
gen_model = MultiLabelAggregator(self.n_classes)
gen_model.train(self.L_train, rate=1e-3, mu=1e-6, verbose=True)
# print("L_train", self.L_train)
# marginals = gen_model.marginals(self.L_train)
# for x, marginal in zip(self.L_train, marginals):
# print(x, "\t -> \t", np.argmax(marginal), "\t", marginal)
self.gen_model = gen_model
def assign_marginals(self):
"""
Assigns probabilistic labels for train and val sets
"""
# generate all pairs of possible labels and see what the result is
# import itertools
# for test_x in itertools.product([-1, 0, 1], repeat=3):
# print(test_x, "\t -> \t",
# self.gen_model.marginals(np.array(test_x)))
# --> test_x now of course also has to be presented in -1, +1 encoding!
# print("L_val", self.L_val)
self.val_marginals = self.gen_model.marginals(self.L_val)
self.train_marginals = self.gen_model.marginals(self.L_train)
# print("L_val", self.L_val)
# print("vf marg", self.val_marginals)
# exit(-3)
# for marginal in self.val_marginals:
# print(marginal)
#print 'Learned Accuracies: ', odds_to_prob(self.gen_model.w)
def find_vague_points(self, gamma=0.1, b=0.5):
"""
Find val set indices where marginals are within thresh of b
# returns all validation-set indices whose highest marginal is within gamma of b
"""
# print("gamma:", gamma)
# print("b:", b)
# print("val_marginals", self.val_marginals)
result = []
for i, marginal in enumerate(self.val_marginals):
max_prob = np.amax(marginal)
if max_prob - b <= gamma:
result.append(i)
# print("val_idx", val_idx)
# exit(-1)
return result
def find_incorrect_points(self, b=0.5):
print("find_incorrect_points klappt ni")
""" Find val set indices where marginals are incorrect """
# <- @todo: is L_val computed correctly?!
L_val = 2 * (self.val_marginals > b) - 1
val_idx = np.where(L_val != self.Y_val)
return val_idx[0]
```
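A tiny numeric check (fabricated marginals) of the abstain rule implemented in `find_vague_points`: a validation point counts as vague when its highest marginal is within `gamma` of the prior `b`:

```python
import numpy as np

val_marginals = np.array([[0.52, 0.48],   # vague for b=0.5, gamma=0.1
                          [0.95, 0.05]])  # confident
b, gamma = 0.5, 0.1
vague = [i for i, m in enumerate(val_marginals) if np.amax(m) - b <= gamma]
print(vague)  # [0]
```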
|
{
"source": "jgontrum/aiotus",
"score": 2
}
|
#### File: docs/source/conf.py
```python
import os
import re
import sys
aiotus_path = os.path.join(os.path.dirname(__file__), '../..')
aiotus_path = os.path.abspath(aiotus_path)
sys.path.insert(0, aiotus_path)
def get_version():
"""Return package version from setup.py (hacky)."""
try:
filename = os.path.join(os.path.dirname(__file__), '../..', 'setup.py')
with open(filename, 'r') as fd:
setup_py = fd.read()
m = re.search(r'version="(\d+\.\d+\.\d+)"', setup_py)
return m.group(1)
except:
sys.exit('Unable to get package version from setup.py.')
project = 'aiotus'
copyright = '2020, <NAME>'
author = '<NAME>'
release = get_version()
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx_autodoc_typehints',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'aiohttp': ('https://aiohttp.readthedocs.io/en/stable/', None),
'yarl': ('https://yarl.readthedocs.io/en/stable/', None),
}
exclude_patterns = []
html_static_path = ['_static']
html_theme = 'alabaster'
html_theme_options = {
'description': 'Asynchronous client side implementation of the tus protocol for Python.',
}
templates_path = ['_templates']
```
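A quick check of the version regex used in `get_version` above, run against a made-up `setup.py` snippet:

```python
import re

fake_setup_py = 'setup(\n    name="aiotus",\n    version="1.2.3",\n)'
m = re.search(r'version="(\d+\.\d+\.\d+)"', fake_setup_py)
print(m.group(1))  # 1.2.3
```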
|
{
"source": "jgontrum/am-parser",
"score": 2
}
|
#### File: am-parser/analyzers/compare_amconll_qtgui.py
```python
import sys
import os
import shutil # for copying files (svg file: save file dialog)
import argparse
from tempfile import TemporaryDirectory
# GUI
from PyQt5 import QtSvg, QtCore, QtGui
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QGridLayout, QVBoxLayout
from PyQt5.QtWidgets import QMainWindow, QDialog, QFileDialog
from PyQt5.QtWidgets import QWidget, QLabel, QPushButton, QListWidget, QListWidgetItem
from PyQt5.QtWidgets import QAbstractItemView
from PyQt5.QtWidgets import QSizePolicy
from compare_amconll import get_key_amsentpairs, get_list_of_keys
class DialogSvgMaximized(QDialog):
"""
Dialog window class for enlarged SVGs
Containing just this SVG (starts as maximized) and a 'Close' button
>> app = QApplication([])
>> dlg = DialogSvgMaximized("filename.svg")
>> dlg.show()
>> sys.exit(app.exec_())
"""
def __init__(self, svgfilename: str, parent=None):
"""
Initialize maximized dialog window displaying SVG (and a 'Close' button)
:param svgfilename: File to be displayed inside the dialog window
"""
super().__init__(parent)
self.setWindowTitle("Enlarged SVG image")
# Set the central widget and the general layout
self.dlgLayout = QVBoxLayout()
self.setLayout(self.dlgLayout)
# todo [input validation] (svg file exists)
assert(os.path.isfile(svgfilename))
# todo [enhancement] maybe add buttons to zoom in/out???
self.wdg_svg = QtSvg.QSvgWidget()
self.wdg_svg.load(svgfilename)
self.dlgLayout.addWidget(self.wdg_svg)
# button to cancel
self.btn_cnl = QPushButton("Close")
self.btn_cnl.clicked.connect(self.close)
self.dlgLayout.addWidget(self.btn_cnl)
self.showMaximized() # full screen
return
class PyCompareUi(QMainWindow):
"""PyCompare's View (GUI)."""
def __init__(self, direc: TemporaryDirectory, useid: bool, amf1gf: dict,
sent_keys: list):
"""
Initializes GUI
:param direc: TemporaryDirectory: where svg files and such are saved...
:param useid: id or sentence as key? (-> displayed in sentence label)
:param amf1gf: dict(key: str -> (file1: AMSentence, goldf: AMSentence))
:param sent_keys: keys for amf1gf (sorted)
"""
super().__init__()
# Set some main window's properties
self.setWindowTitle("GUI PyQt5 Compare AMCoNLL files")
# Set the central widget and the general layout
self.generalLayout = QGridLayout()
self._centralWidget = QWidget(self)
self.setCentralWidget(self._centralWidget)
self._centralWidget.setLayout(self.generalLayout)
# todo [input validation] keys match amf1gf
if len(sent_keys) == 0:
raise RuntimeError
self.useid = useid
self.direc = direc # .name
self.amf1gf = amf1gf # key -> (f1: Amconllsent,gf: Amconllsent)
self.sent_keys = sent_keys
self.total = len(sent_keys)
# create and display some widgets
self._create()
self.current_idx = 0
self._update()
self.showMaximized() # full screen
return
def get_current_key(self):
return self.sent_keys[self.current_idx]
def get_svgs(self):
"""
Given the current key (and hence sentence) call to_tex_svg
-> Need pdflatex, inkscape and dot installed (plus command line
cat and tr commands), assumes valid AMSentence and
let's hope that there is no special character escaping stuff missing
:raises RuntimeError if svg files couldn't be produced
:return: pair of filepath to file1 and goldfile svgs
"""
key = self.get_current_key()
sent_f1, sent_gf = self.amf1gf[key]
sent_f1.to_tex_svg(self.direc, prefix="f1_") # am_sents_f1[key]
sent_gf.to_tex_svg(self.direc, prefix="gf_") # am_sents_gf[key]
# this relies on to_tex_svg creating the necessary files
fname_svg1 = os.path.join(self.direc, "f1_sentence2.svg")
fname_svg2 = os.path.join(self.direc, "gf_sentence2.svg")
if not os.path.isfile(fname_svg1) or not os.path.isfile(fname_svg2):
# print(";; Warning: no svg output found - check error messages")
# maybe pdflatex or dot had a problem (special characters?)
raise RuntimeError("Couldn't find SVG files for sentence!")
return fname_svg1, fname_svg2
def get_sentence(self, key: str) -> str:
"""
Get string representation of sentence (eventually + id)
Uses gold file string
:param key: str; key for sentence dictionary (either id or sentence)
:return: sentence string. if self.useid, prefixed with id
"""
_, sent_gf = self.amf1gf[key]
sentence = key
if self.useid:
sentence += " " + \
' '.join(sent_gf.get_tokens(shadow_art_root=False))
return sentence
def _update(self):
"""
Call this function when a new sentence should be displayed
Assuming the current_idx was already changed,
updates the displayed information to reflect new sentence:
- Changes sentence number
- Changes sentence displayed
- Changes the two svg images
- Disables previous/next buttons if needed (new sentence is last/first)
:return: None
"""
# scroll list: select (highlight) new current sentence &
# scroll such that it is the top one displayed
self.current_item.setSelected(True)
self.sents_scroll.scrollToItem(self.current_item,
QAbstractItemView.PositionAtTop)
# update displayed number
self.lbl_no.setText(f"{self.current_idx+1} / {self.total}")
# update displayed sentence
sentence = self.get_sentence(key=self.get_current_key())
self.lbl_sent.setText(sentence)
# update images
f1svg, gfsvg = self.get_svgs()
self.svg1_filen = f1svg # for enlarge dialog
self.svg2_filen = gfsvg # for enlarge dialog
self.wdg_svg1.load(f1svg)
self.wdg_svg2.load(gfsvg)
# check if buttons need to be disabled (first/last sentence)
self._eventually_disable_buttons()
return
def _eventually_disable_buttons(self):
"""Disables buttons if needed (for last and first sentence)"""
isfirst = (self.current_idx == 0)
self.btn_prev.setDisabled(isfirst)
islast = (self.current_idx == len(self.sent_keys) - 1)
self.btn_next.setDisabled(islast)
return
def _next_sent(self):
"""What needs to happen when the next sentence button is clicked"""
assert(0 <= self.current_idx < len(self.sent_keys))
self.current_item.setSelected(False)
self.current_idx += 1
self.current_item = self.sents_scroll.item(self.current_idx)
self._update()
return
def _prev_sent(self):
"""What needs to happen when the previous sentence button is clicked"""
assert(0 <= self.current_idx < len(self.sent_keys))
self.current_item.setSelected(False)
self.current_idx -= 1
self.current_item = self.sents_scroll.item(self.current_idx)
self._update()
return
def _on_item_changed(self, curr, prev):
"""clicked on a different item in the sentence list (just like prev/next button"""
prev.setSelected(False)
self.current_idx = self.sents_scroll.indexFromItem(curr).row()
self.current_item = curr
self._update()
return
def _save_svg(self, filename: str):
newname, suffix = QFileDialog.getSaveFileName(self, filter=".svg",
caption="Save SVG file")
if newname != '':
shutil.copy2(src=filename, dst=newname+suffix)
return
def _save_svg1(self):
self._save_svg(filename=self.svg1_filen)
def _save_svg2(self):
self._save_svg(filename=self.svg2_filen)
def _enlarge_svg(self, filename: str):
self.dlg = DialogSvgMaximized(filename)
# block main window: need to close dialog in order to use main window
self.dlg.setWindowModality(QtCore.Qt.ApplicationModal)
self.dlg.show()
return
def _enlarge_svg1(self):
self._enlarge_svg(filename=self.svg1_filen)
def _enlarge_svg2(self):
self._enlarge_svg(filename=self.svg2_filen)
def _create(self):
"""Create GUI: initialize all necessary widgets and arrange them"""
btn_size = 30
self.lbl_no = QLabel(text="<No>", parent=self._centralWidget)
self.lbl_no.setToolTip("Sentence no. X / Y total sentences")
# Sentence
self.lbl_sent = QLabel(text="<Sentence>", parent=self._centralWidget)
self.lbl_sent.setToolTip("Current sentence")
self.lbl_sent.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
# buttons: 'previous' and 'next' button
self.btn_prev = QPushButton(text="Prev", parent=self._centralWidget)
self.btn_next = QPushButton(text="Next", parent=self._centralWidget)
self.btn_prev.setToolTip("Change to previous sentence")
self.btn_next.setToolTip("Change to next sentence")
self.btn_prev.setFixedSize(btn_size*2, btn_size)
self.btn_next.setFixedSize(btn_size * 2, btn_size)
self.btn_prev.clicked.connect(self._prev_sent)
self.btn_next.clicked.connect(self._next_sent)
# SVGs (the AM dependency trees)
# https://doc.qt.io/qt-5/qtsvg-index.html
# https://stackoverflow.com/questions/44776474/display-svg-image-in-qtwebview-with-the-right-size
self.wdg_svg1 = QtSvg.QSvgWidget(parent=self._centralWidget)
# self.wdg_svg1.clicked.connect(self._enlarge_svg1)
# self.wdg_svg1.load(file="")
self.wdg_svg2 = QtSvg.QSvgWidget(parent=self._centralWidget)
# svg_style = "border-radius: 5px; border: 2px solid black; " \
# "background-color: rgb(235,235,235)"
# self.wdg_svg1.setStyleSheet(svg_style)
# self.wdg_svg2.setStyleSheet(svg_style)
# Maximize and save buttons (right to the respective SVG image)
self._perfile_layout1 = QVBoxLayout()
self._perfile_layout2 = QVBoxLayout()
self.perfile_buttons1 = QWidget(parent=self._centralWidget)
self.perfile_buttons2 = QWidget(parent=self._centralWidget)
self.perfile_buttons1.setLayout(self._perfile_layout1)
self.perfile_buttons2.setLayout(self._perfile_layout2)
# 'Maximize' buttons
self.btn_enlarge1 = QPushButton(text="Max.", parent=self.perfile_buttons1)
self.btn_enlarge2 = QPushButton(text="Max.", parent=self.perfile_buttons2)
self.btn_enlarge1.setToolTip("Show image in separate window, maximized")
self.btn_enlarge2.setToolTip("Show image in separate window, maximized")
self.btn_enlarge1.setFixedSize(btn_size*2, btn_size)
self.btn_enlarge2.setFixedSize(btn_size*2, btn_size)
self.btn_enlarge1.clicked.connect(self._enlarge_svg1)
self.btn_enlarge2.clicked.connect(self._enlarge_svg2)
# 'Save' buttons
self.btn_savesvg1 = QPushButton(text="Save", parent=self.perfile_buttons1)
self.btn_savesvg2 = QPushButton(text="Save", parent=self.perfile_buttons2)
self.btn_savesvg1.setToolTip("Save SVG to file")
self.btn_savesvg2.setToolTip("Save SVG to file")
self.btn_savesvg1.setFixedSize(btn_size*2, btn_size)
self.btn_savesvg2.setFixedSize(btn_size*2, btn_size)
self.btn_savesvg1.clicked.connect(self._save_svg1)
self.btn_savesvg2.clicked.connect(self._save_svg2)
# add widgets to per file layout
self._perfile_layout1.addWidget(self.btn_enlarge1)
self._perfile_layout1.addWidget(self.btn_savesvg1)
self._perfile_layout2.addWidget(self.btn_enlarge2)
self._perfile_layout2.addWidget(self.btn_savesvg2)
# List of all sentences: scrollable, can click on sentences
# see also stackoverflow.com how-to-scroll-qlistwidget-to-selected-item
self.sents_scroll = QListWidget(parent=self._centralWidget)
self.sents_scroll.setMinimumSize(100, 50)
for i, sent_key in enumerate(self.sent_keys):
sentence = self.get_sentence(key=sent_key) # (id +) sentence
it = QListWidgetItem(f"{i+1:02} || {sentence}",
parent=self.sents_scroll)
if i == 0:
self.current_item = it
self.sents_scroll.setCurrentItem(self.current_item)
self.current_item.setSelected(True)
self.sents_scroll.currentItemChanged.connect(self._on_item_changed)
# organize widgets in (main) grid layout
# - row 0: sentence ...................... and X/Y sents
# - row 1/2: SVG1 / 2 ...................... and maximize/save button
# - row 3: list (scrollable) of sentences and previous/next buttons
self.generalLayout.addWidget(self.lbl_no, 0, 1)
self.generalLayout.addWidget(self.lbl_sent, 0, 0)
self.generalLayout.addWidget(self.btn_prev, 3, 1, alignment=QtCore.Qt.AlignBottom)
self.generalLayout.addWidget(self.btn_next, 4, 1, alignment=QtCore.Qt.AlignTop)
self.generalLayout.addWidget(self.wdg_svg1, 1, 0, 1, 1)
self.generalLayout.addWidget(self.wdg_svg2, 2, 0, 1, 1)
self.generalLayout.addWidget(self.perfile_buttons1, 1, 1, alignment=QtCore.Qt.AlignTop)
self.generalLayout.addWidget(self.perfile_buttons2, 2, 1, alignment=QtCore.Qt.AlignTop)
self.generalLayout.addWidget(self.sents_scroll, 3, 0, 2, 1)
# Sizing and what changes when the window is resized:
# Rows with SVG should occupy most of the available space,
# the top row shouldn't consume much space (no matter what window size),
# the last row (scrollbar) can consume more space if window size
# increases, but prefer to give more room to the SVGs
# https://doc.qt.io/qtforpython/PySide2/QtWidgets/QSizePolicy.html
# self.generalLayout.setSpacing(0)
self.lbl_sent.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.lbl_no.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.wdg_svg1.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.wdg_svg2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.sents_scroll.setSizePolicy(QSizePolicy.Preferred,
QSizePolicy.Expanding)
self.generalLayout.setRowStretch(1, 8) # increase SVG size more
self.generalLayout.setRowStretch(2, 8)
self.generalLayout.setRowStretch(3, 1) # increase scroll bar size less
return
def main_gui(sent_keys: list, am_f1gf: dict, use_id: bool):
"""
Starts PyQt5 GUI comparing to amconll files (their intersection)
:param sent_keys: list of keys of am_f1gf: ordering of sent. presentation
:param am_f1gf: key is id/sentence, value if (AMSentence,AMSentence) pair
:param use_id: whether the keys in sent_keys are ids or sentence strings
:return: None
"""
# Note: no input validation is done: specifically if all k in sent_keys
# are valid keys of am_f1gf
app = QApplication([])
with TemporaryDirectory() as direc: # for svg, tex files..
view = PyCompareUi(direc=direc, useid=use_id, amf1gf=am_f1gf,
sent_keys=sent_keys)
view.show()
# exec_ listens for events
sys.exit(app.exec_())
def main(argv):
"""
Start PyQt5 GUI comparing two amconll files (at least, their intersection)
Given two amconll files (system file and gold file), computes intersection
and displays it in a GUI. Sentence equality is either determined by
sentence ID equality (--useid) or sentence string equality
(modulo some very basic handling for special characters and such).
"""
optparser = argparse.ArgumentParser(
add_help=True,
description="compares two amconll files (GUI version)")
optparser.add_argument("file1", help="system output", type=str)
optparser.add_argument("gold_file", help="gold file", type=str)
optparser.add_argument("--useid", action="store_true",
help="use id instead of string equality")
# todo [enhancement] random number argument (None or seed)
opts = optparser.parse_args(argv[1:])
file1 = opts.file1
gold_file = opts.gold_file
for file in [file1, gold_file]:
if not os.path.isfile(file):
raise RuntimeError(f"Not a valid file: {file}")
# compute overlap
use_id = opts.useid # if False, uses sentence string, otherwise id
am_f1gf = get_key_amsentpairs(use_id=use_id, file1=file1, file2=gold_file)
# get list of keys of am_f1gf (optional: random shuffling)
# remember keys are either sentence ids (--useid) or sentence strings (else)
seed = 42
if seed is not None:
print(f";; Shuffle keys using random seed {str(seed)}")
target_keys = get_list_of_keys(d=am_f1gf, randomseed=seed)
# start GUI
main_gui(sent_keys=target_keys, am_f1gf=am_f1gf, use_id=use_id)
return
if __name__ == '__main__':
main(sys.argv)
```
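A hypothetical invocation of the comparison GUI above; the amconll file names are placeholders, and `--useid` switches key equality from sentence strings to sentence IDs:

```python
# Equivalent to: python compare_amconll_qtgui.py system.amconll gold.amconll --useid
main(["compare_amconll_qtgui.py", "system.amconll", "gold.amconll", "--useid"])
```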
#### File: jgontrum/am-parser/demo_server.py
```python
import os
import sys
import time
import traceback
from io import StringIO
from tempfile import TemporaryDirectory, TemporaryFile
from typing import Dict, Any
import logging
import json
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import prepare_environment
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.iterators import DataIterator
from allennlp.models.archival import load_archive
from allennlp.common import Params
from graph_dependency_parser.components.dataset_readers.amconll_tools import from_raw_text, parse_amconll
from graph_dependency_parser.components.evaluation.predictors import AMconllPredictor
from graph_dependency_parser.components.spacy_interface import spacy_tokenize
from graph_dependency_parser.graph_dependency_parser import GraphDependencyParser
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO) # turn on logging.
import graph_dependency_parser.graph_dependency_parser
import graph_dependency_parser.important_imports
import argparse
parser = argparse.ArgumentParser(description="Run the am-parser as a server.")
parser.add_argument('archive_file', type=str, help='path to an archived trained model')
parser.add_argument('am_tools', type=str, help='path to am-tools.jar')
parser.add_argument('-k',
type=int,
default=6,
help='number of supertags to be used')
parser.add_argument('-t', "--threads",
type=int,
default=1,
help='number of threads')
parser.add_argument("--port",
type=int,
default=8888,
help='Port to be used')
parser.add_argument("--mtool",
type=str,
default=None,
help='Path to main.py of mtool, for visualizations of the graphs.')
parser.add_argument("--lookup",
type=str,
default="downloaded_models/lookup/lookupdata17/",
help='Path to AMR-2017 lookup data.')
parser.add_argument("--wordnet",
type=str,
default="downloaded_models/wordnet3.0/dict/",
help='Path to wordnet')
parser.add_argument('--give_up',
type=float,
default=1,
help='number of seconds until fixed-tree decoder backs off to k-1')
parser.add_argument('-v',
action='store_true',
default=False,
help='verbose logging')
cuda_device = parser.add_mutually_exclusive_group(required=False)
cuda_device.add_argument('--cuda-device',
type=int,
default=-1,
help='id of GPU to use (if any)')
parser.add_argument('--weights-file',
type=str,
help='a path that overrides which weights file to use')
parser.add_argument('-o', '--overrides',
type=str,
default="",
help='a JSON structure used to override the experiment configuration')
args = parser.parse_args()
if args.v:
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO) # turn on logging.
# Disable some of the more verbose logging statements
logging.getLogger('allennlp.common.params').disabled = True
logging.getLogger('allennlp.nn.initializers').disabled = True
logging.getLogger('allennlp.modules.token_embedders.embedding').setLevel(logging.INFO)
# Load from archive
archive = load_archive(args.archive_file, args.cuda_device, args.overrides, args.weights_file)
config = archive.config
config.formalism = "DUMMY"
prepare_environment(config)
model = archive.model
model.eval()
if not isinstance(model, GraphDependencyParser):
raise ConfigurationError("The loaded model seems not to be an am-parser (GraphDependencyParser)")
# Load the evaluation data
# Try to use the validation dataset reader if there is one - otherwise fall back
# to the default dataset_reader used for both training and validation.
validation_dataset_reader_params = config.pop('validation_dataset_reader', None)
if validation_dataset_reader_params is not None:
dataset_reader = DatasetReader.from_params(validation_dataset_reader_params)
else:
dataset_reader = DatasetReader.from_params(config.pop('dataset_reader'))
predictor = AMconllPredictor(dataset_reader, args.k, args.give_up, args.threads, model=model)
requires_art_root = {"DM": True, "PAS": True, "PSD": True, "EDS": False, "AMR-2015": False, "AMR-2017": False}
requires_ne_merging = {"DM": False, "PAS": False, "PSD": False, "EDS": False, "AMR-2015": True, "AMR-2017": True}
import asyncio
import json
if args.mtool:
# Load mtool codecs
dname = os.path.dirname(os.path.abspath(args.mtool))
sys.path.append(dname)
import codec.amr
import codec.sdp
def get_mtool_graph(g, format):
stream = StringIO(g)
if format == "dm" or format == "psd":
r, _ = next(codec.sdp.read(stream, framework=format))
elif format == "amr":
r, _ = next(codec.amr.read(stream))
r.normalize("edges")
return r
import jnius_config
jnius_config.set_classpath(".", args.am_tools)
from jnius import autoclass
class AMToolsInterface:
def evaluate(self, input_file: str, output_path: str) -> str:
raise NotImplementedError()
class DMInterface(AMToolsInterface):
def __init__(self):
self.main = autoclass("de.saar.coli.amrtagging.formalisms.sdp.dm.tools.ToSDPCorpus")
def evaluate(self, input_file: str, output_path: str) -> str:
save_to = input_file + "_o"
self.main.main(["-c", input_file, "-o", save_to])
return save_to + ".sdp"
class PSDInterface(AMToolsInterface):
def __init__(self):
self.main = autoclass("de.saar.coli.amrtagging.formalisms.sdp.psd.tools.ToSDPCorpus")
def evaluate(self, input_file: str, output_path: str) -> str:
save_to = input_file + "_o"
self.main.main(["-c", input_file, "-o", save_to])
return save_to + ".sdp"
class EDSInterface(AMToolsInterface):
def __init__(self):
self.main = autoclass("de.saar.coli.amrtagging.formalisms.eds.tools.EvaluateCorpus")
def evaluate(self, input_file: str, output_path: str) -> str:
save_to = input_file + "_o"
self.main.main(["-c", input_file, "-o", save_to])
return save_to + ".amr.txt"
class AMRInterface(AMToolsInterface):
def __init__(self, lookupdata: str, wordnet_path: str):
self.lookupdata = lookupdata
self.wordnet_path = wordnet_path
self.main = autoclass("de.saar.coli.amrtagging.formalisms.amr.tools.EvaluateCorpus")
def evaluate(self, input_file: str, output_path: str) -> str:
self.main.main(
["-c", input_file, "-o", output_path, "--relabel", "--wn", self.wordnet_path, "--lookup", self.lookupdata,
"--th", "10"])
return output_path + "/parserOut.txt"
formalism_to_class = {"DM": DMInterface(), "PAS": DMInterface(), "PSD": PSDInterface(), "EDS": EDSInterface(),
"AMR-2017": AMRInterface(args.lookup, args.wordnet)}
def postprocess(filename, output_path, formalism):
"""
if [ "$type" = "DM" ] || [ "$type" = "PAS" ]; then
java -cp $jar de.saar.coli.amrtagging.formalisms.sdp.dm.tools.ToSDPCorpus -c $amconll -o $output$type
elif [ "$type" = "PSD" ]; then
java -cp $jar de.saar.coli.amrtagging.formalisms.sdp.psd.tools.ToSDPCorpus -c $amconll -o $output$type
elif [ "$type" = "EDS" ]; then
java -cp $jar de.saar.coli.amrtagging.formalisms.eds.tools.EvaluateCorpus -c $amconll -o "$output"$type
elif [ "$type" = "AMR-2017" ]; then
bash scripts/eval_AMR_new.sh $amconll $output $jar
fi
"""
t = time.time()
o_fil = formalism_to_class[formalism].evaluate(filename, output_path)
format = ""
if formalism in {"DM", "PSD", "PAS"}:
format = "dm"
elif formalism == "EDS":
format = "amr"
elif "AMR" in formalism:
format = "amr"
else:
return f"ERROR: formalism {formalism} not known.", ""
with open(o_fil) as f:
text = f.read()
graph_time = time.time() - t
t = time.time()
# Create svg file.
svg = ""
if args.mtool:
with TemporaryDirectory() as direc:
# os.system(f"python3 {args.mtool} --normalize edges --read {format} --write dot {o_fil} {direc}/o.dot") # takes long, like 0.26s
graph = get_mtool_graph(text, format)
with open(direc + "/o.dot", "w") as f:
graph.dot(f)
os.system(f"dot -Tsvg {direc}/o.dot -o {direc}/o.svg")
with open(f"{direc}/o.svg") as f:
svg = f.read()
svg_time = time.time() - t
return (text, graph_time), (svg, svg_time)
async def handle_client(reader, writer):
request = (await reader.read(4048)).decode('utf8') # read a maximum of 4048 bytes, that's more than enough
print("Request", request)
ret_val = {"errors": [], "times" : {"amdep" : 0.0 , "svg" : 0.0, "graph" : 0.0, "amdep-svg" : 0.0}}
# times: amdep: parse time, svg: time to visualize graph, graph: evaluation time from amdep to graph, amdep-svg: viz. of amdep tree.
t1 = time.time()
try:
json_req = json.loads(request)
print("-- as json", json_req)
sentence = json_req["sentence"]
if len(sentence) > 256:
raise ValueError("Your input exceeded the maximal input length")
formalisms = json_req["formats"]
words = spacy_tokenize(sentence)
with TemporaryDirectory() as direc:
ret_val["sentence"] = sentence
ret_val["parses"] = {f: {} for f in formalisms}
for formalism in formalisms:
if formalism not in model.tasks:
err = f"Model was not trained on '{formalism}' but on {list(model.tasks.keys())}"
print(err)
ret_val["errors"].append(err)
continue
if formalism not in requires_art_root:
err = f"Server doesn't know how to handle '{formalism}' although the model was trained on it."
print(err)
ret_val["errors"].append(err)
continue
t = time.time()
# Create input and save to file:
sentences = [from_raw_text(sentence.rstrip("\n"), words, requires_art_root[formalism], dict(),
requires_ne_merging[formalism])]
temp_path = direc + f"/sentences_{formalism}.amconll"
output_filename = direc + "/parsed_" + formalism + ".amconll"
with open(temp_path, "w") as f:
for s in sentences:
f.write(str(s))
f.write("\n\n")
predictor.parse_and_save(formalism, temp_path, output_filename)
# Read AM dependency tree
with open(output_filename) as f:
ret_val["parses"][formalism]["amdep"] = f.read()
ret_val["times"]["amdep"] += time.time() - t
# ...and as svg:
t = time.time()
with open(output_filename) as f:
amdep = next(parse_amconll(f))
#with open(direc + "/amdep.dot", "w") as g:
# g.write(amdep.to_dot())
#os.system(f"dot -Tsvg {direc}/amdep.dot -o {direc}/amdep.svg")
#with open(direc + "/amdep.svg") as g:
# ret_val["parses"][formalism]["amdep-svg"] = g.read()
ret_val["parses"][formalism]["amdep-svg"] = amdep.displacy_svg()
ret_val["times"]["amdep-svg"] += time.time() - t
# Evaluate to graph
(raw_graph, graph_time), (svg, svg_time) = postprocess(output_filename, direc, formalism)
ret_val["parses"][formalism]["graph"] = raw_graph
if svg:
ret_val["parses"][formalism]["svg"] = svg
ret_val["times"]["graph"] += graph_time
ret_val["times"]["svg"] += svg_time
except BaseException as ex: #
err = "".join(traceback.TracebackException.from_exception(ex).format_exception_only())
ret_val["errors"].append(err)
print("Ignoring error:")
print(err)
writer.write(bytes(json.dumps(ret_val), "utf8"))
await writer.drain()
writer.close()
t2 = time.time()
print("Handling request took", t2 - t1)
print("Breakdown:",ret_val["times"])
loop = asyncio.get_event_loop()
loop.create_task(asyncio.start_server(handle_client, 'localhost', args.port))
loop.run_forever()
```
#### File: jgontrum/am-parser/demo_simulate_request.py
```python
import asyncio
async def tcp_echo_client(message, loop):
reader, writer = await asyncio.open_connection('127.0.0.1', 8888,
loop=loop)
print('Send: %r' % message)
writer.write(message.encode())
data = await reader.read(40_000)
print('Received: %r' % data.decode())
print('Close the socket')
writer.close()
message = """
{
"sentence": "the boy wants to sleep.",
"formats": ["AMR-2017", "EDS"]
}
"""
loop = asyncio.get_event_loop()
loop.run_until_complete(tcp_echo_client(message, loop))
loop.close()
```
#### File: graph_dependency_parser/am_algebra/tree.py
```python
class Tree:
"""
A simple tree class
"""
def __init__(self, node, children):
self.node = node
self.children = children
def add_child(self,child):
self.children.append(child)
@staticmethod
def from_heads(heads, conll_sentence):
def parse(i):
mother = Tree((i,conll_sentence[i]),[])
for j in range(len(heads)):
if heads[j] == i:
mother.add_child(parse(j))
return mother
        return parse(0) # artificial root is at position 0
def fold(self, f):
"""
        Folding on trees: f takes a node and a list of things that f produces,
        and returns a single thing (for instance a Tree or a number).
"""
if len(self.children) == 0:
return f(self.node,[])
return f(self.node,[c.fold(f) for c in self.children])
def fold_double(self, f):
"""
        Folding on trees: f takes a node, its children and a list of things that f produces,
        and returns a single thing (for instance a Tree or a number).
"""
if len(self.children) == 0:
return f(self.node,[],[])
return f(self.node,self.children,[c.fold_double(f) for c in self.children])
def map(self,f):
"""
        Applies f to all nodes of the tree; modifies the tree in place.
"""
self.node = f(self.node)
for c in self.children:
c.map(f)
def size(self):
if len(self.children) == 0:
return 1
return 1+sum(c.size() for c in self.children)
def max_arity(self):
if len(self.children) == 0:
return 0
return max(len(self.children), max(c.max_arity() for c in self.children))
def postorder(self):
if self.children == []:
yield self
else:
for c in self.children:
for x in c.postorder():
yield x
yield self
def _to_str(self,depth=0):
if len(self.children) == 0:
return 4*depth*" "+str(self.node)
return 3*depth*" "+"["+str(self.node)+"\n {}]".format("\n".join( c._to_str(depth+1) for c in self.children))
def __str__(self):
return self._to_str()
def __repr__(self):
if len(self.children) == 0:
return "("+str(self.node) +")"
return "({} {})".format(str(self.node)," ".join(c.__repr__() for c in self.children))
if __name__ == "__main__":
t = Tree("a",[Tree("b",[]),Tree("c",[Tree("d",[])])])
#~ h = [-1, 2, 29, 8, 8, 2, 2, 8, 0, 5, 5, 14, 14, 14, 21, 17, 17, 18, 14, 14, 29, 22, 20, 22, 22, 29, 27, 29, 29, 4]
#~ t = Tree.from_heads(list(h),range(len(h)))
#~ print(t)
#print(t.fold(lambda n,children: Tree(n[0],children)))
#t.map(lambda node: node[0]) #the same thing as folding, but with side-effect
#print(t)
print(t.size())
print(list(t.postorder()))
```
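For reference, a small sketch of how `fold` and `map` behave on the toy tree from the `__main__` block (the import path is inferred from the file header and may need adjusting):

```python
# Illustrative only; assumes the module is importable under the path implied by the file location above.
from graph_dependency_parser.am_algebra.tree import Tree

t = Tree("a", [Tree("b", []), Tree("c", [Tree("d", [])])])

# fold works bottom-up: here it computes the depth of the tree.
depth = t.fold(lambda node, child_values: 1 + max(child_values, default=0))
print(depth)  # 3  (a -> c -> d)

# map applies a function to every node label, modifying the tree in place.
t.map(str.upper)
print(repr(t))  # (A (B) (C (D)))
```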
#### File: graph_dependency_parser/components/cle.py
```python
from typing import List, Tuple
from dependency_decoding import chu_liu_edmonds # requires https://github.com/andersjo/dependency_decoding
from allennlp.nn.util import get_device_of
import numpy as np
import torch
def cle_decode(scores, lengths):
"""
Parses a batch of sentences
    :param scores: torch.Tensor of shape (batch_size, tokens, tokens) with edge scores
:param lengths: actual lengths of the sentences, tensor of shape (batch_size,)
:return: a tensor of shape (batch_size, tokens) that contains the heads of the tokens. Positions that go over the sentence length are filled with -1.
"""
heads = []
scores = scores.detach().cpu().double().numpy()
lengths = lengths.cpu().numpy()
bs, toks, _ = scores.shape
for m,l in zip(scores,lengths):
r,_ = chu_liu_edmonds(m[:l,:l]) #discard _score_ of solution
h = np.concatenate([r, -np.ones(toks-l,dtype=np.long)])
heads.append(h)
return torch.from_numpy(np.stack(heads))
def get_head_dict(heads):
"""
    Takes a list of heads for a sentence and returns a dictionary that maps each head to the set of its dependents
:param heads:
:return:
"""
#usually, you want to call get_head_dict(some_heads[1:]) #strip off -1
head_dict = dict()
for (m,h) in enumerate(heads):
if h not in head_dict:
head_dict[h] = set()
head_dict[h].add(m+1)
return head_dict
def cle_loss(scores: torch.Tensor, lengths : torch.Tensor, gold_heads : torch.Tensor, normalize_wrt_seq_len : bool):
"""
Parses a batch of sentences and computes a hinge loss (see code by <NAME>: https://github.com/elikip/bist-parser)
    :param scores: torch.Tensor of shape (batch_size, tokens, tokens) with edge scores
:param gold_heads: Tensor of shape (batch_size, tokens) that contains the correct head for every word.
:param lengths: actual lengths of the sentences, tensor of shape (batch_size,)
:return: a scalar torch.Tensor with the hinge loss
"""
losses : torch.Tensor = 0
device = get_device_of(scores)
scores = scores.cpu()
#scores_np = scores.detach().double().numpy()
gold_heads = gold_heads.cpu().numpy()
lengths = lengths.cpu().numpy()
for m,g,l in zip(scores,gold_heads,lengths):
#m: shape (tokens, tokens)
#g: shape (tokens,)
#l: scalar, sentence length
range = np.arange(l)
#remove padding at the end:
m = m[:l, :l]
g = g[:l] # -> shape (l,)
# make gold solution look worse by cost augmentation (in the original, make non-gold look better)/introduce margin:
m[range, g] -= 1.0 # cost augmentation
r,_ = chu_liu_edmonds(m.detach().double().numpy()) #discard _score_ of solution, -> r has shape (l,)
# this implementation says that head of artificial root is -1, but the rest of the pipeline says the head of the artificial root is the artificial root itself (i.e. 0):
r[0] = 0
r = np.array(r)
scores_of_solution = m[range,r] #extract the scores belonging to the decoded edges -> shape (l,)
scores_of_gold = m[range,g] # extract the scores belonging to the gold edges -> shape (l,)
r = torch.from_numpy(r)
g = torch.from_numpy(g)
zero = torch.zeros(1,dtype=torch.float32)
#where predicted head differs from gold head, add the score difference to the loss term:
loss_term = torch.sum(torch.where(torch.eq(r,g), zero, scores_of_solution-scores_of_gold))
if normalize_wrt_seq_len:
loss_term /= l
losses += loss_term
if device < 0:
return losses
return losses.to(device)
def find_root(heads : List[int], best_supertags : List[int], label_scores:np.array, root_edge_label_id : int, bot_id : int, modify : bool) -> Tuple[List[int],int]:
"""
Selects the root and potentially changes some attachments. We take everything attached to the artificial root (index 0) and regroup it under the actual root.
Exempted from this are words whose highest scoring supertag is \bot.
    We find the root by looking at the child of index 0 that has the highest edge-label score for being ROOT.
:param heads: a list of sentence length that gives the head of each position
:param best_supertags: a list of sentence length with highest scoring supertags (as ints)
:param label_scores: a numpy array of shape (sentence length, num of labels)
that contains the scores for the edge labels on the edges given in heads.
:param root_edge_label_id: the id of the edge label ROOT from the vocabulary
:param bot_id: the id of the supertag \bot from the vocabulary.
:param modify: change the heads? Or only find the root?
:return: return the (modified) list of heads and the index of the actual root.
"""
assert len(best_supertags) == len(heads)
assert label_scores.shape[0] == len(heads)
head_dict = get_head_dict(heads)
#those positions that are attached to 0 and whose best supertag is not \bot. If the supertag is bot, then we only look at those that are heads themselves
attached_to_0: List[int] = [index for index in head_dict[0] if best_supertags[index-1] != bot_id or index in head_dict]
if len(attached_to_0) > 0:
root_scores = []
for dependent_of_0 in attached_to_0:
root_scores.append(label_scores[dependent_of_0-1,root_edge_label_id])
new_root_id : int = attached_to_0[np.argmax(np.array(root_scores))]
if modify:
for e in attached_to_0:
if e != new_root_id:
heads[e-1] = new_root_id
else:
if len(heads) == 1: # single word sentence
new_root_id = 1 #1-based
else:
attached_to_0 = list(head_dict[0])
print("WARNING: choosing root node arbitrarily!")
if attached_to_0:
new_root_id = attached_to_0[0] #any element
else:
raise ValueError("Nothing attached to 0?")
return heads, new_root_id
```
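A minimal usage sketch for `cle_decode` on a toy batch; it assumes torch and the `dependency_decoding` package mentioned in the import above are installed, and the import path mirrors the file location:

```python
import torch
from graph_dependency_parser.components.cle import cle_decode  # path inferred from the file header

batch_size, max_tokens = 2, 5
scores = torch.randn(batch_size, max_tokens, max_tokens)  # edge scores, artificial root at position 0
lengths = torch.tensor([5, 3])                            # actual sentence lengths (including the root)

heads = cle_decode(scores, lengths)
print(heads.shape)  # torch.Size([2, 5]); positions beyond a sentence's length are filled with -1
```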
#### File: components/evaluation/commands.py
```python
import os
from abc import ABC, abstractmethod
from tempfile import TemporaryDirectory
from typing import List, Iterable, Dict, Tuple
from allennlp.common import Params, Registrable
import subprocess
import re
import json
from graph_dependency_parser.components.utils import flatten, merge_dicts
class BaseEvaluationCommand(ABC, Registrable):
"""
An evaluation command takes two files (gold and system output) and returns a dictionary with scores.
"""
@abstractmethod
    def evaluate(self, system_output: str, gold_file: str) -> Dict[str, float]:
raise NotImplementedError()
@BaseEvaluationCommand.register("bash_evaluation_command")
class BashEvaluationCommand(BaseEvaluationCommand):
"""
An evaluation command that can be configured with jsonnet files.
Executes a bash command, taps into the output and returns metrics extracted using regular expressions.
"""
    def __init__(self, command : str, result_regexes: Dict[str, Tuple[int, str]], show_output: bool = True) -> None:
"""
Sets up an evaluator.
        :param command: a bash command that will get executed. Use {system_output}, {gold_file} and {tmp} as placeholders.
        :param result_regexes: a dictionary mapping metric names to tuples of (line number, regex) that describe how to extract the value of the respective metric.
evaluate will return a dictionary where the keys are the metric names and the regexes are used to extract
the respective values of the metrics in the specified lines. From each regex, we take the group "value". That is, use (?P<value>...) in your regex!
        :param show_output: whether the output of the evaluation command should be printed.
"""
self.command = command
self.result_regex = result_regexes
self.show_output = show_output
for line_number,regex in result_regexes.values():
assert "(?P<value>" in regex,f"Regex {regex} doesn't seem to contain the group ?P<value>"
def evaluate(self, system_output: str, gold_file: str) -> Dict[str, float]:
"""
Calls a bash command and extracts metrics.
:param system_output:
:param gold_file:
:return: a dictionary that maps metric names to their values
"""
with TemporaryDirectory() as direc:
cmd = self.command.format(system_output=system_output, gold_file=gold_file, tmp=direc)
with subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE) as proc:
                result = bytes.decode(proc.stdout.read()) # output of shell command as string
result_lines = result.split("\n")
if self.show_output:
print(result)
metrics = dict()
for metric_name, (line_number,regex) in self.result_regex.items():
m = re.search(regex, result_lines[line_number])
if m:
val = float(m.group("value"))
metrics[metric_name] = val
if self.show_output:
print(metrics)
return metrics
@BaseEvaluationCommand.register("amr_evaluation_command")
class AMREvaluationCommand(BaseEvaluationCommand):
"""
An evaluation command for AMR that can be configured with jsonnet files.
"""
def __init__(self, amr_year : str, tool_dir : str, alto_path: str, show_output: bool = True) -> None:
"""
Sets up an evaluator.
:param amr_year: 2015 or 2017
:param tool_dir: the path to the evaluation tools used for AMR (2019rerun)
:param alto_path: the path to the Alto .jar file
        :param show_output: show Smatch results on the command line?
"""
self.amr_year = amr_year
assert amr_year in ["2015","2017"]
self.tool_dir = tool_dir
self.alto_path = alto_path
self.show_output = show_output
def evaluate(self, system_output: str, gold_file: str) -> Dict[str, float]:
"""
Calls the evaluation functions and returns extracted metrics.
:param system_output:
:param gold_file:
:return: a dictionary that maps metric names to their values
"""
assert gold_file in ["dev","test"], f"In case of AMR, set gold_file in the validation_evaluator to dev or test (got {gold_file})"
with TemporaryDirectory() as direc:
os.system(f"java -cp {self.alto_path} de.saar.coli.amrtagging.formalisms.amr.tools.EvaluateCorpus -c {system_output} -o {direc}")
if "dev" == gold_file:
if self.amr_year == "2017":
os.system(f"bash {self.tool_dir}/scripts/eval_dev17.sh {direc} {self.alto_path}")
else:
os.system(f"bash {self.tool_dir}/scripts/eval_dev.sh {direc} {self.alto_path}")
elif "test" == gold_file:
if self.amr_year == "2017":
os.system(f"bash {self.tool_dir}/scripts/eval_test17.sh {direc} {self.alto_path}")
else:
os.system(f"bash {self.tool_dir}/scripts/eval_test.sh {direc} {self.alto_path}")
else:
raise ValueError(f"Given gold file {gold_file} I can't determine if this is dev or test data")
metrics = dict()
with open(direc + "/smatch.txt") as f:
lines = f.readlines()
for line in lines:
name, score = line.split(": ")
metrics[name] = 100 * float(score)
if self.show_output:
print (metrics)
return metrics
@BaseEvaluationCommand.register("json_evaluation_command")
class JsonEvaluationCommand(BaseEvaluationCommand):
"""
An evaluation command that can be configured with jsonnet files.
Executes a bash command, taps into the output and returns metrics extracted using json.
"""
def __init__(self, commands : List[List[str]], show_output: bool = True) -> None:
"""
Sets up an evaluator.
:param commands: a list of pairs of (metric_prefix, command) that will get executed. Use {system_output} and {gold_file} and {tmp} as placeholders.
{tmp} points to a private temporary directory. if metric_prefix is the empty string, no metric will be saved.
        :param show_output: whether the output of the evaluation command should be printed.
"""
self.commands = commands
for cmd in self.commands:
assert len(cmd) == 2, "Should get a tuple of [metric_prefix, command] but got "+str(cmd)
self.show_output = show_output
def evaluate(self, system_output: str, gold_file: str) -> Dict[str, float]:
"""
Calls the bash commands and extracts metrics for
:param system_output:
:param gold_file:
:return: a dictionary that maps metric names to their values
"""
metrics = dict()
with TemporaryDirectory() as direc:
for prefix,cmd in self.commands:
cmd = cmd.format(system_output=system_output, gold_file=gold_file, tmp=direc)
with subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE) as proc:
                    result = bytes.decode(proc.stdout.read()) # output of shell command as string
if self.show_output:
print(result)
if prefix:
try:
result_json = json.loads(result)
metrics = merge_dicts(metrics, prefix, flatten(result_json))
except json.decoder.JSONDecodeError: #probably not intended for us
if self.show_output:
print("<-- not well-formed json, ignoring")
if self.show_output:
print(metrics)
return metrics
@BaseEvaluationCommand.register("dummy_evaluation_command")
class DummyEvaluationCommand(BaseEvaluationCommand):
"""
Does nothing, returns empty dictionary.
"""
def __init__(self) -> None:
pass
def evaluate(self, system_output: str, gold_file: str) -> Dict[str, float]:
return dict()
```
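A hedged configuration sketch for `BashEvaluationCommand`; the command string and the metric regex below are invented for illustration only:

```python
from graph_dependency_parser.components.evaluation.commands import BashEvaluationCommand  # path as used elsewhere in the repository

cmd = BashEvaluationCommand(
    command="python compare.py --system {system_output} --gold {gold_file}",  # hypothetical evaluation script
    result_regexes={
        # metric name -> (line number in the command's stdout, regex with a named group "value")
        "F": (0, r"F-score: (?P<value>[0-9.]+)"),
    },
)
# cmd.evaluate("system.amconll", "gold.amconll") would then return e.g. {"F": 91.2}
```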
#### File: components/evaluation/iterator.py
```python
import torch
from typing import List, Dict, Iterable
from allennlp.data import DataIterator
from allennlp.data.dataset import Instance
import allennlp.nn.util as util
import numpy
def forward_on_instances(model,
instances: Iterable[Instance], data_iterator: DataIterator) -> List[Dict[str, numpy.ndarray]]:
"""
Basically a copy of Model.forward_on_instances, but also takes a DataIterator in order to be more efficient.
Takes a list of :class:`~allennlp.data.instance.Instance`s, converts that text into
arrays using this model's :class:`Vocabulary`, passes those arrays through
:func:`self.forward()` and :func:`self.decode()` (which by default does nothing)
and returns the result. Before returning the result, we convert any
``torch.Tensors`` into numpy arrays and separate the
batched output into a list of individual dicts per instance. Note that typically
this will be faster on a GPU (and conditionally, on a CPU) than repeated calls to
:func:`forward_on_instance`.
Parameters
----------
model : AllenNLP model, required
The model to run.
instances : List[Instance], required
The instances to run the model on.
data_iterator: DataIterator, required
The DataIterator used for going over the data (e.g. BucketIterator)
Returns
-------
A list of the models output for each instance.
"""
data_iterator.index_with(model.vocab)
with torch.no_grad():
return_val: List[Dict[str, numpy.ndarray]] = []
cuda_device = model._get_prediction_device()
for dataset in data_iterator._create_batches(instances, shuffle=False):
batch_size = len(dataset.instances)
dataset.index_instances(model.vocab)
model_input = util.move_to_device(dataset.as_tensor_dict(), cuda_device)
outputs = model.decode(model(**model_input))
instance_separated_output: List[Dict[str, numpy.ndarray]] = [{} for _ in dataset.instances]
for name, output in list(outputs.items()):
if isinstance(output, torch.Tensor):
# NOTE(markn): This is a hack because 0-dim pytorch tensors are not iterable.
# This occurs with batch size 1, because we still want to include the loss in that case.
if output.dim() == 0:
output = output.unsqueeze(0)
if output.size(0) != batch_size:
model._maybe_warn_for_unseparable_batches(name)
continue
output = output.detach().cpu().numpy()
elif len(output) != batch_size:
model._maybe_warn_for_unseparable_batches(name)
continue
for instance_output, batch_element in zip(instance_separated_output, output):
instance_output[name] = batch_element
return_val.extend(instance_separated_output)
return return_val
```
#### File: components/evaluation/predictors.py
```python
import os
from abc import ABC, abstractmethod
from typing import List, Iterable, Dict, Tuple, Union
from allennlp.common import Params, Registrable
from allennlp.data import DatasetReader, DataIterator
from allennlp.data.iterators import BasicIterator, BucketIterator
from allennlp.models import Model
from allennlp.common.util import lazy_groups_of
from graph_dependency_parser.am_algebra.label_decoder import AMDecoder
from graph_dependency_parser.components.dataset_readers.amconll import AMConllDatasetReader
from graph_dependency_parser.components.dataset_readers.amconll_tools import AMSentence
from graph_dependency_parser.components.evaluation.commands import BaseEvaluationCommand
from graph_dependency_parser.components.evaluation.iterator import forward_on_instances
import tempfile
import socket
class Predictor (Registrable):
"""
A class that can make predictions for an input file. Not to be confused with AllenNLP's own predictors.
"""
def __init__(self, dataset_reader : DatasetReader, data_iterator: DataIterator = None ,evaluation_command : BaseEvaluationCommand = None, model : Model = None, batch_size:int = 64):
"""
Creates a predictor from an AMConllDatasetReader, optionally takes an AllenNLP model. The model can also be given later using set_model.
        If evaluation is required, an evaluation_command can be supplied as well.
:param dataset_reader: an AMConllDatasetReader
:param evaluation_command:
:param model:
"""
assert isinstance(dataset_reader, AMConllDatasetReader), "A predictor in the am-parser must take an AMConllDatasetReader"
self.dataset_reader = dataset_reader
self.model = model
self.evaluation_command = evaluation_command
self.batch_size = batch_size
if data_iterator is None:
self.data_iterator = BasicIterator()
else:
self.data_iterator = data_iterator
def set_model(self, model : Model):
self.model = model
def set_evaluation_command(self, evaluation_command : BaseEvaluationCommand):
self.evaluation_command = evaluation_command
def parse_and_save(self, formalism: str, input_file : str, output_file: str) -> None:
"""
Parses an input file and saves it to some given output file. Old content will be overwritten.
:param input_file:
:param formalism: the name of the formalism of the input_file
:param output_file:
:return:
"""
raise NotImplementedError()
def parse_and_eval(self, formalism:str, input_file : str, gold_file: str, filename :Union[str, None]) -> Dict[str,float]:
"""
Given an input file and a gold standard file, parses the input, saves the output in a temporary directory
and calls the evaluation command
:param input_file:
:param formalism: the name of the formalism of the input_file
:param gold_file:
:return: a dictionary with evaluation metrics as delivered by evaluation_command
"""
assert self.evaluation_command, "parse_and_eval needs evaluation_command to be given"
if not filename:
with tempfile.TemporaryDirectory() as tmpdirname:
filename = tmpdirname+"/prediction"
self.parse_and_save(formalism, input_file, filename)
return self.evaluation_command.evaluate(filename,gold_file)
else:
self.parse_and_save(formalism, input_file, filename)
return self.evaluation_command.evaluate(filename, gold_file)
@classmethod
def from_config(cls, config_file : str, serialization_dir : str) -> "Predictor":
"""
Creates a predictor from a configuration file (jsonnet) and a model directory.
:param config_file:
:param serialization_dir:
:return:
"""
params = Params.from_file(config_file)
model = Model.load(params, serialization_dir)
dr = DatasetReader.from_params(params["dataset_reader"])
assert isinstance(dr, AMConllDatasetReader), "A predictor in the am-parser must take an AMConllDatasetReader"
return cls(dr,model=model)
@Predictor.register("amconll_predictor")
class AMconllPredictor(Predictor):
"""
Predictor that calls the fixed-tree decoder.
"""
def __init__(self, dataset_reader: DatasetReader, k:int,give_up:float, threads:int = 4,data_iterator: DataIterator = None,
evaluation_command: BaseEvaluationCommand = None, model: Model = None, batch_size: int = 64, give_up_k_1 : float = None):
"""
Creates a predictor from an AMConllDatasetReader, optionally takes an AllenNLP model. The model can also be given later using set_model.
        If evaluation is required, an evaluation_command can be supplied as well.
:param dataset_reader: an AMConllDatasetReader
:param k: number of supertags to be used during decoding
:param give_up: time limit in seconds before retry parsing with k-1 supertags
:param threads: number of parallel threads to parse corpus
:param give_up_k_1: if given, how long to wait before skipping sentence entirely ("back off" from k=1 to k=0)
:param evaluation_command:
:param model:
"""
super().__init__(dataset_reader,data_iterator,evaluation_command,model,batch_size)
self.k = k
self.threads = threads
self.give_up = give_up
if give_up_k_1 is None:
self.give_up_k_1 = give_up
else:
self.give_up_k_1 = give_up_k_1
def parse_and_save(self, formalism : str, input_file : str, output_file: str) -> None:
"""
Parses an input file and saves it to some given output file. Old content will be overwritten.
:param input_file:
:param formalism: the name of the formalism of the input_file
:param output_file:
:return:
"""
assert self.model, "model must be given, either to the constructor or to set_model"
instances = self.dataset_reader.read([[formalism, input_file]]) #we need to give the formalism to amconll dataset_reader
prev_training_status = self.model.training
self.model.train(False)
predictions = self.dataset_reader.restore_order(forward_on_instances(self.model, instances,self.data_iterator))
self.model.train(prev_training_status) #reset training status to whatever it was before
i2edge_label = [ self.model.vocab.get_token_from_index(i,namespace=formalism+"_head_tags") for i in range(self.model.vocab.get_vocab_size(formalism+"_head_tags"))]
decoder = AMDecoder(output_file,i2edge_label)
for pred in predictions:
attributes = pred["attributes"]
attributes["batch_size"] = pred["batch_size"]
attributes["normalized_nn_time"] = pred["batch_time"] / pred["batch_size"]
attributes["normalized_prepare_ftd_time"] = pred["normalized_prepare_ftd_time"]
attributes["host"] = socket.gethostname()
attributes["parser"] = "ftd"
am_sentence = AMSentence(pred["words"],attributes) #(form,replacement,lemma,pos,ne)
sentence = list(zip(am_sentence.get_tokens(shadow_art_root=False),am_sentence.get_replacements(), am_sentence.get_lemmas(), am_sentence.get_pos(), am_sentence.get_ner(), am_sentence.get_ranges()))
decoder.add_sentence(pred["root"],pred["predicted_heads"],pred["label_logits"],pred["lexlabels"],pred["supertags"], sentence, am_sentence.attributes_to_list())
decoder.decode(self.threads,self.k,self.give_up,self.give_up_k_1)
class Evaluator(Registrable):
"""
For use in configuration files. Abstract class that only defines what an evaluator should look like.
"""
def eval(self, model, epoch,model_path=None) -> Dict[str,float]:
raise NotImplementedError()
@Evaluator.register("standard_evaluator")
class StandardEvaluator(Evaluator):
"""
A wrapper around a predictor that remembers system input and gold file
Intended for easy use in configuration files.
"""
def __init__(self, formalism:str, system_input : str, gold_file : str, predictor : Predictor, use_from_epoch: int = 1) -> None:
self.formalism = formalism
self.system_input = system_input
self.gold_file = gold_file
self.predictor = predictor
self.use_from_epoch = use_from_epoch
def eval(self,model, epoch, model_path=None) -> Dict[str, float]:
if epoch < self.use_from_epoch:
return dict()
self.predictor.set_model(model)
if model_path:
filename = model_path + "/" + "dev_epoch_"+str(epoch)+".amconll"
return self.predictor.parse_and_eval(self.formalism, self.system_input, self.gold_file, filename=filename)
else: #use temporary directory
return self.predictor.parse_and_eval(self.formalism, self.system_input, self.gold_file,None)
@Evaluator.register("dummy_evaluator")
class DummyEvaluator(Evaluator):
def eval(self,model, epoch,model_path=None) -> Dict[str, float]:
return dict()
@Evaluator.register("empty_mrp_evaluator")
class EmptyMRPEvaluator(Evaluator):
"""
A wrapper around a predictor that remembers system input.
"""
def __init__(self, formalism:str, system_input : str, predictor : Predictor, postprocessing : List[str]) -> None:
"""
:param formalism:
:param system_input:
:param predictor:
:param postprocessing: a list of strings with postprocessing commands, you can use {system_output} as a placeholder
"""
self.postprocessing = postprocessing
self.formalism = formalism
self.system_input = system_input
self.predictor = predictor
def eval(self,model, epoch, model_path=None) -> Dict[str, float]:
self.predictor.set_model(model)
if model_path:
filename = model_path + "/" + "test_"+str(self.formalism)+".amconll"
self.predictor.parse_and_save(self.formalism, self.system_input, filename)
for cmd in self.postprocessing:
cmd = cmd.format(system_output=filename)
os.system(cmd)
return dict()
else: #use temporary directory
raise ValueError("Need to get model_path!")
```
#### File: components/losses/DM.py
```python
from allennlp.nn.util import get_text_field_mask, get_range_vector
from allennlp.nn.util import get_device_of, masked_log_softmax, get_lengths_from_binary_sequence_mask
from typing import Dict, Optional, Tuple, Any, List
import torch
from graph_dependency_parser.components.losses.base import EdgeLoss, EdgeExistenceLoss, EdgeLabelLoss
@EdgeExistenceLoss.register("dm_edge_loss")
class DMLoss (EdgeExistenceLoss):
"""
Dozat & Manning - Loss.
"""
def loss(self, edge_scores: torch.Tensor,
head_indices: torch.Tensor,
mask: torch.Tensor) -> torch.Tensor:
"""
        Computes the edge loss for a sequence given gold head indices.
Parameters
----------
edge_scores : ``torch.Tensor``, required.
A tensor of shape (batch_size, sequence_length, sequence_length) used to generate
a distribution over attachments of a given word to all other words.
head_indices : ``torch.Tensor``, required.
A tensor of shape (batch_size, sequence_length).
The indices of the heads for every word.
mask : ``torch.Tensor``, required.
A mask of shape (batch_size, sequence_length), denoting unpadded
elements in the sequence.
Returns
-------
arc_nll : ``torch.Tensor``, required.
The negative log likelihood from the arc loss.
"""
float_mask = mask.float()
batch_size, sequence_length, _ = edge_scores.size()
# shape (batch_size, 1)
range_vector = get_range_vector(batch_size, get_device_of(edge_scores)).unsqueeze(1)
# shape (batch_size, sequence_length, sequence_length)
normalised_arc_logits = masked_log_softmax(edge_scores,
mask) * float_mask.unsqueeze(2) * float_mask.unsqueeze(1)
# index matrix with shape (batch, sequence_length)
timestep_index = get_range_vector(sequence_length, get_device_of(edge_scores))
child_index = timestep_index.view(1, sequence_length).expand(batch_size, sequence_length).long()
# shape (batch_size, sequence_length)
arc_loss = normalised_arc_logits[range_vector, child_index, head_indices]
# We don't care about predictions for the symbolic ROOT token's head,
# so we remove it from the loss.
arc_loss = arc_loss[:, 1:]
# The number of valid positions is equal to the number of unmasked elements minus
# 1 per sequence in the batch, to account for the symbolic HEAD token.
valid_positions = mask.sum() - batch_size
arc_nll = -arc_loss.sum()
if self.normalize_wrt_seq_len:
arc_nll /= valid_positions.float()
return arc_nll
@EdgeLabelLoss.register("dm_label_loss")
class DMLabelLoss(EdgeLabelLoss):
def loss(self, edge_label_logits:torch.Tensor, mask:torch.Tensor, head_tags:torch.Tensor) -> torch.Tensor:
"""
Computes the arc and tag loss for a sequence given gold head indices and tags.
Parameters
----------
edge_label_logits : ``torch.Tensor``, required.
A tensor of shape (batch_size, sequence_length, num_head_tags),
that contains raw predictions for incoming edge labels
head_tags : ``torch.Tensor``, required.
A tensor of shape (batch_size, sequence_length).
The dependency labels of the heads for every word.
mask : ``torch.Tensor``, required.
A mask of shape (batch_size, sequence_length), denoting unpadded
elements in the sequence.
Returns
-------
tag_nll : ``torch.Tensor``, required.
The negative log likelihood from the edge label loss.
"""
float_mask = mask.float()
batch_size, sequence_length, _ = edge_label_logits.size()
# shape (batch_size, 1)
range_vector = get_range_vector(batch_size, get_device_of(edge_label_logits)).unsqueeze(1)
# shape (batch_size, sequence_length, num_head_tags)
normalised_edge_label_logits = masked_log_softmax(edge_label_logits,
mask.unsqueeze(-1)) * float_mask.unsqueeze(-1)
# index matrix with shape (batch, sequence_length)
timestep_index = get_range_vector(sequence_length, get_device_of(edge_label_logits))
child_index = timestep_index.view(1, sequence_length).expand(batch_size, sequence_length).long()
# shape (batch_size, sequence_length)
tag_loss = normalised_edge_label_logits[range_vector, child_index, head_tags]
# We don't care about predictions for the symbolic ROOT token's head,
# so we remove it from the loss.
tag_loss = tag_loss[:, 1:]
# The number of valid positions is equal to the number of unmasked elements minus
# 1 per sequence in the batch, to account for the symbolic HEAD token.
valid_positions = mask.sum() - batch_size
if self.normalize_wrt_seq_len:
return -tag_loss.sum() / valid_positions.float()
else:
return -tag_loss.sum()
```
#### File: components/losses/supertagging.py
```python
import torch
from allennlp.common import Registrable
from allennlp.nn.util import sequence_cross_entropy_with_logits
class SupertaggingLoss(Registrable):
"""
Softmax cross entropy loss, made usable for configuration files.
"""
    def __init__(self, normalize_wrt_seq_len : bool = False, label_smoothing : float = None):
super().__init__()
self.normalize_wrt_seq_len = normalize_wrt_seq_len
self.label_smoothing = label_smoothing
def loss(self,logits: torch.Tensor, gold_labels : torch.Tensor, mask : torch.Tensor) -> torch.Tensor:
"""
Computes the loss.
:param logits: tensor of shape (batch_size, seq_len, num_classes)
:param gold_labels: tensor of shape (batch_size, seq_len)
:param mask: tensor of shape (batch_size, seq_len)
:return:
"""
l = sequence_cross_entropy_with_logits(logits,gold_labels,mask, label_smoothing=self.label_smoothing)
#sequence_cross entropy automatically normalizes by batch, so multiply by batch again
if not self.normalize_wrt_seq_len:
l *= logits.size(0)
return l
```
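A quick shape check for `SupertaggingLoss.loss` with random tensors (values are illustrative only; the import path is inferred from the file header):

```python
import torch
from graph_dependency_parser.components.losses.supertagging import SupertaggingLoss  # inferred path

batch_size, seq_len, num_classes = 2, 4, 10
logits = torch.randn(batch_size, seq_len, num_classes)   # raw supertag scores per token
gold = torch.randint(0, num_classes, (batch_size, seq_len))
mask = torch.ones(batch_size, seq_len)                   # no padding in this toy batch

loss = SupertaggingLoss().loss(logits, gold, mask)
print(loss)  # scalar tensor; by default the per-batch average is scaled back up by the batch size
```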
#### File: graph_dependency_parser/components/utils.py
```python
from typing import Dict
def flatten(d : Dict):
"""
Flattens a dictionary and uses the path separated with _ to give unique key names.
:param d:
:return:
"""
r = dict()
agenda = [ (key,[],d) for key in d.keys()]
while agenda:
key,path,d = agenda.pop()
if not isinstance(d[key],dict):
r["_".join(path+[str(key)])] = d[key]
else:
for subkey in d[key].keys():
agenda.append((subkey,path+[str(key)],d[key]))
return r
def merge_dicts(x: Dict, prefix:str, y: Dict):
r = dict()
for k,v in x.items():
r[k] = v
for k,v in y.items():
r[prefix+"_"+k] = v
return r
```
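The behaviour of `flatten` and `merge_dicts` in a nutshell (the import path below is the one used by `commands.py` above):

```python
from graph_dependency_parser.components.utils import flatten, merge_dicts

d = {"a": {"b": 1, "c": {"d": 2}}, "e": 3}
print(flatten(d))
# -> {'e': 3, 'a_c_d': 2, 'a_b': 1}  (key order depends on the traversal)

print(merge_dicts({"P": 0.9}, "task1", {"R": 0.8}))
# -> {'P': 0.9, 'task1_R': 0.8}
```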
#### File: am-parser/graph_dependency_parser/graph_dependency_parser.py
```python
import time
from typing import Dict, Optional, Any, List
import logging
from overrides import overrides
import torch
from torch.nn.modules import Dropout
from allennlp.common.checks import check_dimensions_match, ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder, Embedding
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask
from graph_dependency_parser.components.weight_sharer import MTLWeightSharer
from graph_dependency_parser.components.AMTask import AMTask
from graph_dependency_parser.components.spacy_token_embedder import TokenToVec
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("graph_dependency_parser")
class GraphDependencyParser(Model):
"""
    This dependency parser is a blueprint for several graph-based dependency parsers.
There are several possible edge models and loss functions.
    For decoding, the CLE algorithm is used (during training, attachment scores are usually based on greedy decoding).
Parameters
----------
vocab : ``Vocabulary``, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : ``TextFieldEmbedder``, required
Used to embed the ``tokens`` ``TextField`` we get as input to the model.
encoder : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use to generate representations
of tokens.
edge_model: ``components.edge_models.EdgeModel``, required.
The edge model to be used.
loss_function: ``components.losses.EdgeLoss``, required.
The (edge) loss function to be used.
supertagger: ``components.supertagger.FragmentSupertagger``, required.
The supertagging model that predicts graph constants (graph fragments + types)
lexlabeltagger: ``components.supertagger.LexlabelTagger``, required.
The supertagging model that predicts lexical labels for the supertags.
supertagger_loss: ``components.losses.supertagging.SupertaggingLoss``, required.
The loss function for the supertagging model.
lexlabel_loss: ``components.losses.supertagging.SupertaggingLoss``, required.
The loss function for the lexical label tagger.
loss_mixing : Dict[str,float] = None,
The mixing coefficients for the different losses. Valid loss names are "edge_existence",
"edge_label","supertagging" and "lexlabel".
pos_tag_embedding : ``Embedding``, optional.
Used to embed the ``pos_tags`` ``SequenceLabelField`` we get as input to the model.
lemma_embedding : ``Embedding``, optional.
Used to embed the ``lemmas`` ``SequenceLabelField`` we get as input to the model.
ne_embedding : ``Embedding``, optional.
Used to embed the ``ner_labels`` ``SequenceLabelField`` we get as input to the model.
use_mst_decoding_for_validation : ``bool``, optional (default = True).
Whether to use Edmond's algorithm to find the optimal minimum spanning tree during validation.
If false, decoding is greedy.
dropout : ``float``, optional, (default = 0.0)
The variational dropout applied to the output of the encoder and MLP layers.
input_dropout : ``float``, optional, (default = 0.0)
The dropout applied to the embedded text input.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
validation_evaluator: ``ValidationEvaluator``, optional (default=``None``)
If provided, will be used to compute external validation metrics after each epoch.
"""
def __init__(self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: MTLWeightSharer,
tasks: List[AMTask],
pos_tag_embedding: Embedding = None,
lemma_embedding: Embedding = None,
ne_embedding: Embedding = None,
input_dropout: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
tok2vec : Optional[TokenToVec] = None) -> None:
super(GraphDependencyParser, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.encoder = encoder
self.tok2vec = tok2vec
self._pos_tag_embedding = pos_tag_embedding or None
self._lemma_embedding = lemma_embedding
self._ne_embedding = ne_embedding
self._input_dropout = Dropout(input_dropout)
self._head_sentinel = torch.nn.Parameter(torch.randn([1, 1, encoder.get_output_dim()]))
representation_dim = text_field_embedder.get_output_dim()
if pos_tag_embedding is not None:
representation_dim += pos_tag_embedding.get_output_dim()
if self._lemma_embedding is not None:
representation_dim += lemma_embedding.get_output_dim()
if self._ne_embedding is not None:
representation_dim += ne_embedding.get_output_dim()
assert len(tasks) > 0, "List of tasks must not be empty"
self.tasks : Dict[str, AMTask] = {t.name : t for t in tasks}
if self.tok2vec:
representation_dim += self.tok2vec.get_output_dim()
check_dimensions_match(representation_dim, encoder.get_input_dim(),
"text field embedding dim", "encoder input dim")
for t in tasks:
t.check_all_dimensions_match(encoder.get_output_dim())
for formalism,task in sorted(self.tasks.items(), key=lambda nt: nt[0]):
#sort by name of formalism for consistent ordering
self.add_module(formalism,task)
initializer(self)
@overrides
def forward(self, # type: ignore
words: Dict[str, torch.LongTensor],
pos_tags: torch.LongTensor,
lemmas: torch.LongTensor,
ner_tags: torch.LongTensor,
metadata: List[Dict[str, Any]],
supertags: torch.LongTensor = None,
lexlabels: torch.LongTensor = None,
head_tags: torch.LongTensor = None,
head_indices: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
words : Dict[str, torch.LongTensor], required
The output of ``TextField.as_array()``, which should typically be passed directly to a
``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``
tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is: ``{"tokens":
Tensor(batch_size, sequence_length)}``. This dictionary will have the same keys as were used
for the ``TokenIndexers`` when you created the ``TextField`` representing your
sequence. The dictionary is designed to be passed directly to a ``TextFieldEmbedder``,
which knows how to combine different word representations into a single vector per
token in your input.
pos_tags : ``torch.LongTensor``, required
The output of a ``SequenceLabelField`` containing POS tags.
POS tags are required regardless of whether they are used in the model,
because they are used to filter the evaluation metric to only consider
heads of words which are not punctuation.
metadata : List[Dict[str, Any]], optional (default=None)
A dictionary of metadata for each batch element which has keys:
words : ``List[str]``, required.
The tokens in the original sentence.
pos : ``List[str]``, required.
The dependencies POS tags for each word.
head_tags : = edge_labels torch.LongTensor, optional (default = None)
A torch tensor representing the sequence of integer gold edge labels for the arcs
in the dependency parse. Has shape ``(batch_size, sequence_length)``.
head_indices : torch.LongTensor, optional (default = None)
A torch tensor representing the sequence of integer indices denoting the parent of every
word in the dependency parse. Has shape ``(batch_size, sequence_length)``.
Returns
-------
An output dictionary consisting of:
loss : ``torch.FloatTensor``, optional
A scalar loss to be optimised.
arc_loss : ``torch.FloatTensor``
The loss contribution from the unlabeled arcs.
edge_label_loss : ``torch.FloatTensor``
The loss contribution from the edge labels.
heads : ``torch.FloatTensor``
The predicted head indices for each word. A tensor
of shape (batch_size, sequence_length).
edge_labels : ``torch.FloatTensor``
The predicted head types for each arc. A tensor
of shape (batch_size, sequence_length).
mask : ``torch.LongTensor``
A mask denoting the padded elements in the batch.
"""
t0 = time.time()
if 'formalism' not in metadata[0]:
raise ConfigurationError("metadata is missing 'formalism' key.\
Please use the amconll dataset reader.")
formalism_of_batch = metadata[0]['formalism']
for entry in metadata:
if entry['formalism'] != formalism_of_batch:
raise ConfigurationError("Two formalisms in the same batch.")
if not formalism_of_batch in self.tasks.keys():
raise ConfigurationError(f"Got formalism {formalism_of_batch} but I only have these tasks: {list(self.tasks.keys())}")
if self.tok2vec:
token_ids = words["tokens"]
embedded_text_input = self.tok2vec.embed(self.vocab, token_ids) #shape (batch_size, seq len, encoder dim)
concatenated_input = [embedded_text_input, self.text_field_embedder(words)]
else:
embedded_text_input = self.text_field_embedder(words)
concatenated_input = [embedded_text_input]
if pos_tags is not None and self._pos_tag_embedding is not None:
concatenated_input.append(self._pos_tag_embedding(pos_tags))
elif self._pos_tag_embedding is not None:
raise ConfigurationError("Model uses a POS embedding, but no POS tags were passed.")
if self._lemma_embedding is not None:
concatenated_input.append(self._lemma_embedding(lemmas))
if self._ne_embedding is not None:
concatenated_input.append(self._ne_embedding(ner_tags))
if len(concatenated_input) > 1:
embedded_text_input = torch.cat(concatenated_input, -1)
mask = get_text_field_mask(words)
embedded_text_input = self._input_dropout(embedded_text_input)
encoded_text_parsing, encoded_text_tagging = self.encoder(formalism_of_batch, embedded_text_input, mask) #potentially weight-sharing
batch_size, seq_len, encoding_dim = encoded_text_parsing.size()
head_sentinel = self._head_sentinel.expand(batch_size, 1, encoding_dim)
# Concatenate the artificial root onto the sentence representation.
encoded_text_parsing = torch.cat([head_sentinel, encoded_text_parsing], 1)
if encoded_text_tagging is not None: #might be none when batch is of formalism without tagging (UD)
batch_size, seq_len, encoding_dim = encoded_text_tagging.size()
head_sentinel = self._head_sentinel.expand(batch_size, 1, encoding_dim)
# Concatenate the artificial root onto the sentence representation.
encoded_text_tagging = torch.cat([head_sentinel, encoded_text_tagging], 1)
mask = torch.cat([mask.new_ones(batch_size, 1), mask], 1)
if head_indices is not None:
head_indices = torch.cat([head_indices.new_zeros(batch_size, 1), head_indices], 1)
if head_tags is not None:
head_tags = torch.cat([head_tags.new_zeros(batch_size, 1), head_tags], 1)
ret = self.tasks[formalism_of_batch](encoded_text_parsing, encoded_text_tagging, mask, pos_tags, metadata, supertags, lexlabels, head_tags, head_indices)
t1 = time.time()
# Save time and batch size, but save it separately for each batch element.
ret["batch_size"] = torch.ones(batch_size, dtype=torch.long) * batch_size
ret["batch_time"] = torch.ones(batch_size) * (t1-t0)
return ret
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]):
"""
In contrast to its name, this function does not perform the decoding but only prepares it.
Therefore, we take the result of forward and perform the following steps (for each sentence in batch):
- remove padding
        - identify the root of the sentence, group other root-candidates under the proper root
- collect a selection of supertags to speed up computation (top k selection is done later)
:param output_dict: result of forward
:return: output_dict with the following keys added:
- lexlabels: nested list: contains for each sentence, for each word the most likely lexical label (w/o artificial root)
            - supertags: nested list: contains for each sentence, for each word the most likely supertag (w/o artificial root)
"""
formalism = output_dict.pop("formalism")
return self.tasks[formalism].decode(output_dict)
@overrides
def get_metrics(self, reset: bool = False, model_path = None) -> Dict[str, float]:
r = dict()
for name,task in self.tasks.items():
for metric, val in task.metrics(parser_model=self, reset=reset, model_path=model_path).items():
r[name+"_"+metric] = val
return r
```
#### File: graph_dependency_parser/svg/dot_tools.py
```python
def relex(label, lex_label, lemma, form, replacement, pos):
lex_label = lex_label.replace("$LEMMA$", lemma)
lex_label = lex_label.replace("$FORM$", form)
lex_label = lex_label.replace("$REPL$", replacement)
lex_label = lex_label.replace("$POS$", pos)
return label.replace("--LEX--", lex_label)
def parse_penman(graph_fragment):
import penman
return penman.decode(graph_fragment)
def penman_to_dot(graph_fragment, lex_label, lemma, form, replacement, pos, prefix="n"):
"""
Converts a supertag to a little dot graph.
"""
import penman
if isinstance(graph_fragment, str):
g = penman.decode(graph_fragment)
else:
g = graph_fragment
name2name = dict()
accounted_for = set()
counter = 0
r = ""
for f,rel, to in g.triples:
if f not in name2name:
new_name = prefix+str(counter)
counter += 1
name2name[f] = new_name
if rel != ":instance" and to not in name2name:
new_name = prefix+str(counter)
counter += 1
name2name[to] = new_name
for f,rel, to in g.triples:
if rel == ":instance":
is_root = f == g.top
if to is None:
source = f.split("<")[1][:-1]
if is_root:
r += name2name[f] + ' [label="' + source + '", fontcolor="red", style="bold"];\n'
else:
r += name2name[f] + ' [label="' + source + '", fontcolor="red"];\n'
else:
label = relex(to, lex_label, lemma, form, replacement, pos)
if is_root:
r += name2name[f] + ' [style="bold", label="' + label + '"];\n'
else:
r += name2name[f] + ' [label="' + label + '"];\n'
accounted_for.add(name2name[f])
else:
r += name2name[f] + " -> " + name2name[to] + ' [label="' + rel[1:] + '"];\n'
assert set(accounted_for) == set(name2name.values())
return r, name2name[g.top]
import os
import subprocess
import re
def compile_dot(fname):
os.system("dot -Tpdf "+fname+".dot -o "+fname+".pdf")
def get_dot(graph, format):
with subprocess.Popen("dot -T"+format, shell=True, stdout=subprocess.PIPE,stdin=subprocess.PIPE) as proc:
proc.stdin.write(bytes(graph,"utf8"))
proc.stdin.close()
        return bytes.decode(proc.stdout.read()) # output of shell command as string
def dot_strip_svg_header(svg):
return "\n".join(svg.split("\n")[3:])
class DotSVG:
"""
Quick, dirty and limited method to manipulate the output of dot -Tsvg
"""
width_pattern = re.compile('width="([^"]+)"')
height_pattern = re.compile('height="([^"]+)"')
def __init__(self, dot_script):
self.s = get_dot(dot_script, "svg")
def get_width(self):
m = re.search(self.width_pattern, self.s)
return m.group(1) #e.g. 89pt
def get_height(self):
m = re.search(self.height_pattern, self.s)
return m.group(1)
def get_str(self):
return self.s
def get_str_without_header(self):
return dot_strip_svg_header(self.s)
def set_xy(self, x,y):
self.s = self.s.replace("<svg",'<svg x="'+x+'" y="'+y+'"')
def set_width(self, w):
self.s = re.sub(self.width_pattern, 'width="'+w+'"', self.s)
def set_height(self, h):
self.s = re.sub(self.height_pattern, 'height="'+h+'"', self.s)
```
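A small illustration of `relex`, which fills the `--LEX--` placeholder of a graph constant with the resolved lexical label; the graph-constant string below is a made-up example and the import path is inferred from the file header:

```python
from graph_dependency_parser.svg.dot_tools import relex  # inferred path

label = "(u<root> / --LEX-- :ARG0 (v<s>))"   # hypothetical graph constant
print(relex(label, "$LEMMA$-01", lemma="want", form="wants", replacement="_", pos="VBZ"))
# -> "(u<root> / want-01 :ARG0 (v<s>))"
```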
#### File: jgontrum/am-parser/main.py
```python
from time import sleep
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from typing import List, Any
from pydantic import BaseSettings, BaseModel
from parser import Parser
class Settings(BaseSettings):
name: str = "AMR Parser"
target: str = "local"
version: str = "dev"
allowed_origins: List[str] = ["http://localhost", "http://localhost:8080"]
archive_path: str = "/app/external_data/raw_text_model.tar.gz"
wordnet_path: str = "/app/external_data/wordnet3.0/dict/"
lookup_path: str = "/app/external_data/lookup/lookupdata17/"
am_tools_path: str = "/app/external_data/am-tools.jar"
cuda_device: int = -1
overrides: str = ""
weights_file: str = None
@property
def short_name(self) -> str:
return self.name.replace(" ", "_").lower()
settings = Settings()
class AMRParseRequest(BaseModel):
sentence: str
class AMRParseResponse(BaseModel):
amr: str
# Create the service and hide the documentation if it is deployed in production
app = FastAPI(
title=settings.name,
docs_url=None if settings.target == "docker" else "/docs",
redoc_url=None if settings.target == "docker" else "/redoc",
)
# Configure CORS
if settings.allowed_origins:
app.add_middleware(
CORSMiddleware,
allow_origins=settings.allowed_origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
parser = Parser(settings.archive_path, settings.cuda_device, settings.overrides, settings.weights_file,
settings.lookup_path, settings.wordnet_path, settings.am_tools_path)
@app.post("/")
def parse(request: AMRParseRequest):
return AMRParseResponse(amr=parser.parse(request.sentence.strip()))
```
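A hypothetical client call for the service above; the host and port are assumptions (for example, `uvicorn main:app` serving on localhost:8000).
```python
# Hypothetical client for the POST "/" endpoint defined above.
import requests

resp = requests.post(
    "http://localhost:8000/",                 # host/port are assumptions
    json={"sentence": "The boy wants to go."},
)
resp.raise_for_status()
print(resp.json()["amr"])                     # the response model exposes a single "amr" field
```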
#### File: am-parser/ucca/convert_irtg_to_mrp.py
```python
from get_edges_from_mrp import test, get_id2lex, get_mrp_edges
import re
'''
def get_input(edge_dict, label_dict):
input = []
terminal_ids = [id for id in label_dict.keys() if type(id) == int]
for node in sorted(terminal_ids):
if label_dict[node] != "Non-Terminal" and not label_dict[node].startswith("NONTERMINAL"):
#if type(label_dict[node]) != int:
input.append(label_dict[node])
#print(input)
else:
pass
input_tokenized = detokenizer.detokenize(input, return_str = True)
return input_tokenized
'''
def get_nodes(label_dict, input):
nodes = []
for node in label_dict.keys():
if label_dict[node] in input.lower().split():
if ' ' not in label_dict[node]:
node_anchor_dict = {}
span_regex = re.compile(re.escape(label_dict[node]))
span_match = span_regex.search(input)
(begin, end) = span_match.span()
node_anchor_dict = {'id': node, 'anchors' :[{'from': begin, 'to':end}]}
nodes.append(node_anchor_dict)
else:
node_anchor_dict = {}
multi_word_exp = label_dict[node].split()
node_anchor_dict['id'] = node
node_anchor_dict['anchors'] = []
for word in multi_word_exp:
                    span_regex = re.compile(re.escape(word))
span_match = span_regex.search(input)
(begin, end) = span_match.span()
from_begin = {'from':begin, 'to':end}
node_anchor_dict['anchors'].append(from_begin)
nodes.append(node_anchor_dict)
else:
node_anchor_dict = {}
node_anchor_dict['id']=node
nodes.append(node_anchor_dict)
return nodes
def get_tops(edge_dict):
us=set()
vs=set()
for (u, v) in edge_dict.keys():
us.add(u)
vs.add(v)
tops = list(us.difference(vs))
return tops
def get_edges(edge_dict):
edges = []
for (u, v) in edge_dict.keys():
mrp_edges = {'source': u, 'target':v, 'label': edge_dict[(u,v)]}
edges.append(mrp_edges)
return edges
def irtg2mrp(edge_dict, label_dict):
input = get_input(edge_dict, label_dict)
nodes = get_nodes(label_dict, input)
edges = get_edges(edge_dict)
tops = get_tops(edge_dict)
mrp = {}
mrp['input'] = input
mrp['nodes'] = nodes
mrp['edges'] = edges
mrp['tops'] = tops
return mrp
```
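A tiny illustration of the edge/top helpers above with made-up node ids and labels; `get_input` is not exercised here because it is commented out in the module.
```python
# Made-up example graph: node 0 is the only node that never appears as a target.
edge_dict = {(0, 1): "H", (1, 2): "A"}
print(get_tops(edge_dict))    # [0]
print(get_edges(edge_dict))   # [{'source': 0, 'target': 1, 'label': 'H'}, {'source': 1, 'target': 2, 'label': 'A'}]
```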
#### File: am-parser/ucca/edge_to_irtg.py
```python
import sys
def edge2irtg(edge_dict, id_label_dict):
output = ''
for key in list(edge_dict.keys()):
if str(key[0]) +'/' + str(id_label_dict[key[0]]) in output.split():
label_begin_edge = key[0]
else:
label_begin_edge = str(key[0]) +'/'+ str(id_label_dict[key[0]])
if str(key[1]) +'/' +str(id_label_dict[key[1]]) in output.split():
label_end_edge = key[1]
else:
label_end_edge = str(key[1]) +'/'+ str(id_label_dict[key[1]])
edge = str(label_begin_edge) + ' -' + str(edge_dict[key]) + '-> ' + str(label_end_edge) + '; '
output += edge
new_format = '[' + output[0:-2] + ']'
return new_format
```
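A short illustration of the IRTG-style string produced by `edge2irtg`; the ids and labels are made up.
```python
# Made-up edge and label dictionaries.
edges = {(0, 1): "H", (1, 2): "A"}
labels = {0: "NONTERMINAL0", 1: "NONTERMINAL1", 2: "hello"}
print(edge2irtg(edges, labels))
# [0/NONTERMINAL0 -H-> 1/NONTERMINAL1; 1/NONTERMINAL1 -A-> 2/hello]
```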
#### File: am-parser/ucca/mrp_to_irtg.py
```python
import sys
import json
import collections
from edge_to_irtg import edge2irtg
from get_edges_from_mrp import get_id2lex, get_mrp_edges
from convert_irtg_to_mrp import get_edges, get_input, get_mrp_edges, get_nodes, get_tops, irtg2mrp
from eliminate_h_top import eliminate_h
from a_star_mrp import *
from process_c import *
labels = 'L LR LA H P S A C N D T E R F G Q U'.split()
priority_dict = {label:index for (index, label) in enumerate(labels)}
non_deducible = ["id", "flavour", "framework", "version", "time"]
mrp_data_path = sys.argv[1]
companion_data = json.load(open(sys.argv[2], 'r', encoding = 'utf8'))
outdir = sys.argv[3]
def update_id_labels(edge_dict, label_dict):
for (u,v) in edge_dict.keys():
if type(u) == str:
label_dict[u] = u
elif u - 1111 >= 0:
if int(str(u)[:-4]) in label_dict.keys():
label_dict[u] = label_dict[int(str(u)[:-4])]
else: label_dict[u] = 'Non-Terminal'
nodes_in_edge_dict = list(set([node for edge in edge_dict.keys() for node in edge]))
label_dict_nodes = list(label_dict.keys())
for edge in edge_dict.keys():
for node in edge:
if node not in label_dict.keys():
label_dict[node] = 'Non-Terminal'
return label_dict
with open(mrp_data_path,encoding='utf8', errors='ignore') as infile:
counter = 0
for line in infile:
#print(line)
mrp_dict = json.loads(line)
id = mrp_dict["id"]
print(id)
edges = get_mrp_edges(mrp_dict, get_remote = True)
edges = eliminate_h(edges)
labels = get_id2lex(mrp_dict)
compressed_edges = compress_c_edge(edges)
compressed_labels = update_id_labels(compressed_edges, labels)
irtg_format_compressed = edge2irtg(compressed_edges, labels)
print(irtg_format_compressed)
node_tokens = node_to_token_index(companion_data, mrp_dict, compressed_labels, id)
#print(companion_data)
#print(compressed_labels)
#print(node_tokens)
alignments = align(compressed_edges, priority_dict, mrp_dict, node_tokens, compressed_labels)
```
#### File: am-parser/utilities/ucca_tokenranges.py
```python
import json
import sys
import itertools
from tqdm import tqdm
def extract_anchor(j):
return (j["from"], j["to"])
def extract_all_anchors(j):
if "anchors" in j:
return [extract_anchor(a) for a in j["anchors"]]
else:
return []
companion_filename = sys.argv[1]
mrp_filename = sys.argv[2]
print(f"Comparing tokens in companion file {companion_filename} with MRP file {mrp_filename}")
# collect token ranges in companion data
companion_tokens = {}
with open(companion_filename, "r") as f:
for line in tqdm(f):
j = json.loads(line)
id = j["id"]
anchors = [[extract_anchor(a) for a in b["anchors"]] for b in j["nodes"]]
companion_tokens[id] = set(itertools.chain(*anchors))
# collect token ranges in UCCA MRP graphs
ucca_mrp_files = [mrp_filename]
num_missing_tr = 0
num_total_tr = 0
for file in ucca_mrp_files:
print(f"Analyzing {file}...")
with open(file, "r") as f:
for line in tqdm(f):
j = json.loads(line)
id = j["id"]
companion_anchors = companion_tokens[id]
anchors = [extract_all_anchors(b) for b in j["nodes"]]
for anch in itertools.chain(*anchors):
num_total_tr += 1
                if anch not in companion_anchors:
print(f"[{id}] mismatched anchor: {anch}")
num_missing_tr += 1
print(f"{num_missing_tr} anchors mismatched out of {num_total_tr} ({100*num_missing_tr/num_total_tr:.2f}%)")
```
|
{
"source": "jgontrum/amrlib",
"score": 2
}
|
#### File: amrlib/amr_view/processor_gtos.py
```python
import logging
from threading import Thread
from ..models.generate_t5.inference import Inference
from ..graph_processing.amr_loading import split_amr_meta
logger = logging.getLogger(__name__)
class ProcessorGTOS(object):
def __init__(self, config, disabled=False):
self.model_dir = config['gtos_model_dir']
self.num_ret_seq = config.get('gtos_num_ret_seq', 1)
self.num_beams = config.get('gtos_num_beams', 1)
self.batch_size = config.get('gtos_batch_size', 1)
self.device = config.get('gtos_device')
self.inference = None
if disabled:
logger.warning('!!! ProcessorGTOS disabled for debug !!!')
else:
lm_thread = Thread(target=self.load_model) # loads self.inference
lm_thread.start()
def is_ready(self):
return self.inference is not None
def run(self, amr_text):
if self.inference is None:
return
_, graph_lines = split_amr_meta(amr_text)
graph_lines = [g.strip() for g in graph_lines]
graph_lines = ' '.join(graph_lines)
answers, clips = self.inference.generate([graph_lines], disable_progress=True)
if clips[0]:
logger.warning('Graph was clipped')
# Concatenate multiple return sequences
string = ''
for i, ans in enumerate(answers):
string += '%2d) %s\n' % (i+1, ans)
return string[:-1] # strip final line-feed
def load_model(self):
self.inference = Inference(self.model_dir, num_beams=self.num_beams, device=self.device,
num_ret_seq=self.num_ret_seq, batch_size=self.batch_size)
logger.info('Graph to sequence model ready')
print('Graph to sequence model ready')
```
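A hypothetical configuration for the processor above; the paths and sizes are placeholders, and because the model loads in a background thread, callers should poll `is_ready()` before calling `run()`.
```python
# Placeholder configuration; only 'gtos_model_dir' is required, the rest have defaults.
config = {
    "gtos_model_dir": "/path/to/model_generate_t5",
    "gtos_num_ret_seq": 2,
    "gtos_num_beams": 4,
    "gtos_batch_size": 1,
}
processor = ProcessorGTOS(config)
# ... later, once processor.is_ready() returns True:
# print(processor.run("(w / want-01 :ARG0 (b / boy))"))
```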
#### File: amrlib/utils/logging.py
```python
import logging
from logging import DEBUG, INFO, WARN, ERROR
def setup_logging(logfname=None, level=None):
# Remove any existing handler (ie.. penman has logging.basicConfig() in __init__.py)
# Note that in python 3.6 there is no "force" in basicConfig()
# From https://stackoverflow.com/questions/12158048/changing-loggings-basicconfig-which-is-already-set
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
# Setup the logger
if level is None:
level = logging.INFO
format = '[%(levelname)s %(filename)s ln=%(lineno)s] %(message)s'
if logfname is not None:
logging.basicConfig(level=level, filename=logfname, filemode='w', format=format)
else:
logging.basicConfig(level=level, format=format)
# Penman spews a lot of messages
def silence_penman():
logging.getLogger('penman.layout').setLevel(logging.ERROR)
logging.getLogger('penman._lexer').setLevel(logging.ERROR)
logging.getLogger('pe').setLevel(logging.ERROR) # penman._parse.py
# Silence the requests library
def silence_requests():
logging.getLogger('urllib3.connectionpool').setLevel(logging.ERROR)
```
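A minimal sketch of how these helpers might be wired together at program start-up; the log file name is a placeholder.
```python
# Log to a file at DEBUG level and quiet the noisier third-party loggers.
setup_logging(logfname="run.log", level=DEBUG)
silence_penman()
silence_requests()
```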
|
{
"source": "jgontrum/cruft",
"score": 2
}
|
#### File: cruft/_commands/update.py
```python
import json
from pathlib import Path
from subprocess import DEVNULL, PIPE, CalledProcessError, run # nosec
from tempfile import TemporaryDirectory
from typing import Optional, Set
import click
import typer
from . import utils
from .utils import example
@example(skip_apply_ask=False)
@example()
def update(
project_dir: Path = Path("."),
cookiecutter_input: bool = False,
skip_apply_ask: bool = True,
skip_update: bool = False,
checkout: Optional[str] = None,
strict: bool = True,
directory: str = "."
) -> bool:
"""Update specified project's cruft to the latest and greatest release."""
cruft_file = utils.cruft.get_cruft_file(project_dir)
# If the project dir is a git repository, we ensure
# that the user has a clean working directory before proceeding.
if not _is_project_repo_clean(project_dir):
typer.secho(
"Cruft cannot apply updates on an unclean git project."
" Please make sure your git working tree is clean before proceeding.",
fg=typer.colors.RED,
)
return False
cruft_state = json.loads(cruft_file.read_text())
with TemporaryDirectory() as tmpdir_:
# Initial setup
tmpdir = Path(tmpdir_)
repo_dir = tmpdir / "repo"
current_template_dir = tmpdir / "current_template"
new_template_dir = tmpdir / "new_template"
deleted_paths: Set[Path] = set()
# Clone the template
repo = utils.cookiecutter.get_cookiecutter_repo(cruft_state["template"], repo_dir, checkout)
last_commit = repo.head.object.hexsha
# Bail early if the repo is already up to date
if utils.cruft.is_project_updated(repo, cruft_state["commit"], last_commit, strict):
typer.secho(
"Nothing to do, project's cruft is already up to date!", fg=typer.colors.GREEN
)
return True
# Generate clean outputs via the cookiecutter
        # from the current cruft state commit of the cookiecutter and the updated
# cookiecutter.
_ = utils.generate.cookiecutter_template(
output_dir=current_template_dir,
repo=repo,
cruft_state=cruft_state,
project_dir=project_dir,
cookiecutter_input=cookiecutter_input,
checkout=cruft_state["commit"],
deleted_paths=deleted_paths,
update_deleted_paths=True,
)
new_context = utils.generate.cookiecutter_template(
output_dir=new_template_dir,
repo=repo,
cruft_state=cruft_state,
project_dir=project_dir,
cookiecutter_input=cookiecutter_input,
checkout=last_commit,
deleted_paths=deleted_paths,
)
# Given the two versions of the cookiecutter outputs based
# on the current project's context we calculate the diff and
# apply the updates to the current project.
if _apply_project_updates(
current_template_dir, new_template_dir, project_dir, skip_update, skip_apply_ask, directory
):
# Update the cruft state and dump the new state
# to the cruft file
cruft_state["commit"] = last_commit
cruft_state["context"] = new_context
cruft_file.write_text(utils.cruft.json_dumps(cruft_state))
typer.secho(
"Good work! Project's cruft has been updated and is as clean as possible!",
fg=typer.colors.GREEN,
)
return True
#################################################
# Calculating project diff and applying updates #
#################################################
def _is_git_repo(directory: Path):
# Taken from https://stackoverflow.com/a/16925062
# This works even if we are in a sub folder in a git
# repo
output = run(
["git", "rev-parse", "--is-inside-work-tree"], stdout=PIPE, stderr=DEVNULL, cwd=directory
)
if b"true" in output.stdout:
return True
return False
def _is_project_repo_clean(directory: Path):
if not _is_git_repo(directory):
return True
output = run(["git", "status", "--porcelain"], stdout=PIPE, stderr=DEVNULL, cwd=directory)
if output.stdout.strip():
return False
return True
def _apply_patch_with_rejections(diff: str, expanded_dir_path: Path, directory: str):
try:
run(
["git", "apply", "--reject", f"--directory={directory}"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
typer.secho(
(
"Project directory may have *.rej files reflecting merge conflicts with the update."
" Please resolve those conflicts manually."
),
fg=typer.colors.YELLOW,
)
def _apply_three_way_patch(diff: str, expanded_dir_path: Path, directory: str):
try:
run(
["git", "apply", "-3", f"--directory={directory}"],
input=diff.encode(),
stderr=PIPE,
stdout=PIPE,
check=True,
cwd=expanded_dir_path,
)
except CalledProcessError as error:
typer.secho(error.stderr.decode(), err=True)
if _is_project_repo_clean(expanded_dir_path):
typer.secho(
"Failed to apply the update. Retrying again with a different update stratergy.",
fg=typer.colors.YELLOW,
)
_apply_patch_with_rejections(diff, expanded_dir_path, directory)
def _apply_patch(diff: str, expanded_dir_path: Path, directory: str):
    # Git three-way merge is our best bet
# at applying patches. But it only works
# with git repos. If the repo is not a git dir
# we fall back to git apply --reject which applies
# diffs cleanly where applicable otherwise creates
# *.rej files where there are conflicts
if _is_git_repo(expanded_dir_path):
_apply_three_way_patch(diff, expanded_dir_path, directory)
else:
_apply_patch_with_rejections(diff, expanded_dir_path, directory)
def _apply_project_updates(
old_main_directory: Path,
new_main_directory: Path,
project_dir: Path,
skip_update: bool,
skip_apply_ask: bool,
directory: str
) -> bool:
diff = utils.diff.get_diff(old_main_directory, new_main_directory)
if not skip_apply_ask and not skip_update:
input_str: str = "v"
while input_str == "v":
typer.echo(
'Respond with "s" to intentionally skip the update while marking '
"your project as up-to-date or "
'respond with "v" to view the changes that will be applied.'
)
input_str = typer.prompt(
"Apply diff and update?",
type=click.Choice(("y", "n", "s", "v")),
show_choices=True,
default="y",
)
if input_str == "v":
if diff.strip():
utils.diff.display_diff(old_main_directory, new_main_directory)
else:
click.secho("There are no changes.", fg=typer.colors.YELLOW)
if input_str == "n":
typer.echo("User cancelled Cookiecutter template update.")
return False
elif input_str == "s":
skip_update = True
if not skip_update and diff.strip():
_apply_patch(diff, project_dir, directory)
return True
```
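A hypothetical programmatic invocation of `update`; cruft is normally driven from its CLI, so this is only a sketch and the path and branch name are placeholders.
```python
# Update the project in ./my_project without prompting, pinning the template to a branch.
from pathlib import Path

ok = update(
    project_dir=Path("my_project"),   # placeholder path containing a .cruft.json
    skip_apply_ask=True,
    checkout="main",                  # assumed template branch
)
print("updated" if ok else "nothing applied")
```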
|
{
"source": "jgontrum/elmo_from_conll",
"score": 3
}
|
#### File: jgontrum/elmo_from_conll/conll_parser.py
```python
import argparse
from conllu import parse_incr
def cmdline_args():
# Make parser object
p = argparse.ArgumentParser(
description="Extracts sentences from a CoNLL file and outputs them "
"to stdout.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument("conll_file",
help="Path to the ConNLL file.")
return p.parse_args()
if __name__ == '__main__':
args = cmdline_args()
for token_list in parse_incr(open(args.conll_file)):
sentence = [
token["form"].strip() for token in token_list
if len(token["form"]) and isinstance(token["id"], int)
]
if sentence:
print(" ".join(sentence))
```
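The same extraction can be exercised without a file on disk; this sketch feeds `parse_incr` an in-memory CoNLL-U fragment (the sentence is made up).
```python
# Run the extraction on an in-memory CoNLL-U fragment instead of a file.
from io import StringIO
from conllu import parse_incr

sample = (
    "# text = Hello world\n"
    "1\tHello\thello\tINTJ\t_\t_\t0\troot\t_\t_\n"
    "2\tworld\tworld\tNOUN\t_\t_\t1\tvocative\t_\t_\n"
    "\n"
)
for token_list in parse_incr(StringIO(sample)):
    print(" ".join(t["form"] for t in token_list if isinstance(t["id"], int)))
# -> Hello world
```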
|
{
"source": "jgontrum/indri_suite",
"score": 2
}
|
#### File: indri_suite/api/evaluate.py
```python
import os
from indri_suite import logger
from indri_suite import options
from indri_suite.common.cli_clients import IndriCLIClient, TrecEvalCLIClient
from indri_suite.common.indri_client import IndriClient, BadQueryException
from indri_suite.common.trec_eval_client import TrecEvalClient
def post(request):
"""
Route: /api/v1/evaluate
See config/api.yml for OpenAPI specification.
:param request: JSON body
:return:
"""
logger.info("Received evaluation request: '{}'.".format(request))
# Check that paths are valid (to prevent errors and for security)
if not os.path.isfile(request['settings']['gold_eval_path']):
return "Qrel file not found: {}. Please use absolute paths.".format(
request['settings']['gold_eval_path']), 400
if not os.path.isdir(request['settings']['index_path']):
return "Index directory not found: {}. Please use absolute paths.".format(
request['settings']['index_path']), 400
if not os.path.isdir(request['settings']['corpus_path']):
return "Corpus directory not found: {}. Please use absolute paths.".format(
request['settings']['corpus_path']), 400
indri_client = IndriClient(
request['settings']['index_path'],
request['settings']['corpus_path'],
IndriCLIClient(options['bin']['indri_query']),
tmp_folder=options['tmp_folder']
)
eval_client = TrecEvalClient(
request['settings']['gold_eval_path'],
indri_client,
TrecEvalCLIClient(options['bin']['trec_eval'])
)
try:
ret = eval_client.evaluate(request['query'], request['query_id'])
except BadQueryException as e:
message = str(e)
if not str(e):
message = "No relevant documents found, trec_eval failed."
logger.warning("Failed to evaluate request: {}".format(message))
return message, 400
logger.info("Successfully evaluated request.".format(request))
return ret, 200
```
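A hypothetical request body for the endpoint above; every path is a placeholder and must be absolute, as the handler checks.
```python
# Placeholder payload matching the fields the handler reads.
example_request = {
    "query": "#combine(information retrieval)",   # Indri query language
    "query_id": "301",
    "settings": {
        "gold_eval_path": "/data/qrels.txt",
        "index_path": "/data/index/",
        "corpus_path": "/data/corpus/",
    },
}
```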
#### File: indri_suite/common/trec_eval_client.py
```python
import logging
import os
import subprocess
import time
from indri_suite.common.indri_client import BadQueryException
class TrecEvalClient:
def __init__(self, gold_eval_path, indri_client, trec_eval_cli):
self.indri_client = indri_client
self.gold_eval_path = gold_eval_path
self.trec_eval_cli = trec_eval_cli
self.logger = logging.getLogger('main')
def _process_eval_output(self, eval_output, query_id):
"""
Parse the output from trec_eval and return the data for a
given query id.
:param eval_output:
:param query_id:
:return:
"""
output = {}
for line in eval_output.decode().split("\n"):
if not line.strip():
continue
category, current_query_id, score = line.split()
if current_query_id.strip() == query_id:
if category.startswith("num"):
output[category.strip()] = int(score.strip())
else:
output[category.strip()] = float(score.strip())
return output
def _get_gold_documents(self, query_id):
"""
From a qrel file, get all document ids for the current query id.
:param query_id:
:return:
"""
for line in open(self.gold_eval_path):
if line.startswith(str(query_id) + " "):
_, _, doc_id, match = line.split()
if match.strip() == "1":
yield doc_id.strip()
def _get_retrieved_documents(self, result, query_id):
for line in result.split("\n"):
if line.startswith(str(query_id) + " "):
_, _, doc_id, _, score, _ = line.split()
yield doc_id.strip(), float(score)
def _is_query_id_valid(self, query_id):
for line in open(self.gold_eval_path):
if line.startswith(str(query_id) + " "):
return True
return False
def evaluate(self, query, query_id):
if not self._is_query_id_valid(query_id):
raise BadQueryException("The query id '{}' can not be found "
"in the qrel file.".format(query_id))
t = time.time()
# Run query and save the raw output of Indri for trec_eval
query_output = self.indri_client.raw_query(query, query_id)
filename = self.indri_client.tmp_folder + "query_results_" + \
str(int(time.time())) + ".trec"
with open(filename, "w") as f:
f.write(query_output.decode())
self.logger.info("Evaluation: Search took {0:.2f}s".format(
time.time() - t))
t = time.time()
# Evaluate with trec_eval and then delete all temp files
try:
eval_output = self.trec_eval_cli.run(self.gold_eval_path, filename)
os.remove(filename)
except subprocess.CalledProcessError:
os.remove(filename)
raise BadQueryException(query_output.decode())
structured_output = self._process_eval_output(eval_output, query_id)
self.logger.info("Evaluation: trec_eval took {0:.2f}s".format(
time.time() - t))
t = time.time()
# Generate the different document categories:
# - Gold documents
# - relevant_retrieved_documents: True positives
# - irrelevant_retrieved_documents: False positives
# - relevant_not_retrieved_documents: False negatives
gold_documents = set(self._get_gold_documents(query_id))
retrieved_documents = [{
"document_id": doc_id,
"score": score,
"index": i
} for i, (doc_id, score) in enumerate(
list(
self._get_retrieved_documents(
query_output.decode(), query_id)))
]
relevant_retrieved_documents = gold_documents.intersection(
set([ref['document_id'] for ref in retrieved_documents])
)
irrelevant_retrieved_documents = set(
[ref['document_id'] for ref in
retrieved_documents]) - gold_documents
relevant_not_retrieved_documents = gold_documents - set(
[ref['document_id'] for ref in
retrieved_documents])
# Return the evaluation result and get the document text for all
# returned document ids
        # TODO: there is too much code duplication here; simplify.
ret = {
"raw_eval_output": eval_output.decode(),
"eval": structured_output,
"relevant_documents": [{
"document_id": doc_id,
"document": self.indri_client.lookup_document(
self.indri_client.document_index.get(
doc_id))
}
for doc_id in sorted(gold_documents)
],
"irrelevant_retrieved": [{
"document_id": doc_id,
"document": self.indri_client.lookup_document(
self.indri_client.document_index.get(
doc_id))
}
for doc_id in sorted(irrelevant_retrieved_documents)[:30]
],
"relevant_retrieved": [{
"document_id": doc_id,
"document": self.indri_client.lookup_document(
self.indri_client.document_index.get(
doc_id))
}
for doc_id in sorted(relevant_retrieved_documents)
],
"relevant_not_retrieved": [{
"document_id": doc_id,
"document": self.indri_client.lookup_document(
self.indri_client.document_index.get(
doc_id))
}
for doc_id in sorted(relevant_not_retrieved_documents)
],
"missing_relevant_documents": list(
sorted(
gold_documents.difference(
set(self.indri_client.document_index.keys()))))
}
self.logger.info("Evaluation: Document lookup took {0:.2f}s".format(
time.time() - t))
return ret
```
|
{
"source": "jgontrum/parseridge",
"score": 2
}
|
#### File: parseridge/corpus/sentence.py
```python
import random
from copy import deepcopy
from typing import List, Optional, Union, Iterable
import conllu
from parseridge.corpus.token import Token
from parseridge.parser.configuration import Configuration
from parseridge.utils.helpers import Action, T
from parseridge.utils.logger import LoggerMixin
class Sentence(LoggerMixin):
def __init__(
self,
tokens: List[Token],
text: str = None,
meta: Optional[dict] = None,
sentence_id: int = None,
):
self._iter = 0
self.text = text
self.meta = meta or {}
self.id = sentence_id
self.tokens = [Token.create_root_token()] + tokens
for token in self:
if token.head is None:
token.parent = None
else:
token.parent = self.tokens[token.head]
token.dependents = [
other_token.id
for other_token in self.tokens
if other_token.head == token.id
]
for i, token in enumerate(self._calculate_token_order()):
token.projective_order = i
if not self.text:
self.text = " ".join([token.form for token in self[:-1]])
def _calculate_token_order(
self, queue: Optional[List[Token]] = None, index: Optional[int] = None
):
if queue is None:
queue = [self[0]]
index = self[0].id
return self._calculate_token_order(queue, index)
else:
results = []
# Get all the tokens that are dependents of the token
# at the current index and left to it.
left_dependents = [token for token in self[:index] if token.head == index]
for dependent in left_dependents:
results += self._calculate_token_order(queue, dependent.id)
# Place the current token in the middle
results.append(self[index])
# Get all the dependents right to it
right_dependents = [token for token in self[index:] if token.head == index]
for dependent in right_dependents:
results += self._calculate_token_order(queue, dependent.id)
return results
def to_conllu(self) -> conllu.TokenList:
return conllu.TokenList(
[token.serialize() for token in self[1:]], metadata=self.meta
)
def get_empty_copy(self) -> "Sentence":
"""
Returns a copy of the sentence but without any gold
relations or labels. This is used in the training process
to build a predicted dependency tree from one with
gold annotations.
"""
new_tokens = [token.get_unparsed_token() for token in self[1:]]
return Sentence(new_tokens, text=self.text, meta=self.meta, sentence_id=self.id)
def __repr__(self) -> str:
return self.to_conllu().serialize()
def __len__(self) -> int:
return len(self.tokens)
def __getitem__(self, i: int) -> Union[Token, List[Token]]:
# Look up tokens for a list of indices
if isinstance(i, list):
return [self[j] for j in i]
# Normal index / slice lookup
return self.tokens[i]
def __iter__(self) -> "Sentence":
return self
def __next__(self) -> Optional[Token]:
if self._iter >= len(self):
self._iter = 0
raise StopIteration
else:
self._iter += 1
return self[self._iter - 1]
@classmethod
def from_conllu(cls, conllu_string: str) -> Iterable["Sentence"]:
"""
Generator that reads a string containing a treebank in CoNLL-U format
and produces Sentence objects for all sentences in the treebank.
        :param conllu_string:
:return:
"""
for sentence in conllu.parse(conllu_string):
yield cls(
# Add all tokens, but ignore parataxis (here the id is a tuple)
tokens=[
Token(**token) for token in sentence if isinstance(token["id"], int)
],
text=sentence.metadata["text"],
meta=sentence.metadata,
)
class ConfigurationIterator:
"""
Iterates over a sequence of optimal configurations for this sentence.
Note that the yielded configuration object is mutable and will change during
the iteration!
"""
def __init__(self, sentence):
self.sentence = deepcopy(sentence)
self.configuration = Configuration(
sentence=self.sentence, contextualized_input=None, model=None
)
def __next__(self):
if self.configuration.is_terminal:
raise StopIteration
else:
return self._get_next_configuration(self.configuration)
def __iter__(self):
return self
@staticmethod
def _get_next_configuration(configuration):
actions = ConfigurationIterator._get_actions(configuration)
costs, shift_case = configuration.get_transition_costs(actions)
valid_actions = [action for action in actions if costs[action.transition] == 0]
best_action = random.choice(valid_actions)
configuration.update_dynamic_oracle(best_action, shift_case)
configuration.apply_transition(best_action)
return configuration
@staticmethod
def _get_actions(configuration):
actions = []
if configuration.shift_conditions:
actions.append(Action(relation=None, transition=T.SHIFT, score=1.0))
if configuration.swap_conditions:
actions.append(Action(relation=None, transition=T.SWAP, score=1.0))
if configuration.left_arc_conditions:
actions.append(
Action(
relation=configuration.top_stack_token.relation,
transition=T.LEFT_ARC,
score=1.0,
)
)
if configuration.right_arc_conditions:
actions.append(
Action(
relation=configuration.top_stack_token.relation,
transition=T.RIGHT_ARC,
score=1.0,
)
)
return actions
```
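A small sketch of loading sentences from a CoNLL-U string, assuming the parseridge package above is importable; note that `from_conllu` reads the `# text = ...` metadata line, so the made-up fragment includes one.
```python
# Made-up two-token treebank; from_conllu yields one Sentence per CoNLL-U block.
conllu_str = (
    "# text = Hello world\n"
    "1\tHello\thello\tINTJ\t_\t_\t0\troot\t_\t_\n"
    "2\tworld\tworld\tNOUN\t_\t_\t1\tvocative\t_\t_\n"
    "\n"
)
for sentence in Sentence.from_conllu(conllu_str):
    print(sentence.text, len(sentence))   # the length includes the artificial root token
```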
#### File: evaluation/callbacks/base_eval_callback.py
```python
from abc import ABC
from typing import Any
from parseridge.utils.logger import LoggerMixin
class EvalCallback(LoggerMixin, ABC):
_order = 0
def on_initialization(self, **kwargs: Any) -> None:
pass
def on_eval_begin(self, **kwargs: Any) -> None:
pass
def on_epoch_begin(self, **kwargs: Any) -> None:
pass
def on_batch_begin(self, **kwargs: Any) -> None:
pass
def on_batch_end(self, **kwargs: Any) -> None:
pass
def on_epoch_end(self, **kwargs: Any) -> None:
pass
def on_eval_end(self, **kwargs: Any) -> None:
pass
def on_shutdown(self, **kwargs: Any) -> None:
pass
```
#### File: evaluation/callbacks/save_parsed_sentences_callback.py
```python
import os
from typing import Any, Optional, List
from parseridge.parser.evaluation.callbacks.base_eval_callback import EvalCallback
class EvalSaveParsedSentencesCallback(EvalCallback):
_order = 1000
def __init__(self, output_dir_path: Optional[str] = None) -> None:
self.output_dir_path = output_dir_path
self.current_epoch = 0
def on_epoch_end(
self, pred_sentences_serialized: List[str], corpus_type: str, **kwargs: Any
) -> None:
if self.output_dir_path:
filename = self.output_dir_path.rstrip("/")
filename += f"/epoch_{self.current_epoch}-{corpus_type}.conllu"
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as f:
f.write("".join(pred_sentences_serialized))
def on_eval_begin(self, epoch: int, **kwargs) -> None:
self.current_epoch = epoch
```
#### File: evaluation/callbacks/yaml_callback.py
```python
import datetime
import os
from argparse import Namespace
from time import time
from typing import Any, Dict, Optional
import yaml
from parseridge.parser.evaluation.callbacks.base_eval_callback import EvalCallback
class EvalYAMLReporter(EvalCallback):
_order = 10
def __init__(self, yaml_path: Optional[str] = None):
self.yaml_path = yaml_path
self.content = {}
self.t0 = time()
if self.yaml_path:
os.makedirs(os.path.dirname(self.yaml_path), exist_ok=True)
def _save(self):
if self.yaml_path:
with open(self.yaml_path, "w") as f:
yaml.safe_dump(self.content, f)
def on_initialization(self, cli_args: Optional[Namespace], **kwargs: Any) -> None:
self.t0 = time()
self.content["start_time"] = datetime.datetime.now().isoformat()
self.content["epochs"] = {}
if cli_args:
self.content["parameters"] = vars(cli_args)
self._save()
def on_shutdown(self, **kwargs: Any) -> None:
self.content["end_time"] = datetime.datetime.now().isoformat()
self.content["duration"] = time() - self.t0
self._save()
def on_eval_end(
self, scores: Dict[str, Dict[str, float]], loss: float, epoch: int, **kwargs: Any
) -> None:
self.content["epochs"][epoch] = {
"epoch": epoch,
"train_las": scores["train"]["las"],
"train_uas": scores["train"]["uas"],
"train_others": scores["train"]["all"],
"dev_las": scores["dev"]["las"],
"dev_uas": scores["dev"]["uas"],
"dev_others": scores["dev"]["all"],
"test_las": scores["test"]["las"] or 0.0,
"test_uas": scores["test"]["uas"] or 0.0,
"test_others": scores["test"]["all"] or {},
"train_loss": loss,
}
self._save()
```
#### File: parser/evaluation/evaluator.py
```python
from argparse import Namespace
from collections import defaultdict
from dataclasses import dataclass
from typing import List, Dict, Tuple, Callable, Union, Optional
import torch
from parseridge.corpus.corpus import CorpusIterator, Corpus
from parseridge.corpus.sentence import Sentence
from parseridge.corpus.treebank import Treebank
from parseridge.parser.configuration import Configuration
from parseridge.parser.evaluation.callbacks.base_eval_callback import EvalCallback
from parseridge.parser.evaluation.callbacks.handler import EvalCallbackHandler
from parseridge.parser.evaluation.conll_eval import CoNLLEvaluationScript
from parseridge.parser.modules.data_parallel import Module
from parseridge.parser.training.dynamic_trainer import DynamicTrainer
from parseridge.utils.helpers import T
from parseridge.utils.logger import LoggerMixin
SCORES = Dict[str, Union[float, Dict[str, Dict[str, float]]]]
@dataclass
class Evaluator(LoggerMixin):
model: Module
treebank: Treebank
callbacks: Optional[List[EvalCallback]] = None
cli_args: Optional[Namespace] = None
batch_size: int = 64
eval_function: Callable = CoNLLEvaluationScript().get_las_score_for_sentences
def __post_init__(self) -> None:
self.callback_handler = EvalCallbackHandler(callbacks=self.callbacks or [])
self.callback_handler.on_initialization(
model=self.model, treebank=self.treebank, cli_args=self.cli_args
)
def shutdown(self):
self.callback_handler.on_shutdown()
def evaluate(self, epoch: int = -1, loss: float = 0.0) -> Dict[str, Dict[str, float]]:
self.model.eval()
self.callback_handler.on_eval_begin(epoch=epoch)
train_scores = self._evaluate_corpus(
self.treebank.train_corpus, corpus_type="train"
)
dev_scores = self._evaluate_corpus(self.treebank.dev_corpus, corpus_type="dev")
test_scores = defaultdict(float)
test_scores["all"] = defaultdict(float)
if self.treebank.test_corpus:
test_scores = self._evaluate_corpus(
self.treebank.test_corpus, corpus_type="test"
)
scores = {
"train": {
"las": train_scores["las"],
"uas": train_scores["uas"],
"all": train_scores["all"],
},
"dev": {
"las": dev_scores["las"],
"uas": dev_scores["uas"],
"all": dev_scores["all"],
},
"test": {
"las": test_scores["las"] if test_scores else None,
"uas": test_scores["uas"] if test_scores else None,
"all": test_scores["all"] if test_scores else None,
},
}
self.callback_handler.on_eval_end(scores=scores, loss=loss, epoch=epoch)
return scores
def _evaluate_corpus(self, corpus: Corpus, corpus_type: str) -> SCORES:
self.callback_handler.on_epoch_begin(dataset=corpus, corpus_type=corpus_type)
gold_sentences: List[Sentence] = []
pred_sentences: List[Sentence] = []
iterator = CorpusIterator(corpus, batch_size=self.batch_size, train=False)
for i, batch in enumerate(iterator):
self.callback_handler.on_batch_begin(
batch=i, batch_data=batch, corpus_type=corpus_type
)
pred, gold = self._run_prediction_batch(batch)
pred_sentences += pred
gold_sentences += gold
self.callback_handler.on_batch_end(
batch=i,
batch_data=batch,
gold_sentences=gold,
pred_sentences=pred,
corpus_type=corpus_type,
)
serialized_gold = [
sentence.to_conllu().serialize()
for sentence in sorted(gold_sentences, key=lambda s: s.id)
]
serialized_pred = [
sentence.to_conllu().serialize()
for sentence in sorted(pred_sentences, key=lambda s: s.id)
]
scores = self.eval_function(serialized_gold, serialized_pred)
self.callback_handler.on_epoch_end(
scores=scores,
gold_sentences=gold_sentences,
pred_sentences=pred_sentences,
gold_sentences_serialized=serialized_gold,
pred_sentences_serialized=serialized_pred,
corpus_type=corpus_type,
)
return scores
def _run_prediction_batch(self, batch) -> Tuple[List[Sentence], List[Sentence]]:
pred_sentences = []
gold_sentences = []
sentence_features, sentences = batch
token_sequences = sentence_features[:, 0, :]
sentence_lengths = torch.tensor(
data=[len(sentence) for sentence in sentences],
dtype=torch.int64,
device=self.model.device,
)
contextualized_tokens_batch = self.model.get_contextualized_input(
token_sequences, sentence_lengths
)
configurations = [
Configuration(
sentence,
contextualized_input,
self.model,
sentence_features=sentence_feature,
)
for contextualized_input, sentence, sentence_feature in zip(
contextualized_tokens_batch, sentences, sentence_features
)
]
while configurations:
# Pass the stacks and buffers through the MLPs in one batch
configurations = DynamicTrainer.predict_logits(configurations, self.model)
# The actual computation of the loss must be done sequentially
for configuration in configurations:
# Predict a list of possible actions: Transitions, their
# label (if the transition is LEFT/ RIGHT_ARC) and the
# score of the action based on the MLP output.
actions = configuration.predict_actions()
if not configuration.swap_possible:
# Exclude swap options
actions = [action for action in actions if action.transition != T.SWAP]
assert actions
best_action = Configuration.get_best_action(actions)
if best_action.transition == T.SWAP:
configuration.num_swap += 1
configuration.apply_transition(best_action)
if configuration.is_terminal:
pred_sentences.append(configuration.predicted_sentence)
gold_sentences.append(configuration.sentence)
# Remove all finished configurations
configurations = [c for c in configurations if not c.is_terminal]
return pred_sentences, gold_sentences
```
#### File: parser/modules/add_and_norm_layer.py
```python
from torch import nn, Tensor
from parseridge.parser.modules.data_parallel import Module
class AddAndNormLayer(Module):
def __init__(self, model_size: int, dropout: float = 0.1, **kwargs):
super().__init__(**kwargs)
self.input_size = self.output_size = model_size
self.dropout = nn.Dropout(p=dropout)
self.layer_norm = nn.LayerNorm(model_size)
def forward(self, input: Tensor, output: Tensor):
return self.layer_norm(input + self.dropout(output))
```
#### File: modules/attention/positional_encodings.py
```python
import math
import numpy as np
import torch
from torch import nn
from parseridge.parser.modules.data_parallel import Module
class PositionalEncoder(Module):
def __init__(
self, model_size: int = 128, max_length: int = 200, dropout: float = 0.1, **kwargs
):
super().__init__(**kwargs)
self.model_size = self.input_size = self.output_size = model_size
position_enc = np.array(
[
[pos / np.power(10000, 2 * i / model_size) for i in range(model_size)]
for pos in range(1, max_length + 1)
]
)
position_enc[:, 0::2] = np.sin(position_enc[:, 0::2]) # dim 2i
position_enc[:, 1::2] = np.cos(position_enc[:, 1::2]) # dim 2i+1
self.pe = torch.from_numpy(position_enc).float().to(self.device)
self.pe = self.pe.requires_grad_(False)
self.norm = nn.LayerNorm(self.model_size)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
if x.size(1) == 0:
return x
# make embeddings relatively larger
x *= math.sqrt(self.model_size)
pe = self.pe[: x.size(1)]
return self.norm(self.dropout(x + pe))
```
#### File: modules/attention/universal_attention.py
```python
from typing import Tuple, Optional
import torch
from torch import nn, Tensor
from parseridge.parser.modules.attention.soft_attention import Attention
from parseridge.parser.modules.utils import initialize_xavier_dynet_, mask_
class UniversalAttention(Attention):
def __init__(
self,
query_dim: int,
query_output_dim: Optional[int] = None,
key_output_dim: Optional[int] = None,
value_output_dim: Optional[int] = None,
**kwargs,
):
super().__init__(
query_dim=query_dim,
key_dim=query_dim,
query_output_dim=query_output_dim,
key_output_dim=key_output_dim,
value_output_dim=value_output_dim,
**kwargs,
)
self.query_param = nn.Parameter(torch.rand(query_dim))
def forward(
self, keys: Tensor, sequence_lengths: Tensor, values: Tensor = None, **kwargs
) -> Tuple[Tensor, Tensor, Tensor]:
queries = self.query_param.expand(keys.size(0), -1)
return super().forward(queries, keys, sequence_lengths, values)
class LinearAttention(Attention):
def __init__(self, query_dim: int, query_output_dim: Optional[int] = None, **kwargs):
super().__init__(
query_dim=query_dim,
key_dim=query_dim,
query_output_dim=query_output_dim,
key_output_dim=query_output_dim,
**kwargs,
)
self.learn_input = nn.Sequential(
nn.Linear(in_features=query_dim, out_features=query_dim), nn.Tanh()
)
self.similarity_function = nn.Linear(in_features=query_dim, out_features=1)
initialize_xavier_dynet_(self)
def forward(
self, keys: Tensor, sequence_lengths: Tensor, values: Tensor = None, **kwargs
) -> Tuple[Tensor, Tensor, Tensor]:
if values is None:
values = keys
keys = self.learn_input(keys)
# Compare keys to queries
attention_logits = self.similarity_function(keys)
# Mask scores for padding keys
attention_logits = mask_(attention_logits, sequence_lengths, device=self.device)
# Apply normalization function (e.g. softmax)
attention_energies = self.normalize(attention_logits)
# Multiply the values with the attention scores
weighted_values = values * attention_energies
# Compute a weighted average to get a sequence encoding
context_vector = torch.sum(weighted_values, dim=1)
return context_vector, weighted_values, attention_energies
```
#### File: parser/training/base_trainer.py
```python
from abc import ABC, abstractmethod
from typing import List, Union
import torch
from torch.optim.optimizer import Optimizer
from parseridge.corpus.corpus import Corpus
from parseridge.corpus.training_data import ConLLDataset
from parseridge.parser.modules.data_parallel import Module
from parseridge.parser.training.callbacks.base_callback import Callback
from parseridge.parser.training.callbacks.handler import CallbackHandler
from parseridge.parser.training.callbacks.model_training_callback import (
ModelTrainingCallback,
)
from parseridge.parser.training.hyperparameters import Hyperparameters
from parseridge.utils.logger import LoggerMixin
class Trainer(LoggerMixin, ABC):
def __init__(
self, model: Module, optimizer: Optimizer, callbacks: List[Callback] = None
):
self.model = model
self.optimizer = optimizer
self.callback_handler = CallbackHandler(
callbacks=callbacks or [], model=self.model, optimizer=self.optimizer
)
self.callback_handler.register_callback(ModelTrainingCallback())
self.last_epoch = 0
def register_callbacks(self, callbacks: List[Callback]) -> None:
for callback in callbacks:
self.callback_handler.register_callback(callback)
@abstractmethod
def fit(
self,
epochs: int,
training_data: Union[Corpus, ConLLDataset],
hyper_parameters: Hyperparameters = None,
**kwargs,
) -> None:
pass
def fit_one_cycle(
self,
training_data: Union[Corpus, ConLLDataset],
hyper_parameters: Hyperparameters = None,
**kwargs,
) -> None:
return self.fit(
epochs=1,
training_data=training_data,
hyper_parameters=hyper_parameters,
**kwargs,
)
def learn(self, loss: torch.Tensor) -> None:
self.callback_handler.on_loss_begin(loss=loss)
# Compute the gradients
self.callback_handler.on_backward_begin()
loss.backward()
self.callback_handler.on_backward_end()
# Update the weights
self.optimizer.step()
self.callback_handler.on_step_end()
# Clear all previous gradients
self.optimizer.zero_grad()
```
#### File: training/callbacks/evaluation_callback.py
```python
from typing import Any
from parseridge.parser.training.callbacks.base_callback import Callback
class EvaluationCallback(Callback):
"""
    Runs the evaluator at the end of every epoch and shuts it down once training
    has finished.
"""
_order = 100
def __init__(self, evaluator):
self.evaluator = evaluator
def on_train_end(self, **kwargs: Any) -> None:
self.evaluator.shutdown()
def on_epoch_end(self, epoch: int, epoch_loss: float, **kwargs: Any) -> None:
self.evaluator.evaluate(epoch=epoch, loss=epoch_loss)
```
#### File: training/callbacks/learning_rate_finder_callback.py
```python
from dataclasses import dataclass, field
from typing import Any, List
from parseridge.parser.training.callbacks.base_callback import Callback, StopTraining
@dataclass
class LearningRateFinderCallback(Callback):
_order = 100
scheduler: Any
smooth_loss: float = 0.05
max_num_iterations: int = 100
best_loss: float = float("inf")
learning_rate_history: List[float] = field(default_factory=list)
loss_history: List[float] = field(default_factory=list)
_num_iterations: int = 0
def on_batch_end(self, batch_loss: float, **kwargs: Any) -> None:
if batch_loss is None:
return
learning_rate = self.scheduler.get_lr()[0]
loss = batch_loss
if self.smooth_loss and self.loss_history:
loss = self.smooth_loss * loss + (1 - self.smooth_loss) * self.loss_history[-1]
if loss < self.best_loss:
self.best_loss = loss
self.learning_rate_history.append(learning_rate)
self.loss_history.append(loss)
self._num_iterations += 1
if self._num_iterations > self.max_num_iterations:
self.logger.info("Reached max number of iterations.")
raise StopTraining()
```
#### File: training/callbacks/lr_scheduler_callback.py
```python
from typing import Any
from parseridge.parser.training.callbacks.base_callback import Callback
class LRSchedulerCallback(Callback):
_order = 10
def __init__(self, scheduler: Any, when: str = "after_epoch"):
assert when in ["after_epoch", "after_batch"], f"'{when}' not valid."
self.when = when
self.scheduler = scheduler
def on_epoch_end(self, **kwargs: Any) -> None:
if self.when == "after_epoch":
self.scheduler.step()
def on_batch_end(self, **kwargs: Any) -> None:
if self.when == "after_batch":
self.scheduler.step()
```
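A hypothetical wiring of the callback with a standard PyTorch scheduler; the tiny linear model is only a stand-in.
```python
# Step a StepLR scheduler once per epoch via the callback (the model is a stand-in).
import torch.nn as nn
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR

model = nn.Linear(4, 2)
optimizer = SGD(model.parameters(), lr=0.1)
scheduler = StepLR(optimizer, step_size=10, gamma=0.1)
callback = LRSchedulerCallback(scheduler, when="after_epoch")
```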
#### File: training/callbacks/progress_bar_callback.py
```python
from typing import Any
from tqdm.auto import tqdm
from parseridge.parser.training.callbacks.base_callback import Callback
class ProgressBarCallback(Callback):
"""
Shows a progress bar during training.
"""
def __init__(self, moving_average: int = 64):
self._pbar = None
self.template = "[{epoch:02d}/{epochs:02d}] | Batch Loss: {loss:8.4f}"
self.prev_loss = []
self.moving_average = moving_average
self.batch_size = None
self.num_epochs = None
self.current_epoch = None
def on_train_begin(self, epochs: int, batch_size: int, **kwargs: Any) -> None:
self.batch_size = batch_size
self.num_epochs = epochs
def on_epoch_begin(
self, epoch: int, num_batches: int, training_data: Any, **kwargs: Any
) -> None:
self.current_epoch = epoch
self._pbar = tqdm(total=len(training_data), leave=True)
self._pbar.set_description(
self.template.format(epoch=self.current_epoch, epochs=self.num_epochs, loss=0)
)
def on_epoch_end(self, epoch_loss: float, **kwargs: Any) -> None:
self._pbar.set_description(
"[{epoch:02d}/{epochs:02d}] | Epoch Loss: {loss:8.4f}".format(
epoch=self.current_epoch, epochs=self.num_epochs, loss=epoch_loss
)
)
self._pbar.close()
def on_batch_end(self, batch_loss: float, batch_data: Any, **kwargs: Any) -> None:
if batch_loss is not None:
self.prev_loss.append(batch_loss)
avg_loss = sum(self.prev_loss) / len(self.prev_loss)
self.prev_loss = self.prev_loss[-self.moving_average :]
else:
if self.prev_loss:
avg_loss = sum(self.prev_loss) / len(self.prev_loss)
else:
avg_loss = 0
self._pbar.set_description(
self.template.format(
epoch=self.current_epoch, epochs=self.num_epochs, loss=avg_loss
)
)
batch_length = len(batch_data[0])
self._pbar.update(batch_length)
```
#### File: training/callbacks/save_model_callback.py
```python
import os
from typing import Any, Optional
import torch
from parseridge.parser.modules.data_parallel import Module
from parseridge.parser.training.callbacks.base_callback import Callback
class SaveModelCallback(Callback):
_order = 5
def __init__(self, folder_path: Optional[str] = None):
self.folder = folder_path
if self.folder:
os.makedirs(folder_path, exist_ok=True)
def on_epoch_end(self, epoch: int, model: Module, **kwargs: Any) -> None:
if self.folder:
file_name = f"{self.folder}/epoch_{epoch}.torch"
torch.save(model.state_dict(), file_name)
```
#### File: parser/training/static_trainer.py
```python
from torch.utils.data import DataLoader
from parseridge.corpus.training_data import ConLLDataset
from parseridge.parser.loss import Criterion
from parseridge.parser.training.base_trainer import Trainer
from parseridge.parser.training.callbacks.base_callback import StopEpoch, StopTraining
from parseridge.parser.training.hyperparameters import Hyperparameters
class StaticTrainer(Trainer):
"""
This trainer uses pre-generated training samples.
"""
def fit(
self,
epochs: int,
training_data: ConLLDataset,
hyper_parameters: Hyperparameters = None,
**kwargs,
) -> None:
if not isinstance(training_data, ConLLDataset):
raise ValueError(
f"The StaticTrainer requires a ConLLDataset object for training, but "
f"received a {type(training_data)} object."
)
hyper_parameters = (hyper_parameters or Hyperparameters()).update(**kwargs)
initial_epoch = self.last_epoch
self.callback_handler.on_train_begin(
epochs=epochs + initial_epoch, hyper_parameters=hyper_parameters
)
for epoch in range(initial_epoch + 1, epochs + initial_epoch + 1):
try:
self._run_epoch(epoch, training_data, hyper_parameters)
except StopTraining:
self.logger.info(f"Stopping training after {epoch} epochs.")
break
self.callback_handler.on_train_end()
def _run_epoch(
self, epoch: int, training_data: ConLLDataset, hyper_parameters: Hyperparameters
):
train_dataloader = DataLoader(
dataset=training_data,
batch_size=hyper_parameters.batch_size,
shuffle=True,
collate_fn=ConLLDataset.collate_batch,
)
num_batches = int(len(training_data) / hyper_parameters.batch_size)
self.callback_handler.on_epoch_begin(
epoch=epoch, num_batches=num_batches, training_data=training_data
)
criterion = Criterion(loss_function=hyper_parameters.loss_function)
epoch_loss = 0
for i, batch_data in enumerate(train_dataloader):
try:
self.callback_handler.on_batch_begin(batch=i, batch_data=batch_data)
batch = ConLLDataset.TrainingBatch(*batch_data)
pred_transitions, pred_relations = self.model(
stacks=batch.stacks,
stack_lengths=batch.stack_lengths,
buffers=batch.buffers,
buffer_lengths=batch.buffer_lengths,
token_sequences=batch.sentences,
sentence_lengths=batch.sentence_lengths,
)
# Compute loss. Depending on the chosen loss strategy only a part of the
# arguments will actually be used in the computations of the loss value.
loss = criterion(
pred_transitions=pred_transitions,
gold_transitions=batch.gold_transitions,
pred_relations=pred_relations,
gold_relations=batch.gold_relations,
wrong_transitions=batch.wrong_transitions,
wrong_transitions_lengths=batch.wrong_transitions_lengths,
wrong_relations=batch.wrong_relations,
wrong_relations_lengths=batch.wrong_relations_lengths,
)
self.learn(loss)
loss = loss.item()
epoch_loss += loss
self.last_epoch = epoch
self.callback_handler.on_batch_end(
batch=i, batch_data=batch_data, batch_loss=loss
)
except StopEpoch:
self.logger.info(f"Stopping epoch after {i}/{num_batches} batches.")
break
self.callback_handler.on_epoch_end(epoch=epoch, epoch_loss=epoch_loss)
```
|
{
"source": "jgontrum/snippets",
"score": 3
}
|
#### File: jgsnippets/elasticsearch/elasticsearch.py
```python
from elasticsearch import helpers
from elasticsearch.exceptions import NotFoundError
def query(es, query=None, index=None, doc_type=None):
rs = es.search(body=query, index=index, doc_type=doc_type)
rs = rs or {}
hits = rs.get("hits", {}).get("hits", [])
for hit in list(hits):
ret = hit.get("_source", {})
ret['_id'] = hit.get("_id", "_UNKNOWNid__")
ret['_score'] = hit.get("_score", -1)
yield ret
def find(es, query=None, index=None, doc_type=None, scroll='60s', size=100,
raw=False):
rs = helpers.scan(es, index=index, doc_type=doc_type, scroll=scroll,
size=size, query=query)
for doc in rs:
if raw:
yield doc
else:
ret = doc.get("_source", {})
ret['id_'] = doc.get("id_", "_UNKNOWNid__")
yield ret
def insert(es, id_, query, index, doc_type):
rs = es.index(index, doc_type, query, id=id_)
rs = rs or {}
return rs.get("result") == 'created'
def reindex(es, id_, query, index, doc_type):
rs = es.index(index, doc_type, query, id=id_)
rs = rs or {}
return rs.get("result") == 'updated'
def update(es, id_, query, index, doc_type):
rs = es.update(index, doc_type, id_, query)
return rs.get("result") == 'updated'
def get_by_id(es, id_, index, doc_type):
try:
rs = es.get(index, id_, doc_type)
rs = rs or {}
except NotFoundError:
return {}
return rs.get("_source", {})
def count(es, query, index, doc_type):
rs = es.count(index=index, doc_type=doc_type, body=query)
rs = rs or {}
return rs.get("count", 0)
```
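A minimal sketch of the helpers above against a local cluster; the host, index name and document are placeholders.
```python
# Placeholder cluster/index; requires a reachable Elasticsearch instance.
from elasticsearch import Elasticsearch

es = Elasticsearch(["http://localhost:9200"])
insert(es, "doc-1", {"title": "hello world"}, index="demo", doc_type="_doc")
print(count(es, {"query": {"match_all": {}}}, index="demo", doc_type="_doc"))
for doc in find(es, query={"query": {"match_all": {}}}, index="demo", doc_type="_doc"):
    print(doc)
```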
#### File: jgsnippets/strings/encoding.py
```python
def clean_encoding(text):
if not isinstance(text, str):
return ""
mapping = {
"Ã\u009f": 'ß',
"ö": 'ö',
"é": "é",
"ü": "ü",
"ä": "ä",
"ß": 'ß',
"ä": 'ä',
"Ä": 'Ä',
"ö": 'ö',
"Ö": 'Ö',
"ü": 'ü',
"Ü": 'Ü'
}
for bad, good in mapping.items():
text = text.replace(bad, good)
return text
```
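The table above targets a fixed set of UTF-8-read-as-Latin-1 artefacts; a more general repair for that same class of mojibake is sketched below (not part of the original snippet).
```python
# General round-trip repair for UTF-8 text that was mistakenly decoded as Latin-1.
def repair_latin1_mojibake(text: str) -> str:
    try:
        return text.encode("latin-1").decode("utf-8")
    except (UnicodeEncodeError, UnicodeDecodeError):
        return text  # leave untouched if it is not this kind of damage

# e.g. repair_latin1_mojibake("Ã¼") == "ü"
```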
#### File: jgsnippets/strings/format.py
```python
import json
import pprint as ppprint
def jprint(text: str):
print(json.dumps(text, indent=2))
def pprint(obj):
ppprint.pprint(obj)
```
|
{
"source": "jgonzal1/python-csv-to-json",
"score": 4
}
|
#### File: jgonzal1/python-csv-to-json/csvToJson.py
```python
import csv
import json
import sys
from collections import defaultdict
'''
Main function: converts a two-column CSV file into a JSON file.
'''
def transform(argv):
csvfile = open(argv[0],'r')
fichJSON = open(argv[1],'w')
fieldnames = ("Col1", "Col2")
    reader = csv.DictReader(csvfile, fieldnames)  # use fieldnames as the JSON keys
output = []
for each in reader:
row = {}
for field in fieldnames:
row[field] = each[field]
output.append(row)
json.dump(output, fichJSON, indent=3, sort_keys=True)
regroup = defaultdict(list)
if __name__ == '__main__':
transform(sys.argv[1:])
```
|
{
"source": "jgonzal3/devml",
"score": 3
}
|
#### File: devml/devml/mkdata.py
```python
from subprocess import (Popen, PIPE)
import os
import csv
import json
from .ts import (convert_datetime, date_index)
import pandas as pd
from sensible.loginit import logger
log = logger(__name__)
#Git Globals
GIT_COMMIT_FIELDS = ['id', 'author_name', 'author_email', 'date', 'message']
GIT_LOG_FORMAT = ['%H', '%an', '%ae', '%ad', '%s']
GIT_LOG_FORMAT = '%x1f'.join(GIT_LOG_FORMAT) + '%x1e'
GIT_LOG_CMD = 'git log --no-merges --date=local --format="%s"' % GIT_LOG_FORMAT
GIT_UNIQUE_CMD = "git remote -v"
GIT_REPO_NAME = """basename `git rev-parse --show-toplevel`"""
GIT_CSV_COLUMN_NAMES = ["date","author_email", "author_name",
"id", "message", "repo"]
def df_from_csv(path):
df = pd.read_csv(path)
return df
def df_from_logs(logs):
df = pd.DataFrame.from_dict(logs)
return df
def generate_repo_name():
"""Returns short name of git repo"""
p = Popen(GIT_REPO_NAME, shell=True, stdout=PIPE)
repo_name = p.stdout.read().strip()
log_msg = "Repo Name: %s" % repo_name
log.info(log_msg)
return repo_name
def log_to_dict(path, repo_id=None):
"""Converts Git Log To A Python Dict"""
#don't process the same repo twice
guid = get_git_uid()
if guid == repo_id:
guid_true_msg = "guid: %s | repo_id: %s are equal: SKIP" % (guid, repo_id)
log.info(guid_true_msg)
return False, False
else:
not_true_msg = "guid: %s" % guid
log.info(not_true_msg)
os.chdir(path) #change directory to process git log
repo_name = generate_repo_name()
p = Popen(GIT_LOG_CMD, shell=True, stdout=PIPE)
(git_log, _) = p.communicate()
try:
git_log = git_log.decode('utf8').strip('\n\x1e').split("\x1e")
except UnicodeDecodeError:
log.exception("utf8 encoding is incorrect, trying ISO-8859-1")
git_log = git_log.decode('ISO-8859-1').strip('\n\x1e').split("\x1e")
git_log = [row.strip().split("\x1f") for row in git_log]
git_log = [dict(list(zip(GIT_COMMIT_FIELDS, row))) for row in git_log]
for dictionary in git_log:
dictionary["repo"]=repo_name
repo_msg = "Found %s Messages For Repo: %s" % (len(git_log), repo_name)
log.info(repo_msg)
return git_log, guid
def log_df(path):
"""Returns a Pandas DataFrame of git log history"""
git_log = log_to_dict(path)
df = pd.DataFrame.from_dict(git_log)
return df
def log_to_csv(filename, git_log, column_headers=None):
"""Writes python dict of git log to csv file"""
if column_headers is None:
column_headers = GIT_CSV_COLUMN_NAMES
with open(filename, mode='w') as outfile:
fname_msg = "Creating Git Log File: %s" % filename
log.info(fname_msg)
writer = csv.writer(outfile)
writer.writerow(column_headers)
for row in git_log:
try:
writer.writerow([row["date"],row["author_email"],
row["author_name"], row["id"], row["message"],
row["repo"]])
except KeyError:
except_msg = "Skipping row: %s" % row
log.exception(except_msg)
return filename
def subdirs(path):
"""Yields a list of subdirectories for a given path"""
for name in os.listdir(path):
if os.path.isdir(os.path.join(path,name)):
full_path = os.path.abspath(os.path.join(path, name))
sdir_msg = "Found repo: %s" % full_path
log.info(sdir_msg)
yield full_path
def create_org_df(path):
"""Returns a Pandas Dataframe of an Org"""
original_cwd = os.getcwd()
logs = create_org_logs(path)
org_df = pd.DataFrame.from_dict(logs)
#convert date to datetime format
datetime_converted_df = convert_datetime(org_df)
#Add A Date Index
converted_df = date_index(datetime_converted_df)
new_cwd = os.getcwd()
cd_msg = "Changing back to original cwd: %s from %s" % (original_cwd, new_cwd)
log.info(cd_msg)
os.chdir(original_cwd)
return converted_df
def create_projectarea_df(ccmServer, projectArea, userId, password):
"""Returns a Pandas DataFrome of change sets delivered to components in a project area"""
# Get all the users managed by this server, we need this to get the author_email
columns = ['date', 'id', 'author_name', 'author_email', 'message', 'repo', 'commits']
# login to EWM CCM
p = Popen(f'lscm login -r {ccmServer} -u {userId} -P {password}', shell=True, stdout=PIPE)
resp = p.stdout.read().decode('utf-8')
if not resp.startswith('Logged in'):
log.error(f'Cannot login to {ccmServer}')
return None
df = pd.DataFrame(columns=columns)
p = Popen(f'lscm list users -r {ccmServer} -j', shell=True, stdout=PIPE)
ewmUsers = json.load(p.stdout)
users = {user['name']: user['mail'] for user in ewmUsers}
# get the components in the project area
p = Popen(f'lscm list components -r {ccmServer} -j', shell=True, stdout=PIPE)
components = json.load(p.stdout)
    components = pd.DataFrame.from_dict(components['components']) if components is not None else None
if components is None or len(components) == 0:
log.info(f'Could not get components in project area: "{projectArea}"')
return df
for component in components.name:
# get all the completed change sets delivered to this component
p = Popen(f'lscm list changesets -r {ccmServer} -C "{component}" -j', shell=True, stdout=PIPE)
changes = None
try:
changes = json.load(p.stdout)
            changes = changes['changes'] if changes is not None else None
except:
continue
changesDicts = [dict(list(zip(columns, [change['modified'],change['uuid'],change['author'],None,change['comment'],component,None]))) for change in changes]
df = pd.concat([df, pd.DataFrame.from_dict(changesDicts)])
df['date'] = pd.to_datetime(df['date'])
    df.set_index('date', drop=True, inplace=True)
df['author_email'] = df['author_name'].apply(lambda author_name: users[author_name] if author_name in users else None)
df['commits']=1
p = Popen(f'lscm logout -r {ccmServer}', shell=True, stdout=PIPE)
return df
def get_git_uid():
"""
Uniquely identify git repo:
https://stackoverflow.com/questions/34874343/
how-can-i-uniquely-identify-a-git-repository
This isn't great, used git remote instead
"""
p = Popen(GIT_UNIQUE_CMD, shell=True, stdout=PIPE)
(guid, _) = p.communicate()
return guid
def create_org_logs(path):
"""Iterate through all paths in current working directory,
make log dict"""
combined_log = []
guid = False
for sdir in subdirs(path):
repo_msg = "Processing Repo: %s" % sdir
log.info(repo_msg)
git_log, guid2 = log_to_dict(sdir, guid)
#Only set guid if it isn't False
if guid2:
guid = guid2
if not git_log:
msg = "repo already processed: git_log value == [%s]" % git_log
log.info(msg)
continue
else:
combined_log += git_log
    log_entry_msg = "Found a total of %s log entries" % len(combined_log)
log.info(log_entry_msg)
return combined_log
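# Example usage (a minimal sketch; '/tmp/checkouts' is a hypothetical directory
# containing one cloned repository per subdirectory):
#
#   org_df = create_org_df('/tmp/checkouts')
#   print(org_df.describe())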
```
#### File: devml/devml/util.py
```python
import pandas as pd
from .ts import (convert_datetime, date_index)
def zipped_csv_to_df(path='ext/data/pallets.csv.zip'):
"""Imports a compressed CSV to DF.
Additionally, preps it for regular usage by adding date index
"""
df = pd.read_csv(path, compression='zip',
header=0, sep=',', quotechar='"')
#convert date to datetime format
datetime_converted_df = convert_datetime(df)
#Add A Date Index
converted_df = date_index(datetime_converted_df)
return converted_df
def csv_to_df(path):
"""Imports a CSV File to DF"""
df = pd.read_csv(path, header=0, sep=',', quotechar='"')
#convert date to datetime format
datetime_converted_df = convert_datetime(df)
#Add A Date Index
converted_df = date_index(datetime_converted_df)
return converted_df
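# Example usage (a minimal sketch; 'git_log.csv' is a hypothetical file, e.g. one
# produced by log_to_csv, with a date column that convert_datetime/date_index expect):
#
#   df = csv_to_df('git_log.csv')
#   print(df.head())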
```
|
{
"source": "jgonzalezdemendibil/movie_publisher",
"score": 2
}
|
#### File: movie_publisher/scripts/merge.py
```python
from __future__ import print_function
import rosbag
import argparse
import os
import sys
def merge(inbags, outbag='output.bag', topics=None, exclude_topics=[], raw=True):
# Open output bag file:
try:
out = rosbag.Bag(outbag, 'a' if os.path.exists(outbag) else 'w')
except IOError as e:
        print('Failed to open output bag file %s!: %s' % (outbag, e), file=sys.stderr)
return 127
# Write the messages from the input bag files into the output one:
for inbag in inbags:
try:
print(' Processing input bagfile: %s' % inbag)
for topic, msg, t in rosbag.Bag(inbag, 'r').read_messages(topics=topics, raw=raw):
                if topic not in exclude_topics:
out.write(topic, msg, t, raw=raw)
except IOError as e:
            print('Failed to open input bag file %s!: %s' % (inbag, e), file=sys.stderr)
return 127
out.close()
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Merge multiple bag files into a single one.')
parser.add_argument('inbag', help='input bagfile(s)', nargs='+')
parser.add_argument('--output', help='output bag file', default='output.bag')
parser.add_argument('--topics', help='topics to merge from the input bag files', nargs='+', default=None)
parser.add_argument('--exclude_topics', help='topics not to merge from the input bag files', nargs='+', default=[])
args = parser.parse_args()
try:
sys.exit(merge(args.inbag, args.output, args.topics, args.exclude_topics))
    except Exception:
import traceback
traceback.print_exc()
```
|
{
"source": "jgoodell/vinyl-rest-service",
"score": 3
}
|
#### File: jgoodell/vinyl-rest-service/server.py
```python
from flask import Flask
from flask import render_template
from flask import make_response
from flask import redirect
from flask import url_for
from flask import request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///archive.db'
database = SQLAlchemy(app)
SUPPORTED_CONTENT_TYPES = []
# Models
class Album(database.Model):
'''Album Model Definition'''
id = database.Column(database.Integer, primary_key=True)
title = database.Column(database.String(512), unique=True)
artist = database.Column(database.String(512), unique=False)
year = database.Column(database.Integer, unique=False)
def __init__(self, title, artist, year):
self.title = title
self.artist = artist
self.year = year
def __repr__(self):
return "<Album('%s')>" % self.title
database.create_all()
try:
database.session.add(Album(title='Pump', artist='Aerosmith', year='1989'))
database.session.add(Album(title='Permanent Vacation', artist='Aerosmith',
year='1987'))
database.session.add(Album(title='Done With Mirrors', artist='Aerosmith', year='1985'))
database.session.commit()
except Exception as e:
print("+="*36)
print(e)
print("+="*36)
database.session.rollback()
# Helper Functions
def _content_type_subroutine(accepted_types):
'''Protected method to figure out Content-Type. If
no match is found "text/plain" is returned.'''
content_type_default = "text/plain"
for accepted_type in accepted_types:
for supported_content_type in SUPPORTED_CONTENT_TYPES:
if accepted_type == supported_content_type:
return accepted_type
return content_type_default
def determine_content_type(accept_string):
'''Determines response content type based
on Accept header. Returning the Content-Type
as a string.'''
return _content_type_subroutine(accept_string.split(','))
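# Example (sketch): with SUPPORTED_CONTENT_TYPES = ['application/json'],
# determine_content_type('application/json,text/html') returns 'application/json';
# with the empty list above, every request falls back to 'text/plain'.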
# Views
@app.route("/", methods=['GET'])
def root():
'''View Handler for /'''
albums = Album.query.all()
content_type = determine_content_type(request.headers['Accept'])
if request.method == 'GET':
return render_template('index.html', albums=albums)
else:
return make_response(render_template('405.html', method=request.method), 405)
@app.route("/archive/<artist>/<title>/<year>/", methods=['GET', 'PUT', 'POST'])
def album(artist, title, year):
'''View Handler for /<artist/<title>/<year>/.'''
content_type = determine_content_type(request.headers['Accept'])
if request.method == 'GET':
try:
album = Album.query.filter_by(title=title).one()
        except Exception:
return make_response(render_template('404.html', title=title), 404)
return render_template('album.html', album=album)
elif request.method == 'POST':
try:
            album = Album(artist=artist, title=title, year=int(year))
        except TypeError as e:
return make_response(render_template('400.html', title=str(e)), 400)
database.session.add(album)
database.session.commit()
return redirect(url_for('root'))
elif request.method == 'PUT':
try:
album = Album.query.filter_by(title=title).one()
        except Exception as e:
print(e)
return make_response(render_template('404.html', title=title), 404)
album.artist = artist
album.title = title
album.year = int(year)
        database.session.commit()
return redirect(url_for('root'))
elif request.method == 'DELETE':
return redirect(url_for('root'))
else:
print('BOOM!')
return make_response(render_template('405.html', method=request.method), 405)
if __name__ == "__main__":
app.run()
```
|
{
"source": "jgoodknight/ClusterPool",
"score": 2
}
|
#### File: ClusterPool/src/Dispatcher.py
```python
import os
import time
import math
import random
import hickle
import cPickle as pickle
import datetime
import subprocess
import warnings
import shutil
import numpy as np
SLURM_TEMP_FILE_STORAGE_DIRECTORY = "/n/regal/aspuru-guzik_lab/jgoodknight/"
class Dispatcher(object):
TEMPLATE_FILE_PYTHON_CALCULATION = os.path.dirname(os.path.realpath(__file__)) + "/GENERAL/python_calculation.template"
TEMPLATE_FILE_PYTHON_TRAWLER = os.path.dirname(os.path.realpath(__file__)) + "/GENERAL/python_trawler.template"
TEMPLATE_FILE_NAME = "$FILE_NAME"
TEMPLATE_FUNCTION_NAME = "$FUNCTION_NAME"
TEMPLATE_SPECIAL_SAVE_LOCATION = "$SPECIAL_SAVE_LOCATION"
TEMPLATE_NORMAL_SAVE_BOOL = "$NORMAL_SAVE"
statically_saved_filenames = set()
def __init__(self):
self.starting_date = str(datetime.date.today())
self.current_id_number = 0
try:
os.mkdir(self.TEMP_FILE_STORAGE_DIRECTORY)
print("temp directory created!")
except OSError:
print("temp directory already exists")
while True:
dispatcher_id = random.randint(10000, 99999)
try:
file_dir_str = self.TEMP_FILE_STORAGE_DIRECTORY + str(dispatcher_id)
print(file_dir_str)
os.mkdir(file_dir_str)
break
except OSError:
continue
self.dispatcher_id = dispatcher_id
tempFile = open(SLURM.TEMPLATE_FILE_PYTHON_CALCULATION, 'rb')
self.python_calculation_template_string = tempFile.read()
tempFile.close()
self.process_id_TO_filename = {}
try:
os.mkdir(self.file_directory())
except OSError:
print "directory already made"
def save_python_object_file_return_id(self, object_to_save):
ID_number = self.__generate_id_number__()
fileToBeSaved = open(self.object_filename(ID_number), 'wb')
pickle.dump(object_to_save, fileToBeSaved)
fileToBeSaved.close()
return ID_number
@staticmethod
def save_shared_object_return_filename(python_object, name_string):
try:
os.mkdir(Dispatcher.TEMP_FILE_STORAGE_DIRECTORY)
except OSError:
print "directory already made"
filename = Dispatcher.TEMP_FILE_STORAGE_DIRECTORY + name_string + str(np.random.randint(100000)) + ".pkl"
while filename in Dispatcher.statically_saved_filenames:
filename = Dispatcher.TEMP_FILE_STORAGE_DIRECTORY + name_string + str(np.random.randint(100000)) + ".pkl"
fileToBeSaved = open(filename, 'wb')
pickle.dump(python_object, fileToBeSaved)
fileToBeSaved.close()
Dispatcher.statically_saved_filenames.add(filename)
return filename
def file_directory(self):
return self.TEMP_FILE_STORAGE_DIRECTORY + str(self.dispatcher_id) + "/"
def base_filename(self, id_number):
return self.file_directory() + str(id_number)
def python_filename(self, id_number):
return self.file_directory() + str(id_number) + ".py"
def object_filename(self, id_number):
return self.file_directory() + str(id_number) + ".uncalculated.pkl"
def error_filename(self, id_number):
return self.file_directory() + str(id_number) + ".err"
def output_filename(self, id_number):
return self.file_directory() + str(id_number) + ".out"
def calculated_object_filename(self, id_number):
return self.file_directory() + str(id_number) + ".CALCULATED.pkl"
def bash_script_filename(self, id_number):
return self.file_directory() + "script_" + str(id_number) + ".sh"
def __is_id_used__(self, id_number):
return os.path.isfile(self.object_filename(id_number))
def __generate_id_number__(self):
while True:
new_id = self.current_id_number + 1
self.current_id_number = new_id
if self.__is_id_used__(new_id):
continue
else:
self.current_id_number = new_id
return new_id
def is_process_finished(self, ID_number):
return os.path.isfile(self.calculated_object_filename(ID_number))
def return_calculated_object(self, ID_number):
if not self.is_process_finished(ID_number):
raise Exception("process is not finished yet!")
filename = self.calculated_object_filename(ID_number)
myFile = open(filename, 'rb')
output = pickle.load(myFile)
myFile.close()
return output
def delete_all_temp_files(self, ID_number):
# os.remove(self.python_filename(ID_number))
# os.remove(self.object_filename(ID_number))
os.remove(self.calculated_object_filename(ID_number))
# os.remove(self.output_filename(ID_number))
# os.remove(self.error_filename(ID_number))
# os.remove(self.bash_script_filename(ID_number))
def cancel_unfinished_processes(self):
pass
def kill_dispatcher(self):
try:
shutil.rmtree(self.file_directory())
self.cancel_unfinished_processes()
except:
warnings.warn("error found in kill_dispatcher!")
def start_subprocess_trawlers(self, number):
for i in range(number):
self.__start_subprocess_trawler__(i)
def __start_process__(self, ID_number):
raise Exception("abstract method must be implemented!")
def __initiate_bash_shell_command__(self, ID_number):
raise Exception("abstract method must be implemented!")
class SLURM(Dispatcher):
id_string = "SLURM"
TEMP_FILE_STORAGE_DIRECTORY = SLURM_TEMP_FILE_STORAGE_DIRECTORY
PENDING = 'PENDING'
TIMEOUT = 'TIMEOUT'
CANCELLED = 'CANCELLED'
NODE_FAIL = 'NODE_FAIL'
PREEMPT = 'PREEMPTED'
SUSPENDED = 'SUSPENDED'
FAILED = 'FAILED'
BOOT_FAIL = 'BOOT_FAIL'
COMPLETED = 'COMPLETED'
CONFIGURING = 'CONFIGURING'
COMPLETING = 'COMPLETING'
PREEMPTED = 'PREEMPTED'
RESIZING = 'RESIZING'
TEMPLATE_NUMBER_CORES = "$NUMBER_CORES"
TEMPLATE_NUMBER_MINUTES = "$NUMBER_MINUTES"
TEMPLATE_QUEUE_NAME = "$QUEUE_NAME"
TEMPLATE_MEMORY_MB = "$MEMORY_MB"
TEMPLATE_OUTPUT_FILE_NAME = "$OUTPUT_FILENAME"
TEMPLATE_PYTHON_SCRIPT_NAME = "$PYTHON_SCRIPT_NAME"
TEMPLATE_DIRECTORY_NAME = "$DIRECTORY_NAME"
TEMPLATE_NUMBER = "$NUMBER"
DEFAULT_QUEUE_NAME = "aspuru-guzik"
TEMPLATE_FILE_SCRIPT_SUBMISSION = os.path.dirname(os.path.realpath(__file__)) + "/SLURM/slurm_submission_script.template"
dispatcher_id_TO_cluster_process_id = {}
TIMEOUT_FAILURE_TIME_MULTIPLIER = 1.05
TARGET_PROCESS_TIMES_MINUTES = 60.0
MAX_CLUSTER_START_ATTEMPTS = 100
RESTART_WAITING_TIME_MINUTES = 5.0
restart_counts = {}
MAX_RESTART_COUNT = 25
def __init__(self, time_requested_minutes, memory_requested_MB, number_of_cores = 1, number_of_machines = 1, partition_name = None):
super(SLURM, self).__init__()
        if partition_name is None:
partition_name = SLURM.DEFAULT_QUEUE_NAME
self.partition_name = partition_name
self.time_requested_minutes = int(math.ceil(time_requested_minutes))
if self.time_requested_minutes == 0:
self.time_requested_minutes = 1
self.memory_requested_MB = int(memory_requested_MB)
self.number_of_cores = number_of_cores
self.number_of_machines = number_of_machines
self.number_calculations_per_core = int(SLURM.TARGET_PROCESS_TIMES_MINUTES / self.time_requested_minutes )
self.__process_sent_off_count__ = 0
print "will send %s calculations to each core" % str(self.number_calculations_per_core)
self.time_requested_minutes = self.time_requested_minutes * (self.number_calculations_per_core + 1) + 5
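        # Sizing note: each trawler job is packed with roughly
        # TARGET_PROCESS_TIMES_MINUTES worth of calculations per core, and the SLURM
        # walltime request is padded by one extra calculation plus 5 minutes of slack.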
self.update_bash_submission_string()
self.dispatcher_id_TO_cluster_process_id = {}
self.cluster_process_ids = []
try:
os.mkdir(Dispatcher.TEMP_FILE_STORAGE_DIRECTORY)
except OSError:
pass
def __start_subprocess_trawler__(self, number):
f = open(Dispatcher.TEMPLATE_FILE_PYTHON_TRAWLER, "rb")
python_file_text = f.read()
f.close()
python_file_text = python_file_text.replace(SLURM.TEMPLATE_NUMBER, str(number))
python_file_text = python_file_text.replace(SLURM.TEMPLATE_DIRECTORY_NAME, self.file_directory())
trawler_id = "d" + str(self.dispatcher_id) + "_T" + str(number)
python_file = open(self.file_directory() + trawler_id + ".py", "wb")
python_file.write(python_file_text)
python_file.close()
bash_script_text = self.bash_submission_template_string.replace(SLURM.TEMPLATE_PYTHON_SCRIPT_NAME, self.file_directory() + trawler_id + ".py")
bash_script_text = bash_script_text.replace(SLURM.TEMPLATE_OUTPUT_FILE_NAME, self.file_directory() + trawler_id)
bash_script_file = open(self.file_directory() + trawler_id + ".sh", "wb")
bash_script_file.write(bash_script_text)
bash_script_file.close()
command = ["sbatch", self.file_directory() + trawler_id + ".sh"]
process_started = False
attempt_count = 0
while process_started == False:
attempt_count = attempt_count + 1
try:
output = subprocess.check_output(command)
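                    # sbatch normally replies "Submitted batch job <jobid>", so the
                    # fourth whitespace-separated token is taken as the SLURM job id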
cluster_process_id = output.split()[3]
try:
cluster_id = int(cluster_process_id)
self.cluster_process_ids.append(cluster_id)
except ValueError:
warnings.warn("CLUSTER FAILED TO START JOB, TRYING AGAIN")
time.sleep(SLURM.RESTART_WAITING_TIME_MINUTES * 60)
continue
process_started = True
except subprocess.CalledProcessError, e:
print e.output
if attempt_count < SLURM.MAX_CLUSTER_START_ATTEMPTS:
# print "trying again to start process %s" % str(ID_number)
time.sleep(SLURM.RESTART_WAITING_TIME_MINUTES * 60)
continue
else:
raise Exception("The number of restart attempts for trawler %s is TOO DAMN HIGH" % str(trawler_id))
def cancel_unfinished_processes(self):
for p_id in self.cluster_process_ids:
try:
subprocess.call(['scancel', str(p_id)])
except subprocess.CalledProcessError, e:
warnings.warn("Problem cancelling process " + str(p_id))
print e.output
#
def update_bash_submission_string(self):
f = open(SLURM.TEMPLATE_FILE_SCRIPT_SUBMISSION, 'rb')
self.bash_submission_template_string = f.read()
f.close()
self.bash_submission_template_string = self.bash_submission_template_string.replace(SLURM.TEMPLATE_QUEUE_NAME, self.partition_name)
self.bash_submission_template_string = self.bash_submission_template_string.replace(SLURM.TEMPLATE_NUMBER_CORES, str(self.number_of_cores))
self.bash_submission_template_string = self.bash_submission_template_string.replace(SLURM.TEMPLATE_NUMBER_MINUTES, str(self.time_requested_minutes))
self.bash_submission_template_string = self.bash_submission_template_string.replace(SLURM.TEMPLATE_MEMORY_MB, str(self.memory_requested_MB))
class normal(Dispatcher):
id_string = "normal"
TEMP_FILE_STORAGE_DIRECTORY = "./TEMP/"
def __init__(self, *args):
super(normal, self).__init__()
def __initiate_bash_shell_command__(self, ID_number):
output = "python " + self.python_filename(ID_number)
return output
def __start_process__(self, ID_number):
os.system(self.__initiate_bash_shell_command__(ID_number) + " &")
return 0
def __start_subprocess_trawler__(self, number):
f = open(Dispatcher.TEMPLATE_FILE_PYTHON_TRAWLER, "rb")
python_file_text = f.read()
f.close()
python_file_text = python_file_text.replace(SLURM.TEMPLATE_NUMBER, str(number))
python_file_text = python_file_text.replace(SLURM.TEMPLATE_DIRECTORY_NAME, self.file_directory())
trawler_id = "d" + str(self.dispatcher_id) + "_T" + str(number)
python_filename = self.file_directory() + trawler_id + ".py"
python_file = open(python_filename, "wb")
python_file.write(python_file_text)
python_file.close()
os.system("python %s &" % python_filename)
dictionary_of_dispatcher_classes = {SLURM.id_string : SLURM, normal.id_string : normal}
def return_appropriate_dispatcher_object(dispatcher_id_string):
return dictionary_of_dispatcher_classes[dispatcher_id_string]
```
|
{
"source": "jgoodknight/spectroscopy",
"score": 2
}
|
#### File: src/experiments/Dynamics.py
```python
import copy
import time
import pickle
import datetime
import warnings
import cmath
import numpy as np
import scipy.integrate
import multiprocessing
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import spectroscopy.Spacetime as Spacetime
import spectroscopy.TimeElectronicWavefunction as TimeElectronicWavefunction
import spectroscopy.TimeFunction as TimeFunction
import spectroscopy.TimePerturbation as TimePerturbation
import experimentBase
import ClusterPool.ClusterPool as ClusterPool
import ClusterPool.Dispatcher as Dispatcher
NUMBER_PROCESSES = 5
USE_CLUSTERPOOL = True
def BIG_RED_BUTTON(ppObject):
"just here to make embarassingly parallel calculations easier"
print "BOMBS AWAY"
return ppObject.calculate()
class Base(experimentBase.experiment):
"Just calculates the Pump/probe signal of your chosen system: no frills"
experiment_type_string = "Basic_Pump_Probe_"
def __init__(self, space,
electronicHamiltonian,
MuxMuyMuzElectronicOperatorTuple,
pumpBeamTuple,
maximumEvolutionTime,
initial_state_quantum_numbers = None,
save_EWF_every_n_steps = 100,
override_pulse_overlap_time_interval = None,
IDstring = ""):
#store all the variables
self.mySpace = space
self.dipoleTuple = MuxMuyMuzElectronicOperatorTuple
self.excitationMuTuple = (self.dipoleTuple[0].excitationOperator(), self.dipoleTuple[1].excitationOperator(), self.dipoleTuple[2].excitationOperator())
self.relaxationMuTuple = (self.dipoleTuple[0].relaxationOperator(), self.dipoleTuple[1].relaxationOperator(), self.dipoleTuple[2].relaxationOperator())
self.pumpEx, self.pumpEy, self.pumpEz = pumpBeamTuple
self.myElectronicHamiltonian = electronicHamiltonian
self.myFreePropagator = electronicHamiltonian.myPropagator()
self.intitialEWF = self.myElectronicHamiltonian.groundStateElectronicWavefunction()
self.initial_energy = self.intitialEWF.initial_energy
#We are given a target maximum waiting and a specific number of time points to take.
#We then have to scale that to the closest multiple of mySpace.dt to make the propagation work properly
self.save_EWF_every_n_steps = save_EWF_every_n_steps
self.calculated = False
self.IDstring = IDstring
        if override_pulse_overlap_time_interval is None:
self.pulse_overlap_ending_time = 2.0 * 2.0 * max(map(lambda x: x.timePillow, pumpBeamTuple))
else:
self.pulse_overlap_ending_time = override_pulse_overlap_time_interval
self.pulse_overlap_ending_index = self.pulse_overlap_ending_time / self.mySpace.dt
self.pulse_overlap_ending_index = int(self.pulse_overlap_ending_index)
self.maximumTime = self.pulse_overlap_ending_time + maximumEvolutionTime
self.Number_of_evolution_points = int(self.maximumTime / self.mySpace.dt)
self.monomer = (self.mySpace.electronicDimensionality == 2)
self.delta_function_excitation = (self.dipoleTuple[0] + self.dipoleTuple[1] + self.dipoleTuple[2]) * copy.deepcopy(self.intitialEWF)
def perturbationOperatorAtTime(self, T):
return self.pumpEx.valueAtTime(T) * self.dipoleTuple[0] + self.pumpEy.valueAtTime(T) * self.dipoleTuple[1] + self.pumpEz.valueAtTime(T) * self.dipoleTuple[2]
class Basic_One_Gaussian_Excitation(Base):
experiment_type_string = "Basic_Dynamics_"
def calculate(self):
if self.calculated:
print "You've already done the calculation, look at the results instead!"
return
overallStartTime = time.time()
tStep = 0.0
t = 0.0
c = -1.0j * self.mySpace.dt / ( 2.0 * self.mySpace.unitHandler.HBAR)
old_application = self.perturbationOperatorAtTime(-self.mySpace.dt) * self.intitialEWF * cmath.exp(1.0j * self.initial_energy * self.mySpace.dt)
old_EWF = 0.0 * old_application
#relevant_observables
time_series = []
electric_field_emission_series = []
electronic_density_matrix_series = []
total_population_series = []
pulse_amplitude_series = []
saved_ewf_series = []
saved_ewf_times_series = []
n = self.mySpace.electronicDimensionality
self.n=n
save_counter = 0
for tStep in range(self.Number_of_evolution_points):
t = tStep * self.mySpace.dt
time_series.append(t)
pulse_amp = np.sqrt(np.abs(self.pumpEx.valueAtTime(t))**2 + np.abs(self.pumpEy.valueAtTime(t))**2 + np.abs(self.pumpEz.valueAtTime(t))**2)
pulse_amplitude_series.append(pulse_amp)
phase_factor = cmath.exp(-1.0j * self.initial_energy * t)
new_application = (self.perturbationOperatorAtTime(t) * self.intitialEWF) * phase_factor
thingToPropagate = old_EWF + c * old_application
propagated_thing = self.myFreePropagator.APPLY(thingToPropagate)
new_EWF = propagated_thing + c * new_application
#Save relevant observables
#ELECTRONIC DENSITY MATRIX CALCULATION
total_population = 0.0
new_density_matrix = np.zeros((n,n), dtype=np.complex)
for i in range(0, n):
for j in range(0, n):
new_density_matrix[i,j] = new_EWF[i].overlap( new_EWF[j])
if i == j:
total_population = total_population + new_density_matrix[i,j]
if tStep == self.pulse_overlap_ending_index:
normalizer = total_population
total_population_series.append(total_population)
electronic_density_matrix_series.append(new_density_matrix)
#ELECTRIC FIELD CALCULATION
emission = 1.0j * cmath.exp(1.0j * self.initial_energy * t) * self.delta_function_excitation.overlap(new_EWF)
electric_field_emission_series.append(emission)
#SAVE EWF?
if save_counter == self.save_EWF_every_n_steps:
#SAVE SHIT
saved_ewf_series.append(new_EWF)
saved_ewf_times_series.append(t)
#RESET COUNTER
save_counter = 0
else:
save_counter = save_counter + 1
#RE-SET LOOP
old_application = new_application
old_EWF = new_EWF
total_population_series = np.array(total_population_series)
self.maximum_population = np.max(total_population_series)
self.electronic_density_matrix_series = np.array(electronic_density_matrix_series)
self.electric_field_emission_series = np.array(electric_field_emission_series)
self.time_series = np.array(time_series)
self.time_series_fs = self.mySpace.unitHandler.femtosecondsFromTime(self.time_series)
self.pulse_amplitude_series = np.array(pulse_amplitude_series)
self.pulse_amplitude_series = self.pulse_amplitude_series / np.max(self.pulse_amplitude_series)
time_series_non_overlap = self.time_series[self.pulse_overlap_ending_index:]
# density_matrix_series_non_overlap = self.electronic_density_matrix_series[self.pulse_overlap_ending_index]
# density_matrix_frequency_series = np.fft.fftshift(np.fft.fft(density_matrix_series_non_overlap, axis = 0))
self.emission_spectrum = np.fft.fftshift(np.fft.fft(self.electric_field_emission_series[self.pulse_overlap_ending_index:]))
self.frequency_series = 2.0 * np.pi * np.fft.fftshift(np.fft.fftfreq(n = time_series_non_overlap.shape[0], d= self.mySpace.dt))
self.pulse_spectrum = np.abs(self.pumpEx.myFourierTransformedFunction(self.frequency_series))
self.pulse_spectrum += np.abs(self.pumpEy.myFourierTransformedFunction(self.frequency_series))
self.pulse_spectrum += np.abs(self.pumpEz.myFourierTransformedFunction(self.frequency_series))
# self.t_0 = time_series_non_overlap[0]
# T_total = time_series_non_overlap[-1] - time_series_non_overlap[0]
# self.density_matrix_frequency_series = T_total * density_matrix_frequency_series / np.sqrt(.02 * np.pi)
# self.density_matrix_frequency_series = T_total * np.exp(-1.0j * self.frequency_series * t_0) * density_matrix_frequency_series / np.sqrt(.02 * np.pi)
self.frequency_series_wavenumbers = self.mySpace.unitHandler.wavenumbersFromEnergyUnits(self.frequency_series)
self.calculated = True
self.saved_ewf_series = saved_ewf_series
self.saved_ewf_time_series = np.array(saved_ewf_times_series)
self.saved_ewf_time_series_fs = self.mySpace.unitHandler.femtosecondsFromTime(self.saved_ewf_time_series)
self.timeElapsed_seconds = time.time() - overallStartTime
print self.IDstring + ": time elapsed (min) for Dynamics calculation", self.timeElapsed_seconds / 60.0
self.save(self.IDstring)
return self
class ClusterScanPulseWidthPulseCenter(experimentBase.experiment):
experiment_type_string = "Witness_Cluster_Pump_Probe_"
def __init__(self, space,
electronicHamiltonian,
MuxMuyMuzElectronicOperatorTuple,
maximumEvolutionTime,
pulseCenterFrequencies,
minimumPulseWidth,
maximumPulseWidth,
numberOfPulseWidthExperimentsToDo,
initial_state_quantum_numbers = None,
save_EWF_every_n_steps = 100,
string_identity = "",
log_pulse_width_scale = False):
#store all the variables
self.mySpace = space
self.string_identity = string_identity
self.muTuple = MuxMuyMuzElectronicOperatorTuple
self.myElectronicHamiltonian = electronicHamiltonian
if minimumPulseWidth / self.mySpace.dt < 1.0:
minimumPulseWidth = self.mySpace.dt
warnings.warn("requested minimum pulse width smaller than time discretization! changing minimum pulse width to dt")
if log_pulse_width_scale:
self.pulseWidthsToCalculate = np.logspace(np.log10(minimumPulseWidth), np.log10(maximumPulseWidth), numberOfPulseWidthExperimentsToDo)
else:
self.pulseWidthsToCalculate = np.linspace(minimumPulseWidth, maximumPulseWidth, numberOfPulseWidthExperimentsToDo)
self.pulseWidthsToCalculate_femtoseconds = self.mySpace.unitHandler.femtosecondsFromTime(self.pulseWidthsToCalculate)
self.pulseWidthsToCalculate_FWHM_femtoseconds = (2.0 *np.sqrt(2.0 * np.log(2.0))) * self.pulseWidthsToCalculate_femtoseconds
self.pulseCenterToCalculate = pulseCenterFrequencies
pulseBeamTuples = []
for width in self.pulseWidthsToCalculate:
for center in self.pulseCenterToCalculate:
newPulseX = TimeFunction.GaussianCosinePulse(self.mySpace, centerOmega = center, timeSpread = width)
newPulseY = TimeFunction.GaussianCosinePulse(self.mySpace, centerOmega = center, timeSpread = width)
newPulseZ = TimeFunction.zeroTimeFunction(self.mySpace)
pulseBeamTuples.append((newPulseX.plusFunction(), newPulseY.plusFunction(), newPulseZ))
self.pumpBeamTuplesToCalculate = pulseBeamTuples
self.save_EWF_every_n_steps = save_EWF_every_n_steps
self.initial_state_quantum_numbers = initial_state_quantum_numbers
self.maximumEvolutionTime = maximumEvolutionTime
self.pulse_overlap_ending_time = 6.0 * maximumPulseWidth
self.pulse_overlap_ending_index = self.pulse_overlap_ending_time / self.mySpace.dt
self.pulse_overlap_ending_index = int(self.pulse_overlap_ending_index)
#now it's time to figure out how many processors to use
def calculate(self):
#first create all the objects:
self.listOfDynamicsExperiments = []
for ii, pumpTuple in enumerate(self.pumpBeamTuplesToCalculate):
print "It is now", datetime.datetime.now()
idNo = self.string_identity + "_sigma= %s" % str(pumpTuple[0].sigma)+ "_center= %s" % str(pumpTuple[0].omega)
newPP = Basic_One_Gaussian_Excitation(self.mySpace,
self.myElectronicHamiltonian,
self.muTuple,
pumpTuple,
self.maximumEvolutionTime,
initial_state_quantum_numbers = self.initial_state_quantum_numbers,
save_EWF_every_n_steps = self.save_EWF_every_n_steps,
override_pulse_overlap_time_interval = self.pulse_overlap_ending_time,
IDstring = idNo)
self.listOfDynamicsExperiments.append(newPP)
#
print "creating pool"
myPool = ClusterPool.Pool()
print "running subprocesses"
self.listOfDynamicsExperiments = myPool.map('calculate', self.listOfDynamicsExperiments, index_of_slowest_calculation = -1)
print "done!"
if __name__ == "__main__":
pass
```
#### File: src/experiments/experimentBase.py
```python
import os
import os.path
import hickle
import datetime
import cPickle as pickle
import datetime
class experiment(object):
"object which defines I/O operations for an experiment"
def __directoryName__(self):
date = str(datetime.date.today())
directoryName = './data/' + date + "/"
return directoryName
def __save_util__(self, dirName, titleString):
directories = dirName.split("/")
last_dir = ""
for subdirName in directories:
if subdirName == ".":
last_dir = subdirName
continue
last_dir = last_dir + "/" + subdirName
try:
os.mkdir(last_dir)
except OSError:
pass
self.fileName = last_dir + self.experiment_type_string + titleString + ".pkl"
i = 0
if os.path.exists(self.fileName):
while os.path.exists(self.fileName):
i = i + 1
self.fileName = last_dir + self.experiment_type_string + titleString + "---" + str(i) + ".pkl"
fileToBeSaved = open(self.fileName, 'wb')
pickle.dump(self, fileToBeSaved, protocol=-1)
print "file saved: ", self.fileName
fileToBeSaved.close()
def save(self, titleString):
self.__save_util__(self.__directoryName__(), titleString)
def save_in_cwd(self, titleString):
self.__save_util__(os.getcwd(), titleString)
@staticmethod
def openSavedExperiment(filename):
myFile = open(filename, 'rb')
return pickle.load(myFile)
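# Example usage (a minimal sketch; the .pkl path is hypothetical and would be one of
# the files written by experiment.save()):
#
#   exp = experiment.openSavedExperiment('./data/2017-01-01/Basic_Pump_Probe_run.pkl')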
if __name__ == "__main__":
print "Hi!"
```
#### File: src/experiments/TransientGrating.py
```python
import copy
import time
import os
import math
import random
import hickle
import cPickle as pickle
import datetime
import warnings
import numpy as np
import scipy
import scipy.integrate
import scipy.fftpack
import multiprocessing
import matplotlib as mpl
import matplotlib.pyplot as plt
try:
from matplotlib import animation
except Exception:
animation = object()
import spectroscopy.TimeElectronicWavefunction as TimeElectronicWavefunction
import spectroscopy.TimeFunction as TimeFunction
import spectroscopy.TimePerturbation as TimePerturbation
import experimentBase
USE_CLUSTERPOOL = False
NUMBER_PROCESSES = 8
try:
import ClusterPool.ClusterPool as ClusterPool
import ClusterPool.Dispatcher as Dispatcher
except ImportError:
warnings.warn("ClusterPool Module Not Found, Witness experiments will not work")
class base_TG(experimentBase.experiment):
"setup code for all Transient Grating style experiments"
def __init__(self, space,
electronicHamiltonian,
MuxMuyMuzElectronicOperatorTuple,
initialElectronicWavefunction,
pumpBeamTuple,
maximumPopulationTime,
numberOfPopulationTimePoints,
maximumProbeTime,
numberOfProbeTimePoints,
opticalGap = 0.0,
IDstring = "",
numberOfProcessors = None,
use_clusterpool = USE_CLUSTERPOOL):
#store all the variables
self.mySpace = space
self.use_clusterpool = use_clusterpool
self.MuX, self.MuY, self.MuZ = MuxMuyMuzElectronicOperatorTuple
self.dipoleTuple = MuxMuyMuzElectronicOperatorTuple
self.pumpEx, self.pumpEy, self.pumpEz = pumpBeamTuple
self.intitialEWF = initialElectronicWavefunction
self.myElectronicHamiltonian = electronicHamiltonian
self.myFreePropagator = electronicHamiltonian.myPropagator()
#We are given a target maximum waiting and a specific number of time points to take.
#We then have to scale that to the closest multiple of mySpace.dt to make the propagation work properly
self.numberOfPopulationTimePoints = int(numberOfPopulationTimePoints)
targetMaxPopulationTime = float(maximumPopulationTime)
targetPopulationDT = targetMaxPopulationTime / float(numberOfPopulationTimePoints)
self.timeStepsPerPopulationStep = int(np.round(targetPopulationDT / self.mySpace.dt, 0))
#What if it's zero!? We can't have that
if self.timeStepsPerPopulationStep == 0:
self.timeStepsPerPopulationStep = 1
self.populationDT = self.timeStepsPerPopulationStep * self.mySpace.dt
self.maxPopulationTime = self.populationDT * (self.numberOfPopulationTimePoints - 1)
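        # Worked example (sketch): with dt = 0.1, maximumPopulationTime = 10.0 and 50
        # population points, targetPopulationDT = 0.2 rounds to 2 propagation steps,
        # so populationDT = 0.2 and maxPopulationTime = 0.2 * 49 = 9.8.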
self.populationTimes = []
self.minProbeTime = 0.0
self.ProbeTimePoints = numberOfProbeTimePoints
#
self.probeDT = self.mySpace.dt
self.maxProbeTime = self.minProbeTime + self.probeDT * (self.ProbeTimePoints )
self.probeTimes = np.arange(self.minProbeTime, self.maxProbeTime, self.probeDT)
#
self.firstProbeEx = copy.deepcopy(self.pumpEx)
self.firstProbeEy = copy.deepcopy(self.pumpEy)
self.firstProbeEz = copy.deepcopy(self.pumpEz)
self.probePower = self.firstProbeEx.totalPower() + self.firstProbeEy.totalPower() + self.firstProbeEz.totalPower()
self.calculated = False
self.IDstring = IDstring
self.opticalGap = opticalGap
#maximum time the experiment will go on in any case
#the padding for all 3 (really 2) pulses
total_pillow_padding = 4.0 * 3.0 * 2.0 * max(map(lambda x: x.timePillow, pumpBeamTuple))
#plus the maxmimum probe spacing
max_time_to_propagate_probe = self.maxProbeTime
#plus the population time
self.maximumTime = self.maxPopulationTime + total_pillow_padding + max_time_to_propagate_probe
self.maximumTimeSteps = int(self.maximumTime / self.mySpace.dt)
#this line will break the code for some unknown reason
#TODO: find out WHAT THE HECK IS UP HERE with needing a signal divisible by four. probably has something to do with the local oscillator
#this line makes the code work
self.maximumTimeSteps = self.maximumTimeSteps + (4 - self.maximumTimeSteps % 4) - 1
self.maximumTime = self.maximumTimeSteps * (self.mySpace.dt)
self.mySpace.unitHandler.femtosecondsFromTime(self.maximumTime)
self.pulse_overlap_ending_time = 6.0 * max(self.firstProbeEx.sigma, self.firstProbeEy.sigma, self.firstProbeEz.sigma)
self.pulse_overlap_ending_index = self.pulse_overlap_ending_time / self.populationDT
self.pulse_overlap_ending_index = int(self.pulse_overlap_ending_index)
if numberOfProcessors is None:
self.numberOfProcessors = multiprocessing.cpu_count() - 2
else:
self.numberOfProcessors = numberOfProcessors
self.totalSignalShapeTuple = (self.numberOfPopulationTimePoints, self.ProbeTimePoints)
self.excitationMuTuple = (self.dipoleTuple[0].excitationOperator(), self.dipoleTuple[1].excitationOperator(), self.dipoleTuple[2].excitationOperator())
self.relaxationMuTuple = (self.dipoleTuple[0].relaxationOperator(), self.dipoleTuple[1].relaxationOperator(), self.dipoleTuple[2].relaxationOperator())
self.monomer = (self.mySpace.electronicDimensionality == 2)
def calculate(self):
Exception("USE NON-ABSTRACT CLASS")
class PopulationTimeScan(base_TG):
"Transient Grating for multiple population evolution times. RAM efficient"
experiment_type_string = "Basic_Transient_Grating_"
def calculate(self):
overallStartTime = time.time()
if self.calculated:
print "You've already done the calculation, look at the results instead!"
return self
startTime = time.time()
zeroOrderTimeWavefunction = TimeElectronicWavefunction.timeElectronicWavefunction(self.mySpace)
zeroOrderTimeWavefunction.applyOperatorsNTimesOnInitialWavefunction([self.myFreePropagator], N = self.maximumTimeSteps, initialWF = self.intitialEWF)
print "zero order done"
print "elapsed Time: ", time.time() - startTime
pulse1xPLUS = self.firstProbeEx.plusFunction()
pulse1yPLUS = self.firstProbeEy.plusFunction()
pulse1zPLUS = self.firstProbeEz.plusFunction()
pulse1PLUStuple = (pulse1xPLUS, pulse1yPLUS, pulse1zPLUS)
pulse1xMINUS = self.firstProbeEx.minusFunction()
pulse1yMINUS = self.firstProbeEy.minusFunction()
pulse1zMINUS = self.firstProbeEz.minusFunction()
pulse1MINUStuple = (pulse1xMINUS, pulse1yMINUS, pulse1zMINUS)
k1PlusInteractor = TimePerturbation.electricDipoleOneInteraction(space = self.mySpace, electronicHamiltonian = self.myElectronicHamiltonian, MuxMuyMuzElectronicOperatorTuple = self.excitationMuTuple, ExEyEzTimeFunctionTuple = pulse1PLUStuple, maxTime = self.maximumTime)
k1MinusInteractor = TimePerturbation.electricDipoleOneInteraction(space = self.mySpace, electronicHamiltonian = self.myElectronicHamiltonian, MuxMuyMuzElectronicOperatorTuple = self.relaxationMuTuple, ExEyEzTimeFunctionTuple = pulse1MINUStuple, maxTime = self.maximumTime)
startTime = time.time()
k1PlusWavefunction = k1PlusInteractor.goFromTimeWavefunction(zeroOrderTimeWavefunction)
print "first order done"
print "elapsed Time: ", time.time() - startTime
startTime = time.time()
k2Plus_k1MinusWavefunction = k1MinusInteractor.goFromTimeWavefunction(k1PlusWavefunction)
print "second order done"
print "elapsed Time: ", time.time() - startTime
probeEx = copy.deepcopy(self.firstProbeEx)
probeEy = copy.deepcopy(self.firstProbeEy)
probeEz = copy.deepcopy(self.firstProbeEz)
#important for filter length
lowest_frequency_from_H = self.myElectronicHamiltonian.omega_low
signal_size = (self.numberOfPopulationTimePoints , k1PlusWavefunction.length())
signal_lo_size = signal_size
self.totalSignal_lo_frequency = np.zeros(signal_lo_size, dtype = np.complex)
#FREQUENCY SIGNALS
self.totalSignal_xFrequency = np.zeros(signal_size, dtype = np.complex)
self.totalSignal_yFrequency = np.zeros(signal_size, dtype = np.complex)
self.totalSignal_zFrequency = np.zeros(signal_size, dtype = np.complex)
current_T = 0.0
tot = self.numberOfPopulationTimePoints
max_T_index = tot - 1
for population_time_index in range(tot):
self.populationTimes.append(current_T)
startTime = time.time()
i = population_time_index
pulse3tuple = (probeEx, probeEy, probeEz)
pulse3PLUStuple = (probeEx.plusFunction(), probeEy.plusFunction(), probeEz.plusFunction())
pulse3MINUStuple = (probeEx.minusFunction(), probeEy.minusFunction(), probeEz.minusFunction())
k3PlusInteractor = TimePerturbation.electricDipoleOneInteraction(space = self.mySpace, electronicHamiltonian = self.myElectronicHamiltonian, MuxMuyMuzElectronicOperatorTuple = self.excitationMuTuple, ExEyEzTimeFunctionTuple = pulse3PLUStuple, maxTime = self.maximumTime)
k3MinusInteractor = TimePerturbation.electricDipoleOneInteraction(space = self.mySpace, electronicHamiltonian = self.myElectronicHamiltonian, MuxMuyMuzElectronicOperatorTuple = self.relaxationMuTuple, ExEyEzTimeFunctionTuple = pulse3MINUStuple, maxTime = self.maximumTime)
k3PLUS_Wavefunction = interactionHelper(k3PlusInteractor, zeroOrderTimeWavefunction)
k3PLUS_Wavefunction = k3PLUS_Wavefunction.calculate()
k1PLUS_k3MINUS_Wavefunction = interactionHelper(k3MinusInteractor, k1PlusWavefunction)
k1PLUS_k3MINUS_Wavefunction = k1PLUS_k3MINUS_Wavefunction.calculate()
if self.monomer == False:
k1PLUS_k3PLUS_Wavefunction = interactionHelper(k3PlusInteractor, k1PlusWavefunction)
k1PLUS_k3PLUS_Wavefunction = k1PLUS_k3PLUS_Wavefunction.calculate()
else:
k1PLUS_k3PLUS_Wavefunction = None
k1PLUS_k2MINUS_k3PLUS_Wavefunction = interactionHelper(k3PlusInteractor, k2Plus_k1MinusWavefunction)
k1PLUS_k2MINUS_k3PLUS_Wavefunction = k1PLUS_k2MINUS_k3PLUS_Wavefunction.calculate()
# k1 - k2 + k3 // none
signal1Helper = overlapHelper(zeroOrderTimeWavefunction, k1PLUS_k2MINUS_k3PLUS_Wavefunction, self.dipoleTuple, pulse3PLUStuple, lowest_frequency_from_H, population_time_index, max_T_index, n = self.timeStepsPerPopulationStep)
signal1Helper = signal1Helper.calculate()
obj = signal1Helper
self.totalSignal_xFrequency[i] = self.totalSignal_xFrequency[i] + obj.xfrequency_spectrum
            self.totalSignal_yFrequency[i] = self.totalSignal_yFrequency[i] + obj.yfrequency_spectrum
            self.totalSignal_zFrequency[i] = self.totalSignal_zFrequency[i] + obj.zfrequency_spectrum
self.totalSignal_lo_frequency[i] = self.totalSignal_lo_frequency[i] + obj.local_oscillator_signal
signal1Helper = None
k1PLUS_k2MINUS_k3PLUS_Wavefunction = None
# k3 // k2-k1
signal2Helper = overlapHelper(k2Plus_k1MinusWavefunction, k3PLUS_Wavefunction, self.dipoleTuple, pulse3PLUStuple, lowest_frequency_from_H, population_time_index, max_T_index, n = self.timeStepsPerPopulationStep)
signal2Helper = signal2Helper.calculate()
obj = signal2Helper
self.totalSignal_xFrequency[i] = self.totalSignal_xFrequency[i] + obj.xfrequency_spectrum
            self.totalSignal_yFrequency[i] = self.totalSignal_yFrequency[i] + obj.yfrequency_spectrum
            self.totalSignal_zFrequency[i] = self.totalSignal_zFrequency[i] + obj.zfrequency_spectrum
self.totalSignal_lo_frequency[i] = self.totalSignal_lo_frequency[i] + obj.local_oscillator_signal
signal2Helper = None
k3PLUS_Wavefunction = None
# k1 // k2 - k3
signal3Helper = overlapHelper(k1PLUS_k3MINUS_Wavefunction, k1PlusWavefunction, self.dipoleTuple, pulse3PLUStuple, lowest_frequency_from_H, population_time_index, max_T_index, n = self.timeStepsPerPopulationStep)
signal3Helper = signal3Helper.calculate()
obj = signal3Helper
self.totalSignal_xFrequency[i] = self.totalSignal_xFrequency[i] + obj.xfrequency_spectrum
            self.totalSignal_yFrequency[i] = self.totalSignal_yFrequency[i] + obj.yfrequency_spectrum
            self.totalSignal_zFrequency[i] = self.totalSignal_zFrequency[i] + obj.zfrequency_spectrum
self.totalSignal_lo_frequency[i] = self.totalSignal_lo_frequency[i] + obj.local_oscillator_signal
signal3Helper = None
k1PLUS_k3MINUS_Wavefunction = None
# k1 + k3 // k2
if self.monomer == False:
signal5Helper = overlapHelper(k1PlusWavefunction, k1PLUS_k3PLUS_Wavefunction, self.dipoleTuple, pulse3PLUStuple, lowest_frequency_from_H, population_time_index, max_T_index, n = self.timeStepsPerPopulationStep)
signal5Helper = signal5Helper.calculate()
obj = signal5Helper
self.totalSignal_xFrequency[i] = self.totalSignal_xFrequency[i] + obj.xfrequency_spectrum
                self.totalSignal_yFrequency[i] = self.totalSignal_yFrequency[i] + obj.yfrequency_spectrum
                self.totalSignal_zFrequency[i] = self.totalSignal_zFrequency[i] + obj.zfrequency_spectrum
self.totalSignal_lo_frequency[i] = self.totalSignal_lo_frequency[i] + obj.local_oscillator_signal
signal5Helper = None
k1PLUS_k3PLUS_Wavefunction = None
else:
pass
probeEx = probeEx.pulseCopyJumpedForward(self.populationDT)
probeEy = probeEy.pulseCopyJumpedForward(self.populationDT)
probeEz = probeEz.pulseCopyJumpedForward(self.populationDT)
current_T = current_T + self.populationDT
print "elapsed Time: ", time.time() - startTime
self.populationTimes = np.array(self.populationTimes)
self.frequency_values = obj.frequency_values
self.frequency_values_wavenumbers = self.mySpace.unitHandler.wavenumbersFromEnergyUnits(self.frequency_values)
self.populationTimes = np.array(self.populationTimes)
self.populationTimes_fs = self.mySpace.unitHandler.femtosecondsFromTime(self.populationTimes)
#No imaginary unit or factor of 2 is needed because the auxiliary object does this calculation
self.pumpProbe_signal = -np.real(scipy.integrate.simps(self.totalSignal_lo_frequency, axis = 1) * (self.frequency_values[1] - self.frequency_values[0])) / (2.0 * np.pi)
self.totalSignal_frequency_power = np.abs(self.totalSignal_xFrequency)**2 + np.abs(self.totalSignal_yFrequency)**2 + np.abs(self.totalSignal_zFrequency)**2
self.totalSignal_frequency_abs = np.sqrt(self.totalSignal_frequency_power)
self.calculationTime = time.time() - overallStartTime
return self
class PopulationTimeScan_cluster(base_TG):
"Transient Grating for multiple population evolution times. RAM efficient"
experiment_type_string = "Cluster_Transient_Grating_"
def calculate(self):
overallStartTime = time.time()
if self.calculated:
print "You've already done the calculation, look at the results instead!"
return self
startTime = time.time()
zeroOrderTimeWavefunction = TimeElectronicWavefunction.timeElectronicWavefunction(self.mySpace)
zeroOrderTimeWavefunction.applyOperatorsNTimesOnInitialWavefunction([self.myFreePropagator], N = self.maximumTimeSteps, initialWF = self.intitialEWF)
print "zero order done"
print "elapsed Time: ", time.time() - startTime
zero_order_file_location = Dispatcher.Dispatcher.save_shared_object_return_filename(zeroOrderTimeWavefunction, "zero_order_ewf")
print "zero order located at: ", zero_order_file_location
saved_shared_filenames = [zero_order_file_location]
pulse1xPLUS = self.firstProbeEx.plusFunction()
pulse1yPLUS = self.firstProbeEy.plusFunction()
pulse1zPLUS = self.firstProbeEz.plusFunction()
pulse1PLUStuple = (pulse1xPLUS, pulse1yPLUS, pulse1zPLUS)
pulse1xMINUS = self.firstProbeEx.minusFunction()
pulse1yMINUS = self.firstProbeEy.minusFunction()
pulse1zMINUS = self.firstProbeEz.minusFunction()
pulse1MINUStuple = (pulse1xMINUS, pulse1yMINUS, pulse1zMINUS)
k1PlusInteractor = TimePerturbation.electricDipoleOneInteraction(space = self.mySpace, electronicHamiltonian = self.myElectronicHamiltonian, MuxMuyMuzElectronicOperatorTuple = self.excitationMuTuple, ExEyEzTimeFunctionTuple = pulse1PLUStuple, maxTime = self.maximumTime)
k1MinusInteractor = TimePerturbation.electricDipoleOneInteraction(space = self.mySpace, electronicHamiltonian = self.myElectronicHamiltonian, MuxMuyMuzElectronicOperatorTuple = self.relaxationMuTuple, ExEyEzTimeFunctionTuple = pulse1MINUStuple, maxTime = self.maximumTime)
startTime = time.time()
k1PlusWavefunction = k1PlusInteractor.goFromTimeWavefunction(zeroOrderTimeWavefunction)
zeroOrderTimeWavefunction = None
print "first order done"
print "elapsed Time: ", time.time() - startTime
oneAndTwoPlus_file_location = Dispatcher.Dispatcher.save_shared_object_return_filename(k1PlusWavefunction, "1and2Plus_ewf" )
saved_shared_filenames.append(oneAndTwoPlus_file_location)
startTime = time.time()
k2Plus_k1MinusWavefunction = k1MinusInteractor.goFromTimeWavefunction(k1PlusWavefunction)
TwoPlus_OneMinus_file_location = Dispatcher.Dispatcher.save_shared_object_return_filename(k2Plus_k1MinusWavefunction, "2Plus_1Minus_ewf")
saved_shared_filenames.append(TwoPlus_OneMinus_file_location)
signal_size = (self.numberOfPopulationTimePoints , k1PlusWavefunction.length())
k1PlusWavefunction = None
k2Plus_k1MinusWavefunction = None
print "second order done"
print "elapsed Time: ", time.time() - startTime
probeEx = copy.deepcopy(self.firstProbeEx)
probeEy = copy.deepcopy(self.firstProbeEy)
probeEz = copy.deepcopy(self.firstProbeEz)
#important for filter length
lowest_frequency_from_H = self.myElectronicHamiltonian.omega_low
signal_lo_size = signal_size
self.totalSignal_lo_frequency = np.zeros(signal_lo_size, dtype = np.complex)
#FREQUENCY SIGNALS
self.totalSignal_xFrequency = np.zeros(signal_size, dtype = np.complex)
self.totalSignal_yFrequency = np.zeros(signal_size, dtype = np.complex)
self.totalSignal_zFrequency = np.zeros(signal_size, dtype = np.complex)
my_tg_cluster_helpers = []
current_T = 0.0
total_T_points = self.numberOfPopulationTimePoints
max_T_index = total_T_points - 1
for population_time_index in range(total_T_points):
self.populationTimes.append(current_T)
startTime = time.time()
i = population_time_index
pulse3tuple = (probeEx, probeEy, probeEz)
pulse3PLUStuple = (probeEx.plusFunction(), probeEy.plusFunction(), probeEz.plusFunction())
pulse3MINUStuple = (probeEx.minusFunction(), probeEy.minusFunction(), probeEz.minusFunction())
k3PlusInteractor = TimePerturbation.electricDipoleOneInteraction(space = self.mySpace, electronicHamiltonian = self.myElectronicHamiltonian, MuxMuyMuzElectronicOperatorTuple = self.excitationMuTuple, ExEyEzTimeFunctionTuple = pulse3PLUStuple, maxTime = self.maximumTime)
k3MinusInteractor = TimePerturbation.electricDipoleOneInteraction(space = self.mySpace, electronicHamiltonian = self.myElectronicHamiltonian, MuxMuyMuzElectronicOperatorTuple = self.relaxationMuTuple, ExEyEzTimeFunctionTuple = pulse3MINUStuple, maxTime = self.maximumTime)
pulse3PlusTuple = (probeEx.plusFunction(), probeEy.plusFunction(), probeEz.plusFunction())
#cluster_TG_helper give it:
#T_index
#ket
#bra
#muTuple
#pulse3Tuple
# zero_order_file_location
# oneAndTwoPlus_file_location
# TwoPlus_OneMinus_file_location
# k1 - k2 + k3 // none
bra = zero_order_file_location
ket = (k3PlusInteractor, TwoPlus_OneMinus_file_location)
new_cluster_helper = TG_Cluster_Helper(population_time_index, self.dipoleTuple, pulse3PlusTuple, bra, ket, lowest_frequency_from_H, max_T_index = max_T_index, n = self.timeStepsPerPopulationStep)
my_tg_cluster_helpers.append(new_cluster_helper)
# k3 // k2-k1
bra = TwoPlus_OneMinus_file_location
ket = (k3PlusInteractor, zero_order_file_location)
new_cluster_helper = TG_Cluster_Helper(population_time_index, self.dipoleTuple, pulse3PlusTuple, bra, ket, lowest_frequency_from_H, max_T_index = max_T_index, n = self.timeStepsPerPopulationStep)
my_tg_cluster_helpers.append(new_cluster_helper)
# k1 // k2 - k3
bra = (k3MinusInteractor, oneAndTwoPlus_file_location)
ket = oneAndTwoPlus_file_location
new_cluster_helper = TG_Cluster_Helper(population_time_index, self.dipoleTuple, pulse3PlusTuple, bra, ket, lowest_frequency_from_H, max_T_index = max_T_index, n = self.timeStepsPerPopulationStep)
my_tg_cluster_helpers.append(new_cluster_helper)
if self.monomer == False:
#k1 + k3 // k2
bra = oneAndTwoPlus_file_location
ket = (k3PlusInteractor, oneAndTwoPlus_file_location)
new_cluster_helper = TG_Cluster_Helper(population_time_index, self.dipoleTuple, pulse3PlusTuple, bra, ket, lowest_frequency_from_H, max_T_index = max_T_index, n = self.timeStepsPerPopulationStep)
my_tg_cluster_helpers.append(new_cluster_helper)
else:
pass
probeEx = probeEx.pulseCopyJumpedForward(self.populationDT)
probeEy = probeEy.pulseCopyJumpedForward(self.populationDT)
probeEz = probeEz.pulseCopyJumpedForward(self.populationDT)
current_T = current_T + self.populationDT
if self.use_clusterpool:
print "creating pool"
myPool = ClusterPool.Pool()
print "running subprocesses"
calculated_tg_cluster_helpers = myPool.map('calculate', my_tg_cluster_helpers)
else:
print "creating pool"
myPool = multiprocessing.Pool(NUMBER_PROCESSES)
print "running subprocesses"
calculated_tg_cluster_helpers = myPool.map(lambda x: x.calculate(), my_tg_cluster_helpers)
for calculated_tg_cluster_helper in calculated_tg_cluster_helpers:
i = calculated_tg_cluster_helper.population_time_index
obj = calculated_tg_cluster_helper
self.totalSignal_xFrequency[i] = self.totalSignal_xFrequency[i] + obj.xfrequency_spectrum
self.totalSignal_yFrequency[i] = self.totalSignal_yFrequency[i] + obj.yfrequency_spectrum
self.totalSignal_zFrequency[i] = self.totalSignal_zFrequency[i] + obj.zfrequency_spectrum
self.totalSignal_lo_frequency[i] = self.totalSignal_lo_frequency[i] + obj.local_oscillator_signal
self.frequency_values = 2.0 * np.pi * np.fft.fftshift(np.fft.fftfreq(obj.local_oscillator_signal.shape[0], d = self.mySpace.dt))
self.frequency_values_wavenumbers = self.mySpace.unitHandler.wavenumbersFromEnergyUnits(self.frequency_values)
self.populationTimes = np.array(self.populationTimes)
self.populationTimes_fs = self.mySpace.unitHandler.femtosecondsFromTime(self.populationTimes)
#No 2.0 or 1.0j is needed because the helper function does those multiplications
self.pumpProbe_signal = -np.real( scipy.integrate.simps(self.totalSignal_lo_frequency, axis = 1) * (self.frequency_values[1] - self.frequency_values[0])) / (2.0 * np.pi)
self.totalSignal_frequency_power = np.abs(self.totalSignal_xFrequency)**2 + np.abs(self.totalSignal_yFrequency)**2 + np.abs(self.totalSignal_zFrequency)**2
self.totalSignal_frequency_abs = np.sqrt(self.totalSignal_frequency_power)
self.calculationTime = time.time() - overallStartTime
#delete shared files
for filename in saved_shared_filenames:
os.remove(filename)
return self
class TG_Cluster_Helper(experimentBase.experiment):
useFilter = True
base_frequency_multiple = 2.0
def load_object_from_file(self, file_location):
object_file = open(file_location, "rb")
loaded_object = pickle.load(object_file)
object_file.close()
return loaded_object
def __init__(self, population_time_index, muTuple, pulse3PlusTuple, bra, ket, lowest_frequency, max_T_index, n):
"bra and ket are either filename strings or a tuple of an interactor object and a file string pointing to a wavefunction to be interacted with first"
self.bra = bra
self.ket = ket
self.muTuple = muTuple
self.pulse3PlusTuple = pulse3PlusTuple
self.lowest_frequency = lowest_frequency
self.population_time_index = population_time_index
self.max_T_index = max_T_index
self.n = n
self.calculated = False
def calculate(self):
if self.calculated:
return self
dt = self.muTuple[0].mySpace.dt
if isinstance(self.bra, tuple):
interactor = self.bra[0]
wavefunction_location = self.bra[1]
wavefunction = self.load_object_from_file(wavefunction_location)
calculated_bra = interactor.goFromTimeWavefunction(wavefunction)
else:
calculated_bra = self.load_object_from_file(self.bra)
#print "calculated bra object: ", calculated_bra
if isinstance(self.ket, tuple):
interactor = self.ket[0]
wavefunction_location = self.ket[1]
wavefunction = self.load_object_from_file(wavefunction_location)
calculated_ket = interactor.goFromTimeWavefunction(wavefunction)
else:
calculated_ket = self.load_object_from_file(self.ket)
#print "calculated ket object: ", calculated_ket
signalTuple = calculated_ket.timeOverlapWithOtherBraEWFOfPolarization(calculated_bra, self.muTuple)
xSignal = 1.0j * signalTuple[0]
ySignal = 1.0j * signalTuple[1]
zSignal = 1.0j * signalTuple[2]
#clear memory
self.bra = None
self.ket = None
self.muTuple = None
n_time_point = xSignal.shape[0]
if TG_Cluster_Helper.useFilter:
filter_length_time = TG_Cluster_Helper.base_frequency_multiple * (2.0 * np.pi / self.lowest_frequency )
filter_length_steps = int(filter_length_time / dt)
xf = n_time_point - (self.max_T_index - self.population_time_index) * self.n - 1
x0 = xf - filter_length_steps
# Here we define a third order polynomial that follows the following equations:
# f(x0) = 1
# f(xf) = 0
# f'(x0) = 0
# f'(xf) = 0
denominator = (x0 - xf)**3
a = -2.0 / denominator
b = 3.0 * (x0 + xf) / denominator
c = -6.0 * x0 * xf / denominator
d = (3.0 * x0 * xf**2 - xf**3) / denominator
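            # Equivalently, f(x) = 2*s**3 - 3*s**2 + 1 with s = (x - x0)/(xf - x0): a
            # cubic (Hermite) ramp that falls smoothly from 1 at x0 to 0 at xf with
            # zero slope at both endpoints.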
filterVals = np.ones(n_time_point, dtype = np.complex)
for i in range(x0, xf+1):
filterVals[i] = a * (i**3) + b * (i**2) + c * i + d
for i in range(xf+1, n_time_point):
filterVals[i] = 0.0
else:
filterVals = 1.0
ft_constant = (dt) * n_time_point
xfrequency_spectrum = np.fft.fftshift(np.fft.ifft(xSignal * filterVals)) * ft_constant
yfrequency_spectrum = np.fft.fftshift(np.fft.ifft(ySignal * filterVals)) * ft_constant
zfrequency_spectrum = np.fft.fftshift(np.fft.ifft(zSignal * filterVals)) * ft_constant
self.xfrequency_spectrum = xfrequency_spectrum
self.yfrequency_spectrum = yfrequency_spectrum
self.zfrequency_spectrum = zfrequency_spectrum
self.local_oscillator_signal = np.array([])
self.frequency_values = 2.0 * np.pi * np.fft.fftshift(np.fft.fftfreq(n_time_point, d = dt))
try:
convoluterX = self.pulse3PlusTuple[0].myFourierTransformedFunction(self.frequency_values)
except Exception:
convoluterX = 0
try:
convoluterY = self.pulse3PlusTuple[1].myFourierTransformedFunction(self.frequency_values)
except Exception:
convoluterY = 0
try:
convoluterZ = self.pulse3PlusTuple[2].myFourierTransformedFunction(self.frequency_values)
except Exception:
convoluterZ = 0
local_oscillator_signal = convoluterX*np.conj(xfrequency_spectrum) + convoluterY*np.conj(yfrequency_spectrum) + convoluterZ*np.conj(zfrequency_spectrum)
self.local_oscillator_signal = 2.0 * local_oscillator_signal
self.calculated = True
#clear memory
self.pulse3PlusTuple = None
return self
class WitnessExperiment(experimentBase.experiment):
experiment_type_string = "Witness_Cluster_Transient_Grating_"
def __init__(self, space,
electronicHamiltonian,
MuxMuyMuzElectronicOperatorTuple,
initialElectronicWavefunction,
maximumEvolutionTime,
numberOfTimePoints,
centerFrequency,
minimumPulseWidth,
maximumPulseWidth,
maximumProbeTime,
numberOfProbeTimePoints,
numberOfPulseWidthExperimentsToDo,
numberOfProcessors = None,
id_string = "",
string_identity = ""):
#store all the variables
self.mySpace = space
self.string_identity = string_identity
self.muTuple = MuxMuyMuzElectronicOperatorTuple
self.intitialEWF = initialElectronicWavefunction
self.myElectronicHamiltonian = electronicHamiltonian
self.pulseWidthsToCalculate = np.linspace(minimumPulseWidth, maximumPulseWidth, numberOfPulseWidthExperimentsToDo)
self.pulseWidthsToCalculate_femtoseconds = self.mySpace.unitHandler.femtosecondsFromTime(self.pulseWidthsToCalculate)
pulseBeamTuples = []
for width in self.pulseWidthsToCalculate:
newPulseX = TimeFunction.GaussianCosinePulse(self.mySpace, centerOmega = centerFrequency, timeSpread = width)
newPulseY = TimeFunction.GaussianCosinePulse(self.mySpace, centerOmega = centerFrequency, timeSpread = width)
newPulseZ = TimeFunction.zeroTimeFunction(self.mySpace)
pulseBeamTuples.append((newPulseX, newPulseY, newPulseZ))
self.pumpBeamTuplesToCalculate = pulseBeamTuples
self.TmaxPp = float(maximumEvolutionTime)
self.dtPp = float(maximumEvolutionTime) / float(numberOfTimePoints)
self.Npp = int(numberOfTimePoints)
self.maximumProbeTime = maximumProbeTime
self.numberOfProbeTimePoints = numberOfProbeTimePoints
self.pulse_overlap_ending_time = 6.0 * maximumPulseWidth
self.pulse_overlap_ending_index = self.pulse_overlap_ending_time / self.dtPp
self.pulse_overlap_ending_index = int(self.pulse_overlap_ending_index)
#now it's time to figure out how many processors to use
if numberOfProcessors is None:
#Give the computer some wiggle room to work whilst running the calculation
self.numberOfProcessors = multiprocessing.cpu_count() - 1
else:
self.numberOfProcessors = numberOfProcessors
def calculate(self):
#first create all the objects:
self.listOfTGExperiments = []
self.W_list = []
self.W_fascimile_n1_list = []
self.W_fascimile_n2_list = []
pp_signals = []
pp_fascimile_n1_signals = []
pp_fascimile_n2_signals = []
for ii, pumpTuple in enumerate(self.pumpBeamTuplesToCalculate):
print "It is now", datetime.datetime.now()
idNo = self.string_identity + "_sigma= %s" % str(pumpTuple[0].sigma)
newTG = PopulationTimeScan_cluster(space = self.mySpace,
electronicHamiltonian = self.myElectronicHamiltonian,
MuxMuyMuzElectronicOperatorTuple = self.muTuple,
initialElectronicWavefunction = self.intitialEWF,
pumpBeamTuple = pumpTuple,
maximumPopulationTime = self.TmaxPp,
numberOfPopulationTimePoints = self.Npp,
maximumProbeTime = self.maximumProbeTime,
numberOfProbeTimePoints = self.numberOfProbeTimePoints,
IDstring = idNo,
numberOfProcessors = self.numberOfProcessors)
print idNo, "calculating...."
newTG = newTG.calculate()
newTG.save(idNo)
#this saves a tremendous amount of memory, but makes data lookup harder
# self.listOfTGExperiments.append(newTG)
self.listOfTGExperiments.append(None)
dOmega = newTG.frequency_values[1] - newTG.frequency_values[0]
pp = newTG.pumpProbe_signal
pp_fascimile_n1 = scipy.integrate.simps(newTG.totalSignal_frequency_abs, dx = dOmega, axis = 1)
pp_fascimile_n2 = scipy.integrate.simps(newTG.totalSignal_frequency_abs**2, dx = dOmega, axis = 1)
pp_signals.append(pp)
pp_fascimile_n1_signals.append(pp_fascimile_n1)
pp_fascimile_n2_signals.append(pp_fascimile_n2)
#keep only the non-overlapping part of the signal (after the pulse-overlap window)
n_ft_time_point = pp[self.pulse_overlap_ending_index:].shape[0]
self.pp_oscillation_frequencies = 2.0 * np.pi * np.fft.fftshift(np.fft.fftfreq(n_ft_time_point, d = self.dtPp))
self.pp_oscillation_frequencies_wavenumbers = self.mySpace.unitHandler.wavenumbersFromEnergyUnits(self.pp_oscillation_frequencies)
total_signal_shape = (len(pp_signals), pp.shape[0])
nonOverlap_signal_shape = (len(pp_signals), pp[self.pulse_overlap_ending_index:].shape[0])
total_pp_signal = np.zeros(total_signal_shape, dtype = np.complex)
total_pp_fascimile_n1_signal = np.zeros(total_signal_shape, dtype = np.complex)
total_pp_fascimile_n2_signal = np.zeros(total_signal_shape, dtype = np.complex)
total_ft_pp_signal = np.zeros(nonOverlap_signal_shape, dtype = np.complex)
for i, tg in enumerate(self.listOfTGExperiments):
total_pp_signal[i] = pp_signals[i]
total_pp_fascimile_n1_signal[i] = pp_fascimile_n1_signals[i]
total_pp_fascimile_n2_signal[i] = pp_fascimile_n2_signals[i]
for i, tg in enumerate(self.listOfTGExperiments):
pp_nonOverlap = total_pp_signal[i, self.pulse_overlap_ending_index:]
pp_fascimile_n1_nonOverlap = total_pp_fascimile_n1_signal[i, self.pulse_overlap_ending_index:]
pp_fascimile_n2_nonOverlap = total_pp_fascimile_n2_signal[i, self.pulse_overlap_ending_index:]
#average values
pp_average = np.average(pp_nonOverlap)
pp_fascimile_n1_average = np.average(pp_fascimile_n1_nonOverlap)
pp_fascimile_n2_average = np.average(pp_fascimile_n2_nonOverlap)
#signals to be used for W
pp_forW = pp_nonOverlap - pp_average
pp_fascimile_n1_forW = pp_fascimile_n1_nonOverlap - pp_fascimile_n1_average
pp_fascimile_n2_forW = pp_fascimile_n2_nonOverlap - pp_fascimile_n2_average
#FT values
total_ft_pp_signal[i] = np.fft.fftshift(np.fft.fft(pp_forW))
#W values
W = scipy.integrate.simps(np.abs(pp_forW)**2) * self.dtPp
W_fascimile_n1 = scipy.integrate.simps(np.abs(pp_fascimile_n1_forW)**2) * self.dtPp
W_fascimile_n2 = scipy.integrate.simps(np.abs(pp_fascimile_n2_forW)**2) * self.dtPp
self.W_list.append(W)
self.W_fascimile_n1_list.append(W_fascimile_n1)
self.W_fascimile_n2_list.append(W_fascimile_n2)
self.total_pp_signal = total_pp_signal
self.total_pp_fascimile_n1_signal = total_pp_fascimile_n1_signal
self.total_pp_fascimile_n2_signal = total_pp_fascimile_n2_signal
self.pulseWidthsToCalculate_FWHM_femtoseconds = (2.0 *np.sqrt(2.0 * np.log(2.0))) * self.pulseWidthsToCalculate_femtoseconds
self.total_ft_pp_signal = total_ft_pp_signal
print "ALL DONE! :D"
def BIG_RED_BUTTON(ppObject):
"just here to make embarassingly parallel calculations easier"
return ppObject.calculate()
class interactionHelper(object):
def __init__(self, interactor, wavefunction):
self.interactor = interactor
self.wavefunction = wavefunction
def calculate(self):
output = self.interactor.goFromTimeWavefunction(self.wavefunction)
self.interactor = None
self.wavefunction = None
return output
class overlapHelper(object):
base_frequency_multiple = 2.0
useFilter = True
def __init__(self, bra, ket, muTuple, pulse3PlusTuple, lowest_frequency, population_time_index, max_T_index, n):
self.bra = bra
self.ket = ket
self.muTuple = muTuple
self.pulse3PlusTuple = pulse3PlusTuple
self.lowest_frequency = lowest_frequency
self.population_time_index = population_time_index
self.max_T_index = max_T_index
self.n = n
self.calculated = False
def calculate(self):
if self.calculated:
return self
dt = self.bra.mySpace.dt
signalTuple = self.ket.timeOverlapWithOtherBraEWFOfPolarization(self.bra, self.muTuple)
xSignal = 1.0j * signalTuple[0]
ySignal = 1.0j * signalTuple[1]
zSignal = 1.0j * signalTuple[2]
#clear memory
self.bra = None
self.ket = None
self.muTuple = None
n_time_point = xSignal.shape[0]
time_values = dt * np.array(range(0, n_time_point))
if overlapHelper.useFilter:
filter_length_time = overlapHelper.base_frequency_multiple * (2.0 * np.pi / self.lowest_frequency )
filter_length_steps = int(filter_length_time / dt)
xf = n_time_point - 1 - (self.max_T_index - self.population_time_index) * self.n
x0 = xf - filter_length_steps
# Here we define a third order polynomial that follows the following equations:
# f(x0) = 1
# f(xf) = 0
# f'(x0) = 0
# f'(xf) = 0
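# (As in TG_Cluster_Helper above, this is the falling smoothstep 2u^3 - 3u^2 + 1
#  with u = (x - x0)/(xf - x0).)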
denominator = (x0 - xf)**3
a = -2.0 / denominator
b = 3.0 * (x0 + xf) / denominator
c = -6.0 * x0 * xf / denominator
d = (3.0 * x0 * xf**2 - xf**3) / denominator
filterVals = np.ones(n_time_point, dtype = np.complex)
for i in range(x0, xf+1):
filterVals[i] = a * (i**3) + b * (i**2) + c * i + d
for i in range(xf+1, n_time_point):
filterVals[i] = 0.0
else:
filterVals = 1.0
ft_constant = (time_values[1] - time_values[0]) * n_time_point
xfrequency_spectrum = np.fft.fftshift(np.fft.ifft(xSignal * filterVals)) * ft_constant
yfrequency_spectrum = np.fft.fftshift(np.fft.ifft(ySignal * filterVals)) * ft_constant
zfrequency_spectrum = np.fft.fftshift(np.fft.ifft(zSignal * filterVals)) * ft_constant
self.frequency_values = 2.0 * np.pi * np.fft.fftshift(np.fft.fftfreq(n_time_point, d = dt))
self.xfrequency_spectrum = xfrequency_spectrum
self.yfrequency_spectrum = yfrequency_spectrum
self.zfrequency_spectrum = zfrequency_spectrum
self.time_values = time_values
self.local_oscillator_signal = np.array([])
try:
convoluterX = self.pulse3PlusTuple[0].myFourierTransformedFunction(self.frequency_values)
except Exception:
convoluterX = 0
try:
convoluterY = self.pulse3PlusTuple[1].myFourierTransformedFunction(self.frequency_values)
except Exception:
convoluterY = 0
try:
convoluterZ = self.pulse3PlusTuple[2].myFourierTransformedFunction(self.frequency_values)
except Exception:
convoluterZ = 0
local_oscillator_signal = convoluterX*np.conj(xfrequency_spectrum) + convoluterY*np.conj(yfrequency_spectrum) + convoluterZ*np.conj(zfrequency_spectrum)
self.local_oscillator_signal = 2.0 * local_oscillator_signal
self.calculated = True
#clear memory
self.pulse3PlusTuple = None
return self
if __name__ == "__main__":
print "heeeeey"
```
#### File: spectroscopy/src/TimeElectronicWavefunction.py
```python
import copy
import time
import matplotlib.pyplot as plt
import scipy.integrate
try:
from matplotlib import animation
except:
animation = object() #allows this to still run on clusters without the animation package installed
import numpy as np
import scipy
import Spacetime
import NuclearOperator
import NuclearWavefunction
import ElectronicWavefunction
import ElectronicOperator
import TimeNuclearWavefunction
class timeElectronicWavefunction(object):
"""A collection of electronic wavefunctions to represent the evolution of
an electronic/nuclear state"""
def __init__(self, SpaceToExistIn):
self.mySpace = SpaceToExistIn
self.dim = self.mySpace.electronicDimensionality
self.listOfTimePositionAmplitudes = None
self.timeSeries = []
self.__currentTimeIndex = None #the most recently calculated index
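# Storage note: listOfTimePositionAmplitudes holds one array per electronic dimension;
# each array stacks the nuclear-space amplitude for every stored time step
# (allocated via mySpace.functionSpacetimeZero in allocateSpace below).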
def allocateSpace(self, nSteps):
"guess that you will use nSteps of time steps"
self.listOfTimePositionAmplitudes = []
for i in range(self.dim):
self.listOfTimePositionAmplitudes.append(self.mySpace.functionSpacetimeZero(nSteps))
def zero_copy(self):
"Make an identical copy, except that it's zero"
output = copy.deepcopy(self)
for i in range(self.dim):
old_amp = self.listOfTimePositionAmplitudes[i]
old_amp_shape = old_amp.shape
zero = np.zeros(old_amp_shape, np.complex)
output.listOfTimePositionAmplitudes[i] = zero
return output
def allocateMoreSpace(self, nSteps):
"guess that you will use nSteps more of time steps"
#calculate new size
oldSize = self.listOfTimePositionAmplitudes[0].shape[0]
newSize = oldSize + nSteps
#allocate that space
newListOfTimePositionAmplitudes = []
for i in range(self.dim):
newListOfTimePositionAmplitudes.append(self.mySpace.functionSpacetimeZero(newSize))
#preserve old data
for elecIndex in range(self.dim):
newListOfTimePositionAmplitudes[elecIndex][0:oldSize] = self.listOfTimePositionAmplitudes[elecIndex]
self.listOfTimePositionAmplitudes = newListOfTimePositionAmplitudes
def setInitialWavefunction(self, initialWF):
"set the weavefunction at time zero to be the given wavefunction"
self.initialWF = initialWF
self.__currentTimeIndex = 0
self[self.__currentTimeIndex] = initialWF
self.timeSeries.append(0.0)
def applyOperatorsAndAdvance(self, listOfOperators, overrideDT = None):
"applies the given list of operators and returns the new Electronic Wavefunction"
if overrideDT is not None:
dt = overrideDT
else:
dt = self.mySpace.dt
self.timeSeries.append(self.timeSeries[-1] + dt)
self.__currentTimeIndex = self.__currentTimeIndex + 1
#now to apply the operator
newEWF = self[self.__currentTimeIndex -1]
for operator in listOfOperators:
newEWF = operator.APPLY( newEWF )
self[self.__currentTimeIndex] = newEWF
return newEWF
def applyOperatorsNTimes(self, listOfOperators, N, overrideDT = None):
"Applys a list of operators N times and stores the output, returns the last applied state"
out = self.currentElectronicWavefunction()
for i in range(N+2):
out = self.applyOperatorsAndAdvance(listOfOperators, overrideDT)
return out
def applyOperatorsNTimesOnInitialWavefunction(self, listOfElectronicOperators, N, initialWF, overrideDT = None):
"Returns last electronic wavefunction to be calculated"
self.allocateSpace(N + 1)
self.setInitialWavefunction(initialWF)
#check that dt is the same
if overrideDT is not None:
dt = overrideDT
else:
dt = self.mySpace.dt
#now to apply the operator
for timeIndex in range(N): #not +1?
newEWF = self.applyOperatorsAndAdvance(listOfElectronicOperators, overrideDT = dt)
return newEWF
def currentTime(self):
"Most current time used in a calculation"
return self.timeSeries[self.__currentTimeIndex]
def currentElectronicWavefunction(self):
"most currently calculated function"
return self[self.__currentTimeIndex]
def length(self):
"number of calculated steps in time"
return len(self.timeSeries)
def shape(self):
"primarily used for error checking to see the amount of data, returns a string"
output = ""
for i in range(self.dim):
output = output + str(self.listOfTimePositionAmplitudes[i].shape)+ ", "
return output
def normSeries(self):
"For error-checking: how does the norm of the wavefunction change as a function of time"
norms = []
for WF in self:
norms.append(WF.norm())
return np.array(norms)
def overlapSeies_with_constant_EWF_after_operator_application(self, constant_EWF, operator):
values = []
for WF in self:
values.append(WF.overlap(operator * constant_EWF))
return np.array(values)
def autocorrelationInFrequencySpace(self):
"Autocorrelation as a function of frequency"
t, ACF = self.autocorrelation()
return self.mySpace.genericOneDimensionalFourierTransformFromZero(t, ACF)
def timeExpectationValue(self, operator):
"Takes an operator and brakets it with this wavefunction for all extant times"
timeValues = []
for ii in range(self.listOfTimePositionAmplitudes[0].shape[0]):
ewf = self[ii]
operatorActedOnEWF = operator * ewf
newValue = ewf.overlap(operatorActedOnEWF)
timeValues.append(newValue)
return np.array(timeValues)
def timeExpectationValueOfPolarizationAndOverlapWithElectricField(self, dipoleTuple, electricFieldTuple):
"Takes a tuple of dipole operators and a tuple of electric field operators to calculate. I don't actually think this should be used anywhere..."
output = 0.0
output = self.timeExpectationValue(dipoleTuple[0]) * electricFieldTuple[0].valueAtTime(self.timeSeries)
output = output + self.timeExpectationValue(dipoleTuple[1]) * electricFieldTuple[1].valueAtTime(self.timeSeries)
output = output + self.timeExpectationValue(dipoleTuple[2]) * electricFieldTuple[2].valueAtTime(self.timeSeries)
return scipy.integrate.simps(output, dx = self.mySpace.dt)
def timeOverlapWithOtherBraEWFOfPolarizationAndOverlapWithElectricField(self, braEWF, dipoleTuple, electricFieldTuple):
"""This is the workhorse function which treats self as the ket, and calculates
the overlap with the supplied wavefunction after applying the dipole operator
then takes the dot product with the supplied electric field and integrates"""
output = []
for i, EWF in enumerate(self):
xVal = EWF.overlap(dipoleTuple[0] * braEWF[i]) * electricFieldTuple[0].valueAtTime(self.timeSeries[i])
yVal = EWF.overlap(dipoleTuple[1] * braEWF[i]) * electricFieldTuple[1].valueAtTime(self.timeSeries[i])
zVal = EWF.overlap(dipoleTuple[2] * braEWF[i]) * electricFieldTuple[2].valueAtTime(self.timeSeries[i])
output.append(xVal + yVal + zVal)
return scipy.integrate.simps(output, dx = self.mySpace.dt)
def timeOverlapWithOtherBraEWFOfPolarization(self, braEWF, dipoleTuple):
"""Function which treats self as the ket, and calculates the expectation value
of the dipole operator then outputs a time vector"""
xOutput = []
yOutput = []
zOutput = []
for i, EWF in enumerate(self):
xVal = EWF.overlap(dipoleTuple[0] * braEWF[i])
yVal = EWF.overlap(dipoleTuple[1] * braEWF[i])
zVal = EWF.overlap(dipoleTuple[2] * braEWF[i])
xOutput.append(xVal)
yOutput.append(yVal)
zOutput.append(zVal)
return (np.array(xOutput), np.array(yOutput), np.array(zOutput))
def grabTimeNuclearWavefunction(self, index):
"Will give you a nuclear time wavefunction for the given electronic index"
output = TimeNuclearWavefunction.timeNuclearWavefunction(self.mySpace)
output.timePositionAmplitude = self.listOfTimePositionAmplitudes[index]
return output
##DEFINE ITERATION METHODS OVER TIME
def __iter__(self):
self.counter = 0
return self
def next(self):
try:
self.counter = self.counter + 1
return self[self.counter - 1]
except IndexError:
raise StopIteration
def __getitem__(self,index):
"outputs the spatial electronic wavefunction at time index index"
index = int(index)
nucWFlist = []
for ii in range(self.dim):
newNucWF = NuclearWavefunction.nuclearWavefunction(self.mySpace)
newNucWF.xAmplitude = self.listOfTimePositionAmplitudes[ii][index]
nucWFlist.append(newNucWF)
out = ElectronicWavefunction.electronicWavefunction(self.mySpace, nucWFlist)
return out
def __setitem__(self, index, ewf):
"sets the spatial electronic wavefunction at time index, index, to be ewf"
if index < 0:
index = index + self.__currentTimeIndex
for elecIndex in range(self.dim):
self.listOfTimePositionAmplitudes[elecIndex][index] = ewf[elecIndex].xAmplitude
self.__currentTimeIndex = index
def animate1D(self, fileName, numberOfFrames=None):
"Animate a 1D nuclear wavefunction as it evolves in time"
d = self.mySpace.nuclearDimensionality
if d != 1:
raise NuclearWavefunction.unplotableNuclearWavefunction()
listOfPlottingAmplitudes = map(np.abs, self.listOfTimePositionAmplitudes)
yMin = min(map(np.min, listOfPlottingAmplitudes))
yMax = max(map(np.max, listOfPlottingAmplitudes))
xVals = self.mySpace.xValues
fig = plt.figure()
for ii in range(self.dim):
im = plt.plot(xVals, listOfPlottingAmplitudes[ii][0], label = str(ii))
plt.ylim((yMin, yMax))
plt.legend()
ax = fig.gca()
def animate(i, data, ax, fig):
ax.cla()
for ii in range(self.dim):
try:
im = plt.plot(xVals, listOfPlottingAmplitudes[ii][i], label = str(ii))
except:
return None
plt.ylim((yMin, yMax))
ax = fig.gca()
plt.legend()
plt.title(str(i))
return im,
anim = animation.FuncAnimation(fig, animate,
frames = numberOfFrames, interval=20, blit=True, fargs=(listOfPlottingAmplitudes, ax, fig))
anim.save(fileName, fps=20)
def animate2D(self, electronicIndex, fileName, numberOfFrames):
"Animate a 2D nuclear wavefunction as it evolves in time"
d = self.mySpace.nuclearDimensionality
if d != 2:
raise NuclearWavefunction.unplotableNuclearWavefunction()
plottingAmplitude = np.abs(self.listOfTimePositionAmplitudes[electronicIndex])
zMin = np.min(plottingAmplitude)
zMax = np.max(plottingAmplitude)
contourLevels = 100
contourSpacings = np.linspace(zMin, zMax, contourLevels)
fig = plt.figure()
im = plt.contourf(plottingAmplitude[0], contourSpacings)
ax = fig.gca()
def animate(i, data, ax, fig):
ax.cla()
im = ax.contourf(data[i], contourSpacings)
plt.title(str(i))
return im,
anim = animation.FuncAnimation(fig, animate,
frames = numberOfFrames, interval=20, blit=True, fargs=(plottingAmplitude, ax, fig))
anim.save(fileName, fps=20)
#override arithmetic
def __mul__(self, other):
output = copy.copy(self)
for ii in range(self.dim):
output.listOfTimePositionAmplitudes[ii] = output.listOfTimePositionAmplitudes[ii] * other
return output
def __neg__(self):
output = copy.copy(self)
for ii in range(self.dim):
output.listOfTimePositionAmplitudes[ii] = -output.listOfTimePositionAmplitudes[ii]
return output
def __add__(self, other):
output = copy.copy(self)
for ii in range(self.dim):
output.listOfTimePositionAmplitudes[ii] = output.listOfTimePositionAmplitudes[ii] + other.listOfTimePositionAmplitudes[ii]
return output
if __name__ == "__main__":
#Some test code
mySpace = Spacetime.Spacetime(xMax = 10,
numberOfNuclearDimenions = 2,
numberOfElectronicDimensions = 4,
numberOfSimulationSpacePointsPerNuclearDimension = 200,
dt = .05)
omega0 = 2.0
omegaOff = 1.0
testHarmonicOscillator1 = NuclearOperator.harmonicOscillator(mySpace,
omega=omega0,
mass=1,
center=-2,
energyOffset = .1)
testHarmonicOscillator2 = NuclearOperator.harmonicOscillator(mySpace,
omega= 2 * omega0,
mass=1,
center=0,
energyOffset = 0)
testHarmonicOscillator3 = NuclearOperator.harmonicOscillator(mySpace,
omega=omegaOff,
mass=1,
center=2,
energyOffset = 2)
testHarmonicOscillator4 = NuclearOperator.harmonicOscillator(mySpace,
omega= .5*omegaOff,
mass=1,
center=3,
energyOffset = 0)
testNuclearHamiltonian1 = NuclearOperator.nuclearHamiltonian(mySpace, listOfOneDimensionalHamiltonians = [testHarmonicOscillator1, testHarmonicOscillator2 ] )
testNuclearHamiltonian2 = NuclearOperator.nuclearHamiltonian(mySpace, listOfOneDimensionalHamiltonians = [testHarmonicOscillator2, testHarmonicOscillator3 ] )
testNuclearHamiltonian3 = NuclearOperator.nuclearHamiltonian(mySpace, listOfOneDimensionalHamiltonians = [testHarmonicOscillator3, testHarmonicOscillator4 ] )
testNuclearHamiltonian4 = NuclearOperator.nuclearHamiltonian(mySpace, listOfOneDimensionalHamiltonians = [testHarmonicOscillator4, testHarmonicOscillator1 ] )
testNuclearWavefunction1 = NuclearWavefunction.nuclearWavefunction(mySpace, groundStateNuclearHamiltonian = testNuclearHamiltonian1 )
testNuclearWavefunction2 = NuclearWavefunction.nuclearWavefunction(mySpace, groundStateNuclearHamiltonian = testNuclearHamiltonian2 )
testNuclearWavefunction3 = NuclearWavefunction.nuclearWavefunction(mySpace, groundStateNuclearHamiltonian = testNuclearHamiltonian3 )
testNuclearWavefunction4 = NuclearWavefunction.nuclearWavefunction(mySpace, groundStateNuclearHamiltonian = testNuclearHamiltonian4 )
electronicCoupling = NuclearOperator.constantPositionNuclearOperator(mySpace, .5)
assumptionsForExponent = {'diagonalKinetic': True,
'diagonalPotential' : False,
'OneNonDiagonal2by2' : False,
'2by2Indeces' : (1, 2)}
testElectronicHamiltonian = ElectronicOperator.ElectronicHamiltonian(mySpace, [(0,0, testNuclearHamiltonian2),
(1,1, testNuclearHamiltonian3),
(2,2, testNuclearHamiltonian3),
(2,1, electronicCoupling),
(1,2, electronicCoupling),
(3,3, testNuclearHamiltonian1)])
testEWF = ElectronicWavefunction.electronicWavefunction(mySpace,
listOfNuclearWavefunctions = [testNuclearWavefunction1, testNuclearWavefunction2, testNuclearWavefunction3, testNuclearWavefunction4],
Normalize=True)
testElectronicPropagator = testElectronicHamiltonian.myPropagator(assumptionsDICT=assumptionsForExponent)
testTimeEWF = timeElectronicWavefunction(mySpace)
print "starting propagation"
startTime = time.time()
testTimeEWF.applyOperatorsNTimesOnInitialWavefunction([testElectronicPropagator], 200, testEWF)
print "elapsed time, ", time.time() - startTime
t, s = testTimeEWF.autocorrelation()
w, sw = testTimeEWF.autocorrelationInFrequencySpace()
plt.figure()
plt.plot(t, np.abs(s))
plt.figure()
plt.plot(w, np.abs(sw))
print t.shape
#
# testTimeEWF = timeElectronicWavefunction(mySpace)
# testTimeEWF.setExpectedNumberOfSteps(200)
# testTimeEWF.setInitialWavefunction(testEWF)
# print "starting propagation"
# startTime = time.time()
# for i in range(200):
# testTimeEWF.applyAndExtendOnce([testElectronicPropagator])
# print "elapsed time, ", time.time() - startTime
# t, s = testTimeEWF.autocorrelation()
# w, sw = testTimeEWF.autocorrelationInFrequencySpace()
#
# plt.figure()
# plt.plot(t, np.abs(s))
# plt.figure()
# plt.plot(w, np.abs(sw))
# print t.shape
#testTimeEWF.animate2D(1, 'testCoupledElectronicPropagation2D_2.mp4', 200)
```
#### File: spectroscopy/src/TimeFunction.py
```python
import copy
import math
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate
import scipy.interpolate
import Spacetime
class timeFunction(object):
"""
Abstract class to keep track of useful items for a function in time.
Must have a defined beginning and end, before and after which the function
is identically zero
"""
#Override arithmetic functions
def __mul__(self, other):
if isinstance(other, zeroTimeFunction):
return other
out = copy.copy(self)
out.myFunction = lambda t: self.myFunction(t) * other.myFunction(t)
return out
def __add__(self, other):
if isinstance(other, zeroTimeFunction):
return self
out = copy.copy(self)
out.myFunction = lambda t: self.myFunction(t) + other.myFunction(t)
newMin = min(out.timeRangeTuple[0], other.timeRangeTuple[0])
newMax = max(out.timeRangeTuple[1], other.timeRangeTuple[1])
out.timeRangeTuple = (newMin, newMax)
return out
def plot(self, nPoints = 2000):
"Plots from user-defined beginning to end of function for specified number of points"
plt.figure()
tVals = np.linspace(self.timeRangeTuple[0], self.timeRangeTuple[1], nPoints)
t = self.mySpace.unitHandler.femtosecondsFromTime(tVals)
fValues = self.myFunction(tVals)
plt.plot(t, fValues)
plt.xlabel(r"$t$ (fs)")
def plotInSimulationSpace(self):
"Plots using beginning/end and dt from mySpace"
plt.figure()
tVals = np.arange(self.timeRangeTuple[0], self.timeRangeTuple[1] + self.mySpace.dt, self.mySpace.dt)
t = self.mySpace.unitHandler.femtosecondsFromTime(tVals)
fValues = self.myFunction(tVals)
plt.plot(t, fValues)
plt.xlabel(r"$t$ (fs)")
def plotFT(self, maxFrequency, resolution):
"Plots the Fourier Transform of the function"
plt.figure()
w, s = self.fourierTransform(maxFrequency, resolution)
w = self.mySpace.unitHandler.wavenumbersFromEnergyUnits(w)
plt.plot(w, np.abs(s))
plt.xlabel(r"$\omega$ (cm$^{-1}$)")
def FT(self, maxFrequency, resolution):
"returns the Fourier Transform frewuency in wavenumbers and the amplitude"
w, s = self.fourierTransform(maxFrequency, resolution)
w = self.mySpace.unitHandler.wavenumbersFromEnergyUnits(w)
return w, s
def fourierTransform(self, maxFrequency, resolution):
tMax = 1.0 / resolution
N = maxFrequency / resolution
tValues = np.linspace(-tMax, tMax, N)
fValues = self.myFunction(tValues)
return self.mySpace.genericOneDimensionalFourierTransformFromZero(tValues, fValues, gaussianFilter=False)
def valueAtTime(self, t):
return self.myFunction(t)
def maxTime(self):
return self.timeRangeTuple[1]
def minTime(self):
return self.timeRangeTuple[0]
def shiftForward(self, deltaT):
self.T = self.T + deltaT
self.timeRangeTuple = (self.timeRangeTuple[0] + deltaT, self.timeRangeTuple[1] + deltaT)
return self
def integrate(self, dt):
"Integrates Amplitude to desired dt"
DT = dt
tValues = np.arange(self.minTime(), self.maxTime() + DT, DT)
Values = self.valueAtTime(tValues)
return scipy.integrate.simps(Values, dx = DT)
def integrateInSimulationSpace(self):
"Integrates Amplitude to mySpace.dt"
return self.integrate(self.mySpace.dt)
def setPhase(self, newPhase):
self.phi = newPhase
class zeroTimeFunction(timeFunction):
"A Time Function smart enough to know it's zero"
def __init__(self, space):
self.mySpace = space
self.myFunction = 0.0
self.timeRangeTuple = (np.inf, -np.inf)
self.timePillow = 0.0
self.T = 0.0
self.sigma = 0
def valueAtTime(self, t):
try:
return np.zeros(t.shape)
except:
return 0.0
def shiftForward(self, deltaT):
return self
def integrate(self, a):
return 0.0
def totalPower(self):
return 0.0
def plusFunction(self):
return zeroTimeFunction(self.mySpace)
def minusFunction(self):
return zeroTimeFunction(self.mySpace)
def pulseCopyAtNewTime(self, newTime):
return self
def pulseCopyJumpedForward(self, amountOfTimeToJumpForward):
return self
def fourierTransform(self):
return zeroTimeFunction(self.mySpace)
def myFourierTransformedFunction(self, w):
return np.zeros(w.shape)
class deltaFunction(timeFunction):
"A Function with ampltiude a/dt at specified time=loaction"
def __init__(self, space, location, dt=None):
self.mySpace = space
if dt is None:
self.DT = self.mySpace.dt
else:
self.DT = dt
self.height = 1.0 / self.mySpace.dt
self.T = location
self.timePillow = 5.0 * self.mySpace.dt #just to be safe
self.timeRangeTuple = (location - self.DT, location + self.DT)
def valueAtTime(self, t):
try:
t.shape
except:
if np.abs(t - self.T) < self.DT:
return self.height
else:
return 0.0
ones = np.ones(t.shape)
zeros = np.zeros(t.shape)
output = np.where(np.abs(t - self.T) > self.DT, zeros, ones)
return output * self.height
def pulseCopyAtNewTime(self, newTime):
output = copy.deepcopy(self)
output.T = newTime
return output
def pulseCopyJumpedForward(self, amountOfTimeToJumpForward):
output = copy.deepcopy(self)
output.T = amountOfTimeToJumpForward + self.T
return output
def totalPower(self):
return 1.0
class GaussianPulse(timeFunction):
"An abstract object to hold useful methods for dealing with Gaussian pulses"
def __init__(self,
space,
centerOmega,
timeSpread,
centerTime=None,
pulseDirection = None,
phi = 0.0,
amplitude = 1.00E-8,
normalizePower = False,
normalizeIntegral = True,
frequency_sign = 1.0):
self.mySpace = space
if pulseDirection is None: #not important yet, may be implemented later
pass
self.omega = centerOmega
self.sigma = timeSpread
self.phi = 0
self.k = pulseDirection
targetTimePillow = 6.0 * self.sigma #amount of padding needed between center of pulse and edge of time
nDtInTargetPillow = np.ceil(targetTimePillow / self.mySpace.dt) + 1
self.timePillow = nDtInTargetPillow * self.mySpace.dt
#if no time is specified, then just give the pulse the needed pillow
if centerTime == None:
centerTime = self.timePillow
self.frequency_sign = frequency_sign
#search for the closest index to the specified center time
self.T = centerTime
self.timeRangeTuple = (self.T - self.timePillow, self.T + self.timePillow)
#the goal is to normalize the power or integral of the square of the function to one
self.amp = amplitude #start here; this is needed to make the function work
if normalizePower:
normalizer = self.totalPower()
self.amp = self.amp / math.sqrt(normalizer)
if normalizeIntegral:
normalizer = self.totalIntegral()
self.amp = self.amp / normalizer
def shiftToCenterTime(self, newT):
"Gives pulse a new center time"
delta_T = newT - self.T
self.T = newT
self.timeRangeTuple = (self.timeRangeTuple[0] + delta_T, self.timeRangeTuple[1] + delta_T)
def totalPower(self):
"Integral of the absolute value squared of the amplitude"
DT = self.mySpace.dt #/ 10.0
tValues = np.arange(self.minTime(), self.maxTime(), DT)
Values = np.abs(self.valueAtTime(tValues))**2.0
return scipy.integrate.simps(Values, dx = DT)
def totalIntegral(self):
"Integral of the absolute value of the amplitude"
DT = self.mySpace.dt #/ 10.0
tValues = np.arange(self.minTime(), self.maxTime(), DT)
Values = np.abs(self.valueAtTime(tValues))
return scipy.integrate.simps(Values, dx = DT)
def pulseCopyAtNewTime(self, newTime):
"Make copy and move to new time"
output = copy.deepcopy(self)
output.T = newTime
return output
def pulseCopyJumpedForward(self, amountOfTimeToJumpForward):
"Make copy and jump the pulse forward"
output = copy.deepcopy(self)
output.T = amountOfTimeToJumpForward + self.T
return output
class GaussianPulseTooCloseToEdgeOfTimeException(Exception):
def __init__(self, value):
self.value = value
class GaussianCosinePulse(GaussianPulse):
"Completely Real Pulse"
def myFunction(self, t):
coef = self.amp
shiftedTime = t - self.T
cosArg = -self.omega * shiftedTime + self.phi
cosTerm = np.cos(cosArg)
gausArg = -shiftedTime**2.0 / (2.0 * self.sigma**2.0)
gausTerm = np.exp(gausArg)
return coef * cosTerm * gausTerm
def myFourierTransformedFunction(self, w):
return self.minusFunction().myFourierTransformedFunction(w) + self.plusFunction().myFourierTransformedFunction(w)
def plusFunction(self):
return GaussianKPlusPulse(
self.mySpace,
self.omega,
self.sigma,
self.T,
self.k,
self.phi,
self.amp / 2.0,
normalizePower = False,
normalizeIntegral = False,
frequency_sign = -1.0)
def minusFunction(self):
return GaussianKMinusPulse(
self.mySpace,
self.omega,
self.sigma,
self.T,
self.k,
self.phi,
self.amp / 2.0,
normalizePower = False,
normalizeIntegral = False,
frequency_sign = 1.0)
class GaussianKPlusPulse(GaussianPulse):
"'Forward' Pulse which is complex and has positive energy"
def myFunction(self, t):
coef = self.amp
shiftedTime = t - self.T
expArg = -self.omega * shiftedTime + self.phi
expTerm = np.exp(1.0j * expArg)
gausArg = -shiftedTime**2.0 / (2.0 * self.sigma**2)
gausTerm = np.exp(gausArg)
return coef * expTerm * gausTerm
def myFourierTransformedFunction(self, w):
"""Defined as $\int e^{i \omega t} E(t) dt $ """
coef = self.amp * np.sqrt(2.0 * np.pi * self.sigma**2.0)
oscPart = np.exp(1.0j *w * self.T)
expPart = np.exp(-self.sigma**2 * (self.omega - w)**2 / 2.0)
return coef * oscPart * expPart
class GaussianKMinusPulse(GaussianPulse):
"'Backward' Pulse which is complex and negative energy"
def myFunction(self, t):
coef = self.amp
shiftedTime = t - self.T
expArg = self.omega * shiftedTime - self.phi
expTerm = np.exp(1.0j * expArg)
gausArg = -shiftedTime**2.0 / (2.0 * self.sigma**2.0)
gausTerm = np.exp(gausArg)
return coef * expTerm * gausTerm
def myFourierTransformedFunction(self, w):
"""Defined as $\int e^{i \omega t} E(t) dt $ """
coef = self.amp * np.sqrt(2.0 * np.pi * self.sigma**2.0)
oscPart = np.exp(1.0j *w * self.T)
expPart = np.exp(-self.sigma**2 * (self.omega + w)**2 / 2.0)
return coef * oscPart * expPart
class GaussianRazorBladedPulse(timeFunction):
"Object to handle a pulse which has been razor-bladed in frequency space"
k_MULTIPLIER = 80000000.0
time_window_high_frequency_multiplier = 10.0
time_domain_multiplier = 40
ZERO_TOLERANCE = 1.0E-3
def __init__(self, gaussian_pulse, cutoff_omega_low, cutoff_omega_high):
self.my_underlying_pulse = copy.deepcopy(gaussian_pulse)
self.mySpace = self.my_underlying_pulse.mySpace
self.low_frequency_cutoff = cutoff_omega_low
self.high_frequency_cutoff = cutoff_omega_high
self.T = gaussian_pulse.T
self.sigma = gaussian_pulse.sigma
self.frequency_sign = gaussian_pulse.frequency_sign
self.omega = gaussian_pulse.omega
self.k = GaussianRazorBladedPulse.k_MULTIPLIER / gaussian_pulse.mySpace.dt
self.normalizer = self.my_underlying_pulse.totalIntegral()
self.frequency_over_time_max_value_ratio = 1.0
dt = gaussian_pulse.mySpace.dt
self.dt = dt
window_size_t = GaussianRazorBladedPulse.time_window_high_frequency_multiplier / self.high_frequency_cutoff
underlying_start_time, underlying_end_time = gaussian_pulse.timeRangeTuple
t_max = self.T + GaussianRazorBladedPulse.time_domain_multiplier * max(np.abs(underlying_start_time), np.abs(underlying_end_time)) + dt
t_min = self.T - GaussianRazorBladedPulse.time_domain_multiplier * max(np.abs(underlying_start_time), np.abs(underlying_end_time))
self.master_t = np.arange(t_min, t_max, dt)
self.master_w = 2.0 * np.pi * np.fft.fftshift(np.fft.fftfreq(self.master_t.shape[0], d = self.master_t[1] - self.master_t[0]))
self.f_shift_center = 0.0
self.master_frequency_values = self.myFourierTransformedFunction_initial(self.master_w)
self.master_frequency_interpolant = scipy.interpolate.interp1d(self.master_w, self.master_frequency_values, fill_value=0.0, bounds_error = False)
self.master_time_values = np.fft.fftshift(np.fft.ifft(self.master_frequency_values)) / (t_max - t_min)
self.master_time_interpolant = scipy.interpolate.interp1d(self.master_t, self.master_time_values)
#find a more reasonable start time to allow easier calculation:
finding_start = True
t_f = underlying_start_time
maximum_time_amplitude = np.max(np.abs(self.master_time_values))
while finding_start:
t_0 = t_f - window_size_t
t_vals = np.linspace(t_0, t_f, 100)
func_values = self.master_time_interpolant(t_vals)
rms_Err = np.std(np.abs(func_values) / maximum_time_amplitude)
if rms_Err <GaussianRazorBladedPulse.ZERO_TOLERANCE:
start_time = t_0
finding_start = False
t_f = t_0
print "found start time at %f femtoseconds" % self.mySpace.unitHandler.femtosecondsFromTime(start_time)
finding_end = True
t_0 = underlying_end_time
while finding_end:
t_f = t_0 + window_size_t
t_vals = np.linspace(t_0, t_f, 100)
func_values = self.master_time_interpolant(t_vals)
rms_Err = np.std(np.abs(func_values) / maximum_time_amplitude)
if rms_Err < GaussianRazorBladedPulse.ZERO_TOLERANCE:
end_time = t_f
finding_end = False
t_0 = t_f
print "found end time at %f femtoseconds" % self.mySpace.unitHandler.femtosecondsFromTime(end_time)
self.timeRangeTuple = (start_time , end_time )
self.natural_time = np.arange(start_time, end_time, dt)
self.n_time_point = self.natural_time.shape[0]
self.natural_frequencies = 2.0 * np.pi * np.fft.fftshift(np.fft.fftfreq(self.n_time_point, d = self.dt))
self.time_interpolant = scipy.interpolate.interp1d(self.natural_time, self.master_time_interpolant(self.natural_time), fill_value=0.0, bounds_error = False)
#we want the pulse to turn on at t=0
shift = - start_time
self.shiftForward(shift)
#where is the center of the pulse?
max_index = np.argmax(self.myFunction(self.natural_time))
max_amplitude_time = self.natural_time[max_index]
start_pillow = max_amplitude_time - self.timeRangeTuple[0]
end_pillow = self.timeRangeTuple[1] - max_amplitude_time
self.timePillow = np.max([start_pillow, end_pillow])
def myFunction(self, t):
return self.normalizer * self.time_interpolant(t)
def myFourierTransformedFunction_initial(self, w):
return self.normalizer * self.my_underlying_pulse.myFourierTransformedFunction(w) * self.razorBladeWindow(w)
def myFourierTransformedFunction(self, w):
return np.exp(-1.0j * w * self.f_shift_center) * self.master_frequency_interpolant(w)
def razorBladeWindow(self, w):
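# Product of two logistic (sigmoid) edges: approximately 1 between the low and high frequency
# cutoffs and approximately 0 outside, with k setting how sharp the "razor blade" edges are.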
return 1.0 / ( (1.0 + np.exp(-2.0 * self.k * (w - self.low_frequency_cutoff))) * (1.0 + np.exp(-2.0 * self.k * (self.high_frequency_cutoff - w))) )
# return 1.0 / ((1.0 + np.exp(-2.0 * self.k * (self.high_frequency_cutoff - w))) )
def totalIntegral(self):
tValues = self.natural_time
DT = tValues[1] - tValues[0]
Values = np.abs(self.myFunction(self.natural_time))
return scipy.integrate.simps(Values, dx = DT)
def totalPower(self):
tValues = self.natural_time
DT = tValues[1] - tValues[0]
Values = np.abs(self.myFunction(self.natural_time))**2
return scipy.integrate.simps(Values, dx = DT)
def shiftForward(self, jump_amount):
amplitude = self.myFunction(self.natural_time)
self.natural_time = self.natural_time + jump_amount
self.f_shift_center = self.f_shift_center + jump_amount
self.timeRangeTuple = (self.timeRangeTuple[0] + jump_amount, self.timeRangeTuple[1] + jump_amount)
self.time_interpolant = scipy.interpolate.interp1d(self.natural_time, amplitude, fill_value=0.0, bounds_error = False)
current_integral_amount = self.totalIntegral()
self.normalizer = self.normalizer / current_integral_amount
return self
def plusFunction(self):
return GaussianRazorBladedPulse(self.my_underlying_pulse, self.low_frequency_cutoff, self.high_frequency_cutoff)
def minusFunction(self):
return GaussianRazorBladedPulse(self.my_underlying_pulse, self.low_frequency_cutoff, self.high_frequency_cutoff)
if __name__ == "__main__":
#Some useful test code
n=100
mySpace = Spacetime.Spacetime(xMax = 10,
numberOfNuclearDimenions = 2,
numberOfElectronicDimensions = 4,
numberOfSimulationSpacePointsPerNuclearDimension = 200,
dt = .05)
a = GaussianCosinePulse(mySpace,
centerOmega=7.5,
timeSpread=.25,
centerTime=1,
amplitude=.6)
b = GaussianCosinePulse(mySpace,
centerOmega=12.5,
timeSpread=.5,
centerTime=8,
amplitude=.6)
c = GaussianCosinePulse(mySpace,
centerOmega=12.5,
timeSpread=.9,
centerTime=9,
amplitude=.6)
d = c * b
d.plot()
plt.show()
print d.integrate(1.0)
```
|
{
"source": "jgoodlet/punter",
"score": 2
}
|
#### File: punter/tests/test_punter.py
```python
import pytest
import punter.exceptions as exc
from punter.api import search
from punter.helpers import get_query_type, get_endpoint
class TestQueryType(object):
def test_query_type_email_valid(self):
emails = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
]
for email in emails:
assert get_query_type(email) == 'email'
def test_query_type_email_malformed(self):
emails = [
'john<EMAIL> @ g<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'john@'
'@', ''
]
for email in emails:
assert get_query_type(email) == ''
def test_query_type_domain_valid(self):
domains = [
'www.google.com',
'github.com',
'github.org',
'github.edu',
'github.io',
'github.me'
]
for domain in domains:
assert get_query_type(domain) == 'domain'
def test_query_type_domain_malformed(self):
domains = [
'//www.google.com/',
'https:/github.com/',
'htt://github.org',
'githubedu',
'.io',
'github.'
]
for domain in domains:
assert get_query_type(domain) == ''
class TestEndpoint(object):
def test_empty_query(self):
with pytest.raises(exc.InvalidQueryStringException):
key = 'd08d2ba22218d1b59df239d03fc5e66adfaec2b2'
result = get_endpoint(key, '', 0, '')
class TestSearch(object):
def test_api_key_empty(self):
with pytest.raises(exc.InvalidAPIKeyException):
search('', 'www.google.com')
def test_api_key_invalid_length(self):
with pytest.raises(exc.InvalidAPIKeyException):
search('1234567890', 'www.google.com')
def test_api_key_valid(self):
key = 'd08d2ba22218d1b59df239d03fc5e66adfaec2b2'
result = search(key, 'www.google.com')
assert result is not None
```
|
{
"source": "jgoodman8/pyhist",
"score": 2
}
|
#### File: tests/integration/test_history.py
```python
import os
import pickle
from typing import List
import pytest
from git import Repo
from pyhist.history.history import History
from pyhist.history.pyhist_item import PyHistItem
from pyhist.history.history_exception import HistoryException
class TestHistory:
@pytest.fixture(scope="function")
def git_repo(self) -> Repo:
os.system("git init")
repo = Repo(".git")
os.system("touch testfile1")
repo.git.add("testfile1")
repo.git.commit("-m" "Add testfile1")
os.system("touch testfile2")
repo.git.add("testfile2")
repo.git.commit("-m" "Add testfile2")
yield repo
os.system("rm testfile1")
os.system("rm testfile2")
os.system("rm -rf .git")
@pytest.fixture(scope="function")
def pyhist_items(self, git_repo: Repo) -> List[PyHistItem]:
history = [
PyHistItem(commit=commit, version=None, is_version=False)
for commit in git_repo.iter_commits(git_repo.active_branch.name)
]
with open(".pyhist", "wb") as f:
pickle.dump(history, f)
f.close()
yield history
os.system(f"rm .pyhist")
def test_is_initialized_FileNotExists_ReturnsFalse(self):
history = History()
assert not history.is_initialized()
def test_is_initialized_FileExists_ReturnsTrue(
self, pyhist_items: List[PyHistItem]
):
history = History()
assert history.is_initialized()
def test_load_history_FromInitializedFile_CommitsAreLoaded(
self, pyhist_items: List[PyHistItem]
):
history = History()
history.load_history()
assert history.pyhist_items == pyhist_items
def test_load_history_FromInitializedFile_VersionsAreLoaded(
self, pyhist_items: List[PyHistItem]
):
history = History()
history.load_history()
assert history.pyhist_items == pyhist_items
def test_load_history_NotInitialized_HistoryExceptionIsRaised(self):
history = History()
with pytest.raises(
HistoryException,
match='PyHist is not initialized. Please, type "pyhist --init"',
):
history.load_history()
def test_save_history_RemoveCommitAndSave_FileUpdatedWithoutCommit(
self, pyhist_items: List[PyHistItem]
):
# arrange
history = History()
history.load_history()
# act
history.remove_commit(history.pyhist_items[0].commit)
history.save_history()
# assert
new_history = History()
new_history.load_history()
assert len(new_history.pyhist_items) == 1
assert new_history.pyhist_items == history.pyhist_items
```
#### File: pyhist/versioning/semantic_versioning.py
```python
import re
from typing import List
from git import Commit
from pyhist.history.history import History
from pyhist.history.git_history import GitHistory
from pyhist.versioning.commit_type import CommitType
from pyhist.versioning.version import Version
class SemanticVersioning:
def __init__(
self, git_history: GitHistory, history: History,
):
self.__git_history: GitHistory = git_history
self.__history: History = history
self.version: Version = None
def update_version(
self, added_commits: List[Commit], removed_commits: List[Commit]
) -> None:
for commit in removed_commits:
if self._is_release_commit(commit.message) or self.is_versioning_commit(
commit.message
):
self.version.create_from_version(
version=self.__history.get_last_version()
)
elif self._is_minor_change(commit.message):
self.version.decrease_minor()
elif self._is_patch_change(commit.message):
self.version.decrease_patch()
for commit in added_commits:
if self.is_versioning_commit(commit.message):
pass
elif self._is_minor_change(commit.message):
self.version.increase_minor()
elif self._is_patch_change(commit.message):
self.version.increase_patch()
self.version.update()
def generate_release(self) -> None:
self.version.increase_major()
@classmethod
def _is_minor_change(cls, commit_message: str) -> bool:
return cls._is_feature(commit_message)
@classmethod
def _is_patch_change(cls, commit_message: str) -> bool:
return (
cls._is_fix(commit_message)
or cls._is_refactor(commit_message)
or cls._is_docs(commit_message)
or cls._is_test(commit_message)
or cls._is_style(commit_message)
or cls._is_chore(commit_message)
or cls._is_performance(commit_message)
)
@classmethod
def _is_release_commit(cls, commit_message: str) -> bool:
return cls._check_semantic_commit(
commit_message, commit_type=CommitType.Release.value
)
@classmethod
def is_versioning_commit(cls, commit_message: str) -> bool:
return cls._check_semantic_commit(
commit_message, commit_type=CommitType.Versioning.value
)
@classmethod
def _is_feature(cls, commit_message: str) -> bool:
return cls._check_semantic_commit(
commit_message, commit_type=CommitType.Feature.value
)
@classmethod
def _is_fix(cls, commit_message: str) -> bool:
return cls._check_semantic_commit(
commit_message, commit_type=CommitType.Fix.value
)
@classmethod
def _is_refactor(cls, commit_message: str) -> bool:
return cls._check_semantic_commit(
commit_message, commit_type=CommitType.Refactor.value
)
@classmethod
def _is_performance(cls, commit_message: str) -> bool:
return cls._check_semantic_commit(
commit_message, commit_type=CommitType.Performance.value
)
@classmethod
def _is_docs(cls, commit_message: str) -> bool:
return cls._check_semantic_commit(
commit_message, commit_type=CommitType.Docs.value
)
@classmethod
def _is_test(cls, commit_message: str) -> bool:
return cls._check_semantic_commit(
commit_message, commit_type=CommitType.Test.value
)
@classmethod
def _is_chore(cls, commit_message: str) -> bool:
return cls._check_semantic_commit(
commit_message, commit_type=CommitType.Chore.value
)
@classmethod
def _is_style(cls, commit_message: str) -> bool:
return cls._check_semantic_commit(
commit_message, commit_type=CommitType.Style.value
)
@classmethod
def _check_semantic_commit(cls, commit_message: str, commit_type: str) -> bool:
return commit_message[: len(commit_type)] == commit_type
@classmethod
def parse_version_from_commit(cls, commit: Commit) -> Version:
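# Note: the pattern below only captures single-digit semantic versions (e.g. "1.2.3");
# multi-digit components such as "1.10.0" would not be matched.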
version_match = re.search(r"(.*)([0-9]\.[0-9]\.[0-9])(.*)", commit.message)
if version_match is not None and version_match.lastindex == 3:
return Version().create_from_str_version(version_match[2])
```
|
{
"source": "jgoodson/TraGeC",
"score": 2
}
|
#### File: tragec/models/modeling.py
```python
import logging
import typing
import pytorch_lightning as pl
import torch
import torch.optim as optim
from torch import nn
from torch.nn import LayerNorm
from .tape_model import TAPEModelMixin
from .configuration import BioConfig
logger = logging.getLogger(__name__)
class BioModel(pl.LightningModule, TAPEModelMixin):
# From songlab-cal TAPE: https://github.com/songlab-cal/tape
# Modified substantially
r""" Base class for all models.
:class:`~BioModel` takes care of storing the configuration of
the models and handles methods for loading/downloading/saving models as well as a
few methods commons to all models to (i) resize the sequence_rep embeddings and (ii) prune
heads in the self-attention heads. These come from TAPEModelMixin and are derived from
the methods the TAPE library uses for storing models.
These require an _init_weights() method to be implemented by derived classes if they
need initialization of anything not present in this version.
BioModel also includes the setup to make these complete PyTorch Lightning modules.
These methods include configure_optimizers and the three step functions.
These require a forward() and _compare() method to be implemented by derived classes.
Class attributes (overridden by derived classes):
- ``config_class``: a class derived from :class:`~BioConfig`
to use as configuration class for this model architecture.
- ``pretrained_model_archive_map``: a python ``dict`` with `short-cut-names`
(string) as keys and `url` (string) of associated pretrained weights as values.
- ``base_model_prefix``: a string indicating the attribute associated to the
base model in derived classes of the same architecture adding modules on top
of the base model.
"""
def __init__(self, config, *inputs, **kwargs):
super().__init__()
if not isinstance(config, BioConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class "
"`BioConfig`. To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
# Save config in model
self.config = config
self.save_hyperparameters()
def configure_optimizers(self) -> typing.Tuple[list, list]:
learning_rate = self.config.learning_rate
optimizer = self.config.optimizer
param_optimizer = self.named_parameters()
no_decay = ['bias', 'gamma', 'beta', 'LayerNorm']
optimizer_grouped_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
if optimizer == 'adamw':
optimizer = optim.AdamW(optimizer_grouped_parameters, lr=learning_rate)
elif optimizer == 'lamb':
from torch_optimizer import Lamb
optimizer = Lamb(optimizer_grouped_parameters, lr=learning_rate)
elif optimizer == 'sgd':
optimizer = optim.SGD(optimizer_grouped_parameters, lr=learning_rate)
elif optimizer == 'novograd':
from torch_optimizer import NovoGrad
optimizer = NovoGrad(optimizer_grouped_parameters, lr=learning_rate)
elif isinstance(optimizer, str):
OPT = getattr(optim, optimizer, False)
if OPT:
optimizer = OPT(optimizer_grouped_parameters, lr=learning_rate)
else:
try:
import torch_optimizer
except ImportError:
raise ImportError(
"Specified optimizer {optimizer} is not available and torch_optimizer not available")
OPT = getattr(torch_optimizer, optimizer, False)
if OPT:
optimizer = OPT(optimizer_grouped_parameters, lr=learning_rate)
else:
raise ImportError("Specified optimizer {optimizer} is not available")
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer,
max_lr=self.config.learning_rate,
total_steps=self.config.total_steps,
pct_start=self.config.warmup_steps / self.config.total_steps,
anneal_strategy='linear')
return [optimizer], [{'scheduler': scheduler, 'interval': 'step', 'frequency': 1}]
def training_step(self, train_batch: typing.Dict, batch_idx: typing.Optional[int] = None) -> torch.Tensor:
results = self.forward(**train_batch)
loss, metrics = self._compare(results, train_batch)
for k, m in metrics.items():
self.log(f'train/{k}', m, sync_dist=True)
self.log('train/loss', loss, sync_dist=True)
return loss
def validation_step(self, batch: typing.Dict, batch_idx: typing.Optional[int] = None) -> torch.Tensor:
results = self.forward(**batch)
loss, metrics = self._compare(results, batch)
for k, m in metrics.items():
self.log(f'val/{k}', m)
self.log('val/loss', loss)
return metrics
def test_step(self, batch: typing.Dict, batch_idx: typing.Optional[int] = None):
results = self.forward(**batch)
loss, metrics = self._compare(results, batch)
for k, m in metrics.items():
self.log(f'test/{k}', m)
self.log('test/loss', loss)
def create_sinusoidal_embeddings(n_pos, dim, out):
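# Standard fixed sinusoidal positional encoding: out[pos, 2i] = sin(pos / 10000**(2i/dim)),
# out[pos, 2i+1] = cos(pos / 10000**(2i/dim)); marked non-trainable via requires_grad = False.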
out.requires_grad = False
positions = torch.arange(0, n_pos)[:, None]
dimensions = torch.arange(0, dim)
position_enc = (positions / torch.pow(10000, 2 * (dimensions // 2) / dim)).to(out.device)
out[:, 0::2] = torch.sin(position_enc[:, 0::2])
out[:, 1::2] = torch.cos(position_enc[:, 1::2])
class ProteinEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
Modified From songlab-cal TAPE: https://github.com/songlab-cal/tape
"""
def __init__(self, config: BioConfig, position_embeddings: bool = True):
super().__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=0)
if position_embeddings:
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size)
if config.sinusoidal_pos_embds:
create_sinusoidal_embeddings(
n_pos=config.max_position_embeddings, dim=config.hidden_size, out=self.position_embeddings.weight
)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be
# able to load any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self,
input_ids: torch.Tensor,
token_type_ids: typing.Optional[torch.Tensor] = None,
position_ids: typing.Optional[torch.Tensor] = None,
**kwargs) -> torch.Tensor:
seq_length = input_ids.size()[1]
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
if hasattr(self, 'position_embeddings'):
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
position_embeddings = self.position_embeddings(position_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
else:
embeddings = words_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class GeCEmbeddings(nn.Module):
"""Construct the embeddings from gene, (strand and spacing embeddings).
"""
def __init__(self, config: BioConfig, position_embeddings: bool = True):
super().__init__()
self.generep_embeddings = nn.Linear(
config.input_rep_size, config.hidden_size)
if position_embeddings:
self.position_embeddings: nn.Embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)
if config.sinusoidal_pos_embds:
create_sinusoidal_embeddings(
n_pos=config.max_position_embeddings, dim=config.hidden_size, out=self.position_embeddings.weight
)
self.direction_embeddings: nn.Embedding = nn.Embedding(3, config.hidden_size)
self.length_embeddings: nn.Embedding = nn.Embedding(config.gene_max_length // config.gene_length_bin_size + 1,
config.hidden_size)
self.gene_length_bin_size = config.gene_length_bin_size
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be
# able to load any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self,
gene_reps: torch.Tensor,
strands: typing.Optional[torch.Tensor] = None,
lengths: typing.Optional[torch.Tensor] = None,
**kwargs) -> torch.Tensor:
if strands is None:
strands = torch.zeros_like(gene_reps[:, :, 0], dtype=torch.long)
else:
strands = strands.long()
if lengths is None:
lengths = torch.ones_like(gene_reps[:, :, 0], dtype=torch.long)
else:
lengths = lengths.long()
generep_embeddings = self.generep_embeddings(gene_reps)
direction_embeddings = self.direction_embeddings(strands + 1)
length_embeddings = self.length_embeddings(torch.clamp(lengths, 1, self.length_embeddings.num_embeddings) //
self.gene_length_bin_size)
embeddings = generep_embeddings + direction_embeddings + length_embeddings
if hasattr(self, 'position_embeddings'):
position_ids = torch.arange(gene_reps.size()[1], dtype=torch.long, device=gene_reps.device)
position_ids = position_ids.unsqueeze(0).expand(gene_reps.shape[:-1])
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
```
#### File: tragec/models/models_t5.py
```python
from torch import nn
from torch.nn import LayerNorm
from transformers import T5Config
from .modeling import BioModel, GeCEmbeddings, ProteinEmbeddings
from .configuration import BioConfig
from ..tasks.registry import create_and_register_models
from .utils_t5 import T5Stack
URL_PREFIX = "https://models.fire.tryps.in/models/tragec/"
T5_PRETRAINED_MODEL_ARCHIVE_MAP = {}
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class BioT5Config(BioConfig, T5Config):
pretrained_config_archive_map = T5_PRETRAINED_CONFIG_ARCHIVE_MAP
def __init__(self,
hidden_size: int = 768,
num_hidden_layers: int = 12,
num_attention_heads: int = 12,
**kwargs):
T5Config.__init__(self, **kwargs)
super().__init__(hidden_size=hidden_size,
**kwargs)
# Adapt comparable argument names from T5Config for consistency with BioBertConfig
self.d_model = hidden_size
if 'intermediate_size' in kwargs:
self.d_ff = kwargs['intermediate_size']
self.num_layers = num_hidden_layers
self.num_heads = num_attention_heads
self.use_cache = False
self.feed_forward_proj = 'gated-gelu'
@property
def hidden_size(self):
return self.d_model
@hidden_size.setter
def hidden_size(self, value):
self.d_model = value
class BioT5AbstractModel(BioModel):
""" An abstract class to handle weights initialization and
a simple interface for dowloading and loading pretrained models.
"""
config_class = BioT5Config
pretrained_model_archive_map = T5_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "t5"
class BioT5Model(BioT5AbstractModel):
def __init__(self, config):
super().__init__(config)
self.model = T5Stack(config)
self.init_weights()
def forward(self,
sequence_rep,
input_mask=None,
**kwargs):
return self.model(inputs_embeds=self.embedding(sequence_rep, **kwargs),
attention_mask=input_mask)
class GeCT5Model(BioT5Model):
def __init__(self, config):
super().__init__(config)
self.embedding = GeCEmbeddings(config, position_embeddings=False)
class ProteinT5Model(BioT5Model):
def __init__(self, config):
super().__init__(config)
self.embedding = ProteinEmbeddings(config, position_embeddings=False)
create_and_register_models(locals(), BioT5AbstractModel, GeCT5Model, ProteinT5Model, 't5')
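# Hedged usage sketch (illustrative only; the keyword values are assumptions mirroring the
# unit tests rather than recommended settings):
#   config = BioT5Config(hidden_size=128, num_hidden_layers=2, num_attention_heads=4,
#                        intermediate_size=256, input_rep_size=128)
#   model = GeCT5Model(config)
#   (seq_output,) = model(torch.zeros(1, 100, 128))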
```
#### File: tragec/tasks/task_multiclass.py
```python
import torch
from torch import nn
from tragec.registry import registry
from tragec.models.modeling import BioModel
from tragec.tasks.tasks import SimpleConv, BioDataModule
from tragec.datasets import ProteinDomainDataset
from tragec.tasks.extras_multiclass import pos_weights
class BioSequenceMultiClassification(BioModel):
def __init__(self, config):
super().__init__(config)
self.classify = MultiLabelClassificationHead(
config.output_size, config.num_labels)
self.pos_weights = config.pos_weights
self.init_weights()
def forward(self,
sequence_rep,
input_mask=None,
**kwargs):
outputs = self.model(sequence_rep, input_mask=input_mask, **kwargs)
sequence_output, pooled_output = outputs[:2]
outputs = self.classify(sequence_output) + outputs[2:]
# (loss), prediction_scores, (hidden_states), (attentions)
return outputs
def _compare(self, results, batch):
targets = batch['targets']
logits = results[0]
if self.pos_weights:
loss_fct = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor(self.pos_weights, device=logits.device))
else:
loss_fct = torch.nn.BCEWithLogitsLoss()
classification_loss = loss_fct(logits, targets)
# Roughly calculate best thresholds per-sequence based on F1-score
thresholds, metrics = optimize_thresholds(logits.detach(), targets)
f1, precision, recall, accuracy = metrics.mean(0)
metrics = {
'f1_score': f1,
'precision': precision,
'recall': recall,
'accuracy': accuracy,
'mean_prob': torch.sigmoid(logits).mean(),
}
loss_and_metrics = (classification_loss, metrics)
return loss_and_metrics
def create_multiclass_model(base_cls, base_model, name, seqtype):
def __init__(self, config):
base_cls.__init__(self, config)
BioSequenceMultiClassification.__init__(self, config)
self.model = base_model(config)
self.tokenizer = 'iupac'
self.init_weights()
mc_model = type(
f'{base_model.__name__.replace("Model", "")}ForMultilabelClassification',
(base_cls, BioSequenceMultiClassification),
{'__init__': __init__}
)
if seqtype == 'prot':
registry.register_task_model('protein_domain', f'{seqtype}_{name.lower()}', mc_model)
return mc_model
@registry.register_task('protein_domain', model_kwargs={'num_labels': 14808, 'pos_weights': pos_weights})
class ProteinDomainPredictionModule(BioDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dataset = ProteinDomainDataset
self.split_names = ('train', 'valid', 'holdout')
self.train_split = 'train'
self.val_split = 'valid'
self.test_split = 'holdout'
class MultiLabelClassificationHead(nn.Module):
def __init__(self, input_size: int, num_labels: int):
super().__init__()
self.classify = SimpleConv(input_size, 512, num_labels)
def forward(self, sequence_output):
logits = self.classify(sequence_output).max(1)[0]
outputs = (logits,)
return outputs # (loss), logits
def scores(y_true: torch.Tensor, y_pred: torch.Tensor) -> torch.Tensor:
    '''Calculate F1 score, precision, recall, and accuracy. Can work with gpu tensors.
    The original implementation is written by <NAME> on Kaggle.
    Returns
    -------
    torch.Tensor
        Stacked (f1, precision, recall, accuracy) along the first dimension; each value lies in [0, 1].
    Reference
    ---------
    - https://www.kaggle.com/rejpalcz/best-loss-function-for-f1-score-metric
    - https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score
    - https://discuss.pytorch.org/t/calculating-precision-recall-and-f1-score-in-case-of-multi-label-classification/28265/6
    '''
assert y_true.ndim in (1, 2)
assert y_pred.ndim in (1, 2)
tp = (y_true * y_pred).sum(-1).to(torch.float32)
tn = ((1 - y_true) * (1 - y_pred)).sum(-1).to(torch.float32)
fp = ((1 - y_true) * y_pred).sum(-1).to(torch.float32)
fn = (y_true * (1 - y_pred)).sum(-1).to(torch.float32)
epsilon = 1e-7
precision = tp / (tp + fp + epsilon)
recall = tp / (tp + fn + epsilon)
accuracy = (tp + tn) / (tp + tn + fp + fn)
f1 = 2 * (precision * recall) / (precision + recall + epsilon)
return torch.stack((f1, precision, recall, accuracy))
def optimize_thresholds(c: torch.Tensor, y: torch.Tensor):
best_t = torch.zeros(c.shape[0])
best_metrics = torch.zeros((c.shape[0], 4))
probs = torch.sigmoid(c.float())
for t in range(1, 100):
t = t / 100
metrics = scores(y, (probs > t).float())
for i, (tscore, pre_m) in enumerate(zip(metrics.T, best_metrics)):
if tscore[0] > pre_m[0]:
best_metrics[i], best_t[i] = tscore, t
return best_t, best_metrics
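# Hedged usage sketch (illustrative values, not part of the original module):
#   logits = torch.randn(2, 6)                       # 2 sequences, 6 candidate labels
#   targets = torch.randint(0, 2, (2, 6)).float()    # binary multi-label targets
#   best_t, best_metrics = optimize_thresholds(logits, targets)
#   f1, precision, recall, accuracy = best_metrics.mean(0)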
```
#### File: tragec/test/test_datasets.py
```python
import unittest
from tragec.datasets import ProteinFluorescenceDataset, ProteinSecondaryStructureDataset, ProteinStabilityDataset, \
ProteinRemoteHomologyDataset, ProteinnetDataset, ProteinDomainDataset, ProteinMaskedLanguageModelingDataset
from tragec.datasets import GeCMaskedReconstructionDataset, GeCClassificationDataset
class TestTAPEDataset(unittest.TestCase):
config_cls = ProteinFluorescenceDataset
item_type = tuple
item_length = 3
def setUp(self) -> None:
self.dataset = self.config_cls(data_path='tragec/test/data', split='train')
def test_length(self) -> None:
assert len(self.dataset) == 2
def test_getitem(self) -> None:
assert isinstance(self.dataset[0], self.item_type)
assert len(self.dataset[1]) == self.item_length
def test_collate(self) -> None:
batch = self.dataset.collate_fn([self.dataset[0], self.dataset[1]])
assert (isinstance(batch, dict))
class TestSecondaryStructure(TestTAPEDataset):
config_cls = ProteinSecondaryStructureDataset
item_type = tuple
item_length = 3
class TestStability(TestTAPEDataset):
config_cls = ProteinStabilityDataset
item_type = tuple
item_length = 3
class TestHomology(TestTAPEDataset):
config_cls = ProteinRemoteHomologyDataset
item_type = tuple
item_length = 3
class TestProteinnet(TestTAPEDataset):
config_cls = ProteinnetDataset
item_type = tuple
item_length = 4
class TestDomain(TestTAPEDataset):
config_cls = ProteinDomainDataset
item_type = tuple
item_length = 3
class TestMLM(TestTAPEDataset):
config_cls = ProteinMaskedLanguageModelingDataset
item_type = tuple
item_length = 5
class TestTraGeCDataset(TestTAPEDataset):
config_cls = GeCMaskedReconstructionDataset
item_type = tuple
item_length = 5
class TestGeCClassification(TestTraGeCDataset):
config_cls = GeCClassificationDataset
item_type = tuple
item_length = 5
if __name__ == '__main__':
unittest.main()
```
#### File: tragec/test/test_model.py
```python
import unittest
import random
import numpy as np
import torch
from tragec.datasets import GeCMaskedReconstructionDataset, ProteinMaskedLanguageModelingDataset
from tragec.models.models_bert import GeCBertModel, ProteinBertModel, BioBertConfig, GeCBertForMaskedRecon, \
ProteinBertForMLM
from tragec.models.models_t5 import BioT5Config, GeCT5Model, GeCT5ForMaskedRecon
from tragec.models.models_longformer import ProteinLongformerModel, BioLongformerConfig, ProteinLongformerForMLM
test_config_kwargs = dict(
hidden_size=128,
num_hidden_layers=8,
num_attention_heads=8,
intermediate_size=512,
input_rep_size=128,
)
# Testing TODO:
# TODO: Test construction from dict/json
class TestGeCBertRaw(unittest.TestCase):
def setUp(self) -> None:
self.config = BioBertConfig(**test_config_kwargs)
self.model = GeCBertModel(self.config)
def simpleForwardZeros(self, shape, strands: bool = None, lengths: int = None):
if strands:
strands = torch.ones(shape[:-1], dtype=torch.long)
if lengths:
lengths = torch.ones(shape[:-1], dtype=torch.long) * lengths
(seq_output, pooled_output) = self.model(torch.zeros(shape, dtype=torch.float32),
strands=strands,
lengths=lengths)
self.assertEqual(seq_output.shape, shape)
self.assertEqual(pooled_output.shape, (shape[0], shape[2]))
def test_forward(self) -> None:
self.simpleForwardZeros((1, 100, 128))
def test_forward_batch(self) -> None:
self.simpleForwardZeros((4, 100, 128))
def test_forward_strands(self) -> None:
self.simpleForwardZeros((1, 100, 128), strands=True)
def test_forward_lengths(self) -> None:
self.simpleForwardZeros((1, 100, 128), lengths=100)
def test_forward_lengths_over(self) -> None:
self.simpleForwardZeros((1, 100, 128), lengths=100000)
class TestGeCBertRecon(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.config = BioBertConfig(**test_config_kwargs)
self.model = GeCBertForMaskedRecon(self.config)
self.size = (1, 100, 128)
def test_forward(self) -> None:
target = torch.ones(self.size)
seq_output = self.model(torch.zeros(self.size), targets=target)[0]
self.assertEqual(seq_output.shape, self.size)
def test_backward(self) -> None:
data = np.random.standard_normal(self.size[1:])
data = data.astype(np.float32)
m, t = GeCMaskedReconstructionDataset._apply_pseudobert_mask(data)
batch = GeCMaskedReconstructionDataset.collate_fn(
[(m, np.ones(len(m)), t, np.ones(self.size[1]), np.ones(self.size[1]) * 100)]
)
loss = self.model.training_step(batch, None)
loss.backward()
class TestGeCT5Raw(unittest.TestCase):
def setUp(self) -> None:
self.config = BioT5Config(**test_config_kwargs)
self.model = GeCT5Model(self.config)
def simpleForwardZeros(self, shape, strands=None, lengths=None):
(seq_output,) = self.model(torch.zeros(shape, dtype=torch.float32), strands=strands, lengths=lengths)
self.assertEqual(seq_output.shape, shape)
def test_forward(self) -> None:
self.simpleForwardZeros((1, 100, 128))
def test_forward_batch(self) -> None:
self.simpleForwardZeros((4, 100, 128))
def test_forward_strands(self) -> None:
self.simpleForwardZeros((1, 100, 128), strands=torch.ones((1, 100), dtype=torch.long))
self.simpleForwardZeros((1, 100, 128), strands=torch.zeros((1, 100), dtype=torch.long))
self.simpleForwardZeros((1, 100, 128), strands=torch.zeros((1, 100), dtype=torch.long) - 1)
def test_forward_lengths(self) -> None:
self.simpleForwardZeros((1, 100, 128), lengths=torch.ones((1, 100), dtype=torch.long) * 100)
def test_forward_lengths_over(self) -> None:
self.simpleForwardZeros((1, 100, 128), lengths=torch.ones((1, 100), dtype=torch.long) * 100000)
class TestGeCBertRawCP(TestGeCBertRaw):
def setUp(self) -> None:
self.config = BioBertConfig(gradient_checkpointing=True, **test_config_kwargs)
self.model = GeCBertModel(self.config)
class TestGeCT5RawCP(TestGeCT5Raw):
def setUp(self) -> None:
self.config = BioT5Config(gradient_checkpointing=True, **test_config_kwargs)
self.model = GeCT5Model(self.config)
class TestGeCT5Recon(TestGeCBertRecon):
def setUp(self) -> None:
super().setUp()
self.config = BioT5Config(**test_config_kwargs)
self.model = GeCT5ForMaskedRecon(self.config)
self.size = (1, 100, 128)
    def simpleForwardZeros(self, shape):
(seq_output,) = self.model(torch.zeros(shape, dtype=torch.float32))
self.assertEqual(seq_output.shape, shape)
class TestGeCT5ReconCP(TestGeCT5Recon):
def setUp(self) -> None:
super().setUp()
self.config = BioT5Config(gradient_checkpointing=True, **test_config_kwargs)
self.model = GeCT5ForMaskedRecon(self.config)
self.size = (1, 100, 128)
class TestProtBertRaw(unittest.TestCase):
def setUp(self) -> None:
self.config = BioBertConfig(gradient_checkpointing=True, **test_config_kwargs)
self.model = ProteinBertModel(self.config)
def simpleForwardRandom(self, shape):
(seq_output, pooled_output) = self.model(torch.from_numpy(np.random.randint(0, 30, shape)).long())
self.assertEqual(seq_output.shape, shape + (self.config.hidden_size,))
self.assertEqual(pooled_output.shape, (shape[0], self.config.hidden_size))
def test_forward(self) -> None:
self.simpleForwardRandom((1, 100))
def test_forward_batch(self) -> None:
self.simpleForwardRandom((4, 100))
class TestProtBertMLM(unittest.TestCase):
tokens = ("<pad>",
"<mask>",
"<cls>",
"<sep>",
"<unk>",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",)
def setUp(self) -> None:
self.config = BioBertConfig(gradient_checkpointing=True, vocab_size=30, **test_config_kwargs)
self.model = ProteinBertForMLM(self.config)
self.size = (2, 100)
def test_forward(self) -> None:
input_tokens = target = torch.from_numpy(np.random.randint(0, 30, self.size)).long()
seq_output = self.model(input_tokens, targets=target)[0]
self.assertEqual(seq_output.shape, self.size + (self.config.vocab_size,))
def test_backward(self) -> None:
data = random.choices(self.tokens, k=self.size[1])
ds = ProteinMaskedLanguageModelingDataset(None, 'train')
masked_tokens, labels = ds._apply_bert_mask(data)
masked_token_ids = np.array(
ds.tokenizer.convert_tokens_to_ids(masked_tokens), np.int64)
input_mask = np.ones_like(masked_token_ids)
masked_token_ids = np.array(
ds.tokenizer.convert_tokens_to_ids(masked_tokens), np.int64)
batch = ds.collate_fn(
[(masked_token_ids, input_mask, labels, None, None), ] * self.size[0]
)
loss = self.model.training_step(batch, None)
loss.backward()
class TestProtLongformerRaw(unittest.TestCase):
def setUp(self) -> None:
self.config = BioLongformerConfig(gradient_checkpointing=True, **test_config_kwargs)
self.model = ProteinLongformerModel(self.config)
def simpleForwardRandom(self, shape):
(seq_output, pooled_output) = self.model(torch.from_numpy(np.random.randint(0, 30, shape)).long())
self.assertEqual(seq_output.shape, shape + (self.config.hidden_size,))
self.assertEqual(pooled_output.shape, (shape[0], self.config.hidden_size))
def test_forward(self) -> None:
self.simpleForwardRandom((1, 100))
def test_forward_batch(self) -> None:
self.simpleForwardRandom((4, 100))
class TestProtLongformerMLM(unittest.TestCase):
tokens = ("<pad>",
"<mask>",
"<cls>",
"<sep>",
"<unk>",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",)
def setUp(self) -> None:
self.config = BioLongformerConfig(gradient_checkpointing=True, vocab_size=30, **test_config_kwargs)
self.model = ProteinLongformerForMLM(self.config)
self.size = (2, 100)
def test_forward(self) -> None:
input_tokens = target = torch.from_numpy(np.random.randint(0, 30, self.size)).long()
seq_output = self.model(input_tokens, targets=target)[0]
self.assertEqual(seq_output.shape, self.size + (self.config.vocab_size,))
def test_backward(self) -> None:
data = random.choices(self.tokens, k=self.size[1])
ds = ProteinMaskedLanguageModelingDataset(None, 'train')
masked_tokens, labels = ds._apply_bert_mask(data)
masked_token_ids = np.array(
ds.tokenizer.convert_tokens_to_ids(masked_tokens), np.int64)
input_mask = np.ones_like(masked_token_ids)
masked_token_ids = np.array(
ds.tokenizer.convert_tokens_to_ids(masked_tokens), np.int64)
batch = ds.collate_fn(
[(masked_token_ids, input_mask, labels, None, None), ] * self.size[0]
)
loss = self.model.training_step(batch, None)
loss.backward()
if __name__ == '__main__':
unittest.main()
```
#### File: tragec/utils/file_utils.py
```python
from __future__ import (absolute_import, division, print_function, unicode_literals)
import argparse
import os
import random
import typing
from time import strftime, gmtime
def int_or_str(arg: str) -> typing.Union[int, str]:
try:
return int(arg)
except ValueError:
return arg
def check_is_file(file_path: typing.Optional[str]) -> typing.Optional[str]:
if file_path is None or os.path.isfile(file_path):
return file_path
else:
raise argparse.ArgumentTypeError(f"File path: {file_path} is not a valid file")
def check_is_dir(dir_path: typing.Optional[str]) -> typing.Optional[str]:
if dir_path is None or os.path.isdir(dir_path):
return dir_path
else:
raise argparse.ArgumentTypeError(f"Directory path: {dir_path} is not a valid directory")
def get_expname(exp_name: typing.Optional[str],
task: typing.Optional[str] = None,
model_type: typing.Optional[str] = None) -> str:
if exp_name is None:
time_stamp = strftime("%y-%m-%d-%H-%M-%S", gmtime())
exp_name = f"{task}_{model_type}_{time_stamp}_{random.randint(0, int(1e6)):0>6d}"
return exp_name
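# Hedged example (illustrative; the argument values are assumptions):
#   get_expname(None, task='secondary_structure', model_type='bert')
#   -> 'secondary_structure_bert_21-05-01-12-00-00_123456' (timestamp and random suffix vary)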
```
|
{
"source": "jgoodyear/OpenDaylightCookbook",
"score": 2
}
|
#### File: chapter3/chapter3-recipe3/shortest_path.py
```python
from mininet.topo import Topo
from mininet.cli import CLI
from mininet.net import Mininet
from mininet.link import TCLink
from mininet.util import irange,dumpNodeConnections
from mininet.log import setLogLevel
class Fast_Failover_Demo_Topo(Topo):
def __init__(self):
# Initialize topology and default options
Topo.__init__(self)
s1 = self.addSwitch('s1',dpid='0000000000000001')
s2a = self.addSwitch('s2a',dpid='000000000000002a')
s2b = self.addSwitch('s2b',dpid='000000000000002b')
s2c = self.addSwitch('s2c',dpid='000000000000002c')
s3 = self.addSwitch('s3',dpid='0000000000000003')
self.addLink(s1, s2a)
self.addLink(s1, s2b)
self.addLink(s2b, s2c)
self.addLink(s3, s2a)
self.addLink(s3, s2c)
host_1 = self.addHost('h1',ip='10.0.0.1',mac='10:00:00:00:00:01')
host_2 = self.addHost('h2',ip='10.0.0.2',mac='10:00:00:00:00:02')
self.addLink(host_1, s1)
self.addLink(host_2, s3)
topos = { 'shortest_path': ( lambda: Fast_Failover_Demo_Topo() ) }
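# Hedged usage note (assumption, not part of the original recipe): a custom topology file
# like this is normally loaded through Mininet's --custom/--topo options, e.g.
#   sudo mn --custom shortest_path.py --topo shortest_path --controller remote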
```
|
{
"source": "jgoppert/aae364_notebook",
"score": 4
}
|
#### File: jgoppert/aae364_notebook/grandprix.py
```python
import numpy as np
import control
import scipy
import matplotlib.pyplot as plt
class SE2:
"""
This is an implementation of the mathematical group SE2, that represents rigid
body motions in the plane. We are using it as it allows us to turn the
non-linear control problem of driving a car on a plane into a linear control
problem that you can solve with the methods learned in this class.
@see http://ethaneade.com/lie.pdf
@see https://www.youtube.com/watch?v=mJ8ZDdA10GY
"""
def from_params(self, v):
"""`
Create group form parameterization.
v: [theta, x, y]
"""
theta, x, y = v
return np.array([
[np.cos(theta), -np.sin(theta), x],
[np.sin(theta), np.cos(theta), y],
[0, 0, 1]
])
def to_params(self, G):
"""
Get parameterization of group.
v = [theta, x, y]
"""
theta = np.arctan2(G[1, 0], G[0, 0])
x = G[0, 2]
y = G[1, 2]
return np.array([theta, x, y])
def wedge(self, v):
"""
This function takes a vector in R^3 and transforms it into an element of
the lie algebra using the wedge operator.
@param v:
v[0] : dtheta - rotational rate
v[1] : ux - velocity in body x frame
v[2] : uy - velocity in body y frame
@return The 3x3 matrix in the lie algebra
"""
dtheta, dx, dy = v
return np.array([
[0, -dtheta, dx],
[dtheta, 0, dy],
[0, 0, 0]
])
def vee(self, Omega):
"""
This takes an element of the lie algebra
and transforms it to a vector in R^n using the vee operator.
@param Omega: element of lie algebra
@return vector in R^3
"""
theta = Omega[1, 0]
x = Omega[0, 2]
y = Omega[1, 2]
return np.array([theta, x, y])
def exp(self, Omega):
"""
This is the exponential map that transforms an element of the lie algebra
se2 to the lie group SE2
"""
theta = Omega[1, 0]
u = np.array([Omega[0, 2], Omega[1, 2]])
if np.abs(theta) < 1e-5:
A = 1
B = 0
else:
A = np.sin(theta)/theta
B = (1 - np.cos(theta))/theta
V = np.array([[A, -B], [B, A]])
p = V.dot(u)
return np.array([
[np.cos(theta), -np.sin(theta), p[0]],
[np.sin(theta), np.cos(theta), p[1]],
[0, 0, 1]
])
def log(self, G):
"""
The is the log map that transforms an element in the lie group SE2 to the
lie algebra se2
"""
theta = np.arctan2(G[1, 0], G[0, 0])
if np.abs(theta) < 1e-5:
A = 1
B = 0
else:
A = np.sin(theta)/theta
B = (1 - np.cos(theta))/theta
V_I = np.array([[A, B], [-B, A]])/(A**2 + B**2)
p = np.array([G[0, 2], G[1, 2]])
u = V_I.dot(p)
return np.array([
[0, -theta, u[0]],
[theta, 0, u[1]],
[0, 0, 0]
])
def test_SE2():
"""
Make sure SE2 is working properly.
"""
G = SE2()
v = np.array([1, 2, 3])
assert np.allclose(G.vee(G.wedge(v)), v)
assert np.allclose(G.vee(G.log(G.exp(G.wedge(v)))), v)
assert np.allclose(G.to_params(G.from_params(v)), v)
test_SE2()
class Sim:
def __init__(self, Controller):
"""
Setup the sim and load the controller.
"""
self.G = SE2()
self.data = {
't': [],
'theta': [],
'x': [],
'y': [],
'theta_r': [],
'x_r': [],
'y_r': [],
'throttle': [],
'velocity': [],
'steering': [],
'wheel': [],
'e_theta': [],
'e_x': [],
'e_y': [],
'track_left_x': [],
'track_left_y': [],
'track_right_x': [],
'track_right_y': [],
'off_track': [],
}
# you can turn on/off noise and disturbance here
self.enable_noise = 1 # turn on noise (0 or 1)
self.enable_disturbance = 1 # turn on disturbance (0 or 1)
# parameters
self.dt = 0.001 # time increment for controller and data storage
self.tf = 5 # final time
self.track = [1, -1, 1, 1, 1, -1, 1, 1] # describes track shape
self.track_length = 5 # length of track in meters
self.verbose = False # show messages
self.width = 0.05 # the width of the track in meters
self.wheelbase = 0.01 # distance from rear axle to front axle
self.disturbance_mag_x = 0 # disturbance due to unmodelled effects
self.disturbance_mag_theta = 1 # magnitude of theta disturbance
        self.noise_mag = 5e-1  # magnitude of noise for the error signal
        self.off_track_velocity_penalty = 0.5  # fraction of velocity lost when off track [0-1]
self.desired_speed = 2 # desired speed of reference point
self.crash_distance = 0.2
# setup controller
self.controller = Controller(self.dt)
if self.verbose:
print('sim initialized')
def run(self):
if self.verbose:
print('sim started')
# randomize noise and disturbance phase
phi_dist = 0.1*np.pi*np.random.randn()
phi_noise = 0.1*np.pi*np.random.randn()
# put the car at the starting line, facing the right direction
theta0 = 0
x0 = self.width/2
y0 = 0
X = self.G.from_params([theta0, x0, y0])
Xr = self.G.from_params([theta0, 0, 0])
# start reference position as starting line
velocity = 0
distance = 0
crashed = False
for t in np.arange(0, self.tf, self.dt):
# compute error and control
theta_r, x_r, y_r = self.G.to_params(Xr)
theta, x, y = self.G.to_params(X)
Xr = self.G.from_params([theta_r, x_r, y_r])
track_left = Xr.dot(self.G.from_params([0, self.width, 0]))
track_right = Xr.dot(self.G.from_params([0, -self.width, 0]))
track_left_theta, track_left_x, track_left_y = self.G.to_params(track_left)
track_right_theta, track_right_x, track_right_y = self.G.to_params(track_right)
error = self.G.vee(self.G.log(np.linalg.inv(Xr).dot(X)))
# check if you ran off the track
if (np.abs(error[1]) > self.width):
off_track = True
else:
off_track = False
# check if you are way off track
if (np.abs(error[1]) > self.crash_distance):
crashed = True
# reference trajectory, the race course
t_lap = self.track_length/self.desired_speed
leg_d = self.track_length/len(self.track)
leg_dt = leg_d/self.desired_speed
u_r = np.array([0, 0, 0])
for i_leg, turn in enumerate(self.track):
d_lap = distance % self.track_length
if d_lap < (i_leg + 1)*leg_d:
u_r = np.array([self.track[i_leg]*np.pi/2/leg_dt, 0, self.desired_speed])
break
if error[2] > 0:
distance += self.desired_speed*self.dt
else:
u_r = np.array([0, 0, 0])
# add noise
error += self.enable_noise*self.noise_mag*(np.sin(30*2*np.pi*t + phi_noise))*velocity
dXr = self.G.exp(self.G.wedge(u_r*self.dt))
Xr = Xr.dot(dXr)
# call the controller
throttle, steering = self.controller.update(error, u_r)
# update actuators
if throttle < 0:
throttle = 0
elif throttle > 1:
throttle = 1
if steering > 1:
steering = 1
elif steering < -1:
steering = -1
wheel = steering
velocity = throttle
if crashed:
velocity = 0
elif off_track:
velocity = (1-self.off_track_velocity_penalty)*velocity
# simulate disturbance in body frame
dist = self.enable_disturbance*(0.2 + np.sin(3*t*2*np.pi + phi_dist + np.random.randn()))*velocity
disturbance_x = dist*self.disturbance_mag_x
disturbance_theta = dist*self.disturbance_mag_theta
# integrate trajectory
dtheta = velocity*np.tan(wheel)/self.wheelbase + disturbance_theta
dx = disturbance_x
dy = velocity
u = np.array([dtheta, dx, dy])
dX = self.G.exp(self.G.wedge(u*self.dt))
X = X.dot(dX)
# store data
self.data['t'].append(t)
self.data['theta'].append(theta)
self.data['x'].append(x)
self.data['y'].append(y)
self.data['theta_r'].append(theta_r)
self.data['x_r'].append(x_r)
self.data['y_r'].append(y_r)
self.data['throttle'].append(throttle)
self.data['steering'].append(steering)
self.data['velocity'].append(velocity)
self.data['wheel'].append(wheel)
self.data['e_theta'].append(error[0])
self.data['e_x'].append(error[1])
self.data['e_y'].append(error[2])
self.data['track_left_x'].append(track_left_x)
self.data['track_left_y'].append(track_left_y)
self.data['track_right_x'].append(track_right_x)
self.data['track_right_y'].append(track_right_y)
self.data['off_track'].append(off_track)
# convert lists to numpy array for faster plotting
for k in self.data.keys():
self.data[k] = np.array(self.data[k])
if self.verbose:
print('sim complete')
print('Distance: {:10.4f} m'.format(distance))
return distance
def plot(self):
theta = np.linspace(0, 2*np.pi, 1000)
plt.figure(figsize=(10, 10))
plt.plot(self.data['track_left_x'], self.data['track_left_y'], 'g-', label='track', linewidth=3, alpha=0.5)
plt.plot(self.data['track_right_x'], self.data['track_right_y'], 'g-', linewidth=3, alpha=0.5)
plt.plot(self.data['x_r'], self.data['y_r'], 'r-', label='reference', linewidth=3, alpha=0.5)
plt.plot(self.data['x'], self.data['y'], 'b', label='vehicle')
plt.legend()
plt.axis('equal')
plt.title('track')
plt.xlabel('East')
plt.ylabel('North')
plt.grid()
plt.figure(figsize=(10, 30))
n = 3
plt.subplot(n, 1, 1)
plt.plot(self.data['t'], self.data['e_x'], label='e_x')
plt.xlabel('t, sec')
plt.ylabel('m')
plt.legend()
plt.title('cross track error')
plt.grid()
plt.subplot(n, 1, 2)
plt.plot(self.data['t'], self.data['e_y'], label='e_y')
plt.legend()
plt.xlabel('t, sec')
plt.ylabel('m')
plt.title('along track error')
plt.grid()
plt.subplot(n, 1, 3)
plt.plot(self.data['t'], np.rad2deg(self.data['e_theta']), label='e_theta')
plt.legend()
plt.xlabel('t, sec')
plt.ylabel('deg')
plt.title('angle error')
plt.grid()
plt.figure(figsize=(10, 20))
n = 2
plt.subplot(n, 1, 1)
plt.plot(self.data['t'], self.data['throttle'], label='command')
plt.plot(self.data['t'], self.data['velocity'], label='velocity')
plt.legend()
plt.xlabel('t, sec')
plt.ylabel('velocity, m/s')
plt.title('velocity')
plt.grid()
plt.subplot(n, 1, 2)
plt.plot(self.data['t'], np.rad2deg(self.data['steering']), label='command')
plt.plot(self.data['t'], np.rad2deg(self.data['wheel']), label='wheel')
plt.legend()
plt.xlabel('t, sec')
plt.ylabel('angle, deg')
plt.title('steering')
plt.grid()
class DiscreteStateSpace:
"""
Use this class to implement any controller you need.
It takes a continuous time transfer function.
"""
def __init__(self, H, dt):
sys = control.tf2ss(control.c2d(H, dt))
self.x = np.zeros((sys.A.shape[0], 1))
self.A = sys.A
self.B = sys.B
self.C = sys.C
self.D = sys.D
self.dt = sys.dt
def update(self, u):
self.x = self.A.dot(self.x) + self.B.dot(u)
return self.C.dot(self.x) + self.D.dot(u)
def __repr__(self):
return repr(self.__dict__)
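# Hedged example (not part of the original module): a minimal controller sketch with the
# interface Sim expects -- constructed with dt and exposing update(error, u_r) that returns
# (throttle, steering). The gains are illustrative assumptions; a dynamic compensator could
# be built the same way by wrapping DiscreteStateSpace instances and calling their update().
class ExampleProportionalController:
    def __init__(self, dt):
        self.dt = dt
    def update(self, error, u_r):
        e_theta, e_x, e_y = error
        throttle = 0.5 - 1.0*e_y            # open the throttle when behind the reference point
        steering = -2.0*e_x - 1.0*e_theta   # steer back toward the track centerline
        return throttle, steering
# e.g. sim = Sim(ExampleProportionalController); distance = sim.run(); sim.plot()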
```
|
{
"source": "jgoppert/-casadi_f16",
"score": 3
}
|
#### File: jgoppert/-casadi_f16/f16.py
```python
import dataclasses
import numpy as np
import casadi as ca
import control
INTERP_DEFAULT = 'linear'
#INTERP_DEFAULT = 'bspline'
TABLE_CHECK_TOL = 1e-9 # need to increase if using bspline
def saturate(x, min_val, max_val):
"""
A casadi function for saturation.
"""
return ca.if_else(x < min_val, min_val, ca.if_else(x > max_val, max_val, x))
def build_tables():
"""
Constructs force and moment tables for f16.
"""
tables = {}
def create_table2D(name, row_label, col_label, data, abs_row=False, abs_col=False, interp_method=INTERP_DEFAULT):
"""
Creates a table interpolation function with x as rows and y as columns.
"""
assert data[0, 0] == 0
row_grid = data[1:, 0]
col_grid = data[0, 1:]
table_data = data[1:, 1:]
interp = ca.interpolant(name + '_interp', interp_method, [row_grid, col_grid],
table_data.ravel(order='F'))
x = ca.MX.sym('x')
y = ca.MX.sym('y')
if abs_row:
xs = ca.fabs(x)
else:
xs = x
if abs_col:
ys = ca.fabs(y)
else:
ys = y
func = ca.Function('Cx', [x, y], [interp(ca.vertcat(xs, ys))], [row_label, col_label], [name])
# check
for i, x in enumerate(row_grid):
for j, y in enumerate(col_grid):
assert ca.fabs(func(x, y) - table_data[i, j]) < TABLE_CHECK_TOL
return func
def create_damping():
data = np.array([
[-10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45], # alpha, deg
[-0.267, -0.110, 0.308, 1.34, 2.08, 2.91, 2.76, 2.05, 1.50, 1.49, 1.83, 1.21], # CXq
[0.882, 0.852, 0.876, 0.958, 0.962, 0.974, 0.819, 0.483, 0.590, 1.21, -0.493, -1.04], # CYr
[-0.108, -0.108, -0.188, 0.110, 0.258, 0.226, 0.344, 0.362, 0.611, 0.529, 0.298, -2.27], # CYp
[-8.80, -25.8, -28.9, -31.4, -31.2, -30.7, -27.7, -28.2, -29.0, -29.8, -38.3, -35.3], # CZq
[-0.126, -0.026, 0.063, 0.113, 0.208, 0.230, 0.319, 0.437, 0.680, 0.100, 0.447, -0.330], # Clr
[-0.360, -0.359, -0.443, -0.420, -0.383, -0.375, -0.329, -0.294, -0.230, -0.210, -0.120, -0.100], # Clp
[-7.21, -0.540, -5.23, -5.26, -6.11, -6.64, -5.69, -6.00, -6.20, -6.40, -6.60, -6.00], # Cmq
[-0.380, -0.363, -0.378, -0.386, -0.370, -0.453, -0.550, -0.582, -0.595, -0.637, -1.02, -0.840], # Cnr
[0.061, 0.052, 0.052, -0.012, -0.013, -0.024, 0.050, 0.150, 0.130, 0.158, 0.240, 0.150]]) # Cnp
names = ['CXq', 'CYr', 'CYp', 'CZq', 'Clr', 'Clp', 'Cmq', 'Cnr', 'Cnp']
for i, name in enumerate(names):
tables[name] = ca.interpolant('{:s}_interp'.format(name), INTERP_DEFAULT,
[data[0, :]], data[i + 1, :])
# check
for j, x in enumerate(data[0, :]):
assert ca.fabs(tables[name](x) - data[i + 1, j]) < TABLE_CHECK_TOL
create_damping()
tables['Cx'] = create_table2D(
name='Cx', row_label='alpha_deg', col_label='elev_deg',
data=np.array([ # alpha, deg
[0, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
[-24, -0.099, -0.081, -0.081, -0.063, -0.025, 0.044, 0.097, 0.113, 0.145, 0.167, 0.174, 0.166],
[-12, -0.048, -0.038, -0.040, -0.021, 0.016, 0.083, 0.127, 0.137, 0.162, 0.177, 0.179, 0.167], # elev, deg
[0, -0.022, -0.020, -0.021, -0.004, 0.032, 0.094, 0.128, 0.130, 0.154, 0.161, 0.155, 0.138],
[12, -0.040, -0.038, -0.039, -0.025, 0.006, 0.062, 0.087, 0.085, 0.100, 0.110, 0.104, 0.091],
[24, -0.083, -0.073, -0.076, -0.072, -0.046, 0.012, 0.024, 0.025, 0.043, 0.053, 0.047, 0.040]]).T)
def create_Cy():
beta_deg = ca.MX.sym('beta_deg')
ail_deg = ca.MX.sym('ail_deg')
rdr_deg = ca.MX.sym('rdr_deg')
tables['Cy'] = ca.Function('Cy', [beta_deg, ail_deg, rdr_deg], [-0.02*beta_deg + 0.021*ail_deg/20 + 0.086*rdr_deg/30],
['beta_deg', 'ail_deg', 'rdr_deg'], ['Cy'])
create_Cy()
def create_Cz():
alpha_deg = ca.MX.sym('alpha_deg')
beta_deg = ca.MX.sym('beta_deg')
elev_deg = ca.MX.sym('elev_deg')
data = np.array([
[-10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
[0.770, 0.241, -0.100, -0.416, -0.731, -1.053, -1.366, -1.646, -1.917, -2.120, -2.248, -2.229]])
interp = ca.interpolant('Cz_interp', INTERP_DEFAULT, [data[0, :]],
data[1, :])
return ca.Function('Cz',
[alpha_deg, beta_deg, elev_deg],
[interp(alpha_deg)*(1 - (beta_deg/57.3)**2) - 0.19*elev_deg/25.0],
['alpha_deg', 'beta_deg', 'elev_deg'], ['Cz'])
tables['Cz'] = create_Cz()
tables['Cl'] = create_table2D(
name='Cl', row_label='alpha_deg', col_label='beta_deg',
data=np.array([ # alpha, deg
[0, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[5, -0.001, -0.004, -0.008, -0.012, -0.016, -0.019, -0.020, -0.020, -0.015, -0.008, -0.013, -0.015],
[10, -0.003, -0.009, -0.017, -0.024, -0.030, -0.034, -0.040, -0.037, -0.016, -0.002, -0.010, -0.019], # beta, deg
[15, -0.001, -0.010, -0.020, -0.030, -0.039, -0.044, -0.050, -0.049, -0.023, -0.006, -0.014, -0.027],
[20, 0.000, -0.010, -0.022, -0.034, -0.047, -0.046, -0.059, -0.061, -0.033, -0.036, -0.035, -0.035],
[25, 0.007, -0.010, -0.023, -0.034, -0.049, -0.046, -0.068, -0.071, -0.060, -0.058, -0.062, -0.059],
[30, 0.009, -0.011, -0.023, -0.037, -0.050, -0.047, -0.074, -0.079, -0.091, -0.076, -0.077, -0.076]]).T, abs_col=True)
tables['Cm'] = create_table2D(
name='Cm', row_label='alpha_deg', col_label='elev_deg',
data=np.array([ # alpha, deg
[0, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
[-24, 0.205, 0.168, 0.186, 0.196, 0.213, 0.251, 0.245, 0.238, 0.252, 0.231, 0.198, 0.192],
[-12, 0.081, 0.077, 0.107, 0.110, 0.110, 0.141, 0.127, 0.119, 0.133, 0.108, 0.081, 0.093], # elev, deg
[0, -0.046, -0.020, -0.009, -0.005, -0.006, 0.010, 0.006, -0.001, 0.014, 0.000, -0.013, 0.032],
[12, -0.174, -0.145, -0.121, -0.127, -0.129, -0.102, -0.097, -0.113, -0.087, -0.084, -0.069, -0.006],
[24, -0.259, -0.202, -0.184, -0.193, -0.199, -0.150, -0.160, -0.167, -0.104, -0.076, -0.041, -0.005]]).T)
tables['Cn'] = create_table2D(
name='Cn', row_label='alpha_deg', col_label='beta_deg',
data=np.array([ # alpha, deg
[0, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[5, 0.018, 0.019, 0.018, 0.019, 0.019, 0.018, 0.013, 0.007, 0.004, -0.014, -0.017, -0.033],
[10, 0.038, 0.042, 0.042, 0.042, 0.043, 0.039, 0.030, 0.017, 0.004, -0.035, -0.047, -0.057], # beta, deg
[15, 0.056, 0.057, 0.059, 0.058, 0.058, 0.053, 0.032, 0.012, 0.002, -0.046, -0.071, -0.073],
[20, 0.064, 0.077, 0.076, 0.074, 0.073, 0.057, 0.029, 0.007, 0.012, -0.034, -0.065, -0.041],
[25, 0.074, 0.086, 0.093, 0.089, 0.080, 0.062, 0.049, 0.022, 0.028, -0.012, -0.002, -0.013],
[30, 0.079, 0.090, 0.106, 0.106, 0.096, 0.080, 0.068, 0.030, 0.064, 0.015, 0.011, -0.001]]).T, abs_col=True)
tables['DlDa'] = create_table2D(
name='DlDa', row_label='alpha_deg', col_label='beta_deg',
data=np.array([ # alpha, deg
[0, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
[-30, -0.041, -0.052, -0.053, -0.056, -0.050, -0.056, -0.082, -0.059, -0.042, -0.038, -0.027, -0.017],
[-20, -0.041, -0.053, -0.053, -0.053, -0.050, -0.051, -0.066, -0.043, -0.038, -0.027, -0.023, -0.016],
[-10, -0.042, -0.053, -0.052, -0.051, -0.049, -0.049, -0.043, -0.035, -0.026, -0.016, -0.018, -0.014], # beta, deg
[0, -0.040, -0.052, -0.051, -0.052, -0.048, -0.048, -0.042, -0.037, -0.031, -0.026, -0.017, -0.012],
[10, -0.043, -0.049, -0.048, -0.049, -0.043, -0.042, -0.042, -0.036, -0.025, -0.021, -0.016, -0.011],
[20, -0.044, -0.048, -0.048, -0.047, -0.042, -0.041, -0.020, -0.028, -0.013, -0.014, -0.011, -0.010],
[30, -0.043, -0.049, -0.047, -0.045, -0.042, -0.037, -0.003, -0.013, -0.010, -0.003, -0.007, -0.008]]).T)
tables['DlDr'] = create_table2D(
name='DlDr', row_label='alpha_deg', col_label='beta_deg',
data=np.array([ # alpha, deg
[0, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
[-30, 0.005, 0.017, 0.014, 0.010, -0.005, 0.009, 0.019, 0.005, -0.000, -0.005, -0.011, 0.008],
[-20, 0.007, 0.016, 0.014, 0.014, 0.013, 0.009, 0.012, 0.005, 0.000, 0.004, 0.009, 0.007],
[-10, 0.013, 0.013, 0.011, 0.012, 0.011, 0.009, 0.008, 0.005, -0.002, 0.005, 0.003, 0.005], # beta, deg
[0, 0.018, 0.015, 0.015, 0.014, 0.014, 0.014, 0.014, 0.015, 0.013, 0.011, 0.006, 0.001],
[10, 0.015, 0.014, 0.013, 0.013, 0.012, 0.011, 0.011, 0.010, 0.008, 0.008, 0.007, 0.003],
[20, 0.021, 0.011, 0.010, 0.011, 0.010, 0.009, 0.008, 0.010, 0.006, 0.005, 0.000, 0.001],
[30, 0.023, 0.010, 0.011, 0.011, 0.011, 0.010, 0.008, 0.010, 0.006, 0.014, 0.020, 0.000]]).T)
tables['DnDa'] = create_table2D(
name='DnDa', row_label='alpha_deg', col_label='beta_deg',
data=np.array([ # alpha, deg
[0, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
[-30, 0.001, -0.027, -0.017, -0.013, -0.012, -0.016, 0.001, 0.017, 0.011, 0.017, 0.008, 0.016],
[-20, 0.002, -0.014, -0.016, -0.016, -0.014, -0.019, -0.021, 0.002, 0.012, 0.015, 0.015, 0.011],
[-10, -0.006, -0.008, -0.006, -0.006, -0.005, -0.008, -0.005, 0.007, 0.004, 0.007, 0.006, 0.006], # beta, deg
[0, -0.011, -0.011, -0.010, -0.009, -0.008, -0.006, 0.000, 0.004, 0.007, 0.010, 0.004, 0.010],
[10, -0.015, -0.015, -0.014, -0.012, -0.011, -0.008, -0.002, 0.002, 0.006, 0.012, 0.011, 0.011],
[20, -0.024, -0.010, -0.004, -0.002, -0.001, 0.003, 0.014, 0.006, -0.001, 0.004, 0.004, 0.006],
[30, -0.022, 0.002, -0.003, -0.005, -0.003, -0.001, -0.009, -0.009, -0.001, 0.003, -0.002, 0.001]]).T)
tables['DnDr'] = create_table2D(
name='DnDr', row_label='alpha_deg', col_label='beta_deg',
data=np.array([ # alpha, deg
[0, -10, -5, 0, 5, 10, 15, 20, 25, 30, 35, 40, 45],
[-30, -0.018, -0.052, -0.052, -0.052, -0.054, -0.049, -0.059, -0.051, -0.030, -0.037, -0.026, -0.013],
[-20, -0.028, -0.051, -0.043, -0.046, -0.045, -0.049, -0.057, -0.052, -0.030, -0.033, -0.030, -0.008],
[-10, -0.037, -0.041, -0.038, -0.040, -0.040, -0.038, -0.037, -0.030, -0.027, -0.024, -0.019, -0.013], # beta, deg
[0, -0.048, -0.045, -0.045, -0.045, -0.044, -0.045, -0.047, -0.048, -0.049, -0.045, -0.033, -0.016],
[10, -0.043, -0.044, -0.041, -0.041, -0.040, -0.038, -0.034, -0.035, -0.035, -0.029, -0.022, -0.009],
[20, -0.052, -0.034, -0.036, -0.036, -0.035, -0.028, -0.024, -0.023, -0.020, -0.016, -0.010, -0.014],
[30, -0.062, -0.034, -0.027, -0.028, -0.027, -0.027, -0.023, -0.023, -0.019, -0.009, -0.025, -0.010]]).T)
tables['thrust_idle'] = create_table2D(
name='thrust_idle', row_label='alt_ft', col_label='mach',
data=np.array([ # alt, ft
[0, 0, 1.0e4, 2.0e4, 3.0e4, 4.0e4, 5.0e4],
[0, 1060, 670, 890, 1140, 1500, 1860],
[0.2, 635, 425, 690, 1010, 1330, 1700],
[0.4, 60, 25, 345, 755, 1130, 1525],
[0.6, -1020, -710, -300, 350, 910, 1360], # mach
[0.8, -2700, -1900, -1300, -247, 600, 1100],
[1.0, -3600, -1400, -595, -342, -200, 700]]).T)
tables['thrust_mil'] = create_table2D(
name='thrust_mil', row_label='alt_ft', col_label='mach',
data=np.array([ # alt, ft
[0, 0, 1.0e4, 2.0e4, 3.0e4, 4.0e4, 5.0e4],
[0, 12680, 9150, 6200, 3950, 2450, 1400],
[0.2, 12680, 9150, 6313, 4040, 2470, 1400],
[0.4, 12610, 9312, 6610, 4290, 2600, 1560], # mach
[0.6, 12640, 9839, 7090, 4660, 2840, 1660],
[0.8, 12390, 10176, 7750, 5320, 3250, 1930],
[1.0, 11680, 9848, 8050, 6100, 3800, 2310]]).T)
tables['thrust_max'] = create_table2D(
name='thrust_max', row_label='alt_ft', col_label='mach',
data=np.array([ # alt, ft
[0, 0, 1.0e4, 2.0e4, 3.0e4, 4.0e4, 5.0e4],
[0, 20000, 15000, 10800, 7000, 4000, 2500],
[0.2, 21420, 15700, 11225, 7323, 4435, 2600],
[0.4, 22700, 16860, 12250, 8154, 5000, 2835], # mach
[0.6, 24240, 18910, 13760, 9285, 5700, 3215],
[0.8, 26070, 21075, 15975, 11115, 6860, 3950],
[1.0, 28886, 23319, 18300, 13484, 8642, 5057]]).T)
def thrust():
power = ca.MX.sym('power')
alt = ca.MX.sym('alt')
rmach = ca.MX.sym('rmach')
tidl = tables['thrust_idle'](alt, rmach)
tmil = tables['thrust_mil'](alt, rmach)
tmax = tables['thrust_max'](alt, rmach)
thrust = ca.if_else(power < 50,
tidl + (tmil - tidl)*power*0.02,
tmil + (tmax - tmil)*(power - 50)*0.02)
return ca.Function('thrust',
[power, alt, rmach],
[thrust],
['power', 'alt', 'mach'],
['thrust'])
tables['thrust'] = thrust()
def propulsion():
dp = ca.MX.sym('dp')
thtl = ca.MX.sym('thtl')
power = ca.MX.sym('power')
power_cmd = ca.MX.sym('power_cmd')
# reciprocal of time constant
rtau = ca.Function('rtau', [dp], [ca.if_else(dp < 25, 1, ca.if_else(dp > 50, 0.1, 1.9 - 0.036*dp))])
# power command vs. throttle relationship
tgear = ca.Function('tgear', [thtl],
[ca.if_else(thtl < 0.77, 64.94*thtl, 217.38*thtl - 117.38)],
['thtl'], ['pow'])
# rate of change of power
pdot = ca.Function('pdot', [power, power_cmd], [
ca.if_else(power_cmd > 50,
ca.if_else(power > 50, 5*(power_cmd - power), rtau(60 - power)*(60 - power)),
ca.if_else(power > 50, 5*(40 - power), rtau(power_cmd - power)*(power_cmd - power))
)
], ['power', 'power_cmd'], ['pdot'])
tables['tgear'] = tgear
tables['pdot'] = pdot
propulsion()
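    # Worked example (illustrative check, not part of the original source): the piecewise
    # throttle gearing above gives tables['tgear'](0.5) = 64.94*0.5 = 32.47 and
    # tables['tgear'](1.0) = 217.38 - 117.38 = 100.0, i.e. full throttle maps to 100% power.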
def atmosphere():
vt = ca.MX.sym('vt')
alt = ca.MX.sym('alt')
R0 = 2.377e-3
Tfac = 1 - 0.703e-5*alt
T = ca.if_else(alt > 35000, 390, 519*Tfac)
rho = R0*(Tfac**(4.14))
tables['amach'] = ca.Function('amach', [vt, alt], [vt/(ca.sqrt(1.4*1716.3*T))], ['vt', 'alt'], ['amach'])
tables['qbar'] = ca.Function('qbar', [vt, alt], [0.5*rho*vt**2], ['vt', 'alt'], ['qbar'])
        tables['ps'] = ca.Function('ps', [alt], [1715*rho*T], ['alt'], ['ps'])
atmosphere()
return tables
tables = build_tables()
class CasadiDataClass:
"""
A base class for dataclasses with casadi.
"""
def __post_init__(self):
self.__name_to_index = {}
self.__index_to_name = {}
for i, field in enumerate(self.fields()):
self.__name_to_index[field.name] = i
self.__index_to_name[i] = field.name
@classmethod
def fields(cls):
return dataclasses.fields(cls)
def to_casadi(self):
return ca.vertcat(*self.to_tuple())
def to_tuple(self):
return dataclasses.astuple(self)
def to_dict(self):
return dataclasses.asdict(self)
@classmethod
def from_casadi(cls, v):
return cls(*[v[i] for i in range(v.shape[0])])
@classmethod
def sym(cls, name):
v = ca.MX.sym(name, len(cls.fields()))
return cls(*[v[i] for i in range(v.shape[0])])
def name_to_index(self, name):
return self.__name_to_index[name]
def index_to_name(self, index):
return self.__index_to_name[index]
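# Hedged usage sketch (illustrative only): the dataclasses defined below build on
# CasadiDataClass, so they convert to/from casadi vectors and can be created symbolically:
#   x = State(VT=502.0)
#   v = x.to_casadi()          # 16x1 casadi column vector
#   x2 = State.from_casadi(v)  # round trip back to a State
#   xs = State.sym('x')        # symbolic State for building casadi expressions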
@dataclasses.dataclass
class State(CasadiDataClass):
"""The vehicle state."""
VT: float = 0 # true velocity, (ft/s)
alpha: float = 0 # angle of attack, (rad)
beta: float = 0 # sideslip angle, (rad)
phi: float = 0 # B321 roll angle, (rad)
theta: float = 0 # B321 pitch angle, (rad)
psi: float = 0 # B321 yaw angle, (rad)
P: float = 0 # body roll rate, (rad/s)
Q: float = 0 # body pitch rate, (rad/s)
R: float = 0 # body yaw rate, (rad/s)
p_N: float = 0 # north position, (m)
p_E: float = 0 # east position, (m)
alt: float = 0 # altitude, (m)
power: float = 0 # power, (0-1)
ail_deg: float = 0 # aileron position, (deg)
elv_deg: float = 0 # elevator position, (deg)
rdr_deg: float = 0 # rudder position, (deg)
@dataclasses.dataclass
class StateDot(CasadiDataClass):
"""The derivative of the vehicle state."""
VT_dot: float = 0 # true velocity derivative, (ft/s^2)
alpha_dot: float = 0 # angle of attack rate, (rad/s)
beta_dot: float = 0 # sideslip rate, (rad/s)
phi_dot: float = 0 # B321 roll rate, (rad/s)
theta_dot: float = 0 # B321 pitch rate, (rad/s)
psi_dot: float = 0 # B321 yaw rate, (rad/s)
P_dot: float = 0 # body roll accel, (rad/s^2)
Q_dot: float = 0 # body pitch accel, (rad/s^2)
R_dot: float = 0 # body yaw accel, (rad/s^2)
V_N: float = 0 # north velocity, (m/s)
V_E: float = 0 # east velocity, (m/s)
alt_dot: float = 0 # climb rate, (m/s)
power_dot: float = 0 # power rate, (NA)
ail_rate_dps: float = 0 # aileron rate, (deg/s)
elv_rate_dps: float = 0 # elevator rate, (deg/s)
rdr_rate_dps: float = 0 # rudder rate, (deg/s)
@dataclasses.dataclass
class Control(CasadiDataClass):
"""The control input."""
thtl: float = 0 # throttle (0-1)
ail_cmd_deg: float = 0 # aileron command, (deg)
elv_cmd_deg: float = 0 # elevator command, (deg)
rdr_cmd_deg: float = 0 # rudder command, (deg)
@dataclasses.dataclass
class Parameters(CasadiDataClass):
"""The constant parameters."""
s: float = 300.0 # reference area, ft^2
b: float = 30.0 # wing span, ft
cbar: float = 11.32 # mean chord, ft
xcgr: float = 0.35 # reference cg, %chord
xcg: float = 0.35 # actual cg, %chord
hx: float = 160.0
g: float = 32.17 # acceleration of gravity, ft/s^2
    weight: float = 20490.446  # weight, lbf
axx: float = 9496.0 # moment of inertia about x
ayy: float = 55814.0 # moment of inertia about y
azz: float = 63100.0 # moment of inertia about z
axz: float = 982.0 # xz moment of inertia
def force_moment(x: State, u: Control, p: Parameters):
"""
The function computes the forces and moments acting on the aircraft.
It is important to separate this from the dynamics as the Gazebo
simulator will be used to simulate extra forces and moments
from collision.
"""
# functions
cos = ca.cos
sin = ca.sin
# parameters
weight = p.weight
g = p.g
hx = p.hx
b = p.b
cbar = p.cbar
s = p.s
xcg = p.xcg
xcgr = p.xcgr
# state
VT = x.VT
alpha = x.alpha
beta = x.beta
phi = x.phi
theta = x.theta
P = x.P
Q = x.Q
R = x.R
alt = x.alt
power = x.power
ail_deg = x.ail_deg
elv_deg = x.elv_deg
rdr_deg = x.rdr_deg
# mass properties
mass = weight/g
# air data computer and engine model
amach = tables['amach'](VT, alt)
qbar = tables['qbar'](VT, alt)
thrust = tables['thrust'](power, alt, amach)
# force component buildup
rad2deg = 180/np.pi
alpha_deg = rad2deg*alpha
beta_deg = rad2deg*beta
dail = ail_deg/20.0
drdr = rdr_deg/30.0
cxt = tables['Cx'](alpha_deg, elv_deg)
cyt = tables['Cy'](beta_deg, ail_deg, rdr_deg)
czt = tables['Cz'](alpha_deg, beta_deg, elv_deg)
clt = ca.sign(beta_deg)*tables['Cl'](alpha_deg, beta_deg) \
+ tables['DlDa'](alpha_deg, beta_deg)*dail \
+ tables['DlDr'](alpha_deg, beta_deg)*drdr
cmt = tables['Cm'](alpha_deg, elv_deg)
cnt = ca.sign(beta_deg)*tables['Cn'](alpha_deg, beta_deg) \
+ tables['DnDa'](alpha_deg, beta_deg)*dail \
+ tables['DnDr'](alpha_deg, beta_deg)*drdr
# damping
tvt = 0.5/VT
b2v = b*tvt
cq = cbar*Q*tvt
cxt += cq*tables['CXq'](alpha_deg)
cyt += b2v*(tables['CYr'](alpha_deg)*R + tables['CYp'](alpha_deg)*P)
czt += cq*tables['CZq'](alpha_deg)
clt += b2v*(tables['Clr'](alpha_deg)*R + tables['Clp'](alpha_deg)*P)
cmt += cq*tables['Cmq'](alpha_deg) + czt*(xcgr - xcg)
cnt += b2v*(tables['Cnr'](alpha_deg)*R + tables['Cnp'](alpha_deg)*P) - cyt*(xcgr - xcg)*cbar/b
# get ready for state equations
sth = sin(theta)
cth = cos(theta)
sph = sin(phi)
cph = cos(phi)
qs = qbar*s
qsb = qs*b
rmqs = qs/mass
gcth = g*cth
ay = rmqs*cyt
az = rmqs*czt
qhx = Q*hx
# force
Fx = -mass*g*sth + qs*cxt + thrust
Fy = mass*(gcth*sph + ay)
Fz = mass*(gcth*cph + az)
# moment
Mx = qsb*clt # roll
My = qs*cbar*cmt - R*hx # pitch
Mz = qsb*cnt + qhx # yaw
return ca.vertcat(Fx, Fy, Fz), ca.vertcat(Mx, My, Mz)
def dynamics(x: State, u: Control, p: Parameters):
"""
This function implements wind frame kinematics tied to the force and moment model.
It does not take into account any collision forces.
"""
Fb, Mb = force_moment(x, u, p)
dx = StateDot()
# functions
cos = ca.cos
sin = ca.sin
# parameters
weight = p.weight
g = p.g
axz = p.axz
axzs = axz*axz
axx = p.axx
ayy = p.ayy
azz = p.azz
# state
VT = x.VT
alpha = x.alpha
beta = x.beta
phi = x.phi
theta = x.theta
psi = x.psi
P = x.P
Q = x.Q
R = x.R
power = x.power
ail_deg = x.ail_deg
rdr_deg = x.rdr_deg
elv_deg = x.elv_deg
# mass properties
mass = weight/g
xqr = azz*(azz - ayy) + axzs
xpq = axz*(axx - ayy + azz)
zpq = (axx - ayy)*axx + axzs
gam = axx*azz - axzs
ypr = azz - axx
# get ready for state equations
cbta = cos(beta)
U = VT*cos(alpha)*cbta
V = VT*sin(beta)
W = VT*sin(alpha)*cbta
sth = sin(theta)
cth = cos(theta)
sph = sin(phi)
cph = cos(phi)
spsi = sin(psi)
cpsi = cos(psi)
qsph = Q*sph
pq = P*Q
qr = Q*R
power_cmd = tables['tgear'](u.thtl)
dx.power_dot = tables['pdot'](power, power_cmd)
# kinematics
dx.phi_dot = P + (sth/cth)*(qsph + R*cph)
dx.theta_dot = Q*cph - R*sph
dx.psi_dot = (qsph + R*cph)/cth
# force equations
U_dot = R*V - Q*W + Fb[0]/mass
V_dot = P*W - R*U + Fb[1]/mass
W_dot = Q*U - P*V + Fb[2]/mass
dum = U**2 + W**2
dx.VT_dot = (U*U_dot + V*V_dot + W*W_dot)/VT
dx.alpha_dot = (U*W_dot - W*U_dot) / dum
dx.beta_dot = (VT*V_dot - V*dx.VT_dot)*cbta/dum
dx.P_dot = (xpq*pq - xqr*qr + azz*Mb[0] + axz*Mb[2]) / gam
dx.Q_dot = (ypr*P*R - axz*(P**2 - R**2) + Mb[1]) / ayy
dx.R_dot = (zpq*pq - xpq*qr + axz*Mb[0] + axx*Mb[2]) / gam
# navigation
t1 = sph*cpsi
t2 = cph*sth
t3 = sph*spsi
s1 = cth*cpsi
s2 = cth*spsi
s3 = t1*sth - cph*spsi
s4 = t3*sth + cph*cpsi
s5 = sph*cth
s6 = t2*cpsi + t3
s7 = t2*spsi - t1
s8 = cph*cth
dx.V_N = U*s1 + V*s3 + W*s6
dx.V_E = U*s2 + V*s4 + W*s7
dx.alt_dot = U*sth - V*s5 - W*s8
# actuators
ail_deg = saturate(x.ail_deg, -21.5, 21.5)
elv_deg = saturate(x.elv_deg, -25.0, 25.0)
rdr_deg = saturate(x.rdr_deg, -30.0, 30.0)
def actuator_model(cmd, pos, rate_limit, pos_limit):
rate = saturate(20.202*(cmd - pos), -rate_limit, rate_limit)
return ca.if_else(rate < 0,
ca.if_else(pos < -pos_limit, 0, rate),
ca.if_else(pos > pos_limit, 0, rate))
dx.ail_rate_dps = actuator_model(u.ail_cmd_deg, ail_deg, 60, 21.5)
dx.elv_rate_dps = actuator_model(u.elv_cmd_deg, elv_deg, 60, 25.0)
dx.rdr_rate_dps = actuator_model(u.rdr_cmd_deg, rdr_deg, 60, 30.0)
return dx
def trim_actuators(x, u):
"""
This function sets the actuator output to the actuator command.
"""
x.power = tables['tgear'](u.thtl)
x.ail_deg = u.ail_cmd_deg
x.elv_deg = u.elv_cmd_deg
x.rdr_deg = u.rdr_cmd_deg
return x
def trim_cost(dx: StateDot):
"""
Computes the trim cost based on the state derivative.
"""
return dx.VT_dot**2 + \
100*(dx.alpha_dot**2 + dx.beta_dot**2) + \
10*(dx.P_dot**2 + dx.Q_dot**2 + dx.R_dot**2)
class StateSpace:
"""
A convenience class for create state space representations
easily and for creating subsystems based on the state names.
The class keeps track of the state, input, and output vector
component names.
"""
def __init__(self, A, B, C, D, x, u, y=None, dt=None):
self.A = np.array(A)
self.B = np.array(B)
self.C = np.array(C)
self.D = np.array(D)
self.dt = dt
self.x = {xi: i for i, xi in enumerate(x)}
self.u = {ui: i for i, ui in enumerate(u)}
if y is None:
y = x
self.y = {yi: i for i, yi in enumerate(y)}
def sub_system(self, x, u, y=None):
xi = np.array([self.x[state] for state in x])
ui = np.array([self.u[inp] for inp in u])
if y is None:
y = x
yi = np.array([self.y[out] for out in y])
A = self.A[xi].T[xi].T
B = self.B[xi].T[ui].T
C = self.C[yi].T[xi].T
D = self.D[yi].T[ui].T
return StateSpace(A, B, C, D, x, u, y, self.dt)
def to_control(self):
if self.dt is None:
return control.ss(self.A, self.B, self.C, self.D)
else:
return control.ss(self.A, self.B, self.C, self.D, self.dt)
def __str__(self):
return 'A:\n{:s}\nB:\n{:s}\nC:\n{:s}\nD:\n{:s}\ndt:{:s}\nx:{:s}\nu:{:s}\ny:{:s}'.format(
str(self.A), str(self.B), str(self.C), str(self.D),
str(self.dt), str(self.x), str(self.u), str(self.y))
__repr__ = __str__
def linearize(x0, u0, p0):
"""
    A function to perform linearization of the f16 model
Parameters:
x0: state
u0: input
p0: parameters
Returns:
StateSpace: linearized system
"""
x0 = x0.to_casadi()
    u0 = u0.to_casadi()
x_sym = ca.MX.sym('x', x0.shape[0])
u_sym = ca.MX.sym('u', u0.shape[0])
x = State.from_casadi(x_sym)
u = Control.from_casadi(u_sym)
dx = dynamics(x, u, p0)
A = ca.jacobian(dx.to_casadi(), x_sym)
B = ca.jacobian(dx.to_casadi(), u_sym)
f_A = ca.Function('A', [x_sym, u_sym], [A])
f_B = ca.Function('B', [x_sym, u_sym], [B])
A = f_A(x0, u0)
B = f_B(x0, u0)
n = A.shape[0]
p = B.shape[1]
C = np.eye(n)
D = np.zeros((n, p))
return StateSpace(A=A, B=B, C=C, D=D,
x=[f.name for f in x.fields()],
u=[f.name for f in u.fields()],
y=[f.name for f in x.fields()])
def trim(x: State, p: Parameters,
phi_dot: float, theta_dot: float, psi_dot: float, gam: float, s0: np.array = None):
"""
Trims the aircraft at the given conditions.
Parameters:
x: vehicle state
p: parameters
phi_dot: Body321 roll rate
theta_dot: Body321 pitch rate
psi_dot: Body321 yaw rate
        gam: flight path angle, (rad)
        s0: the initial guess for the trim design vector
Returns:
State: x0
Control: u0
"""
if s0 is None:
s0 = np.zeros(6)
def constrain(x, s):
u = Control(thtl=s[0], elv_cmd_deg=s[1], ail_cmd_deg=s[2], rdr_cmd_deg=s[3])
alpha = s[4]
beta = s[5]
x = trim_actuators(x, u)
x.alpha = alpha
x.beta = beta
cos = ca.cos
sin = ca.sin
tan = ca.tan
atan = ca.arctan
sqrt = ca.sqrt
VT = x.VT
g = p.g
G = psi_dot*VT/g
a = 1 - G*tan(alpha)*sin(beta)
b = sin(gam)/cos(beta)
c = 1 + G**2*cos(beta)**2
# coordinated turn constraint pg. 188
phi = atan(G*cos(beta)/cos(alpha) *
((a - b**2) + b*tan(alpha)*sqrt(c*(1 - b**2) + G**2*sin(beta)**2))
/ (a**2 - b**2*(1 + c*tan(alpha)**2)))
x.phi = phi
# rate of climb constraint pg. 187
a = cos(alpha)*cos(beta)
b = sin(phi)*sin(beta) + cos(phi)*sin(alpha)*cos(beta)
        theta = atan((a*b + sin(gam)*sqrt(a**2 - sin(gam)**2 + b**2))
                     / (a**2 - sin(gam)**2))
x.theta = theta
# kinematics pg. 20
x.P = phi_dot - sin(theta)*psi_dot
x.Q = cos(phi)*phi_dot + sin(phi)*cos(theta)*psi_dot
x.R = -sin(phi)*theta_dot + cos(phi)*cos(theta)*psi_dot
return x, u
s = ca.MX.sym('s', 6)
x, u = constrain(x, s)
f = trim_cost(dynamics(x, u, p))
nlp = {'x': s, 'f': f}
S = ca.nlpsol('S', 'ipopt', nlp, {
'print_time': 0,
'ipopt': {
'sb': 'yes',
'print_level': 0,
}
})
r = S(x0=s0, lbg=0, ubg=0)
s_opt = r['x']
x, u = constrain(x, s_opt)
return x, u
def simulate(x0: State, f_control, p: Parameters, t0: float, tf: float, dt: float):
"""
Simulate the aircraft for a given control function and initial state.
Parameters:
x0: initial state (see State)
f_control: A function of the form f(t, x), which returns the control u
p: Aircraft parameters
t0: initial time
        tf: final time
        dt: The discrete sampling time of the controller.
    Returns:
        dict: time history with keys 't' (sample times) and 'x' (state vectors)
    """
xs = ca.MX.sym('x', 16)
x = State.from_casadi(xs)
us = ca.MX.sym('u', 4)
u = Control.from_casadi(us)
dae = {'x': xs, 'p': us, 'ode': dynamics(x, u, p).to_casadi()}
F = ca.integrator('F', 'idas', dae, {'t0': 0, 'tf': dt, 'jit': True})
x = np.array(x0.to_casadi()).reshape(-1)
u0 = f_control(t0, x0)
u = np.array(u0.to_casadi()).reshape(-1)
data = {
't': [0],
'x': [x]
}
t_vect = np.arange(t0, tf, dt)
for t in t_vect:
u0 = f_control(t, x)
u = np.array(u0.to_casadi()).reshape(-1)
x = np.array(F(x0=x, p=u)['xf']).reshape(-1)
data['t'].append(t)
data['x'].append(x)
for k in data.keys():
data[k] = np.array(data[k])
return data
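# Hedged usage sketch (not part of the original module); it mirrors the flow exercised in
# test_f16.py: trim at a flight condition, linearize about the trim point, then simulate.
if __name__ == "__main__":
    p0 = Parameters()
    x0, u0 = trim(x=State(VT=502), p=p0, phi_dot=0, theta_dot=0, psi_dot=0, gam=0)
    sys = linearize(x0, u0, p0)
    print(sys.sub_system(['VT', 'alpha', 'Q', 'elv_deg'], ['elv_cmd_deg'], ['alpha', 'Q']))
    data = simulate(x0=x0, f_control=lambda t, x: u0, p=p0, t0=0, tf=1, dt=0.01)
    print('simulated {:d} steps'.format(len(data['t'])))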
```
#### File: jgoppert/-casadi_f16/test_f16.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pathlib
import f16
import casadi as ca
import pytest
from casadi.tools.graph import graph
import os
TRIM_TOL = 1e-5
def plot_table2D(title, path, x_grid, y_grid, x_label, y_label, f_table):
X, Y = np.meshgrid(x_grid, y_grid)
Z = np.zeros((len(x_grid), len(y_grid)))
for i, x in enumerate(x_grid):
for j, y in enumerate(y_grid):
Z[i, j] = f_table(x, y)
plt.figure()
plt.contourf(X, Y, Z.T, levels=20)
plt.colorbar()
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.savefig(path.joinpath('{:s}.png'.format(title)))
plt.close()
def test_tables():
alpha_deg_grid = np.linspace(-15, 50, 20)
beta_deg_grid = np.linspace(-35, 35, 20)
elev_deg_grid = np.linspace(-30, 30, 20)
ail_deg_grid = np.linspace(-30, 30, 20)
mach_grid = np.linspace(0, 1.1, 20)
alt_grid = np.linspace(-1e4, 6e4, 20)
path = pathlib.Path('results')
path.mkdir(parents=True, exist_ok=True)
tables = f16.tables
plot_table2D('Cl', path, alpha_deg_grid, beta_deg_grid, 'alpha_deg', 'beta_deg', tables['Cl'])
plot_table2D('Cm', path, alpha_deg_grid, elev_deg_grid, 'alpha_deg', 'elev_deg', tables['Cm'])
plot_table2D('Cn', path, alpha_deg_grid, beta_deg_grid, 'alpha_deg', 'beta_deg', tables['Cn'])
plot_table2D('Cx', path, alpha_deg_grid, elev_deg_grid, 'alpha_deg', 'elev_deg', tables['Cx'])
plot_table2D('Cy', path, beta_deg_grid, ail_deg_grid, 'beta_deg', 'ail_deg',
lambda x, y: tables['Cy'](x, y, 0))
plot_table2D('Cz', path, alpha_deg_grid, beta_deg_grid, 'alpha_deg', 'beta_deg',
lambda x, y: tables['Cz'](x, y, 0))
plot_table2D('thrust_idle', path, alt_grid, mach_grid, 'alt, ft', 'mach', tables['thrust_idle'])
plot_table2D('thrust_mil', path, alt_grid, mach_grid, 'alt, ft', 'mach', tables['thrust_mil'])
plot_table2D('thrust_max', path, alt_grid, mach_grid, 'alt, ft', 'mach', tables['thrust_max'])
plt.figure()
lift = []
for alpha in alpha_deg_grid:
lift.append(-tables['Cz'](alpha, 0, 0))
plt.plot(alpha_deg_grid, lift)
plt.xlabel('alpha, deg')
plt.ylabel('CL')
plt.savefig(path.joinpath('CL.png'))
plt.close()
plt.figure()
plot_table2D('amach', path, np.linspace(0, 1000), np.linspace(0, 60000), 'VT, ft/s', 'alt, ft', tables['amach'])
plt.close()
names = ['CXq', 'CYr', 'CYp', 'CZq', 'Clr', 'Clp', 'Cmq', 'Cnr', 'Cnp']
for name in names:
plt.figure()
data = [tables[name](alpha) for alpha in alpha_deg_grid]
plt.plot(alpha_deg_grid, data)
plt.xlabel('alpha, deg')
plt.ylabel(name)
plt.savefig(path.joinpath('damp_{:s}.png'.format(name)))
plt.close()
def test_jacobian():
x_sym = ca.MX.sym('x', 16)
u_sym = ca.MX.sym('u', 4)
x = f16.State.from_casadi(x_sym)
u = f16.Control.from_casadi(u_sym)
p = f16.Parameters()
dx = f16.dynamics(x, u, p)
A = ca.jacobian(dx.to_casadi(), x_sym)
B = ca.jacobian(dx.to_casadi(), u_sym)
f_A = ca.Function('A', [x_sym, u_sym], [A])
f_B = ca.Function('B', [x_sym, u_sym], [B])
print('A', f_A(np.ones(16), np.ones(4)))
print('B', f_B(np.ones(16), np.ones(4)))
def test_trim1():
# pg 197
p = f16.Parameters()
x = f16.State(VT=502, alpha=0.03691, theta=0.03691)
u = f16.Control(thtl=0.1385, elv_cmd_deg=-0.7588)
x = f16.trim_actuators(x, u)
dx = f16.dynamics(x, u, p)
print(dx)
assert f16.trim_cost(dx) < TRIM_TOL
def test_trim2():
# pg 197
p = f16.Parameters(xcg=0.3)
x = f16.State(VT=502, alpha=0.03936, theta=0.03936)
u = f16.Control(thtl=0.1485, elv_cmd_deg=-1.931)
x = f16.trim_actuators(x, u)
x.power = f16.tables['tgear'](u.thtl)
dx = f16.dynamics(x, u, p)
print(dx)
assert f16.trim_cost(dx) < TRIM_TOL
def test_trim3():
# pg 197
p = f16.Parameters(xcg=0.38)
x = f16.State(VT=502, alpha=0.03544, theta=0.03544)
u = f16.Control(thtl=0.1325, elv_cmd_deg=-0.0559)
x = f16.trim_actuators(x, u)
dx = f16.dynamics(x, u, p)
assert f16.trim_cost(dx) < TRIM_TOL
def test_trim4():
# pg 197
p = f16.Parameters(xcg=0.3)
# psi_dot = 0.3
x = f16.State(VT=502, alpha=0.2485, beta=4.8e-4, phi=1.367, theta=0.05185,
P=-0.0155, Q=0.2934, R=0.06071)
u = f16.Control(
thtl=0.8499, elv_cmd_deg=-6.256,
ail_cmd_deg=0.09891, rdr_cmd_deg=-0.4218)
x = f16.trim_actuators(x, u)
dx = f16.dynamics(x, u, p)
print(dx)
assert f16.trim_cost(dx) < TRIM_TOL
def test_trim5():
# pg 197
p = f16.Parameters(xcg=0.3) # listed as -0.3, must be typo
# theta_dot = 0.3
x = f16.State(VT=502, alpha=0.3006, beta=4.1e-5, theta=0.3006, Q=0.3)
u = f16.Control(
thtl=1.023, elv_cmd_deg=-7.082,
ail_cmd_deg=-6.2e-4, rdr_cmd_deg=0.01655)
x = f16.trim_actuators(x, u)
dx = f16.dynamics(x, u, p)
print(dx)
assert f16.trim_cost(dx) < 2e-2 # doesn't converge as close
def test_trim6():
# pg 195
p = f16.Parameters()
x = f16.State(VT=502, alpha=2.392628e-1, beta=5.061803e-4,
phi=1.366289, theta=5.000808e-2, psi=2.340769e-1,
P=-1.499617e-2, Q=2.933811e-1, R=6.084932e-2,
p_N=0, p_E=0, alt=0, power=6.412363e1)
u = f16.Control(thtl=8.349601e-1, elv_cmd_deg=-1.481766,
ail_cmd_deg=9.553108e-2, rdr_cmd_deg=-4.118124e-1)
x = f16.trim_actuators(x, u)
dx = f16.dynamics(x, u, p)
print(dx)
assert f16.trim_cost(dx) < TRIM_TOL
def test_trim_and_linearize():
p = f16.Parameters()
x = f16.State(VT=502)
x0, u0 = f16.trim(x=x, p=p, phi_dot=0, theta_dot=0, psi_dot=0, gam=0)
dx = f16.dynamics(x0, u0, p)
assert f16.trim_cost(dx) < TRIM_TOL
print(dx)
sys = f16.linearize(x0, u0, p)
sys.sub_system(['VT', 'elv_deg', 'alpha', 'Q'], ['elv_cmd_deg'], ['alpha', 'Q'])
print(sys)
ss = sys.to_control()
def test_table_3_5_2():
# pg 187
p = f16.Parameters(xcg=0.4)
x = f16.State(
VT=500, alpha=0.5, beta=-0.2,
phi=-1, theta=1, psi=-1,
P=0.7, Q=-0.8, R=0.9,
p_N=1000, p_E=900, alt=10000)
u = f16.Control(
thtl=0.9, elv_cmd_deg=20,
ail_cmd_deg=-15, rdr_cmd_deg=-20)
x = f16.trim_actuators(x, u)
x.power = 90
dx = f16.dynamics(x, u, p)
dx_compute = np.array(dx.to_casadi())[:, 0]
dx_check = np.array([
-75.23724, -0.8813491, -0.4759990,
2.505734, 0.3250820, 2.145926,
12.62679, 0.9649671, 0.5809759,
342.4439, -266.7707, 248.1241, -58.68999, 0, 0, 0
])
print('\nexpected:\n\t', dx_check)
print('\nactual:\n\t', dx_compute)
print('\nerror:\n\t', dx_check - dx_compute)
assert np.allclose(dx_compute, dx_check, 1e-3)
def test_simulate():
f_control = lambda t, x: f16.Control()
f16.simulate(x0=f16.State(VT=502), f_control= f_control,
p=f16.Parameters(), t0=0, tf=10, dt=0.01)
```
|
{
"source": "jgoppert/fsm",
"score": 3
}
|
#### File: fsm/fsm/fsm.py
```python
from __future__ import print_function
import sys
import antlr4
from generated.FsmLexer import FsmLexer
from generated.FsmParser import FsmParser
from generated.FsmListener import FsmListener
import argparse
class KeyPrinter(FsmListener):
"Simple example"
def exitFsm_state(self, ctx):
"print msg when leaving state"
print("leaving state")
def main(argv):
"The main function"
parser = argparse.ArgumentParser()
parser.add_argument('filename')
    args = parser.parse_args(argv[1:])  # use the argv passed to main rather than the implicit sys.argv
text = antlr4.FileStream(args.filename)
lexer = FsmLexer(text)
stream = antlr4.CommonTokenStream(lexer)
parser = FsmParser(stream)
tree = parser.fsm_main()
print(tree.toStringTree(recog=parser))
printer = KeyPrinter()
walker = antlr4.ParseTreeWalker()
walker.walk(printer, tree)
if __name__ == '__main__':
main(sys.argv)
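# Example invocation (a sketch; the .fsm file name is hypothetical):
#   python fsm.py my_machine.fsm
# This prints the parse tree and a "leaving state" line for each fsm_state exited.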
# vim: set et ft=python fenc=utf-8 ff=unix sts=0 sw=4 ts=4 :
```
|
{
"source": "jgoppert/iekf_analysis",
"score": 3
}
|
#### File: jgoppert/iekf_analysis/data.py
```python
from transforms3d.taitbryan import quat2euler
import matplotlib.pyplot as plt
import numpy as np
from util import X, Xe
class Data(object):
"""
Data object for sim data
"""
def __init__(self):
self.x = []
self.J = []
self.K_mag = []
self.K_gps = []
self.K_accel = []
self.K_lidar = []
self.K_baro = []
self.mag_fault = []
self.gps_fault = []
self.accel_fault = []
self.lidar_fault = []
self.baro_fault = []
self.dx = []
self.xh = []
self.y = []
self.Jh = []
self.t = []
self.P = []
        self.euler = None
        self.euler_est = None
        self.agl = None
        self.agl_est = None
def finalize(self):
"""
Turn lists to arrays, prepare for plotting
"""
data = self.__dict__
for key in data:
data[key] = np.array(data[key])
try:
self.euler = np.array([quat2euler(qi)
for qi in self.x[:, X.q_nb_0: X.q_nb_3 + 1]])
self.euler_est = np.array([
quat2euler(qi)
for qi in self.xh[:, X.q_nb_0: X.q_nb_3 + 1]])
self.agl_est = self.xh[:, X.terrain_alt] - (-self.xh[:, X.pos_D])
self.agl = self.x[:, X.terrain_alt] - (-self.x[:, X.pos_D])
except IndexError as e:
print(e)
def __repr__(self):
return repr(self.__dict__)
def plot_est(self, i, color, name):
plt.plot(self.t, self.xh[:, i], color + '-', label=name + '-est')
plt.plot(self.t, self.x[:, i], color + '--', label=name)
def plot_est_stddev(self, i, i_error, color, name):
plt.plot(self.t,
self.xh[:, i] + np.sqrt(self.P[:, i_error]), color + '-.')
plt.plot(self.t,
self.xh[:, i] - np.sqrt(self.P[:, i_error]), color + '-.')
def analysis(self):
"""
Show plots of data
"""
plt.rcParams['lines.linewidth'] = 2
plt.figure(figsize=(15, 10))
plt.title('euler angles')
plt.plot(self.t,
np.rad2deg(self.euler_est[:, 2]), 'r-', label='roll-est')
plt.plot(self.t, np.rad2deg(self.euler[:, 2]), 'r--', label='roll')
plt.plot(self.t,
np.rad2deg(self.euler_est[:, 1]), 'g-', label='pitch-est')
plt.plot(self.t, np.rad2deg(self.euler[:, 1]), 'g--', label='pitch')
plt.plot(self.t,
np.rad2deg(self.euler_est[:, 0]), 'b-', label='yaw-est')
plt.plot(self.t, np.rad2deg(self.euler[:, 0]), 'b--', label='yaw')
plt.legend(loc='best', ncol=3)
plt.xlabel('t, sec')
plt.ylabel('deg')
plt.grid()
plt.figure(figsize=(15, 10))
plt.subplot(221)
plt.title('position')
self.plot_est(X.pos_N, 'r', 'N')
self.plot_est(X.pos_E, 'g', 'E')
self.plot_est(X.pos_D, 'b', 'D')
axis = plt.gca().axis()
self.plot_est_stddev(X.pos_N, Xe.pos_N, 'r', 'N')
self.plot_est_stddev(X.pos_E, Xe.pos_E, 'g', 'E')
self.plot_est_stddev(X.pos_D, Xe.pos_D, 'b', 'D')
plt.axis(axis)
plt.legend(loc='best', ncol=3)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(222)
plt.title('velocity')
self.plot_est(X.vel_N, 'r', 'N')
self.plot_est(X.vel_E, 'g', 'E')
self.plot_est(X.vel_D, 'b', 'D')
axis = plt.gca().axis()
self.plot_est_stddev(X.vel_N, Xe.vel_N, 'r', 'N')
self.plot_est_stddev(X.vel_E, Xe.vel_E, 'g', 'E')
self.plot_est_stddev(X.vel_D, Xe.vel_D, 'b', 'D')
plt.axis(axis)
plt.legend(loc='best', ncol=3)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(223)
plt.title('gyro bias')
self.plot_est(X.gyro_bias_bx, 'r', 'X')
self.plot_est(X.gyro_bias_by, 'g', 'Y')
self.plot_est(X.gyro_bias_bz, 'b', 'Z')
axis = plt.gca().axis()
self.plot_est_stddev(X.gyro_bias_bx, Xe.gyro_bias_bx, 'r', 'X')
self.plot_est_stddev(X.gyro_bias_by, Xe.gyro_bias_by, 'g', 'Y')
self.plot_est_stddev(X.gyro_bias_bz, Xe.gyro_bias_bz, 'b', 'Z')
plt.axis(axis)
plt.legend(loc='best', ncol=3)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(224)
plt.title('accel scale')
self.plot_est(X.accel_scale, 'r', '')
axis = plt.gca().axis()
self.plot_est_stddev(X.accel_scale, Xe.accel_scale, 'r', '')
plt.axis(axis)
plt.xlabel('t, sec')
plt.grid()
plt.figure(figsize=(15, 10))
plt.subplot(221)
plt.title('agl')
plt.plot(self.t, self.agl, '--')
plt.plot(self.t, self.agl_est, '-')
plt.xlabel('t, sec')
plt.grid()
plt.figure(figsize=(15, 10))
plt.subplot(221)
plt.title('terrain alt')
self.plot_est(X.terrain_alt, 'b', '')
axis = plt.gca().axis()
self.plot_est_stddev(X.terrain_alt, Xe.terrain_alt, 'r', '')
plt.axis(axis)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(222)
plt.title('baro bias')
self.plot_est(X.baro_bias, 'b', '')
axis = plt.gca().axis()
self.plot_est_stddev(X.baro_bias, Xe.baro_bias, 'r', '')
plt.axis(axis)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(223)
plt.title('Invariants')
plt.plot(self.t, self.J, '--')
plt.gca().set_prop_cycle(None)
plt.plot(self.t, self.Jh, '-')
plt.xlabel('t, sec')
plt.grid()
plt.figure(figsize=(15, 5))
plt.title('rotation std dev.')
plt.plot(self.t, np.rad2deg(np.sqrt(self.P[:, Xe.rot_bx])), label='N')
plt.plot(self.t, np.rad2deg(np.sqrt(self.P[:, Xe.rot_by])), label='E')
plt.plot(self.t, np.rad2deg(np.sqrt(self.P[:, Xe.rot_bz])), label='D')
plt.xlabel('t, sec')
plt.ylabel('deg')
plt.legend(loc='best', ncol=3)
plt.grid()
plt.figure(figsize=(15, 15))
plt.subplot(321)
plt.title('K mag')
plt.plot(self.t, self.K_mag)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(322)
plt.title('K gps')
plt.plot(self.t, self.K_gps)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(323)
plt.title('K accel')
plt.plot(self.t, self.K_accel)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(324)
plt.title('K_baro')
plt.plot(self.t, self.K_baro)
plt.xlabel('t, sec')
plt.grid()
plt.subplot(325)
plt.title('K_lidar')
plt.plot(self.t, self.K_lidar)
plt.xlabel('t, sec')
plt.grid()
plt.figure(figsize=(15, 5))
plt.title('faults')
plt.plot(self.t, self.lidar_fault, label='lidar', alpha=0.5)
plt.plot(self.t, self.accel_fault, label='accel', alpha=0.5)
plt.plot(self.t, self.mag_fault, label='mag', alpha=0.5)
plt.plot(self.t, self.gps_fault, label='gps', alpha=0.5)
plt.plot(self.t, self.baro_fault, label='baro', alpha=0.5)
plt.gca().set_ylim([-1, 2])
plt.xlabel('t, sec')
plt.legend(loc='best', ncol=3)
plt.grid()
```
#### File: jgoppert/iekf_analysis/riekf.py
```python
from transforms3d.quaternions import rotate_vector, qinverse, qmult
from transforms3d.taitbryan import euler2mat
import numpy as np
from util import BETA_TABLE, skew, vect2quat, \
X, Y_accel, Y_gps, Y_mag, Y_baro, Y_lidar, params, U, Xe
class RIEKF(object):
"""
Right invariant extended kalman filter
source: https://hal.archives-ouvertes.fr/hal-00494342/document
TODO: handle mag correction only in yaw
"""
def __init__(self, x0, P0, Q):
self.x = x0
self.P0 = P0
self.K_accel = np.zeros((Xe.n, Y_accel.n))
self.K_mag = np.zeros((Xe.n, Y_mag.n))
self.K_gps = np.zeros((Xe.n, Y_gps.n))
self.K_baro = np.zeros((Xe.n, Y_baro.n))
self.K_lidar = np.zeros((Xe.n, Y_lidar.n))
self.P = P0
self.Q = Q
self.g_n = [0, 0, -9.8]
self.set_mag_field(1.0, 0)
self.J = self.compute_invariants(x0, np.zeros(6))
self.accel_fault = False
self.mag_fault = False
self.gps_fault = False
self.baro_fault = False
self.lidar_fault = False
def set_mag_field(self, incl, decl):
"""
Set magnetic field vector fixed in navigation frame
"""
self.B_n = euler2mat(decl, -incl, 0).dot([1, 0, 0])
@staticmethod
def compute_invariants(x, u):
"""
Compute the invariant vector
"""
q_nb = x[:4]
# V_n = x[4:7]
gb_b = x[7:10]
as_b = x[10]
omega_nb_b = u[:3]
a_b = u[3:6]
J_omega_n = rotate_vector(omega_nb_b - gb_b, q_nb)
J_a_n = rotate_vector(a_b/as_b, q_nb)
return np.hstack([J_omega_n, J_a_n])
def compute_dx(self, K, r):
"""
Calculate non-linear correction
"""
x = self.x
q_nb = x[X.q_nb_0:X.q_nb_3 + 1]
a_s = x[X.accel_scale]
q_bn = qinverse(q_nb)
K_q = K[Xe.rot_bx:Xe.rot_bz + 1, :]
K_vel = K[Xe.vel_N:Xe.vel_D + 1, :]
K_omega = K[Xe.gyro_bias_bx:Xe.gyro_bias_bz + 1, :]
K_a = K[Xe.accel_scale, :]
K_pos = K[Xe.pos_N:Xe.pos_D + 1, :]
K_terrain_alt = K[Xe.terrain_alt, :]
K_baro_bias = K[Xe.baro_bias, :]
dx = np.zeros(X.n)
# TODO correct if observable
# r_norm = np.linalg.norm(K_q.dot(r))
# if r_norm > 0.1:
# print('r_norm', r_norm)
dx[X.q_nb_0: X.q_nb_3 + 1] = qmult(vect2quat(K_q.dot(r)), q_nb)
dx[X.vel_N: X.vel_D + 1] = K_vel.dot(r)
dx[X.gyro_bias_bx: X.gyro_bias_bz + 1] = \
rotate_vector(K_omega.dot(r), q_bn)
dx[X.accel_scale] = a_s*K_a.dot(r)
dx[X.pos_N: X.pos_D + 1] = K_pos.dot(r)
dx[X.terrain_alt] = K_terrain_alt.dot(r)
dx[X.baro_bias] = K_baro_bias.dot(r)
return dx
def kalman_correct(self, name, r, H, R):
"""
Calculate kalman gain and apply correction to state and covariance
"""
P = self.P
S = H.dot(P).dot(H.T) + R
fault = False
# fault threshold
beta = r.T.dot(np.linalg.inv(S)).dot(r)
if beta > BETA_TABLE[len(r)]:
# print('fault', name, beta)
fault = True
K = P.dot(H.T).dot(np.linalg.inv(S))
dx = self.compute_dx(K, r)
dP = -K.dot(H).dot(P)
return dx, dP, K, fault
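        # Summary of the update implemented above (standard EKF form):
        #   S = H P H^T + R        innovation covariance
        #   beta = r^T S^-1 r      normalized innovation, gated against BETA_TABLE
        #   K = P H^T S^-1         Kalman gain
        #   dP = -K H P            covariance decrement applied by the caller via set_P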
def set_P(self, P):
"""
set P and check bounds
"""
self.P = P
self.bound_P()
def set_x(self, x):
"""
set x and check bounds
"""
self.x = x
self.bound_x()
def correct_baro(self, y_baro, dt):
"""
Perform correction step
"""
x = self.x
# measurement matrix
H = np.zeros((Y_baro.n, Xe.n))
H[Y_baro.asl, Xe.pos_D] = -1
H[Y_baro.asl, Xe.baro_bias] = 1
# measurement covariance matrix
R = params['BARO_Z']**2*np.eye(Y_baro.n)/dt
# residual
yh = -x[X.pos_D] + x[X.baro_bias]
r = np.array([y_baro - yh])
# kalman correction
dx, dP, self.K_baro, self.baro_fault = \
self.kalman_correct('baro', r, H, R)
self.set_P(self.P + dP)
self.set_x(self.x + dx)
def correct_mag(self, y_B_b, dt):
"""
Perform correction step
"""
y_B_b_unit = y_B_b/np.linalg.norm(y_B_b)
B_n_unit = self.B_n/np.linalg.norm(self.B_n)
x = self.x
q_nb = x[X.q_nb_0:X.q_nb_3 + 1]
# measurement matrix
H = np.zeros((Y_mag.n, Xe.n))
H[Y_mag.nx:Y_mag.nz + 1, Xe.rot_bx:Xe.rot_bz + 1] = 2*skew(B_n_unit)
# measurement covariance matrix
R = np.diag([params['MAG_NE'],
params['MAG_NE'],
params['MAG_D']])**2/dt
# residual
r = rotate_vector(y_B_b_unit, q_nb) - B_n_unit
# kalman correction
dx, dP, self.K_mag, self.mag_fault = \
self.kalman_correct('mag', r, H, R)
self.set_P(self.P + dP)
self.set_x(self.x + dx)
def correct_accel(self, y_A_b, dt):
"""
Perform correction step
"""
x = self.x
q_nb = x[X.q_nb_0:X.q_nb_3 + 1]
a_s = x[X.accel_scale]
# measurement matrix
# TODO check math
H = np.zeros((Y_accel.n, Xe.n))
H[Y_accel.bx:Y_accel.bz + 1,
Xe.rot_bx:Xe.rot_bz + 1] = 2*skew(self.g_n)
# measurement covariance matrix
R = params['ACC_C']**2*np.eye(3)/dt
# residual
r = rotate_vector(y_A_b/a_s, q_nb) - self.g_n
# kalman correction
dx, dP, self.K_accel, self.accel_fault = \
self.kalman_correct('accel', r, H, R)
self.set_P(self.P + dP)
self.set_x(self.x + dx)
def correct_gps(self, y_gps, dt):
"""
Perform correction step
"""
x = self.x
p_n = x[X.pos_N: X.pos_D + 1]
V_n = x[X.vel_N: X.vel_D + 1]
I3 = np.eye(3)
# measurement matrix
H = np.zeros((Y_gps.n, Xe.n))
H[Y_gps.pos_N:Y_gps.pos_D + 1, Xe.pos_N:Xe.pos_D + 1] = I3
H[Y_gps.vel_N:Y_gps.vel_D + 1, Xe.vel_N:Xe.vel_D + 1] = I3
# measurement covariance matrix
R = np.diag([params['GPS_XY'],
params['GPS_XY'],
params['GPS_Z'],
params['GPS_VXY'],
params['GPS_VXY'],
params['GPS_VZ']])**2/dt
# residual
r = y_gps - np.hstack([p_n, V_n])
# kalman correction
dx, dP, self.K_gps, self.gps_fault = \
self.kalman_correct('gps', r, H, R)
self.set_P(self.P + dP)
self.set_x(self.x + dx)
def correct_lidar(self, y_lidar, dt):
"""
Perform correction step
"""
x = self.x
# measurement matrix
# d = -pos_d - terrain_alt
H = np.zeros((Y_lidar.n, Xe.n))
H[Y_lidar.d, Xe.pos_D] = -1
H[Y_lidar.d, Xe.terrain_alt] = -1
# measurement covariance matrix
R = params['LDR_Z']**2*np.eye(Y_lidar.n)/dt
# residual
yh = -x[X.pos_D] - x[X.terrain_alt]
r = np.array([y_lidar - yh])
# kalman correction
dx, dP, self.K_lidar, self.lidar_fault = \
self.kalman_correct('lidar', r, H, R)
self.set_P(self.P + dP)
self.set_x(self.x + dx)
def dynamics(self, x, u):
"""
Calculate state derivative
"""
q_nb = x[X.q_nb_0:X.q_nb_3 + 1]
V_n = x[X.vel_N:X.vel_D + 1]
gb_b = x[X.gyro_bias_bx:X.gyro_bias_bz + 1]
as_b = x[X.accel_scale]
omega_nb_b = u[U.omega_nb_bx:U.omega_nb_bz + 1]
a_b = u[U.a_bx:U.a_bz + 1]
# q_bn = qinverse(q_nb)
dq_nb = 0.5*qmult(q_nb, vect2quat(omega_nb_b - gb_b))
dV_n = rotate_vector(a_b/as_b, q_nb) - self.g_n
dgb_b = [0, 0, 0]
das_b = [0]
dp_n = V_n
dagl = 0
dbaro_bias = 0
return np.hstack([dq_nb, dV_n, dgb_b, das_b, dp_n,
dagl, dbaro_bias])
def predict(self, t, u, dt):
"""
Perform prediction step
"""
# normalize quaternion
q_nb_norm = np.linalg.norm(self.x[:4])
if abs(q_nb_norm - 1) > 1e-3:
self.x[:4] /= q_nb_norm
# print('renormalizing rief:', q_nb_norm, 't:', t)
# estimator predict
dx = self.dynamics(self.x, u)*dt
self.set_x(self.x + dx)
# compute invariants
self.J = self.compute_invariants(self.x, u)
# linearize
J_omega_n = self.J[:3]
J_a_n = np.array([self.J[3:6]]).T
I3 = np.eye(3)
# define A matrix
A = np.zeros((Xe.n, Xe.n))
# derivatives of rotation error
A[Xe.rot_bx:Xe.rot_bz + 1,
Xe.gyro_bias_bx:Xe.gyro_bias_bz + 1] = -0.5*I3
# derivative of velocity
A[Xe.vel_N:Xe.vel_D + 1,
Xe.rot_bx:Xe.rot_bz + 1] = -2*skew(J_a_n)
A[Xe.vel_N:Xe.vel_D + 1, Xe.accel_scale] = -J_a_n.reshape(-1)
# derivative of gyro bias
A[Xe.gyro_bias_bx:Xe.gyro_bias_bz + 1,
Xe.gyro_bias_bx:Xe.gyro_bias_bz + 1] = skew(J_omega_n)
# derivative of position
A[Xe.pos_N:Xe.pos_D + 1,
Xe.vel_N:Xe.vel_D + 1] = I3
# propagate covariance
P = self.P
Q = self.Q
dP = (A.dot(P) + P.dot(A.T) + Q)*dt
self.set_P(self.P + dP)
def bound_P(self):
"""
Constrain and bound P
"""
for i in range(self.P.shape[0]):
if self.P[i, i] < 0:
# print('P', i, i, '< 0, resetting', self.P[i, i])
                self.P[i, i] = 0
for j in range(i):
if not np.isfinite(self.P[i, j]):
print('P', i, j, ' is NaN, resetting')
self.P[i, j] = self.P0[i, j]
return
# force symmetric
self.P[j, i] = self.P[i, j]
def bound_x(self):
"""
Constrain and bound x
"""
for i in range(X.gyro_bias_bx, X.gyro_bias_bz + 1):
if self.x[i] < -0.5:
self.x[i] = -0.5
if self.x[i] > 0.5:
self.x[i] = 0.5
if self.x[X.accel_scale] > 2:
self.x[X.accel_scale] = 2
if self.x[X.accel_scale] < 0.5:
self.x[X.accel_scale] = 0.5
```
#### File: jgoppert/iekf_analysis/util.py
```python
import numpy as np
BETA_TABLE = [
0,
8.82050518214,
12.094592431,
13.9876612368,
16.0875642296,
17.8797700658,
19.6465647819,
21.3802576894,
23.0806434845,
24.6673803845,
26.1487953661,
27.6350821245,
29.6565383703,
31.2211113844,
32.7673547211,
34.2967756977,
35.6906782236,
37.0724753352,
38.4549693067,
39.836592699,
]
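# BETA_TABLE is indexed by the residual dimension (len(r)) in RIEKF.kalman_correct and
# serves as the fault-detection gate on the normalized innovation beta = r^T S^-1 r;
# the derivation of the specific threshold values is not given in the source.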
params = {
'GYRO': 5e-3, # rad/s / sqrt(Hz)
'BARO_Z': 1.0, # m / sqrt(Hz)
'GPS_XY': 0.3, # m / sqrt(Hz)
'GPS_Z': 0.3, # m / sqrt(Hz)
'GPS_VXY': 0.03, # m/s / sqrt(Hz)
'GPS_VZ': 0.03, # m/s / sqrt(Hz)
'LDR_Z': 0.02, # m / sqrt(Hz)
'ACC': 5e-2, # m/s^2 / sqrt(Hz)
'ACC_C': 1, # m/s^2 / sqrt(Hz), weight for correction of acc dir
'MAG_NE': 1e-1, # 1 / sqrt(Hz)
'MAG_D': 1e6, # 1 / sqrt(Hz), large value prevents roll/pitch correction
}
class X(object):
"""
State enumeration, doesn't line up with state space due to
using quaternion instead of infinitesimal euler angles like error state
"""
q_nb_0 = 0
q_nb_1 = 1
q_nb_2 = 2
q_nb_3 = 3
vel_N = 4
vel_E = 5
vel_D = 6
gyro_bias_bx = 7
gyro_bias_by = 8
gyro_bias_bz = 9
accel_scale = 10
pos_N = 11
pos_E = 12
pos_D = 13
terrain_alt = 14
baro_bias = 15
n = 16
class Xe(object):
"""
State error enum, used for state-space for kalman filter
"""
rot_bx = 0
rot_by = 1
rot_bz = 2
vel_N = 3
vel_E = 4
vel_D = 5
gyro_bias_bx = 6
gyro_bias_by = 7
gyro_bias_bz = 8
accel_scale = 9
pos_N = 10
pos_E = 11
pos_D = 12
terrain_alt = 13
baro_bias = 14
n = 15
class U(object):
"""
Input (accel and gyro measurements)
"""
omega_nb_bx = 0
omega_nb_by = 1
omega_nb_bz = 2
a_bx = 3
a_by = 4
a_bz = 5
n = 6
class Y_accel(object):
"""
    Acceleration measurement in body frame
"""
bx = 0
by = 1
bz = 2
n = 3
class Y_gps(object):
"""
GPS measurement
"""
pos_N = 0
pos_E = 1
pos_D = 2
vel_N = 3
vel_E = 4
vel_D = 5
n = 6
class Y_baro(object):
"""
Y baro
"""
asl = 0
n = 1
class Y_mag(object):
"""
Magnetometer measurement
"""
nx = 0
ny = 1
nz = 2
n = 3
class Y_lidar(object):
"""
Lidar measurement
"""
d = 0
n = 1
class Timer(object):
"""
Event timer
"""
def __init__(self, t0, period):
self.t0 = t0
self.period = period
def ready(self, t):
"""
Returns if timer is ready
"""
if t - self.t0 > self.period:
self.t0 = t
return True
else:
return False
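# Example usage (a minimal sketch):
#   timer = Timer(t0=0.0, period=0.1)
#   timer.ready(0.05)   # False, period has not elapsed
#   timer.ready(0.25)   # True, and t0 is reset to 0.25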
def skew(v):
"""
Return skew (cross-product) matrix
"""
return np.array([
[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]])
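# For example (a quick sanity check):
#   skew([1, 2, 3]) == [[ 0, -3,  2],
#                       [ 3,  0, -1],
#                       [-2,  1,  0]]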
def vect2quat(v):
"""
Converts a vector to a quaternion with q0 = 0
"""
return np.hstack([[0], v])
# vim: set et fenc=utf-8 ff=unix sts=0 sw=4 ts=4 :
```
|
{
"source": "jgoppert/qualisys_ros",
"score": 2
}
|
#### File: qualisys_ros/scripts/image_combine.py
```python
import rospy
import message_filters
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import cv2
def callback(image_left, image_right):
left = bridge.imgmsg_to_cv2(image_left, "bgr8")
right = bridge.imgmsg_to_cv2(image_right, "bgr8")
img_concat = cv2.hconcat([left, right])
try:
pub_img.publish(bridge.cv2_to_imgmsg(img_concat, "bgr8"))
except CvBridgeError as e:
print(e)
rospy.init_node('image_combine')
pub_img = rospy.Publisher('vr_image', Image, queue_size=1)
bridge = CvBridge()
rospy.loginfo('image combine starting')
image_left = message_filters.Subscriber('/virtual_cam/left/image_raw', Image)
image_right = message_filters.Subscriber('/virtual_cam/right/image_raw', Image)
ts = message_filters.TimeSynchronizer([image_left, image_right], 1)
ts.registerCallback(callback)
rospy.spin()
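# This node is normally started under a running ROS master, e.g. via
# `rosrun qualisys_ros image_combine.py` (the exact package/launch setup is an assumption).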
```
|
{
"source": "jgoralcz/MGDB",
"score": 3
}
|
#### File: MGDB/insertion/Insert.py
```python
class Insert:
"""
INSERT IGNORE into the database.
"""
@staticmethod
def insert(table_name, tuple_str):
"""
        Build an INSERT IGNORE statement for the given table.
:param table_name: the table name.
:type: str
:param tuple_str: the parameters of the VALUES to enter.
:type: str
        :return: the INSERT IGNORE SQL string.
"""
return "INSERT IGNORE INTO %s VALUES(%s);\n" % (table_name, tuple_str)
```
|
{
"source": "jgoralcz/pas",
"score": 3
}
|
#### File: pas/src/db.py
```python
import json
from sqlalchemy import create_engine
import logging
log = logging.getLogger(__name__)
def get_database():
"""
gets the database connection
:return: None upon failure.
"""
try:
engine = get_connection_from_profile()
log.info('Connected to postgreSQL database.')
return engine
except IOError:
log.exception('Failed to get database connection.')
        return None
def get_connection_from_profile(config_file='config.json'):
"""
sets up database connection from config file.
:param config_file: the postgres config file containing host, user, password, database, port
:return:
"""
    with open(config_file) as json_data_file:
        data = json.load(json_data_file)
    # filter() is always truthy in Python 3; check for the required keys explicitly
    if not all(key in data['db'] for key in ['user', 'database', 'host', 'password', 'port']):
raise Exception('Bad config file: ' + config_file)
return get_engine(data['db'])
def get_engine(db):
"""
get SQLalchemy engine using credentials.
:param db: the database object with the user, database host, password, and port.
:return:
"""
# url = 'postgresql://{user}:{passwd}@{host}:{port}/{db}'.format(
# user=db['user'], passwd=db['password'], host=db['host'], port=db['port'], db=db['database']
# )
url = 'sqlite:///{db}'.format(db=db['database'])
engine = create_engine(url)
return engine
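# Example (a sketch; the database file name is hypothetical):
#   engine = get_engine({'database': 'pas.db'})   # builds the url 'sqlite:///pas.db'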
```
|
{
"source": "jgordo04/housinginsights_temp",
"score": 3
}
|
#### File: housinginsights/ingestion/CSVWriter.py
```python
from csv import DictWriter
import os
import copy
logging_path = os.path.abspath("../../logs")
class CSVWriter(object):
'''
Takes a row of data, plus the meta data about it, and creates a clean.csv file
    locally that can later be bulk-uploaded to the database.
'''
def __init__(self, meta, manifest_row, filename=None):
'''
meta: the parsed json from the meta data containing the format expected of each SQL table
manifest_row: a dictionary from manifest.csv for the source file currently being acted on
filename: optional, filename of where to write the data. default is current directory temp_{tablename}.csv
'''
self.manifest_row = manifest_row
self.tablename = manifest_row['destination_table']
self.unique_data_id = manifest_row['unique_data_id']
self.meta = meta
self.fields = meta[self.tablename]['fields']
#DictWriter needs a list of fields, in order, with the same key as the row dict
#sql_fields could be used in the header row. Currently not using because psycopg2 doesn't like it.
self.csv_fields = []
self.sql_fields = []
for field in self.fields:
self.csv_fields.append(field['source_name'])
self.sql_fields.append(field['sql_name'])
#We always want to append this to every table. write() should also append this to provided data
self.dictwriter_fields = copy.copy(self.csv_fields)
self.dictwriter_fields.append('unique_data_id')
self.sql_fields.append('unique_data_id')
#By default, creates a temp csv file wherever the calling module was located
self.filename = 'temp_{}.psv'.format(self.unique_data_id) if filename == None else filename
#remove any existing copy of the file so we are starting clean
self.remove_file()
#Using psycopg2 copy_from does not like having headers in the file. Commenting out
#self.file = open(self.filename, 'w', newline='')
#headerwriter = DictWriter(self.file, fieldnames = self.sql_fields, delimiter="|")
#headerwriter.writeheader()
#self.file.close()
#print("header written")
self.file = open(self.filename, 'a', newline='', encoding='utf-8')
self.writer = DictWriter(self.file, fieldnames=self.dictwriter_fields, delimiter="|")
def write(self, row):
row['unique_data_id'] = self.manifest_row['unique_data_id']
self.writer.writerow(row)
def open(self):
'''
Opens the file for writing. Normally called by init, but can be called
again by the user if they want to re-open the file for writing
'''
def close(self):
'''
Since we can't use a with statement in the object, it's the caller's
responsibility to manually close the file when they are done writing
'''
self.file.close()
#TODO should this be part of the __del__
def remove_file(self):
try:
os.remove(self.filename)
except OSError:
pass
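# Example usage (a sketch; meta and manifest_row are assumed to follow the meta.json and
# manifest.csv structures described in this package, and cleaned_rows is hypothetical):
#   writer = CSVWriter(meta, manifest_row)
#   for row in cleaned_rows:
#       writer.write(row)
#   writer.close()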
```
#### File: housinginsights/ingestion/DataReader.py
```python
from collections import Counter
from csv import DictReader
import csv
from os import path
import os
import logging
from urllib.request import urlretrieve
from urllib.request import urlopen
import codecs
from datetime import datetime
import dateutil.parser as dateparser
#DataReader edits (FYI for Walt):
# -Renamed DataReader to HIReader (housing insights reader); extended 2 versions of it (ManifestReader and DataReader)
#TODOs
#-convert relative path to full path when passed as argument
class HIReader(object):
"""
    Container object that reads in CSVs and provides them row-by-row through the __iter__ method.
Each object is associated with one specific file through the file path.
File can be local (path_type="file") or remote (path_type="s3"). Note, local files are preferred
when possible for faster processing time and lower bandwidth usage.
"""
def __init__(self, path, path_type="file", encoding="latin-1"):
self.path = path
self._length = None
self._keys = None
self.path_type = path_type
self.encoding = encoding
def __iter__(self):
self._length = 0
self._counter = Counter()
if self.path_type == "file":
with open(self.path, 'r', newline='', encoding=self.encoding) as data:
reader = DictReader(data)
self._keys = reader.fieldnames
for row in reader:
self._length += 1
yield row
elif self.path_type == "url":
ftpstream = urlopen(self.path)
reader = DictReader(codecs.iterdecode(ftpstream, 'latin1'))
self._keys = reader.fieldnames
for row in reader:
self._length += 1
yield row
else:
raise ValueError("Need a path_type")
#TODO add __next__ method for more general purpose use
#https://www.ibm.com/developerworks/library/l-pycon/
def __len__(self):
if self._length is None:
for row in self:
# Read data for length and counter
continue
return self._length
@property
def items(self):
        return self._counter.keys()
@property
def keys(self):
_it = iter(self)
next(_it)
return self._keys
def reset(self):
"""
In case it breaks in the middle of reading the file
:return:
"""
self._length = None
class ManifestReader(HIReader):
'''
Adds extra functions specific to manifest.csv. This is the class that
should be used to read the manifest and return it row-by-row.
'''
_include_flags_positive = ['use']
_include_flags_negative = ['pending', 'exclude', 'superseded']
def __init__(self, path='manifest.csv'):
super().__init__(path)
self.unique_ids = {} #from the unique_id column in the manifest
def __iter__(self):
self._length = 0
self._counter = Counter()
with open(self.path, 'r', newline='') as data:
reader = DictReader(data)
self._keys = reader.fieldnames
for row in reader:
self._length += 1
#parse the date into proper format for sql
try:
_date = dateparser.parse(row['data_date'],dayfirst=False, yearfirst=False)
row['data_date'] = datetime.strftime(_date, '%Y-%m-%d')
except ValueError:
row['data_date'] = 'Null'
#return the row
yield row
#Completed, test created
def has_unique_ids(self):
'''
Makes sure that every value in the manifest column 'unique_data_id' is in fact unique.
This makes sure that we can rely on this column to uniquely identify a source CSV file,
        and can connect a record in the SQL manifest to the manifest.csv
'''
self.unique_ids = {}
for row in self:
if row['unique_data_id'] in self.unique_ids:
return False
else:
#don't add flags that won't make it into the SQL database
if row['include_flag'] in ManifestReader._include_flags_positive:
self.unique_ids[row['unique_data_id']] = 'found'
return True
class DataReader(HIReader):
'''
Reads a specific data file. This file must be associated with a specific manifest_row,
which is the dictionary returned by the ManifestReader __iter__ method.
If load_from = "local", the file is loaded from the local file system using
the combo of 'local_folder' and 'filepath' from the manifest. When loading locally,
if the file does not exist __init__ will try to automatically download the file from S3.
If load_from = "s3" the file can be read directly from the web. This is available only for
the future adaptation of this to be used on an EC2 or Lambda job; when running locally, it
is recommended to use the "local" method and let the script download the file to disk automatically.
Users can also run the aws cli sync command before using this tool to get the data.
'''
def __init__(self, meta, manifest_row, load_from="local"):
self.meta = meta
self.manifest_row = manifest_row #a dictionary from the manifest
self.destination_table = manifest_row['destination_table']
if 'encoding' not in manifest_row:
logging.warning(" Warning: encoding not found in manifest. Falling back to latin-1.")
self.encoding = manifest_row.get('encoding', 'latin-1') # Defaults to latin-1 in case key not present in manifest.
self.load_from=load_from
self.s3_path = os.path.join(manifest_row['s3_folder'], manifest_row['filepath'].strip("\/")).replace("\\","/")
if load_from=="s3":
self.path = self.s3_path
self.path_type = "url"
else: #load from file
self.path = os.path.join(path.dirname(__file__), manifest_row['local_folder'], manifest_row['filepath'].strip("\/"))
self.path_type = "file"
if self.manifest_row['include_flag'] == 'use':
self.download_data_file()
self.not_found = [] #Used to log missing fields compared to meta data
super().__init__(self.path, self.path_type, self.encoding)
def validate_or_create_path(self):
root_path = os.path.abspath(os.path.dirname(self.path))
if os.path.exists(root_path):
return
os.makedirs(root_path)
return
def download_data_file(self):
'''
Checks to see if the file already exists locally, if not downloads it
'''
try:
with open(self.path, 'r', newline='') as f:
myreader=csv.reader(f,delimiter=',')
headers = next(myreader)
except FileNotFoundError as e:
self.validate_or_create_path()
logging.info(" file not found. attempting to download file to disk: " + self.s3_path)
urlretrieve(self.s3_path, self.path)
logging.info(" download complete.")
with open(self.path, 'r', newline='') as f:
myreader=csv.reader(f,delimiter=',')
headers = next(myreader)
return headers #not strictly necessary but useful for testing
def should_file_be_loaded(self, sql_manifest_row):
'''
Runs all the checks that the file is OK to use
'''
if self.do_fields_match() == False:
return False
if self.check_include_flag(sql_manifest_row) == False:
return False
return True
def check_include_flag(self, sql_manifest_row):
'''
compares manifest from the csv to manifest in the database.
If the manifest says the file should be used ("use") AND the file is not
already loaded into the database (as indicated by the matching sql_manifest_row), the file
will be added.
The sql object in charge of getting the sql_manifest_row and writing
new sql_manifest_row elements to the database is in charge of making sure
that the sql_manifest_row['status'] field can be trusted as a true representation of
what is in the database currently.
'''
if self.manifest_row['include_flag'] == 'use':
if sql_manifest_row == None:
return True
if sql_manifest_row['status'] != 'loaded':
return True
if sql_manifest_row['status'] == 'loaded':
logging.info(" {} is already in the database, skipping".format(self.manifest_row['unique_data_id']))
return False
else:
logging.info(" {} include_flag is {}, skipping".format(self.manifest_row['unique_data_id'], self.manifest_row['include_flag']))
return False
def do_fields_match(self):
'''
Checks that the csv headers match the expected values
'''
try:
field_list = self.meta[self.destination_table]['fields']
except KeyError:
logging.info(' table "{}" not found in meta data'.format(self.destination_table))
return False
included = {}
#Initialize values - start out assuming all is OK until we identify problems.
return_value = True
self.not_found = []
#Check that all of the data columns are in the meta.json
for field in self.keys:
if not any(d.get('source_name', None) == field for d in field_list):
self.not_found.append('"{}" in CSV not found in meta'.format(field))
return_value = False
#Check that all the meta.json columns are in the data
for field in field_list:
if field['source_name'] not in self.keys:
self.not_found.append(' "{}" in meta.json not found in data'.format(field['source_name']))
return_value = False
#Log our errors if any
if return_value == False:
logging.warning(" do_fields_match: {}. '{}' had missing items:\n{}".format(return_value, self.destination_table, self.not_found))
else:
logging.info(" do_fields_match: {}. meta.json and csv field lists match completely for '{}'".format(return_value, self.destination_table))
return return_value
```
#### File: housinginsights/ingestion/functions.py
```python
import logging
import json
from sqlalchemy.exc import ProgrammingError
import sys, os
from importlib import import_module
sys.path.append(os.path.abspath('../../'))
import housinginsights.ingestion.Cleaners as Cleaners
# Completed, tests not written.
def load_meta_data(filename='meta.json'):
"""
Expected meta data format:
{ tablename: {fields:[
{ "display_name": "Preservation Catalog ID",
"display_text": "description of what this field is",
"source_name": "Nlihc_id",
"sql_name": "nlihc_id",
"type": "object"
}
]}
}
"""
with open(filename) as fh:
meta = json.load(fh)
json_is_valid = True
try:
for table in meta:
for field in meta[table]['fields']:
for key in field:
if key not in ('display_name', 'display_text', 'source_name', 'sql_name', 'type'):
json_is_valid = False
first_json_error = "Location: table: {}, section: {}, attribute: {}".format(table, field, key)
raise ValueError("Error found in JSON, check expected format. {}".format(first_json_error))
    except ValueError:
        raise
    except Exception:
        raise ValueError("Error found in JSON, check expected format.")
logging.info("{} imported. JSON format is valid: {}".format(filename, json_is_valid))
return meta
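# Example (a sketch; 'project' is a hypothetical table name in meta.json):
#   meta = load_meta_data('meta.json')
#   field_names = [f['sql_name'] for f in meta['project']['fields']]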
def check_or_create_sql_manifest(engine, rebuild=False):
'''
Makes sure we have a manifest table in the database.
If not, it creates it with appropriate fields.
This corresponds to the manifest.csv file, which contains a log
of all the individual data files we have used as well as which
table they each go into.
The csv version of the manifest includes all files we have ever
used, including ones not in the database.
The SQL version of the manifest only tracks those that have been
written to the database, and whether they are still there or
have been deleted.
engine = the SQLalchemy engine to get to the database
rebuild = Boolean as to whether to drop the table first.
'''
try:
db_conn = engine.connect()
sql_query = "SELECT * FROM manifest"
query_result = db_conn.execute(sql_query)
results = [dict(row.items()) for row in query_result]
db_conn.close()
return True
except ProgrammingError as e:
try:
#Create the query with appropriate fields and datatypes
db_conn = engine.connect()
fields = [
("status","text"),
("load_date", "timestamp"),
("include_flag","text"),
("destination_table","text"),
("unique_data_id","text"),
("data_date","date"),
("encoding", "text"),
("local_folder","text"),
("s3_folder","text"),
("filepath","text"),
("notes","text")
]
field_statements = []
for tup in fields:
field_statements.append(tup[0] + " " + tup[1])
field_command = ",".join(field_statements)
create_command = "CREATE TABLE manifest({});".format(field_command)
db_conn.execute(create_command)
db_conn.close()
logging.info("Manifest table created in the SQL database")
return True
except Exception as e:
raise e
def get_cleaner_from_name(meta, manifest_row, name = "GenericCleaner"):
#Import
#module = import_module("module.submodule")
Class_ = getattr(Cleaners, name)
instance = Class_(meta, manifest_row)
return instance
def join_paths(pieces=[]):
'''
Joins arbitrary pieces of a url or path.
Alternative to os.path.join if the second argument might start with "/"
'''
return '/'.join(s.strip('/') for s in pieces)
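# For example, join_paths(['https://example.com/', '/data/', 'file.csv'])
# returns 'https://example.com/data/file.csv'.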
#Used for testing purposes
if __name__ == '__main__':
    # quick smoke test; meta and manifest_row are not needed just to look up the class
    instance = get_cleaner_from_name(None, None, "GenericCleaner")
    print(type(instance))
```
#### File: housinginsights/ingestion/make_draft_json.py
```python
import logging
import json
import pandas
import sys, os
if __name__ == '__main__':
sys.path.append(os.path.abspath('../../'))
from housinginsights.ingestion import ManifestReader
#configuration
#See /logs/example-logging.py for usage examples
logging_filename = "../../logs/ingestion.log"
logging_path = os.path.abspath("../../logs")
logging.basicConfig(filename=logging_filename, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler()) #Pushes everything from the logger to the command line output as well.
# Function to clean column names for the sql_name JSON field.
def sql_name_clean(name):
for item in ["-"," ","."]:
if item in name:
name = name.replace(item, "_")
return name.lower()
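# For example, sql_name_clean("Median Rent.2015") returns "median_rent_2015".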
def pandas_to_sql_data_type(pandas_type_string):
mapping = {
'object':'text',
'int64':'integer',
'float64':'decimal',
'datetime64':'timestamp'
}
try:
sql_type = mapping[pandas_type_string]
except KeyError:
sql_type = 'text'
return sql_type
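# For example, pandas_to_sql_data_type('float64') returns 'decimal'; any dtype string
# not in the mapping falls back to 'text'.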
def make_draft_json(filename, tablename, encoding): #use the name from constants as default
# Reads the initial CSV and sets up the basic output structure.
dataframe_file = pandas.read_csv(filename, encoding=encoding)
dataframe_iterator = dataframe_file.columns
output = {
tablename: {
"cleaner": tablename + "{}".format("_cleaner"),
"replace_table": True,
"fields": []
}
}
# The meat of the JSON data.
for field in dataframe_iterator:
pandas_type = str(dataframe_file[field].dtypes)
sql_type = pandas_to_sql_data_type(pandas_type)
data = {
"type": sql_type,
"source_name": field,
"sql_name": sql_name_clean(field),
"display_name": sql_name_clean(field),
"display_text":""
}
output[tablename]["fields"].append(data)
output_path = os.path.join(logging_path,(tablename+".json"))
with open(output_path, "w") as results:
json.dump(output, results, sort_keys=True, indent=4)
print(tablename + " JSON table file created.")
def make_all_json(manifest_path):
completed_tables = {}
manifest = ManifestReader(path=manifest_path)
if manifest.has_unique_ids():
for manifest_row in manifest:
tablename = manifest_row['destination_table']
encoding = manifest_row.get('encoding')
if tablename not in completed_tables:
if manifest_row['include_flag'] == 'use':
filepath = os.path.abspath(
os.path.join(manifest_row['local_folder'],
manifest_row['filepath']
))
make_draft_json(filepath, tablename, encoding)
completed_tables[tablename] = filepath
print("completed {}".format(tablename))
else:
print("skipping {}, already loaded".format(manifest_row['unique_data_id']))
print("Finished! Used these files to generate json:")
print(completed_tables)
if __name__ == '__main__':
if 'single' in sys.argv:
# Edit these values before running!
csv_filename = os.path.abspath("/Users/williammcmonagle/GitHub/housing-insights/data/raw/acs/B25058_median_rent_by_tract/2009_5year/ACS_09_5YR_B25058_with_ann.csv")
table_name = "foobar"
encoding = "latin1" #only used for opening w/ Pandas. Try utf-8 if latin1 doesn't work. Put the successful value into manifest.csv
make_draft_json(csv_filename, table_name, encoding)
if 'multi' in sys.argv:
manifest_path = os.path.abspath('../../scripts/manifest.csv')
make_all_json(manifest_path)
```
#### File: housinginsights/sources/wmata_distcalc.py
```python
import sys
import csv
import json
import requests
import os
import time
class WmataApiConn():
def __init__(self,wmata_api_key):
self.wmata_api_key = wmata_api_key
def getMiles(self):
return self.miles
def setMiles(self,miles):
self.miles = miles
self.meters = miles*1609.344
def getMeters(self):
return self.meters
def setMeters(self, meters):
self.meters = meters
self.miles = meters*0.000621371192
def setWmataApiKey(self,wmata_api_key):
self.wmata_api_key = wmata_api_key
def getWmataHeaders(self):
return { 'api_key': self.wmata_api_key}
def setMapBoxApiKey(self, mapbox_api_key):
self.mapbox_api_key = {'access_token':mapbox_api_key}
def getWalkingDistance(self, srcLat, srcLon, destLat, destLon):
"""Returns the walking distance in meters between two locations
Parameters:
srcLat - latitude for source location
srcLon - longitude for source location
destLat - latitude for destination location
destLon - longitude for destination location
mapbox_api_key - api key for mapbox REST services
"""
distReqCoords = srcLon + ',' + srcLat + ';' + destLon + ',' + destLat
mapbox_params = self.mapbox_api_key
# according to documentation, this doesn't work in Python SDK so switched to using REST API
walkDistResponse = requests.get("https://api.mapbox.com/directions/v5/mapbox/walking/" + distReqCoords,
params=mapbox_params)
return walkDistResponse.json()['routes'][0]['legs'][0]['distance']
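        # Note: the Mapbox Directions API reports route distance in meters, so this value
        # can be compared directly against radii expressed in meters by the find*Stations
        # methods below.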
def setProjectInfo(self,project):
self.lat = project['Proj_lat']
self.lon = project['Proj_lon']
self.nlihcid = project['Nlihc_id']
def findRailStations(self, railStations,radiusinmeters,distCsvWriter):
"""Finds all the rail stations within a given distance from a given project. Writes to the given CSV file.
Parameters:
railStations - json object containing all the wmata rail station information
project - housing project object
radiusinmeters - radius in meteres
distCsvWriter - csvWriter for distance
mapbox_api_key - api key for mapbox REST services
"""
lat = self.lat
lon = self.lon
Nlihc_id = self.nlihcid
for station in railStations:
walkDist = self.getWalkingDistance(lat, lon, str(station['Lat']), str(station['Lon']))
            if walkDist <= radiusinmeters:
                # walkDist is a float in meters; convert to miles for the output csv
                distCsvWriter.writerow((Nlihc_id, 'rail', station['Code'], "{0:.2f}".format(walkDist*0.000621371192)))
def findBusStations(self, radiusinmeters, distCsvWriter):
lat = self.lat
lon = self.lon
Nlihc_id = self.nlihcid
wmata_headers = self.getWmataHeaders()
params = {'Lat': lat,
'Lon' : lon,
'Radius':str(radiusinmeters)}
response = requests.get('https://api.wmata.com/Bus.svc/json/jStops', params=params, headers=wmata_headers)
data = response.json()
for stop in data['Stops']:
walkDist = self.getWalkingDistance(lat, lon, str(stop['Lat']), str(stop['Lon']))
if walkDist <= radiusinmeters: #within 0.5 miles walking
                distCsvWriter.writerow((Nlihc_id, 'bus', stop['StopID'], "{0:.2f}".format(walkDist*0.000621371192)))
def writeRailInfo(self, infoCsvWriter):
"""Writes all rail station data to a given CSV writer. Returns the railStations json for future processing
Parameters:
infoCsvWriter - csv writer
wmata_api_key - api key for wmata REST services
"""
print("Writing RAIL INFO")
wmata_headers = self.getWmataHeaders()
railResponse = requests.get("https://api.wmata.com/Rail.svc/json/jStations", headers=wmata_headers)
railStations = railResponse.json()['Stations']
for station in railStations:
#delimit list of lines with colon
lines = station["LineCode1"] #there is always at least one station
for line_code in ["LineCode2", "LineCode3", "LineCode4"]:
if station[line_code] != None:
lines += ":" + station[line_code]
infoCsvWriter.writerow((station['Code'], 'rail',station['Name'],str(station['Lat']), str(station['Lon']),lines))
return railStations
def writeBusInfo(self, infoCsvWriter):
"""Writes all bus station data to a given CSV writer.
Parameters:
infoCsvWriter - csv writer
wmata_api_key - api key for wmata REST services
"""
print("Writing BUS INFO")
wmata_headers = self.getWmataHeaders()
response = requests.get('https://api.wmata.com/Bus.svc/json/jStops', headers=wmata_headers)
data = response.json()
for stop in data['Stops']:
lines = ""
for route in stop['Routes']:
                lines = '{}:{}'.format(lines, route)
stop_id_or_station_code = '{}'.format(route)
lines = lines[1:] #take off the first :
infoCsvWriter.writerow((stop['StopID'], 'bus', stop['Name'], stop['Lat'],stop['Lon'], lines, stop_id_or_station_code))
def main(secretsFileName, csvInputFileName,distOutputFileName,infoOutputFileName):
"""Writes two csvs: 1 for general bus/rail info, 1 with distances to wmata for projects
Parameters:
secretsFileName - json file name that contains various api keys
csvInputFileName - csv file with project information
distOutputFileName - csv file to output to for calculated metro distances for each project
    infoOutputFileName - csv file for general bus & rail info for each wmata station
"""
#pull API keys
api_keys = json.loads(open(secretsFileName).read())
wmata_api_key = api_keys['wmata']['api_key']
mapbox_api_key = api_keys['mapbox']['public-token']
#write out the wmata info csv
infoOutputFile = open(infoOutputFileName, 'wt')
infoCsvWriter = csv.writer(infoOutputFile)
infoCsvWriter.writerow(('code_or_id','type','name','lat','lon','lines','stop_id_or_station_code'))
    #saving railStations to compute distances from each project later in the script. reduces network calls.
    wmataConn = WmataApiConn(wmata_api_key)
    wmataConn.setMapBoxApiKey(mapbox_api_key)
    railStations = wmataConn.writeRailInfo(infoCsvWriter)
    wmataConn.writeBusInfo(infoCsvWriter)
projectsFile = open(csvInputFileName)
distOutputFile = open(distOutputFileName, 'wt')
distCsvWriter = csv.writer(distOutputFile)
reader = csv.DictReader(projectsFile)
distCsvWriter.writerow(('Nlihc_id','type','stop_id_or_station_code','dist_in_miles'))
numrow = 0
for row in reader:
        wmataConn.setMiles(0.5)
        radius = wmataConn.getMeters()
        numrow = numrow+1
        #if numrow > 1: break
        print("Processing project {} of 400ish".format(numrow))
        wmataConn.setProjectInfo(row)
        # find all metro stations within 0.5 miles
        print("Starting processing rail stations for {}".format(numrow))
        wmataConn.findRailStations(railStations, radius, distCsvWriter)
        print("Completed processing rail stations for {}".format(numrow))
        # find all bus stops within 0.5 miles
        print("Starting processing bus stations for {}".format(numrow))
        wmataConn.findBusStations(radius, distCsvWriter)
print("Completed processing bus stations for {}".format(numrow))
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Requires 1 argument: [csv input file]")
else:
inputFileName = sys.argv[1]
secretsFileName = "../housinginsights/secrets.json"
now = time.strftime("%Y%m%d")
outputDir = "../../data/raw/wmata/" + now
if not os.path.exists(outputDir):
os.makedirs(outputDir)
distOutputFileName = outputDir + "/dist.csv"
infoOutputFileName = outputDir + "/wmatainfo.csv"
print("Will read from {}".format(inputFileName))
print("Will write WMATA_DIST table to {}".format(distOutputFileName))
print("Will write WMATA_INFO table to {}".format(infoOutputFileName))
main(secretsFileName, inputFileName, distOutputFileName, infoOutputFileName)
```
#### File: housinginsights/tools/dbtools.py
```python
from sqlalchemy import create_engine, Column, String, Integer, MetaData, Table
from sqlalchemy.orm import sessionmaker
from subprocess import check_output
import csv
import json
import os
#import docker
secrets_filepath = os.path.join(os.path.dirname(__file__), '../secrets.json')
##########################################################################
# Functions
##########################################################################
def get_connect_str(database_choice):
"""
Loads the secrets json file to retrieve the connection string
"""
with open(secrets_filepath) as fh:
secrets = json.load(fh)
return secrets[database_choice]['connect_str']
def get_database_connection(database_choice):
# Connect to the database
connection_string = get_connect_str(database_choice)
engine = create_engine(connection_string)
database_connection = engine.connect()
return database_connection
def get_database_engine(database_choice):
# Connect to the database
connection_string = get_connect_str(database_choice)
engine = create_engine(connection_string)
return engine
def get_database_session(database_choice):
# Connect to the database
connection_string = get_connect_str(database_choice)
engine = create_engine(connection_string)
Session = sessionmaker(bind=engine)
session = Session()
return session
def get_psycopg2_cursor(database_choice):
connection_string = get_connect_str(database_choice)
engine = create_engine(connection_string)
cursor = engine.raw_connection().cursor()
return cursor
```
|
{
"source": "jgorgenucsd/corr_tf",
"score": 3
}
|
#### File: jgorgenucsd/corr_tf/_correlation_grad.py
```python
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
correlation_grad_module = tf.load_op_library('./build/libcorrelation_grad.so')
@ops.RegisterGradient("Correlation")
def _correlation_grad_cc(op, grad):
"""
The gradient for `correlation` using the operation implemented in C++.
:param op: `correlation` `Operation` that we are differentiating, which we can use
to find the inputs and outputs of the original op.
:param grad: gradient with respect to the output of the `correlation` op.
:return: gradients with respect to the input of `correlation`.
"""
return correlation_grad_module.correlation_grad(grad, op.inputs[0], op.inputs[1],stride=op.get_attr('stride'),max_displacement=op.get_attr('max_displacement'))
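# With this gradient registered, tf.gradients can differentiate through the custom
# correlation op; a sketch using the wrapper from correlation_layer (see the tests below):
#   result = cl.corr(a, b)
#   grad_a = tf.gradients(result, a)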
```
#### File: jgorgenucsd/corr_tf/correlation_tests.py
```python
import unittest
import numpy as np
import tensorflow as tf
import correlation_layer as cl
class CorrelationOpTest(unittest.TestCase):
def test_raisesExceptionWithIncompatibleDimensions(self):
''' correlation only accepts 4-dimension tensors, with dimensions (batch_size,height,width,num_channels) '''
with tf.Session(''):
with self.assertRaises(ValueError):
cl.corr([1, 2], [[1, 2], [3, 4]]).eval()
with self.assertRaises(ValueError):
self.assertRaises(cl.corr([1, 2], [1, 2, 3, 4]).eval(), ValueError)
with self.assertRaises(ValueError):
self.assertRaises(cl.corr([1, 2, 3], [[1, 2], [3, 4]]).eval(), ValueError)
def test_raisesExceptionWithTooManyOffsets(self):
''' correlation only accepts up to 2601==51x51 offsets '''
with tf.Session(''):
with self.assertRaises(ValueError):
# Current max_displacement/stride is 25
my_shape = (1,21,21,1)
a = np.ones((my_shape))
b = np.ones((my_shape))
self.assertRaises(cl.corr(a,b,stride=1,max_displacement=50),ValueError);
def test_correlationHardCoded(self):
with tf.Session(''):
batch_size = 1;
width = 21;
depth = 1;
stride = 2;
max_displacement = 20;
expected_depth = (2*int(max_displacement/stride)+1)**2;
my_shape = (batch_size,width,width,depth)
result = cl.corr(np.ones(my_shape), np.ones(my_shape)).eval()
self.assertEqual(result.shape[0], my_shape[0])
self.assertEqual(result.shape[1], my_shape[1])
self.assertEqual(result.shape[2], my_shape[2])
self.assertEqual(result.shape[3], expected_depth)
self.assertEqual(result[0,0,0,220], 1)
self.assertEqual(result[0,0,0,0], 0)
np.testing.assert_array_equal(result[0,:,:,220], np.ones((width,width)))
def test_correlationGradientAHardCoded(self):
with tf.Session('') as sess:
batch_size = 1;
height = 21;
width = 21;
depth = 1;
stride = 2;
max_displacement = 20;
expected_depth = (2*int(max_displacement/stride)+1)**2;
offsets = []
for row_offset in np.arange(-max_displacement,max_displacement+1,stride):
for col_offset in np.arange(-max_displacement,max_displacement+1,stride):
offsets.append((row_offset,col_offset));
my_shape = (batch_size,height,width,depth)
a = tf.placeholder(tf.float32, shape = my_shape)
feed_a = np.ones(my_shape,dtype=np.float32)
b = 2 * np.ones(my_shape,dtype=np.float32)
result = cl.corr(a, b)
self.assertEqual(int(result.shape[3]), expected_depth)
# Check if it's aligned at all offsets
for offset_index,offset in enumerate(offsets):
result_slices = result[:,:,:,offset_index]
grad_a = tf.gradients(result_slices,a);
gradient_a = sess.run(grad_a,feed_dict={a : feed_a});
row_offset = offset[0]
col_offset = offset[1]
a_row_begin = 0
a_row_end = height-row_offset
b_row_begin = row_offset
b_row_end = height
a_col_begin = 0
a_col_end = width-col_offset
b_col_begin = col_offset
b_col_end = width
if(row_offset < 0):
a_row_begin = -row_offset
a_row_end = height
b_row_begin = 0
b_row_end = height+row_offset
if(col_offset < 0):
a_col_begin = -col_offset
a_col_end = width
b_col_begin = 0
b_col_end = width+col_offset
final_height = a_row_end-a_row_begin
final_width = a_col_end-a_col_begin
np.testing.assert_array_equal(gradient_a[0][0,a_row_begin:a_row_end,a_col_begin:a_col_end,0], 2*np.ones((final_height,final_width)))
def test_correlationGradientAHardCodedLarge(self):
with tf.Session('') as sess:
batch_size = 1;
height = 41;
width = 41;
depth = 1;
stride = 4;
max_displacement = 24;
expected_depth = (2*int(max_displacement/stride)+1)**2;
offsets = []
for row_offset in np.arange(-max_displacement,max_displacement+1,stride):
for col_offset in np.arange(-max_displacement,max_displacement+1,stride):
offsets.append((row_offset,col_offset));
my_shape = (batch_size,height,width,depth)
a = tf.placeholder(tf.float32, shape = my_shape)
feed_a = np.ones(my_shape,dtype=np.float32)
b = 2 * np.ones(my_shape,dtype=np.float32)
result = cl.corr(a, b,stride=stride,max_displacement=max_displacement)
self.assertEqual(int(result.shape[3]), expected_depth)
# Check if it's aligned at all offsets
for offset_index,offset in enumerate(offsets):
result_slices = result[:,:,:,offset_index]
grad_a = tf.gradients(result_slices,a);
gradient_a = sess.run(grad_a,feed_dict={a : feed_a});
row_offset = offset[0]
col_offset = offset[1]
a_row_begin = 0
a_row_end = height-row_offset
b_row_begin = row_offset
b_row_end = height
a_col_begin = 0
a_col_end = width-col_offset
b_col_begin = col_offset
b_col_end = width
if(row_offset < 0):
a_row_begin = -row_offset
a_row_end = height
b_row_begin = 0
b_row_end = height+row_offset
if(col_offset < 0):
a_col_begin = -col_offset
a_col_end = width
b_col_begin = 0
b_col_end = width+col_offset
final_height = a_row_end-a_row_begin
final_width = a_col_end-a_col_begin
np.testing.assert_array_equal(gradient_a[0][0,a_row_begin:a_row_end,a_col_begin:a_col_end,0], 2*np.ones((final_height,final_width)))
def test_correlationGradientBHardCoded(self):
with tf.Session('') as sess:
batch_size = 1;
height = 21;
width = 21;
depth = 1;
stride = 2;
max_displacement = 20;
expected_depth = (2*int(max_displacement/stride)+1)**2;
stride = 2;
max_displacement = 20;
expected_depth = (2*int(max_displacement/stride)+1)**2;
offsets = []
for row_offset in np.arange(-max_displacement,max_displacement+1,stride):
for col_offset in np.arange(-max_displacement,max_displacement+1,stride):
offsets.append((row_offset,col_offset));
my_shape = (batch_size,height,width,depth)
a = np.ones(my_shape,dtype=np.float32)
feed_b = 2*np.ones(my_shape,dtype=np.float32)
b = tf.placeholder(tf.float32, shape = my_shape)
result = cl.corr(a, b)
# Check if it's aligned at all offsets
for offset_index,offset in enumerate(offsets):
result_slices = result[:,:,:,offset_index]
grad_b = tf.gradients(result_slices,b);
gradient_b = sess.run(grad_b,feed_dict={b : feed_b});
row_offset = offset[0]
col_offset = offset[1]
a_row_begin = 0
a_row_end = height-row_offset
b_row_begin = row_offset
b_row_end = height
a_col_begin = 0
a_col_end = width-col_offset
b_col_begin = col_offset
b_col_end = width
if(row_offset < 0):
a_row_begin = -row_offset
a_row_end = height
b_row_begin = 0
b_row_end = height+row_offset
if(col_offset < 0):
a_col_begin = -col_offset
a_col_end = width
b_col_begin = 0
b_col_end = width+col_offset
final_height = b_row_end-b_row_begin
final_width = b_col_end-b_col_begin
np.testing.assert_array_equal(gradient_b[0][0,b_row_begin:b_row_end,b_col_begin:b_col_end,0], np.ones((final_height,final_width)))
def test_correlationRandom(self):
with tf.Session(''):
batch_size = 1;
height = 21;
width = 21;
depth = 1;
my_shape = (batch_size,height,width,depth)
stride = 2;
max_displacement = 20;
expected_depth = (2*int(max_displacement/stride)+1)**2;
offsets = []
for row_offset in np.arange(-max_displacement,max_displacement+1,stride):
for col_offset in np.arange(-max_displacement,max_displacement+1,stride):
offsets.append((row_offset,col_offset));
for i in range(10):
a_rand = np.random.randint(10, size = my_shape)
b_rand = np.random.randint(10, size = my_shape)
result = cl.corr(a_rand, b_rand,stride=stride,max_displacement=max_displacement).eval()
self.assertEqual(result.shape[3], expected_depth)
for offset_index, offset in enumerate(offsets):
row_offset = offset[0]
col_offset = offset[1]
a_row_begin = 0
a_row_end = height-row_offset
b_row_begin = row_offset
b_row_end = height
a_col_begin = 0
a_col_end = width-col_offset
b_col_begin = col_offset
b_col_end = width
if(row_offset < 0):
a_row_begin = -row_offset
a_row_end = height
b_row_begin = 0
b_row_end = height+row_offset
if(col_offset < 0):
a_col_begin = -col_offset
a_col_end = width
b_col_begin = 0
b_col_end = width+col_offset
a_slice = a_rand[:,a_row_begin:a_row_end,a_col_begin:a_col_end,:]
b_slice = b_rand[:,b_row_begin:b_row_end,b_col_begin:b_col_end,:];
result_rand_full = a_slice*b_slice
result_rand = np.sum(result_rand_full,axis=-1)/depth
np.testing.assert_array_equal(result[0,a_row_begin:a_row_end,a_col_begin:a_col_end,offset_index], result_rand[0,:,:])
def test_correlationGradientARandom(self):
with tf.Session('') as sess:
batch_size = 1;
height = 21;
width = 21;
depth = 1;
stride = 2;
max_displacement = 20;
expected_depth = (2*int(max_displacement/stride)+1)**2;
offsets = []
for row_offset in np.arange(-max_displacement,max_displacement+1,stride):
for col_offset in np.arange(-max_displacement,max_displacement+1,stride):
offsets.append((row_offset,col_offset));
my_shape = (batch_size,height,width,depth)
a = tf.placeholder(tf.float32, shape = my_shape)
feed_a = np.random.randint(10,size=my_shape).astype(np.float32)
b = 2 * np.random.randint(10,size=my_shape).astype(np.float32)
result = cl.corr(a, b)
# Check if it's aligned at all offsets
for offset_index,offset in enumerate(offsets):
row_offset = offset[0]
col_offset = offset[1]
a_row_begin = 0
a_row_end = height-row_offset
b_row_begin = row_offset
b_row_end = height
a_col_begin = 0
a_col_end = width-col_offset
b_col_begin = col_offset
b_col_end = width
if(row_offset < 0):
a_row_begin = -row_offset
a_row_end = height
b_row_begin = 0
b_row_end = height+row_offset
if(col_offset < 0):
a_col_begin = -col_offset
a_col_end = width
b_col_begin = 0
b_col_end = width+col_offset
result_slice = result[:,:,:,offset_index]
grad_a = tf.gradients(result_slice,a);
gradient_a = sess.run(grad_a,feed_dict={a : feed_a});
np.testing.assert_array_equal(gradient_a[0][0,a_row_begin:a_row_end,a_col_begin:a_col_end,0], b[0,b_row_begin:b_row_end,b_col_begin:b_col_end,0])
def test_correlationGradientBRandom(self):
with tf.Session('') as sess:
batch_size = 1;
height = 21;
width = 21;
depth = 1;
stride = 2;
max_displacement = 20;
expected_depth = (2*int(max_displacement/stride)+1)**2;
offsets = []
for row_offset in np.arange(-max_displacement,max_displacement+1,stride):
for col_offset in np.arange(-max_displacement,max_displacement+1,stride):
offsets.append((row_offset,col_offset));
my_shape = (batch_size,height,width,depth)
a = np.random.randint(10,size=my_shape).astype(np.float32)
feed_b = np.random.randint(10,size=my_shape).astype(np.float32)
b = tf.placeholder(tf.float32, shape = my_shape)
result = cl.corr(a, b)
# Check if it's aligned at all offsets
for offset_index,offset in enumerate(offsets):
row_offset = offset[0]
col_offset = offset[1]
a_row_begin = 0
a_row_end = height-row_offset
b_row_begin = row_offset
b_row_end = height
a_col_begin = 0
a_col_end = width-col_offset
b_col_begin = col_offset
b_col_end = width
if(row_offset < 0):
a_row_begin = -row_offset
a_row_end = height
b_row_begin = 0
b_row_end = height+row_offset
if(col_offset < 0):
a_col_begin = -col_offset
a_col_end = width
b_col_begin = 0
b_col_end = width+col_offset
result_slice = result[:,:,:,offset_index]
grad_b = tf.gradients(result_slice,b);
gradient_b = sess.run(grad_b,feed_dict={b : feed_b});
np.testing.assert_array_equal(a[0,a_row_begin:a_row_end,a_col_begin:a_col_end,0], gradient_b[0][0,b_row_begin:b_row_end,b_col_begin:b_col_end,0])
if __name__ == '__main__':
suite=unittest.TestLoader().loadTestsFromTestCase(CorrelationOpTest)
unittest.TextTestRunner(verbosity=2).run(suite)
```
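For reference, the per-offset slicing that the tests above repeat can be expressed as a small NumPy helper. This is a sketch only; `corr_ref` is illustrative and not part of the repository — it mirrors the expected value the tests compare `cl.corr` against for a single displacement.
```python
import numpy as np

def corr_ref(a, b, row_offset, col_offset):
    """Single-offset reference correlation matching the slicing used in the tests."""
    _, height, width, depth = a.shape
    a_rows = slice(max(0, -row_offset), min(height, height - row_offset))
    a_cols = slice(max(0, -col_offset), min(width, width - col_offset))
    b_rows = slice(max(0, row_offset), min(height, height + row_offset))
    b_cols = slice(max(0, col_offset), min(width, width + col_offset))
    # Multiply the overlapping regions of a and b, then average over channels
    return np.sum(a[:, a_rows, a_cols, :] * b[:, b_rows, b_cols, :], axis=-1) / depth
```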
#### File: jgorgenucsd/corr_tf/flownet.py
```python
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
from keras.engine.topology import Layer
from keras.models import Model, Sequential
from keras import activations
from keras.layers import Activation, Input, Reshape, merge, Lambda, Dropout, Flatten, Dense,LSTM
from keras.layers.merge import add,concatenate,dot
from keras.layers.convolutional import Convolution2D, Deconvolution2D, ZeroPadding2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import Add,Concatenate,Dot
import keras.backend as K
import numpy as np
import tensorflow as tf
from keras.optimizers import SGD
import itertools
#from keras.utils.visualize_util import plot
import random
from scipy import misc
from scipy.linalg import logm, expm
import pandas as pd
import scipy
from keras.preprocessing.image import ImageDataGenerator, array_to_img, \
img_to_array, load_img
from os import listdir
from os.path import isfile, join
import matplotlib
HEADLESS = False
if HEADLESS:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Use the custom correlation layer or build one from tensorflow slice operations
use_custom_correlation = True
if use_custom_correlation:
import correlation_layer as cl
QUICK_DEBUG = True
BATCH_SIZE = 3
num_epochs = 10
num_train_sets = 9
loss_order = 2
batch_size = BATCH_SIZE
def myDot():
return Lambda(lambda x: tf.reduce_sum(tf.multiply(x[0],x[1]),axis=-1,keep_dims=True),name = 'myDot')
def get_padded_stride(b,displacement_x,displacement_y,height_8=384/8,width_8=512/8):
slice_height = height_8- abs(displacement_y)
slice_width = width_8 - abs(displacement_x)
start_y = abs(displacement_y) if displacement_y < 0 else 0
start_x = abs(displacement_x) if displacement_x < 0 else 0
top_pad = displacement_y if (displacement_y>0) else 0
bottom_pad = start_y
left_pad = displacement_x if (displacement_x>0) else 0
right_pad = start_x
gather_layer = Lambda(lambda x: tf.pad(tf.slice(x,begin=[0,start_y,start_x,0],size=[-1,slice_height,slice_width,-1]),paddings=[[0,0],[top_pad,bottom_pad],[left_pad,right_pad],[0,0]]),name='gather_{}_{}'.format(displacement_x,displacement_y))(b)
return gather_layer
def get_correlation_layer(conv3_pool_l,conv3_pool_r,max_displacement=20,stride2=2,height_8=384/8,width_8=512/8):
layer_list = []
dotLayer = myDot()
for i in range(-max_displacement, max_displacement+stride2,stride2):
for j in range(-max_displacement, max_displacement+stride2,stride2):
slice_b = get_padded_stride(conv3_pool_r,i,j,height_8,width_8)
current_layer = dotLayer([conv3_pool_l,slice_b])
layer_list.append(current_layer)
return Lambda(lambda x: tf.concat(x, 3),name='441_output_concatenation')(layer_list)
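# Note (added for clarity): with max_displacement=20 and stride2=2 the two loops above
# enumerate 21 x 21 = 441 displacements, matching the 441-channel output concatenated
# here ('441_output_concatenation') and the expected_depth (2*20/2 + 1)**2 = 441 used
# in the correlation tests.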
def getEncoderModel(height = 384, width = 512,batch_size=32):
print "Generating model with height={}, width={},batch_size={}".format(height,width,batch_size)
## convolution model
conv_activation = lambda x: activations.relu(x,alpha=0.1) # Use the activation from the FlowNetC Caffe implementation
#conv_activation = "elu"
# left and model
input_l = Input(batch_shape=(batch_size,height, width, 3), name='pre_input')
input_r = Input(batch_shape=(batch_size,height, width, 3), name='nxt_input')
#layer 1, output of layer 1 is height/2 x width/2
conv1 = Convolution2D(64,(7,7), strides = 2,batch_size=batch_size, padding = 'same', name = 'conv1',activation=conv_activation)
conv1_l = conv1(input_l)
conv1_r = conv1(input_r)
#layer 2 output of layer 2 is height/4 x width/4
conv2 = Convolution2D(128, (5, 5), strides = 2,padding = 'same', name='conv2',activation=conv_activation)
conv2_l = conv2(conv1_l)
conv2_r = conv2(conv1_r)
#layer 3 output of layer 3 is height/8 x width8
conv3 = Convolution2D(256, (5, 5), strides = 2,padding = 'same', name='conv3',activation=conv_activation)
conv3_l = conv3(conv2_l)
conv3_r = conv3(conv2_r)
# merge
print "Generating Correlation layer..."
if use_custom_correlation:
corr_layer = Lambda( lambda x: cl.corr(a=x[0],b=x[1],stride=2,max_displacement=20), name= "correlation_layer")([conv3_l,conv3_r])
else:
corr_layer = get_correlation_layer(conv3_l, conv3_r,max_displacement=20,stride2=2,height_8=height/8,width_8=width/8)
# merged convolution
conv3_l_redir = Convolution2D(32,(1,1),name="conv_redir",activation=conv_activation)(conv3_l)
conv3_l_with_corr = concatenate([conv3_l_redir,corr_layer],name="concatenated_correlation")
conv3_1 = Convolution2D(256, (3, 3), padding = 'same', name='conv3_1',activation=conv_activation)(conv3_l_with_corr)
#layer 4, output of layer 4 is height/16 x width/16
conv4 = Convolution2D(512, (3, 3), strides=2,padding = 'same', name='conv4',activation=conv_activation)(conv3_1)
height_16 = height/16; width_16 = width/16
conv4_1 = Convolution2D(512, (3, 3), padding = 'same', name='conv4_1',activation=conv_activation)(conv4)
# layer 5, now /32
conv5 = Convolution2D(512, (3, 3), strides = 2, padding = 'same', name='conv5',activation=conv_activation)(conv4_1)
height_32 = height_16/2; width_32 = width_16/2
conv5_1 = Convolution2D(512, (3, 3), padding = 'same', name='conv5_1',activation=conv_activation)(conv5)
# Layer 6, now /64
conv6 = Convolution2D(1024, (3, 3), strides= 2,padding = 'same', name='conv6',activation=conv_activation)(conv5_1)
height_64 = height_32/2; width_64 = width_32/2
print "Compiling..."
optimizer = SGD(nesterov=True, lr=0.00001, momentum=0.1,decay=0.001);
model = Model(inputs = [input_l, input_r], outputs = conv6)
model.compile(optimizer=optimizer,loss='mean_squared_error')
print "Done"
return model
if __name__ == '__main__':
height = 384
width = 512
encoderModel = getEncoderModel(height=height, width=width,batch_size = batch_size);
encoderModel.summary()
```
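A minimal smoke test for the encoder above might look like the snippet below. It is a sketch only: it assumes the file is importable as `flownet` and that enough memory is available to build the model; the shape comment reflects the six stride-2 convolutions.
```python
import numpy as np
from flownet import getEncoderModel  # assumes this file is on the path as flownet.py

batch_size, height, width = 3, 384, 512
model = getEncoderModel(height=height, width=width, batch_size=batch_size)
left = np.random.rand(batch_size, height, width, 3).astype(np.float32)
right = np.random.rand(batch_size, height, width, 3).astype(np.float32)
features = model.predict([left, right], batch_size=batch_size)
print(features.shape)  # six stride-2 convolutions: (3, 384/64, 512/64, 1024) = (3, 6, 8, 1024)
```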
|
{
"source": "jgori-ouistiti/CoopIHC",
"score": 2
}
|
#### File: coopihc/agents/BaseAgent.py
```python
from coopihc.base.State import State
from coopihc.base.StateElement import StateElement
from coopihc.base.elements import discrete_array_element, array_element, cat_element
from coopihc.policy.BasePolicy import BasePolicy
from coopihc.observation.RuleObservationEngine import RuleObservationEngine
from coopihc.observation.utils import base_user_engine_specification
from coopihc.observation.utils import base_assistant_engine_specification
from coopihc.inference.BaseInferenceEngine import BaseInferenceEngine
import numpy
import copy
class BaseAgent:
"""A *Coopihc* agent.
Instantiate or subclass this class to define an agent that is compatible with *CoopIHC*.
An agent has 4 components:
* An internal state
* An observation engine, that produces observations of the task and the other agent
* An inference engine, that uses this observation to make the internal state transition towards a new state
* A policy, which, based on the agent's internal state and its observation, picks an action.
By default, this class will be initialized with an empty internal :py:class:`State<coopihc.base.State>`, a random binary :py:class:`BasePolicy<coopihc.policy.BasePolicy>`, a :py:class:`RuleObservationEngine<coopihc.observation.RuleObservationEngine>` that sees everything except the other agent's internal state, and a :py:class:`BaseInferenceEngine<coopihc.inference.BaseInferenceEngine>` which does not update the state.
The API methods that users of this class can redefine are:
+ ``finit``: a second round of initialization once a bundle has been formed -- useful because at that point the agent has a reference to the other agent and task.
+ ``reset``: to specify how to initialize the agent's state at the end of each game. Policies, inference engines, and observation engines handle their own resets methods.
+ ``render``: specifies what to display.
Some things to know:
* The agent can be used to produce observations, inferences and actions outside of any Bundle. See methods ``observe(), infer(), take_action()``.
* You can override some components, e.g. to override the existing policy of an agent named ``MyNewUser`` with some other policy, you can do the following
.. code-block:: python
changed_policy_user = MyNewUser(override_policy = (some_other_policy, other_policy_kwargs))
:param str role: "user" or "assistant"
:param type \*\*kwargs: keyword values ( each agent_X key expects a valid X object, and X_kwargs expects a valid dictionary of keyword arguments for X)
+ agent_policy
+ agent_inference_engine
+ agent_observation_engine
+ agent_state
+ policy_kwargs
+ inference_engine_kwargs
+ observation_engine_kwargs
+ state_kwargs
:return: A *CoopIHC* and :py:class:`Bundle<coopihc.bundle>`-compatible agent
:rtype: BaseAgent
"""
def __init__(
self,
role,
agent_state=None,
agent_policy=None,
agent_inference_engine=None,
agent_observation_engine=None,
state_kwargs={},
policy_kwargs={},
inference_engine_kwargs={},
observation_engine_kwargs={},
*args,
**kwargs,
):
# Bundles stuff
self._bundle = None
self._bundle_memory = None
self.ax = None
self._parameters = {}
# Set role of agent
if role not in ["user", "assistant"]:
raise ValueError(
"First argument 'role' should be either 'user' or 'assistant'"
)
else:
self.role = role
# Define policy
self._attach_policy(agent_policy, **policy_kwargs)
# Init state
if agent_state is None:
self._state = State(**state_kwargs)
else:
self._state = agent_state
# Define observation engine
self._attach_observation_engine(
agent_observation_engine, **observation_engine_kwargs
)
# Define inference engine
self._attach_inference_engine(agent_inference_engine, **inference_engine_kwargs)
self._override_components(kwargs)
def _override_components(self, init_kwargs):
"""_override_components
Allows the end-user to override any component for any agent via a kwarg
kwargs are as follows:
* 'override_policy' = (policy, policy_kwargs)
* 'override_state' = state
* 'override_observation_engine' = (observation engine, observation engine_kwargs)
* 'override_inference_engine' = (inference engine, inference engine kwargs)
:param init_kwargs: kwargs passed from init
:type init_kwargs: dict
:meta private:
"""
# Override agent policy
agent_policy, agent_policy_kwargs = init_kwargs.get(
"override_policy", (None, None)
)
if agent_policy is not None:
self._attach_policy(agent_policy, **agent_policy_kwargs)
# Override agent state
agent_state = init_kwargs.get("override_state", None)
if agent_state is not None:
self._state = agent_state
# Override agent observation engine
agent_obseng, agent_obseng_kwargs = init_kwargs.get(
"override_observation_engine", (None, None)
)
if agent_obseng is not None:
self._attach_observation_engine(agent_obseng, **agent_obseng_kwargs)
# Override agent inference engine
agent_infeng, agent_infeng_kwargs = init_kwargs.get(
"override_inference_engine", (None, None)
)
if agent_infeng is not None:
self._attach_inference_engine(agent_infeng, **agent_infeng_kwargs)
def __content__(self):
"""Custom class representation.
A custom representation of the class.
:return: dictionary with content for all components.
:rtype: dict
:meta private:
"""
return {
"Name": self.__class__.__name__,
"State": self.state.__content__(),
"Observation Engine": self.observation_engine.__content__(),
"Inference Engine": self.inference_engine.__content__(),
"Policy": self.policy.__content__(),
}
@property
def parameters(self):
if self.bundle:
return self.bundle.parameters
return self._parameters
@property
def bundle(self):
return self._bundle
@bundle.setter
def bundle(self, value):
if type(value).__name__ == "Simulator":
self.bundle_memory = copy.copy(self._bundle)
self._bundle = value
@property
def bundle_memory(self):
return self._bundle_memory
@bundle_memory.setter
def bundle_memory(self, value):
if type(value).__name__ == "Simulator":
return
self._bundle_memory = value
def _simulator_close(self):
self._bundle = self._bundle_memory
@property
def policy(self):
"""Agent policy"""
return self._policy
@policy.setter
def policy(self, value):
self._attach_policy(value)
@property
def inference_engine(self):
"""Agent inference engine"""
return self._inference_engine
@inference_engine.setter
def inference_engine(self, value):
self._attach_inference_engine(value)
@property
def observation_engine(self):
"""Agent observation engine"""
return self._observation_engine
@observation_engine.setter
def observation_engine(self, value):
self._attach_observation_engine(value)
@property
def state(self):
"""Agent internal state"""
return self._state
@property
def observation(self):
"""Last agent observation"""
return (
self.inference_engine.buffer[-1]
if self.inference_engine.buffer is not None
else None
)
@property
def action(self):
"""Last agent action"""
return self.policy.action
@action.setter
def action(self, item):
self.policy.action = item
@property
def user(self):
"""Connected user"""
if self.role == "user":
return self
else:
try:
return self.bundle.user
except AttributeError: # No bundle known
raise AttributeError(
f"Agent {self.__class__.__name__} has not been connected to a user yet."
)
@property
def assistant(self):
"""Connected assistant"""
if self.role == "assistant":
return self
else:
try:
return self.bundle.assistant
except AttributeError: # No bundle known
raise AttributeError(
f"Agent {self.__class__.__name__} has not been connected to an assistant yet."
)
@property
def task(self):
"""Connected task"""
try:
return self.bundle.task
except AttributeError: # No bundle known
raise AttributeError(
f"Agent {self.__class__.__name__} has not been connected to a task yet."
)
# def __getattr__(self, value):
# try:
# return self.parameters.__getitem__(value)
# except:
# raise AttributeError(
# f"{self.__class__.__name__} object has no attribute {value}"
# )
def _attach_policy(self, policy, **kwargs):
"""Attach a policy
Helper function to attach a policy.
:param policy: a CoopIHC policy
:type policy: :doc:mod:`Policy<coopihc/policy>`
:meta private:
"""
if policy is None:
policy = BasePolicy
if type(policy).__name__ == "type":
self._policy = policy(**kwargs)
else:
self._policy = policy
if kwargs != {}:
raise AttributeError(
"Can't input an instantiated policy and associated keyword arguments. Either pass the policy class, or fully instantiate that policy before passing it."
)
self._policy.host = self
def _attach_observation_engine(self, observation_engine, **kwargs):
"""Attach an observation engine
Helper function to attach an observation engine.
:param observation_engine: a CoopIHC observation engine
:type observation_engine: :doc:mod:`Observation Engine<coopihc/observation>`
:meta private:
"""
if observation_engine is None:
if self.role == "user":
observation_engine = RuleObservationEngine(
deterministic_specification=base_user_engine_specification
)
elif self.role == "assistant":
observation_engine = RuleObservationEngine(
deterministic_specification=base_assistant_engine_specification
)
else:
raise NotImplementedError
if type(observation_engine).__name__ == "type":
self._observation_engine = observation_engine(**kwargs)
else:
self._observation_engine = observation_engine
if kwargs != {}:
raise AttributeError(
"Can't input an instantiated observation engine and associated keyword arguments. Either pass the observation engine class, or fully instantiate that policy before passing it."
)
self._observation_engine.host = self
def _attach_inference_engine(self, inference_engine, **kwargs):
"""Attach an inference engine
Helper function to attach an inference engine.
:param inference: a CoopIHC inference engine
:type inference: :doc:mod:`Inference Engine<coopihc/inference>`
:meta private:
"""
if inference_engine is None:
inference_engine = BaseInferenceEngine()
if type(inference_engine).__name__ == "type":
self._inference_engine = inference_engine(**kwargs)
else:
self._inference_engine = inference_engine
if kwargs != {}:
raise AttributeError(
"Can't input an instantiated inference engine and associated keyword arguments. Either pass the inference engine class, or fully instantiate that policy before passing it."
)
self._inference_engine.host = self
def _base_reset(self, all=True, dic=None, random=True):
"""Reset function called by the Bundle.
This method is called by the bundle to reset the agent. It defines a bunch of actions that should be performed upon each reset. It namely calls the reset method that can be modified by the end-user of the library.
:param all: which components to reset, defaults to True
:type all: bool, optional
:param dic: reset dictionary, defaults to None.
:type dic: [type], optional
:meta private:
"""
if all:
self.policy.reset(random=random)
self.inference_engine.reset(random=random)
self.observation_engine.reset(random=random)
if not dic:
if random:
self.state.reset()
self.reset()
return
# forced reset with dic
for key in list(self.state.keys()):
value = dic.get(key)
if isinstance(value, StateElement):
self.state[key] = value
continue
elif isinstance(value, numpy.ndarray):
self.state[key][...] = value
elif value is None:
continue
else:
try:
self.state[key][
...
] = value # Give StateElement's preprocessvalues method a chance
except:
raise NotImplementedError
def reset(self):
"""reset the agent --- Override this
Override this method to specify how the components of the agent will be reset. By default, the agent will already call the reset method of all 4 components (policy, inference engine, observation engine, state). You can specify some added behavior here e.g. if you want each game to begin with a specific state value, you can specify that here. For example:
.. code-block:: python
# Sets the value of state 'x' to 123
def reset(self):
self.state["x"][...] = 123
:meta public:
"""
pass
def reset_all(self, dic=None, random=True):
"""reset the agent and all its components
In addition to running the agent's ``reset()``, ``reset_all()`` also calls state, observation engine, inference engine and policies' ``reset()`` method.
:param dic: reset dictionary, defaults to None. See the ``reset()`` method in :py:class:`Bundle<coopihc.bundle.Bundle>` for more information.
:type dic: dictionary, optional
:param random: whether states should be randomly reset, defaults to True. See the ``reset()`` method in :py:class:`Bundle<coopihc.bundle.Bundle>` for more information.
:type random: bool, optional
:meta public:
"""
self._base_reset(all=True, dic=dic, random=random)
def finit(self):
"""Finish initializing.
Method that specifies what happens when initializing the agent for the very first time (similar to __init__), but after a bundle has been initialized already. This allows to finish initializing (finit) the agent when information from another component is required to do so.
:meta public:
"""
pass
def take_action(
self,
agent_observation=None,
agent_state=None,
increment_turn=True,
):
"""Select an action
Select an action based on agent_observation and agent_state, by querying the agent's policy. If either of these arguments is not provided, then the argument is deduced from the agent's internals.
:param agent_observation: last agent observation, defaults to None. If None, gets the observation from the inference engine's buffer.
:type agent_observation: :py:class:State<coopihc.base.State>, optional
:param agent_state: current value of the agent's internal state, defaults to None. If None, gets the state from itself.
:type agent_state: :py:class:State<coopihc.base.State>, optional
:param increment_turn: whether to update bundle's turn and round
:type increment_turn: bool, optional
:meta public:
"""
try:
if increment_turn:
self.bundle.turn_number = (self.bundle.turn_number + 1) % 4
if self.bundle.turn_number == 0:
self.bundle.round_number += 1
except AttributeError: # Catch case where agent not linked to a bundle
if self.bundle is None:
pass
else: # Re-raise exception
self.bundle.turn_number = (self.bundle.turn_number + 1) % 4
return self.policy._base_sample(
agent_observation=agent_observation, agent_state=agent_state
)
def observe(
self,
game_state=None,
affect_bundle=True,
game_info={},
task_state={},
user_state={},
assistant_state={},
user_action={},
assistant_action={},
):
"""produce an observation
Produce an observation based on state information, by querying the agent's observation engine. By default, the agent will find the appropriate states to observe. To bypass this behavior, you can provide state information. When doing so, either provide the full game state, or provide the needed individual states.
The affect_bundle flag determines whether or not the observation produced this way becomes the agent's last observation.
:param game_state: the full game state as defined in the *CoopIHC* interaction model, defaults to None.
:type game_state: `:py:class:State<coopihc.base.State>`, optional
:param affect_bundle: whether or not the observation is stored and becomes the agent's last observation, defaults to True.
:type affect_bundle: bool, optional
:param game_info: game_info substate, see the *CoopIHC* interaction model, defaults to {}.
:type game_info: `:py:class:State<coopihc.base.State>`, optional
:param task_state: task_state substate, see the *CoopIHC* interaction model, defaults to {}
:type task_state: `:py:class:State<coopihc.base.State>`, optional
:param user_state: user_state substate, see the *CoopIHC* interaction model, defaults to {}
:type user_state: `:py:class:State<coopihc.base.State>`, optional
:param assistant_state: assistant_state substate, see the *CoopIHC* interaction model, defaults to {}
:type assistant_state: `:py:class:State<coopihc.base.State>`, optional
:param user_action: user_action substate, see the *CoopIHC* interaction model, defaults to {}
:type user_action: `:py:class:State<coopihc.base.State>`, optional
:param assistant_action: assistant_action substate, see the *CoopIHC* interaction model, defaults to {}
:type assistant_action: `:py:class:State<coopihc.base.State>`, optional
:meta public:
"""
if (
bool(game_info)
or bool(task_state)
or bool(user_state)
or bool(assistant_state)
or bool(user_action)
or bool(assistant_action)
):
observation, reward = self.observation_engine.observe_from_substates(
game_info=game_info,
task_state=task_state,
user_state=user_state,
assistant_state=assistant_state,
user_action=user_action,
assistant_action=assistant_action,
)
else:
observation, reward = self.observation_engine.observe(game_state=game_state)
if affect_bundle:
self.inference_engine.add_observation(observation)
return observation, reward
def infer(self, agent_observation=None, affect_bundle=True):
"""infer the agent's internal state
Infer the new agent state from the agent's observation. By default, the agent will select the agent's last observation. To bypass this behavior, you can provide a given agent_observation.
The affect_bundle flag determines whether or not the agent's internal state is actually updated.
:param agent_observation: last agent observation, defaults to None. If None, gets the observation from the inference engine's buffer.
:type agent_observation: :py:class:State<coopihc.base.State>, optional
:param affect_bundle: whether or not the agent's state is updated with the new inferred state, defaults to True.
:type affect_bundle: bool, optional
:meta public:
"""
agent_state, agent_infer_reward = self.inference_engine.infer(
agent_observation=agent_observation
)
if affect_bundle:
self.state.update(agent_state)
return agent_state, agent_infer_reward
def _agent_step(self, infer=True):
"""Play an agent's turn.
Observe the game state via the observation engine, update the internal state via the inference engine, collect rewards for both processes and return them to the caller (the bundle).
:param infer: whether inference should be performed, defaults to True
:type infer: bool, optional
:return: observation and inference rewards
:rtype: tuple(float, float)
:meta private:
"""
# agent_observation, agent_obs_reward = self.observe(self.bundle.game_state)
agent_observation, agent_obs_reward = self.observe()
if infer:
agent_state, agent_infer_reward = self.infer()
else:
agent_infer_reward = 0
return agent_obs_reward, agent_infer_reward
def prepare_action(
self,
affect_bundle=True,
game_state=None,
agent_observation=None,
increment_turn=True,
**kwargs,
):
if self.bundle is not None:
if self.bundle.turn_number != 0 and self.role == "user":
raise RuntimeError(
f"You are preparing User {self.__class__.__name__} to take an action, but the Bundle is at turn {self.bundle.turn_number} (should be 0) "
)
if self.bundle.turn_number != 2 and self.role == "assistant":
raise RuntimeError(
f"You are preparing Assistant {self.__class__.__name__} to take an action, but the Bundle is at turn {self.bundle.turn_number} (should be 2) "
)
if increment_turn:
self.bundle.turn_number = (self.bundle.turn_number + 1) % 4
agent_obs_reward = 0
if agent_observation is None:
_agent_observation, agent_obs_reward = self.observe(
affect_bundle=affect_bundle, game_state=game_state, **kwargs
)
agent_observation = _agent_observation
agent_state, agent_infer_reward = self.infer(
agent_observation=agent_observation, affect_bundle=affect_bundle
)
return agent_obs_reward + agent_infer_reward
def render(self, mode="text", ax_user=None, ax_assistant=None, ax_task=None):
"""render the agent
Displays agent information on the passed axes.
:param mode: display mode, defaults to "text". Also supports "plot".
:type mode: str, optional
:param ax_user: user axis, defaults to None
:type ax_user: Matplotlib axis, optional
:param ax_assistant: assistant axis, defaults to None
:type ax_assistant: Matplotlib axis, optional
:param ax_task: task axis, defaults to None
:type ax_task: Matplotlib axis, optional
"""
if "plot" in mode:
if self.ax is not None:
pass
else:
if self.role == "user":
self.ax = ax_user
else:
self.ax = ax_assistant
self.ax.axis("off")
self.ax.set_title(type(self).__name__ + " State")
if "text" in mode:
print(type(self).__name__ + " State")
```
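As the class docstring notes, an agent can be used on its own, outside of any Bundle, and its components can be overridden at construction time. A minimal sketch follows; whether `take_action()` works without an explicit observation depends on the policy (the default `BasePolicy` samples randomly).
```python
from coopihc.agents.BaseAgent import BaseAgent
from coopihc.policy.BasePolicy import BasePolicy

# A default user agent, outside of any Bundle.
user = BaseAgent("user")
action, policy_reward = user.take_action(increment_turn=False)

# The same agent, with its policy overridden at construction time via the
# override mechanism handled by _override_components().
other_user = BaseAgent("user", override_policy=(BasePolicy, {}))
```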
#### File: coopihc/agents/ExampleUser.py
```python
import numpy
from coopihc.agents.BaseAgent import BaseAgent
from coopihc.base.State import State
from coopihc.base.elements import discrete_array_element, array_element, cat_element
from coopihc.policy.ExamplePolicy import ExamplePolicy, PseudoRandomPolicy
class ExampleUser(BaseAgent):
"""An Example of a User.
An agent that handles the ExamplePolicy, has a single 1d state, and has the default observation and inference engines.
See the documentation of the :py:mod:`BaseAgent <coopihc.agents.BaseAgent.BaseAgent>` class for more details.
:meta public:
"""
def __init__(self, *args, **kwargs):
# Define an internal state with a 'goal' substate
state = State()
state["goal"] = discrete_array_element(init=4, low=-4, high=4)
# Define policy
action_state = State()
action_state["action"] = discrete_array_element(init=0, low=-1, high=1)
agent_policy = ExamplePolicy(action_state=action_state)
# Use default observation and inference engines
observation_engine = None
inference_engine = None
super().__init__(
"user",
*args,
agent_policy=agent_policy,
agent_observation_engine=observation_engine,
agent_inference_engine=inference_engine,
agent_state=state,
**kwargs
)
def reset(self, dic=None):
"""reset
Override the default behaviour of BaseAgent, which would randomly sample a new goal value on each reset. Here, for demonstration purposes, we impose goal = 4.
:meta public:
"""
self.state["goal"] = 4
class PseudoRandomUser(BaseAgent):
def __init__(self, *args, **kwargs):
# Define an internal state with a 'goal' substate
state = State()
state["p0"] = discrete_array_element(init=1, low=-10, high=10)
state["p1"] = discrete_array_element(init=5, low=-10, high=10)
state["p2"] = discrete_array_element(init=7, low=-10, high=10)
# Call the policy defined above
action_state = State()
action_state["action"] = discrete_array_element(init=0, N=10)
agent_policy = PseudoRandomPolicy(action_state=action_state)
# Use default observation and inference engines
observation_engine = None
inference_engine = None
super().__init__(
"user",
*args,
agent_policy=agent_policy,
agent_observation_engine=observation_engine,
agent_inference_engine=inference_engine,
agent_state=state,
**kwargs
)
class PseudoRandomUserWithParams(BaseAgent):
def __init__(self, p=[1, 1, 1], *args, **kwargs):
# Define an internal state with a 'goal' substate
self.p = p
state = State()
state["p0"] = discrete_array_element(init=p[0], low=-10, high=10)
state["p1"] = discrete_array_element(init=p[1], low=-10, high=10)
state["p2"] = discrete_array_element(init=p[2], low=-10, high=10)
# Call the policy defined above
action_state = State()
action_state["action"] = discrete_array_element(init=0, N=10)
agent_policy = PseudoRandomPolicy(action_state=action_state)
# Use default observation and inference engines
observation_engine = None
inference_engine = None
super().__init__(
"user",
*args,
agent_policy=agent_policy,
agent_observation_engine=observation_engine,
agent_inference_engine=inference_engine,
agent_state=state,
**kwargs
)
```
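A quick sketch of instantiating the parameterized user defined above (the parameter values are arbitrary):
```python
from coopihc.agents.ExampleUser import PseudoRandomUserWithParams

user = PseudoRandomUserWithParams(p=[2, 3, 5])
print(user.state)  # p0, p1 and p2 initialized to 2, 3 and 5
```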
#### File: agents/lqrcontrollers/IHCT_LQGController.py
```python
import numpy
import copy
import warnings
from coopihc.agents.BaseAgent import BaseAgent
from coopihc.observation.RuleObservationEngine import RuleObservationEngine
from coopihc.base.State import State
from coopihc.base.elements import discrete_array_element, array_element, cat_element
from coopihc.policy.LinearFeedback import LinearFeedback
from coopihc.inference.ContinuousKalmanUpdate import ContinuousKalmanUpdate
# Infinite Horizon Continuous Time LQG controller, based on Phillis 1985
class IHCT_LQGController(BaseAgent):
"""Infinite Horizon Continuous Time LQ Gaussian Controller.
An Infinite Horizon (Steady-state) LQG controller, based on [Phillis1985]_ and [Qian2013]_.
For the a task where state 'x' follows a linear noisy dynamic:
.. math::
\\begin{align}
x(+.) = (Ax(.) + Bu(.))dt + Fx(.).d\\beta + G.d\\omega + Hu(.)d\\gamma \\\\
\\end{align}
the LQG controller produces the following observations dy and commands u minimizing cost J:
.. math::
\\begin{align*}
dy & = Cxdt + Dd\\xi \\\\
d\\hat{x} & = (A \\hat{x} + Bu) dt + K (dy - C\\hat{x}dt) \\\\
u & = - L\\hat{x} \\\\
\\tilde{x} & = x - \\hat{x} \\\\
J & \simeq \\mathbb{E} [\\tilde{x}^T U \\tilde{x} + x^TQx + u^TRu]
\\end{align*}
.. [Phillis1985] <NAME>. "Controller design of systems with multiplicative noise." IEEE Transactions on Automatic Control 30.10 (1985): 1017-1019. `Link <https://ieeexplore.ieee.org/abstract/document/1103828>`_
.. [Qian2013] <NAME>, et al. "Movement duration, Fitts's law, and an infinite-horizon optimal feedback control model for biological motor systems." Neural computation 25.3 (2013): 697-724. `Link <https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.296.4312&rep=rep1&type=pdf>`_
:param role: "user" or "assistant"
:type role: string
:param timestep: duration of timestep
:type timestep: float
:param Q: State cost
:type Q: numpy.ndarray
:param R: Control cost
:type R: numpy.ndarray
:param U: Estimation error cost
:type U: numpy.ndarray
:param C: Observation matrix
:type C: numpy.ndarray
:param D: Observation noise matrix
:type D: numpy.ndarray
:param noise: whether or not to include noise, defaults to "on"
:type noise: str, optional
:param Acontroller: Representation of A for the agent. If None, the agent representation of A is equal to the task A, defaults to None.
:type Acontroller: numpy.ndarray, optional
:param Bcontroller: Representation of B for the agent. If None, the agent representation of B is equal to the task B, defaults to None.
:type Bcontroller: numpy.ndarray, optional
:param F: Representation of F for the agent. If None, the agent representation of F is equal to the task F, defaults to None.
:type F: numpy.ndarray, optional
:param G: Representation of G for the agent. If None, the agent representation of G is equal to the task G, defaults to None.
:type G: numpy.ndarray, optional
:param H: Representation of H for the agent. If None, the agent representation of H is equal to the task H, defaults to None.
:type H: numpy.ndarray, optional
"""
def __init__(
self,
role,
timestep,
Q,
R,
U,
C,
D,
*args,
noise="on",
Acontroller=None,
Bcontroller=None,
F=None,
G=None,
H=None,
**kwargs
):
self.C = C
self.Q = Q
self.R = R
self.U = U
self.D = D
self.timestep = timestep
self.role = role
# Initialize random Kalman gains
self.K = numpy.random.rand(*C.T.shape)
self.L = numpy.random.rand(1, Q.shape[1])
self.noise = noise
self.Acontroller = Acontroller
self.Bcontroller = Bcontroller
self.Fcontroller = F
self.Gcontroller = G
self.Hcontroller = H
# =================== Linear Feedback Policy ==========
action_state = State()
action_state["action"] = array_element(shape=(1, 1))
# StateElement(
# numpy.zeros((1, 1)),
# Space(
# [numpy.full((1, 1), -numpy.inf), numpy.full((1, 1), numpy.inf)],
# "continuous",
# ),
# )
# Linear Feedback with LQ reward
class LFwithLQreward(LinearFeedback):
def __init__(self, R, *args, **kwargs):
super().__init__(*args, **kwargs)
self.R = R
def sample(self, agent_observation=None, agent_state=None):
action, _ = super().sample(agent_observation=agent_observation)
return (
action,
(action.T @ self.R @ action).squeeze().tolist(),
)
agent_policy = LFwithLQreward(
self.R,
action_state,
("user_state", "xhat"),
)
# =========== Observation Engine ==============
# Rule Observation Engine with LQ reward
class RuleObswithLQreward(RuleObservationEngine):
def __init__(self, Q, *args, **kwargs):
super().__init__(*args, **kwargs)
self.Q = Q
def observe(self, game_state=None):
observation, _ = super().observe(game_state=game_state)
x = observation["task_state"]["x"].view(numpy.ndarray)
reward = x.T @ self.Q @ x
return observation, reward
# Base Spec
user_engine_specification = [
("game_info", "all"),
("task_state", "x"),
("user_state", "all"),
("assistant_state", None),
("user_action", "all"),
("assistant_action", None),
]
# Add rule for matrix observation y += Cx
def observation_linear_combination(_obs, game_state, C):
return C @ _obs
C_rule = {
("task_state", "x"): (
observation_linear_combination,
(C,),
)
}
extradeterministicrules = {}
extradeterministicrules.update(C_rule)
# Add rule for noisy observation y += D * epsilon ~ N(mu, sigma)
def additive_gaussian_noise(_obs, gamestate, D, *args):
try:
mu, sigma = args
except ValueError:
mu, sigma = numpy.zeros(_obs.shape), numpy.eye(max(_obs.shape))
return _obs + D @ numpy.random.multivariate_normal(
mu, sigma, size=1
).reshape(-1, 1)
# Instantiate previous rule so that epsilon ~ N(0, sqrt(dt))
agn_rule = {
("task_state", "x"): (
additive_gaussian_noise,
(
D,
numpy.zeros((C.shape[0], 1)).reshape(
-1,
),
numpy.sqrt(timestep) * numpy.eye(C.shape[0]),
),
)
}
extraprobabilisticrules = {}
extraprobabilisticrules.update(agn_rule)
observation_engine = RuleObswithLQreward(
self.Q,
deterministic_specification=user_engine_specification,
extradeterministicrules=extradeterministicrules,
extraprobabilisticrules=extraprobabilisticrules,
)
# ======================= Inference Engine
inference_engine = ContinuousKalmanUpdate()
super().__init__(
"user",
agent_policy=agent_policy,
agent_observation_engine=observation_engine,
agent_inference_engine=inference_engine,
)
def finit(self):
"""Get and compute needed matrices.
0. Take A, B, F, G, H from task if not provided by the end-user
1. Create an :math:`\\hat{x}` state;
2. attach the model dynamics to the inference engine if needed
3. compute K and L;
4. set K and L in inference engine and policy
"""
task = self.bundle.task
for elem, taskelem in zip(
[
"Acontroller",
"Bcontroller",
"Fcontroller",
"Gcontroller",
"Hcontroller",
],
[task.A, task.B, task.F, task.G, task.H],
):
if getattr(self, elem) is None:
setattr(self, elem, taskelem)
# ---- init xhat state
self.state["xhat"] = copy.deepcopy(self.bundle.task.state["x"])
# ---- Attach the model dynamics to the inference engine.
if not self.inference_engine.fmd_flag:
self.inference_engine.set_forward_model_dynamics(
self.Acontroller, self.Bcontroller, self.C
)
# ---- Set K and L up
mc = self._MContainer(
self.Acontroller,
self.Bcontroller,
self.C,
self.D,
self.Gcontroller,
self.Hcontroller,
self.Q,
self.R,
self.U,
)
self.K, self.L = self._compute_Kalman_matrices(mc.pass_args())
self.inference_engine.set_K(self.K)
self.policy.set_feedback_gain(self.L)
class _MContainer:
"""Matrix container
The purpose of this container is to facilitate common manipulations of the matrices of the LQG problem, as well as potentially storing their evolution. (not implemented yet)
"""
def __init__(self, A, B, C, D, G, H, Q, R, U):
self.A = A
self.B = B
self.C = C
self.D = D
self.G = G
self.H = H
self.Q = Q
self.R = R
self.U = U
self._check_matrices()
def _check_matrices(self):
# Not implemented yet
pass
def pass_args(self):
return (
self.A,
self.B,
self.C,
self.D,
self.G,
self.H,
self.Q,
self.R,
self.U,
)
def _compute_Kalman_matrices(self, matrices, N=20):
"""Compute K and L
K and L are computed according to the algorithm described in [Qian2013]_ with some minor tweaks. K and L are obtained recursively, where more and more precise estimates are obtained. At first N iterations are performed, if that fails to converge, N is grown as :math:`N^{1.3}` and K and L are recomputed.
:param matrices: (A, B, C, D, G, H, Q, R, U)
:type matrices: tuple(numpy.ndarray)
:param N: max iterations of the algorithm on first try, defaults to 20
:type N: int, optional
:return: (K, L)
:rtype: tuple(numpy.ndarray, numpy.ndarray)
"""
A, B, C, D, G, H, Q, R, U = matrices
Y = B @ H.reshape(1, -1)
Lnorm = []
Knorm = []
K = numpy.random.rand(*C.T.shape)
L = numpy.random.rand(1, A.shape[1])
for i in range(N):
Lnorm.append(numpy.linalg.norm(L))
Knorm.append(numpy.linalg.norm(K))
n, m = A.shape
Abar = numpy.block([[A - B @ L, B @ L], [numpy.zeros((n, m)), A - K @ C]])
Ybar = numpy.block([[-Y @ L, Y @ L], [-Y @ L, Y @ L]])
Gbar = numpy.block(
[[G, numpy.zeros((G.shape[0], D.shape[1]))], [G, -K @ D]]
)
V = numpy.block(
[
[Q + L.T @ R @ L, -L.T @ R @ L],
[-L.T @ R @ L, L.T @ R @ L + U],
]
)
P, p_res = self._LinRicatti(Abar, Ybar, Gbar @ Gbar.T)
S, s_res = self._LinRicatti(Abar.T, Ybar.T, V)
P22 = P[n:, n:]
S11 = S[:n, :n]
S22 = S[n:, n:]
K = P22 @ C.T @ numpy.linalg.pinv(D @ D.T)
L = numpy.linalg.pinv(R + Y.T @ (S11 + S22) @ Y) @ B.T @ S11
K, L = self._check_KL(Knorm, Lnorm, K, L, matrices)
return K, L
def _LinRicatti(self, A, B, C):
"""_LinRicatti [summary]
Returns norm of an equation of the form
.. math ::
\\begin{align}
AX + XA.T + BXB.T + C = 0
\\end{align}
:param A: See Equation above
:type A: numpy.ndarray
:param B: See Equation above
:type B: numpy.ndarray
:param C: See Equation above
:type C: numpy.ndarray
:return: X, residue
:rtype: tuple(numpy.ndarray, float)
"""
#
n, m = A.shape
nc, mc = C.shape
if n != m:
raise ValueError("Matrix A has to be square")
M = (
numpy.kron(numpy.identity(n), A)
+ numpy.kron(A, numpy.identity(n))
+ numpy.kron(B, B)
)
C = C.reshape(-1, 1)
X = -numpy.linalg.pinv(M) @ C
X = X.reshape(n, n)
C = C.reshape(nc, mc)
res = numpy.linalg.norm(A @ X + X @ A.T + B @ X @ B.T + C)
return X, res
# Counting decorator
def counted_decorator(f):
"""counted_decorator
Decorator that counts the number of times function f has been called
:param f: decorated function
:type f: function
"""
def wrapped(*args, **kwargs):
wrapped.calls += 1
return f(*args, **kwargs)
wrapped.calls = 0
return wrapped
@counted_decorator
def _check_KL(self, Knorm, Lnorm, K, L, matrices):
"""Check K and L convergence
Checks whether K and L have converged, by looking at the variations over the last 5 iterations.
:param Knorm: list of the norms of K on each iteration
:type Knorm: list(numpy.array)
:param Lnorm: list of the norms of L on each iteration
:type Lnorm: list(numpy.array)
:param K: See Equation in class docstring
:type K: numpy.array
:param L: See Equation in class docstring
:type L: numpy.array
:param matrices: Matrix container
:type matrices: _MContainer
:return: K and L
:rtype: tuple(numpy.ndarray, numpy.ndarray)
"""
average_delta = numpy.convolve(
numpy.diff(Lnorm) + numpy.diff(Knorm),
numpy.ones(5) / 5,
mode="full",
)[-5]
if average_delta > 0.01: # Arbitrary threshold
print(
"Warning: the K and L matrices computations did not converge. Retrying with a different starting point and a N={:d} search".format(
int(20 * 1.3 ** self._check_KL.calls)
)
)
K, L = self._compute_Kalman_matrices(
matrices, N=int(20 * 1.3 ** self._check_KL.calls)
)
return K, L
```
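The vectorization trick used by `_LinRicatti` above can be checked in isolation: the equation AX + XA^T + BXB^T + C = 0 is equivalent to (I ⊗ A + A ⊗ I + B ⊗ B) vec(X) = -vec(C). The matrices in the sketch below are arbitrary and only illustrate the identity.
```python
import numpy

n = 3
rng = numpy.random.default_rng(0)
A, B, C = rng.random((n, n)), 0.1 * rng.random((n, n)), rng.random((n, n))
M = (
    numpy.kron(numpy.identity(n), A)
    + numpy.kron(A, numpy.identity(n))
    + numpy.kron(B, B)
)
X = (-numpy.linalg.pinv(M) @ C.reshape(-1, 1)).reshape(n, n)
# The residual should be close to zero when M is well conditioned
print(numpy.linalg.norm(A @ X + X @ A.T + B @ X @ B.T + C))
```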
#### File: agents/lqrcontrollers/IHDT_LQRController.py
```python
from coopihc.agents.lqrcontrollers.LQRController import LQRController
import scipy.linalg
class IHDT_LQRController(LQRController):
"""Infinite Horizon Discrete Time LQR
An Infinite Horizon (i.e. planning for an infinite number of steps) Discrete Time implementation of the LQR controller. The controller is computed to minimize costs :math:`X^TQX + u^TRu`, where X is the state of the system and u is the linear feedback command :math:`u = -KX`. The feedback gain :math:`K` is given by solving the discrete ARE
.. math::
\\begin{align}
K = (R + B^TPB)^{-1}B^TPA \\text{ (gain)} \\\\
P = Q + A^TPA - A^TPB(R + B^TPB)^{-1}B^TPA \\text{ (discrete ARE)}
\\end{align}
:param role: "user" or "assistant"
:type role: string
:param Q: see :py:class:`LQRController <coopihc.agents.lqrcontrollers.LQRController.LQRController>`
:type Q: numpy.ndarray
:param R: see :py:class:`LQRController <coopihc.agents.lqrcontrollers.LQRController.LQRController>`
:type R: numpy.ndarray
:param Acontroller: Model of A used by the controller to compute K
:type Acontroller: numpy.ndarray
:param Bcontroller: Model of B used by the controller to compute K
:type Bcontroller: numpy.ndarray
"""
def __init__(self, role, Q, R, Acontroller=None, Bcontroller=None):
self.Acontroller = Acontroller
self.Bcontroller = Bcontroller
super().__init__(role, Q, R)
def finit(self):
"""finit
Uses Discrete Algebraic Ricatti Equation to get P
:meta public:
"""
task = self.bundle.task
if self.Acontroller is None:
self.Acontroller = task.A
if self.Bcontroller is None:
self.Bcontroller = task.B
A, B = self.Acontroller, self.Bcontroller
P = scipy.linalg.solve_discrete_are(A, B, self.Q, self.R)
invPart = scipy.linalg.inv((self.R + B.T @ P @ B))
K = invPart @ B.T @ P @ A
self.policy.set_feedback_gain(K)
```
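A standalone sketch of the gain computation performed in `finit()`, on a toy discrete-time double integrator (all matrix values below are illustrative):
```python
import numpy
import scipy.linalg

dt = 0.1
A = numpy.array([[1.0, dt], [0.0, 1.0]])  # toy double integrator
B = numpy.array([[0.0], [dt]])
Q = numpy.eye(2)
R = numpy.array([[0.1]])

P = scipy.linalg.solve_discrete_are(A, B, Q, R)
K = scipy.linalg.inv(R + B.T @ P @ B) @ B.T @ P @ A
print(K)  # steady-state feedback gain, u = -K x
```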
#### File: agents/lqrcontrollers/LQRController.py
```python
import numpy
from coopihc.agents.BaseAgent import BaseAgent
from coopihc.base.State import State
from coopihc.base.elements import discrete_array_element, array_element, cat_element
from coopihc.policy.LinearFeedback import LinearFeedback
from coopihc.observation.RuleObservationEngine import RuleObservationEngine
from coopihc.observation.utils import base_task_engine_specification
class LQRController(BaseAgent):
"""A Linear Quadratic Regulator.
This agent will read a state named 'x' from the task, and produce actions according to:
.. math::
\\text{action} = -K X
where K is the so-called feedback gain, which has to be specified externally. For an example, see the :py:class:`coopihc.agents.lqrcontrollers.FHDT_LQRController.FHDT_LQRController` source code.
The controller will also output observation rewards J, for state X and action u
.. math::
J = -X^t Q X - u^t R u
.. note::
This class is meant to be subclassed
.. warning::
Tested only on 1d output.
:param role: "user" or "assistant"
:type role: string
:param Q: State cost
:type Q: numpy.ndarray
:param R: Control cost
:type R: numpy.ndarray
"""
def __init__(self, role, Q, R, *args, **kwargs):
self.R = R
self.Q = Q
self.role = role
# ================== Policy ================
action_state = State()
action_state["action"] = array_element(
low=numpy.full((1,), -numpy.inf), high=numpy.full((1,), numpy.inf)
)
agent_policy = LinearFeedback(
action_state,
("task_state", "x"),
)
# ================== Observation Engine
class RuleObsWithRewards(RuleObservationEngine):
def __init__(
self,
Q,
R,
*args,
deterministic_specification=base_task_engine_specification,
extradeterministicrules={},
extraprobabilisticrules={},
mapping=None,
**kwargs
):
self.R = R
self.Q = Q
super().__init__(
*args,
deterministic_specification=base_task_engine_specification,
extradeterministicrules={},
extraprobabilisticrules={},
mapping=None,
**kwargs
)
def observe(self, game_state=None):
obs, _ = super().observe(game_state=game_state)
x = obs["task_state"]["x"].view(numpy.ndarray)
u = obs["user_action"]["action"].view(numpy.ndarray)
# Reward as in the class docstring: J = -x^T Q x - u^T R u
reward = -x.T @ self.Q @ x - u.T @ self.R @ u
return obs, reward
observation_engine = RuleObsWithRewards(
self.Q, self.R, deterministic_specification=base_task_engine_specification
)
super().__init__(
"user",
agent_policy=agent_policy,
agent_observation_engine=observation_engine,
)
def render(self, *args, **kwargs):
"""render
Displays actions selected by the LQR agent.
"""
mode = kwargs.get("mode")
if mode is None:
mode = "text"
if "plot" in mode:
axtask, axuser, axassistant = args[:3]
if self.ax is None:
self.ax = axuser
self.ax.set_xlabel("Time (s)")
self.ax.set_ylabel("Action")
if self.action:
self.ax.plot(
self.bundle.round_number * self.bundle.task.timestep,
self.action,
"bo",
)
if "text" in mode:
print("Action")
print(self.action)
```
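Since the class is meant to be subclassed, here is a minimal sketch of a subclass that supplies its feedback gain externally. The class name and the way K is obtained are illustrative, mirroring how `IHDT_LQRController.finit()` sets the gain.
```python
import numpy
from coopihc.agents.lqrcontrollers.LQRController import LQRController

class FixedGainLQRController(LQRController):
    """Toy subclass that uses a constant, externally supplied feedback gain."""

    def __init__(self, role, Q, R, K):
        self._K = K
        super().__init__(role, Q, R)

    def finit(self):
        # The gain is not recomputed from the task; it is set once here.
        self.policy.set_feedback_gain(self._K)
```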
#### File: coopihc/base/Space.py
```python
import numpy
import itertools
import warnings
class BaseSpace:
"""Base space from which other spaces inherit.
:param seed: seed used by the rng, defaults to None
:type seed: int, optional
:param dtype: dtype to which the space will be coerced, defaults to None. If None, the dtype is inferred from the supplied data.
:type dtype: numpy.dtype, optional
:param contains: how the Python ``in`` keyword is evaluated, defaults to "numpy". If "numpy", a value is considered in the space according to numpy's ``in``
:type contains: str, optional
"""
def __init__(
self,
seed=None,
contains="numpy",
):
self.seed = seed
self.contains = contains
self.rng = numpy.random.default_rng(seed)
self._shape = None
self._spacetype = None
@property
def spacetype(self):
if self._spacetype is None:
if numpy.issubdtype(self.dtype, numpy.integer):
return "discrete"
elif numpy.issubdtype(self.dtype, numpy.floating):
return "continuous"
else:
raise NotImplementedError
class Space:
"""_summary_
:param low: see `Numeric<coopihc.base.Space.Numeric>`, defaults to None
:type low: see `Numeric<coopihc.base.Space.Numeric>`, optional
:param high: see `Numeric<coopihc.base.Space.Numeric>`, defaults to None
:type high: see `Numeric<coopihc.base.Space.Numeric>`, optional
:param array: see `CatSet<coopihc.base.Space.CatSet>`, defaults to None
:type array: see `CatSet<coopihc.base.Space.CatSet>`, optional
:param N: reserved for future use, defaults to None
:type N: reserved for future use, optional
:param _function: reserved for future use, defaults to None
:type _function: reserved for future use, optional
:return: A CoopIHC space
:rtype: `Numeric<coopihc.base.Space.Numeric>` or `CatSet<coopihc.base.Space.CatSet>`
"""
def __new__(
cls,
low=None,
high=None,
array=None,
N=None,
_function=None,
seed=None,
dtype=None,
contains="numpy",
):
if low is not None and high is not None:
return Numeric(
low=numpy.asarray(low),
high=numpy.asarray(high),
seed=seed,
dtype=dtype,
contains=contains,
)
if array is not None:
return CatSet(
array=numpy.asarray(array), seed=seed, dtype=dtype, contains=contains
)
if N is not None and _function is not None:
raise NotImplementedError
raise ValueError(
"You have to specify either low and high, or a set, or N and function, but you provided low = {}, high = {}, set = {}, N = {}, function = {}".format(
low, high, array, N, _function
)
)
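# Example (added for illustration): Space() dispatches to one of the two
# concrete spaces defined below, depending on the keyword arguments:
#   Space(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)))  -> Numeric
#   Space(array=numpy.array([1, 2, 3], dtype=numpy.int16))   -> CatSet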
@staticmethod
def cartesian_product(*spaces):
"""cartesian_product
Computes the cartesian product of the spaces provided in input. For this method, continuous spaces are treated as singletons {None}.
.. code-block:: python
s = space(array=numpy.array([1, 2, 3], dtype=numpy.int16))
q = space(array=numpy.array([-3, -2, -1], dtype=numpy.int16))
cp, shape = cartesian_product(s, q)
assert (
cp
== numpy.array(
[
[1, -3],
[1, -2],
[1, -1],
[2, -3],
[2, -2],
[2, -1],
[3, -3],
[3, -2],
[3, -1],
]
)
).all()
:return: cartesian product and shape of associated spaces
:rtype: tuple(numpy.ndarray, list(tuples))
"""
arrays = []
shape = []
for space in spaces:
shape.append(space.shape)
if isinstance(space, CatSet):
arrays.append(space.array)
elif isinstance(space, Numeric):
if space.spacetype == "discrete":
arrays.append(space.array)
else:
arrays.append(numpy.array([None]))
la = len(arrays)
dtype = numpy.result_type(*arrays)
arr = numpy.empty([len(a) for a in arrays] + [la], dtype=dtype)
for i, a in enumerate(numpy.ix_(*arrays)):
arr[..., i] = a
return arr.reshape(-1, la), shape
class Numeric(BaseSpace):
"""Numeric
An interval that defines the space for a StateElement.
You can define an Numeric by specifying the lower and upper bounds:
.. code-block:: python
s = Numeric(
low=-numpy.ones((2, 2), dtype=numpy.float32),
high=numpy.ones((2, 2), dtype=numpy.float32),
)
assert s.dtype == numpy.float32
assert (s.high == numpy.ones((2, 2))).all()
assert (s.low == -numpy.ones((2, 2))).all()
assert s.shape == (2, 2)
You can further set the seed of the space (useful when sampling from the space), force the dtype of the space and specify how membership to the space is checked via the keyword arguments. See ``BaseSpace`` for more information.
.. note::
lower and upper bounds must be valid numpy objects. For example, to specify a 0-D space, you should do: ``Numeric(low = -numpy.float64(1), high = numpy.float64(1))``
:param low: lower bound, defaults to -numpy.array([1])
:type low: numpy.ndarray, optional
:param high: upper bound, defaults to numpy.array([1])
:type high: numpy.ndarray, optional
"""
def __init__(
self,
low=-numpy.array([1]),
high=numpy.array([1]),
seed=None,
dtype=None,
contains="numpy",
):
if dtype is not None:
self._dtype = numpy.dtype(dtype)
else:
self._dtype = None
low = numpy.asarray(low)
high = numpy.asarray(high)
self._N = None
self._array = None
if low is not None and high is not None:
if low.shape != high.shape:
raise ValueError(
"Low and high must have the same shape, but low and high have shape {} and {}".format(
low.shape, high.shape
)
)
self.low, self.high = low, high
super().__init__(seed=seed, contains=contains)
# converting numpy.inf to integer is not standardized and
# self.low = low.astype(self.dtype)
# self.high = high.astype(self.dtype)
# will not work
# Currently, it will give -2**(nbits) /2 for both numpy.inf and -numpy.inf. Hack below
if numpy.issubdtype(self.dtype, numpy.integer):
self.low = numpy.nan_to_num(self.low, neginf=numpy.iinfo(self.dtype).min)
self.high = numpy.nan_to_num(self.high, posinf=numpy.iinfo(self.dtype).max)
self.low = self.low.astype(self.dtype)
self.high = self.high.astype(self.dtype)
@property
def shape(self):
"""shape
Returns the numpy shape of the bounds.
"""
self._shape = self.low.shape
return self._shape
@property
def dtype(self):
"""dtype
Determines the numpy dtype of data contained in the space.
.. note::
If you input two different integer dtypes, the result will be a numpy.float64, per https://numpy.org/doc/stable/reference/generated/numpy.common_type.html
:return: dtype of the data contained in the space
:rtype: numpy.dtype
"""
if self._dtype is None:
if self.low.dtype == self.high.dtype:
self._dtype = numpy.dtype(self.low.dtype)
else:
self._dtype = numpy.dtype(numpy.common_type(self.low, self.high))
self.low = self.low.astype(self._dtype)
self.high = self.high.astype(self._dtype)
return self._dtype
@property
def N(self):
if self._N is None:
if numpy.issubdtype(self.dtype, numpy.integer):
self._N = self.high - self.low + 1
return self._N
@property
def array(self):
if self._array is None:
if numpy.issubdtype(self.dtype, numpy.integer):
self._array = numpy.linspace(
self.low, self.high, num=self.N, endpoint=True, dtype=self.dtype
)
return self._array
def __iter__(self):
"""__iter__"""
self._iter_low = iter(self.low)
self._iter_high = iter(self.high)
return self
def __next__(self):
"""__next__"""
return type(self)(
low=next(self._iter_low),
high=next(self._iter_high),
seed=self.seed,
dtype=self.dtype,
contains=self.contains,
)
def __getitem__(self, key):
"""__getitem__
Numpy Indexing is valid:
.. code-block:: python
s = Numeric(
low=-numpy.ones((2, 2), dtype=numpy.float32),
high=numpy.ones((2, 2), dtype=numpy.float32),
)
assert s[0] == Numeric(
low=-numpy.ones((2,), dtype=numpy.float32),
high=numpy.ones((2,), dtype=numpy.float32),
)
s = Numeric(
low=-numpy.ones((2, 2), dtype=numpy.float32),
high=numpy.ones((2, 2), dtype=numpy.float32),
)
assert s[:, 0] == Numeric(
low=-numpy.ones((2,), dtype=numpy.float32),
high=numpy.ones((2,), dtype=numpy.float32),
)
assert s[0, :] == Numeric(
low=-numpy.ones((2,), dtype=numpy.float32),
high=numpy.ones((2,), dtype=numpy.float32),
)
assert s[:, :] == s
assert s[...] == s
"""
return type(self)(
low=self.low[key],
high=self.high[key],
seed=self.seed,
dtype=self.dtype,
contains=self.contains,
)
def __eq__(self, other):
"""__eq__
.. code-block:: python
s = Numeric(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)))
assert s == Numeric(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)))
assert s != Numeric(low=-1.5 * numpy.ones((2, 2)), high=2 * numpy.ones((2, 2)))
assert s != Numeric(low=-numpy.ones((1,)), high=numpy.ones((1,)))
:param other: space to compare to
:type other: Numeric
"""
if not isinstance(other, type(self)):
return False
return (
self.shape == other.shape
and (self.low == other.low).all()
and (self.high == other.high).all()
and self.dtype == other.dtype
)
def __contains__(self, item, mode=None):
"""Check whether ``item`` belongs to the space
.. code-block:: python
s = Numeric(
low=-numpy.ones((2, 2)),
high=numpy.ones((2, 2)),
)
assert [0.0, 0.0, 0.0, 0.0] not in s
assert [[0.0, 0.0], [0.0, 0.0]] in s
assert numpy.array([0.0, 0.0, 0.0, 0.0]) not in s
assert numpy.array([[0.0, 0.0], [0.0, 0.0]]) in s
assert 1.0 * numpy.ones((2, 2)) in s
assert -1.0 * numpy.ones((2, 2)) in s
assert numpy.ones((2, 2), dtype=numpy.int16) in s
:param item: item
:type item: numpy.ndarray
:param mode: see "contains" keyword argument, defaults to None
:type mode: string, optional
"""
if mode is None:
mode = self.contains
if mode == "numpy":
try:
return numpy.all(item >= self.low) and numpy.all(item <= self.high)
except:
return False
else:
raise NotImplementedError
def __repr__(self):
if self.seed is None:
return f"{type(self).__name__}([{self.low}, {self.high}]) -- {self.dtype}"
else:
return f"{type(self).__name__}([{self.low}, {self.high}]) -- {self.dtype} -- seed: {self.seed}"
def __flat__(self):
if self.seed is None:
return f"{type(self).__name__}({self.shape}) -- {self.dtype}"
else:
return f"{type(self).__name__}({self.shape}) -- {self.dtype} -- seed: {self.seed}"
def sample(self):
"""sample
        Generate values by sampling from the interval: sampling is uniform over the integers [low, high] for integer dtypes, and uniform over the continuous interval otherwise. You can seed the sampling, see keyword arguments at init.
.. code-block:: python
s = Numeric(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)), seed=123)
q = Numeric(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)), seed=123)
r = Numeric(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)), seed=12)
_s, _q, _r = s.sample(), q.sample(), r.sample()
assert _s in s
assert _q in q
assert _r in r
assert (_s == _q).all()
assert (_s != _r).any()
"""
if numpy.issubdtype(self.dtype, numpy.integer):
return self.rng.integers(
low=self.low, high=self.high, endpoint=True, dtype=self.dtype.type
)
else:
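            # nan_to_num replaces infinite bounds with the finite placeholder 1,
            # so the scaled uniform draw below always stays finite.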
return numpy.nan_to_num(
(self.high - self.low), nan=1, posinf=1
) * self.rng.random(self.shape, dtype=self.dtype.type) + numpy.nan_to_num(
self.low, neginf=1
)
def serialize(self):
"""serialize to JSON"""
return {
"space": type(self).__name__,
"seed": self.seed,
"low,high": [self.low.tolist(), self.high.tolist()],
"shape": self.shape,
"dtype": self.dtype.__class__.__name__,
}
class CatSet(BaseSpace):
"""Categorical Set
    A categorical set defined explicitly. Use this for data where traditional distance is meaningless, i.e. when 1 is not closer to 0 than to 5.
    Performance of this object may degrade for large dimensions, because the whole array is stored in memory.
.. code-block:: python
s = space(array=numpy.array([1, 2, 3], dtype=numpy.int16))
assert s.dtype == numpy.int16
assert s.N == 3
assert s.shape == ()
You can further set the seed of the space (useful when sampling from the space), force the dtype of the space and specify how membership to the space is checked via the keyword arguments. See ``BaseSpace`` for more information.
:param array: set of values, defaults to None
:type array: numpy.ndarray, optional
"""
def __init__(self, array=None, seed=None, dtype=None, contains="numpy"):
self.array = array
if dtype is not None:
if not numpy.issubdtype(dtype, numpy.integer):
raise ValueError("dtype has to be an integer type")
self._dtype = numpy.dtype(dtype)
else:
self._dtype = None
super().__init__(seed=seed, contains=contains)
self.array = array.astype(self.dtype)
@property
def N(self):
"""Cardinality of the set"""
return len(self.array)
@property
def dtype(self):
"""numpy.dtype of data"""
if self._dtype is None:
if self.array is not None:
self._dtype = numpy.dtype(self.array.dtype)
if not numpy.issubdtype(self._dtype, numpy.integer):
self._dtype = numpy.dtype(numpy.int64)
return self._dtype
@property
def shape(self):
"""numpy shape of the data that belongs to the set"""
if self._shape is None:
self._shape = ()
return self._shape
@property
def low(self): # Should be removed, doesn't make sense
return self.array[0]
@property
def high(self): # Should be removed, doesn't make sense
return self.array[-1]
def __iter__(self):
"""__iter__"""
return self
def __next__(self):
"""__next__"""
raise StopIteration
def __getitem__(self, key):
"""__getitem__
The set can not be separated into different elements, and indexing over the set is only possible in edge cases:
.. code-block:: python
s = CatSet(array=numpy.array([1, 2, 3], dtype=numpy.int16))
s[0] # raises a ``SpaceNotSeparableError``
assert s[...] == s
assert s[:] == s
"""
if key == Ellipsis:
return self
if key == slice(None, None, None):
return self
from coopihc.base.utils import SpaceNotSeparableError
raise SpaceNotSeparableError("This space is not separable")
def __eq__(self, other):
"""__eq__
.. code-block:: python
s = CatSet(array=numpy.array([1, 2, 3], dtype=numpy.int16))
assert s == space(array=numpy.array([1, 2, 3], dtype=numpy.int16))
assert s != space(array=numpy.array([1, 2, 3, 4], dtype=numpy.int16))
:param other: other space
:type other: CatSet
"""
if not isinstance(other, type(self)):
return False
try:
return (self.array == other.array).all() and self.dtype == other.dtype
except AttributeError:
return self.array == other.array and self.dtype == other.dtype
def __contains__(self, item, mode=None):
"""__contains__
Checks if item belong to the space. By default, this check is done leniently, according to Numpy __contains__, see kwargs in init for more information.
.. code-block:: python
s = CatSet(array=numpy.array([1, 2, 3], dtype=numpy.int16))
assert 1 in s
assert [1] in s
assert [[1]] in s
assert numpy.array(1) in s
assert numpy.array([1]) in s
assert numpy.array([[1]]) in s
assert numpy.array([2]) in s
assert numpy.array([3]) in s
assert numpy.array([1.0]) in s
assert numpy.array([2]) in s
"""
if mode is None:
mode = self.contains
if mode == "numpy":
return item in self.array
else:
raise NotImplementedError
def __repr__(self):
return f"{type(self).__name__}({self.array})"
def __flat__(self):
return f"{type(self).__name__}({self.name})"
def sample(self):
"""sample
Generate values by sampling uniformly from the set. You can set the seed to the rng, see keyword arguments at init.
.. code-block:: python
s = CatSet(array=numpy.arange(1000), seed=123)
q = CatSet(array=numpy.arange(1000), seed=123)
r = CatSet(array=numpy.arange(1000), seed=12)
_s, _q, _r = s.sample(), q.sample(), r.sample()
assert _s in s
assert _q in q
assert _r in r
assert _s == _q
assert _s != _r
"""
return self.rng.choice(self.array)
def serialize(self):
return {
"space": type(self).__name__,
"seed": self.seed,
"array": self.array.tolist(),
"dtype": self.dtype.__class__.__name__,
}
```
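A short usage sketch of the two space classes above, assembled from their own docstring examples. The import path is an assumption, inferred from the ``from coopihc.base.Space import Space`` statement used in the other files.

```python
import numpy

# Assumed import path, see note above.
from coopihc.base.Space import Numeric, CatSet

# Continuous interval space: float sampling is uniform over the interval.
s = Numeric(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)), seed=123)
assert s.sample() in s
# Numpy-style indexing returns sub-spaces.
assert s[0] == Numeric(low=-numpy.ones((2,)), high=numpy.ones((2,)))

# Explicit categorical set: integer dtypes only, uniform sampling over the set.
c = CatSet(array=numpy.array([1, 2, 3], dtype=numpy.int16), seed=123)
assert c.N == 3 and c.shape == ()
assert c.sample() in c
```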
#### File: coopihc/bundle/BaseBundle.py
```python
from random import random
from coopihc.base.State import State
from coopihc.base.elements import discrete_array_element, array_element, cat_element
import numpy
import yaml
import matplotlib.pyplot as plt
import copy
class BaseBundle:
"""Main class for bundles.
Main class for bundles. This class is subclassed by Bundle, which defines the interface with which to interact.
A bundle combines a task with a user and an assistant. The bundle creates the ``game_state`` by combining the task, user and assistant states with the turn index and both agent's actions.
The bundle takes care of all the messaging between classes, making sure the gamestate and all individual states are synchronized at all times.
    The bundle implements a forced reset mechanism, where each state of the bundle can be forced to a particular state via a dictionary mechanism (see :py:func:reset)
The bundle also takes care of rendering each of the three component in a single place.
:param task: (:py:class:`coopihc.interactiontask.InteractionTask.InteractionTask`) A task that inherits from ``InteractionTask``
:param user: (:py:class:`coopihc.agents.BaseAgent.BaseAgent`) a user which inherits from ``BaseAgent``
:param assistant: (:py:class:`coopihc.agents.BaseAgent.BaseAgent`) an assistant which inherits from ``BaseAgent``
:meta public:
"""
turn_dict = {
"after_assistant_action": 0,
"before_user_action": 1,
"after_user_action": 2,
"before_assistant_action": 3,
}
def __init__(
self,
task,
user,
assistant,
*args,
reset_random=False,
reset_start_after=-1,
reset_go_to=0,
**kwargs,
):
self._reset_random = reset_random
self._reset_start_after = reset_start_after
self._reset_go_to = reset_go_to
self.kwargs = kwargs
self.task = task
self.task.bundle = self
self.user = user
self.user.bundle = self
self.assistant = assistant
self.assistant.bundle = self
# Form complete game state
self.game_state = State()
turn_index = cat_element(
N=4, init=0, out_of_bounds_mode="raw", dtype=numpy.int8
)
round_index = discrete_array_element(
init=0, low=0, high=numpy.iinfo(numpy.int64).max, out_of_bounds_mode="raw"
)
self.game_state["game_info"] = State()
self.game_state["game_info"]["turn_index"] = turn_index
self.game_state["game_info"]["round_index"] = round_index
self.game_state["task_state"] = task.state
self.game_state["user_state"] = user.state
self.game_state["assistant_state"] = assistant.state
        # Small caveat: action states cannot be accessed via the game_state during finit; go through the agent instead. This is due to the current way the game_state is created.
self.task.finit()
self.user.finit()
self.assistant.finit()
if user.policy is not None:
self.game_state["user_action"] = user.policy.action_state
else:
self.game_state["user_action"] = State()
self.game_state["user_action"]["action"] = array_element()
if assistant.policy is not None:
self.game_state["assistant_action"] = assistant.policy.action_state
else:
self.game_state["assistant_action"] = State()
self.game_state["assistant_action"]["action"] = array_element()
# This will not work sometimes
# self.task.finit()
# self.user.finit()
# self.assistant.finit()
# Needed for render
self.active_render_figure = None
self.figure_layout = [211, 223, 224]
self.rendered_mode = None
self.render_perm = False
self.playspeed = 0.1
def __repr__(self):
"""__repr__
Pretty representation for Bundles.
:return: pretty bundle print
:rtype: string
"""
return "{}\n".format(self.__class__.__name__) + yaml.safe_dump(
self.__content__()
)
def __content__(self):
"""__content__
Custom class representation
:return: class repr
:rtype: dictionnary
"""
return {
"Task": self.task.__content__(),
"User": self.user.__content__(),
"Assistant": self.assistant.__content__(),
}
@property
def parameters(self):
return {
**self.task._parameters,
**self.user._parameters,
**self.assistant._parameters,
}
@property
def turn_number(self):
"""turn_number
The turn number in the game (0 to 3)
:return: turn number
:rtype: numpy.ndarray
"""
return self.game_state["game_info"]["turn_index"]
@turn_number.setter
def turn_number(self, value):
self._turn_number = value
self.game_state["game_info"]["turn_index"] = value
@property
def round_number(self):
"""round_number
        The round number in the game (0 to N)
        :return: round number
        :rtype: numpy.ndarray
"""
return self.game_state["game_info"]["round_index"]
@round_number.setter
def round_number(self, value):
self._round_number = value
self.game_state["game_info"]["round_index"] = value
@property
def state(self):
return self.game_state
def reset(
self,
go_to=None,
start_after=None,
task=True,
user=True,
assistant=True,
dic={},
random_reset=False,
):
"""Reset bundle.
1. Reset the game and start at a specific turn number.
2. select which components to reset
        3. forced reset mechanism using dictionaries
Example:
.. code-block:: python
            new_target_value = self.game_state["task_state"]["targets"]
            new_fixation_value = self.game_state["task_state"]["fixation"]
            reset_dic = {"task_state": {"targets": new_target_value, "fixation": new_fixation_value}}
            self.reset(dic=reset_dic, go_to=1)
Will set the substates "targets" and "fixation" of state "task_state" to some value.
.. note ::
If subclassing BaseBundle, make sure to call super().reset() in the new reset method.
        :param go_to: turn number at which the reset ends. Can also be set globally at the bundle level via the "reset_go_to" keyword argument, defaults to 0
        :type go_to: int, optional
        :param start_after: turn after which the reset starts replaying the game (allows skipping some turns during reset), defaults to -1
        :type start_after: int, optional
:param task: reset task?, defaults to True
:type task: bool, optional
:param user: reset user?, defaults to True
:type user: bool, optional
:param assistant: reset assistant?, defaults to True
:type assistant: bool, optional
:param dic: reset_dic, defaults to {}
:type dic: dict, optional
:param random_reset: whether during resetting values should be randomized or not if not set by a reset dic, default to False
:type random_reset: bool, optional
:return: new game state
:rtype: :py:class:`State<coopihc.base.State.State>`
"""
if go_to is None:
go_to = self._reset_go_to
if start_after is None:
start_after = self._reset_start_after
random_reset = self._reset_random or random_reset
if task:
task_dic = dic.get("task_state")
self.task._base_reset(
dic=task_dic,
random=random_reset,
)
if user:
user_dic = dic.get("user_state")
self.user._base_reset(
dic=user_dic,
random=random_reset,
)
if assistant:
assistant_dic = dic.get("assistant_state")
self.assistant._base_reset(
dic=assistant_dic,
random=random_reset,
)
self.round_number = 0
if not isinstance(go_to, (numpy.integer, int)):
go_to = self.turn_dict[go_to]
if not isinstance(start_after, (numpy.integer, int)):
start_after = self.turn_dict[start_after]
self.turn_number = go_to
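        # Replay the turns between start_after and go_to so that observations and
        # actions in the game state are consistent with the freshly reset states.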
if go_to == 0 and start_after + 1 == 0:
return self.game_state
if start_after <= go_to:
if go_to >= 1 and start_after + 1 <= 1:
self._user_first_half_step()
if go_to >= 2 and start_after + 1 <= 2:
user_action, _ = self.user.take_action(increment_turn=False)
self.user.action = user_action
self._user_second_half_step(user_action)
if go_to >= 3 and start_after + 1 <= 3:
self._assistant_first_half_step()
else:
raise ValueError(
f"start_after ({start_after}) can not be after go_to ({go_to}). You can likely use a combination of reset and step to achieve what you are looking for"
)
return self.game_state
def quarter_step(self, user_action=None, assistant_action=None, **kwargs):
return self.step(
user_action=user_action,
assistant_action=assistant_action,
go_to=(int(self.turn_number) + 1) % 4,
)
def step(self, user_action=None, assistant_action=None, go_to=None, **kwargs):
"""Play a round
Play a round of the game. A round consists in 4 turns. If go_to is not None, the round is only played until that turn.
If a user action and assistant action are passed as arguments, then these are used as actions to play the round. Otherwise, these actions are sampled from each agent's policy.
:param user action: user action
:type: any
:param assistant action: assistant action
:type: any
:param go_to: turn at which round stops, defaults to None
:type go_to: int, optional
:return: gamestate, reward, game finished flag
:rtype: tuple(:py:class:`State<coopihc.base.State.State>`, collections.OrderedDict, boolean)
"""
if go_to is None:
go_to = int(self.turn_number)
if not isinstance(go_to, (numpy.integer, int)):
go_to = self.turn_dict[go_to]
_started = False
rewards = {}
rewards["user_observation_reward"] = 0
rewards["user_inference_reward"] = 0
rewards["user_policy_reward"] = 0
rewards["first_task_reward"] = 0
rewards["assistant_observation_reward"] = 0
rewards["assistant_inference_reward"] = 0
rewards["assistant_policy_reward"] = 0
rewards["second_task_reward"] = 0
while self.turn_number != go_to or (not _started):
_started = True
# User observes and infers
if self.turn_number == 0 and "no-user" != self.kwargs.get("name"):
(
user_obs_reward,
user_infer_reward,
) = self._user_first_half_step()
(
rewards["user_observation_reward"],
rewards["user_inference_reward"],
) = (user_obs_reward, user_infer_reward)
# User takes action and receives reward from task
elif self.turn_number == 1 and "no-user" != self.kwargs.get("name"):
if user_action is None:
user_action, user_policy_reward = self.user.take_action(
increment_turn=False
)
else:
self.user.action = user_action
user_policy_reward = 0
task_reward, is_done = self._user_second_half_step(user_action)
rewards["user_policy_reward"] = user_policy_reward
rewards["first_task_reward"] = task_reward
if is_done:
return self.game_state, rewards, is_done
elif self.turn_number == 2 and "no-assistant" == self.kwargs.get("name"):
self.round_number = self.round_number + 1
# Assistant observes and infers
elif self.turn_number == 2 and "no-assistant" != self.kwargs.get("name"):
(
assistant_obs_reward,
assistant_infer_reward,
) = self._assistant_first_half_step()
(
rewards["assistant_observation_reward"],
rewards["assistant_inference_reward"],
) = (assistant_obs_reward, assistant_infer_reward)
# Assistant takes action and receives reward from task
elif self.turn_number == 3 and "no-assistant" != self.kwargs.get("name"):
if assistant_action is None:
(
assistant_action,
assistant_policy_reward,
) = self.assistant.take_action(increment_turn=False)
else:
self.assistant.action = assistant_action
assistant_policy_reward = 0
task_reward, is_done = self._assistant_second_half_step(
assistant_action
)
rewards["assistant_policy_reward"] = assistant_policy_reward
rewards["second_task_reward"] = task_reward
if is_done:
return self.game_state, rewards, is_done
self.round_number = self.round_number + 1
self.turn_number = (self.turn_number + 1) % 4
return self.game_state, rewards, False
def render(self, mode, *args, **kwargs):
"""render
Combines all render methods.
:param mode: "text" or "plot"
:param type: string
:meta public:
"""
self.rendered_mode = mode
if "text" in mode:
print("\n")
print("Round number {}".format(self.round_number.tolist()))
print("Task Render")
self.task.render(mode="text", *args, **kwargs)
print("User Render")
self.user.render(mode="text", *args, **kwargs)
print("Assistant Render")
self.assistant.render(mode="text", *args, **kwargs)
if "log" in mode:
self.task.render(mode="log", *args, **kwargs)
self.user.render(mode="log", *args, **kwargs)
self.assistant.render(mode="log", *args, **kwargs)
if "plot" in mode:
if self.active_render_figure:
plt.pause(self.playspeed)
self.task.render(
ax_task=self.axtask,
ax_user=self.axuser,
ax_assistant=self.axassistant,
mode="plot",
**kwargs,
)
self.user.render(
ax_task=self.axtask,
ax_user=self.axuser,
ax_assistant=self.axassistant,
mode="plot",
**kwargs,
)
self.assistant.render(
ax_task=self.axtask,
ax_user=self.axuser,
ax_assistant=self.axassistant,
mode="plot",
**kwargs,
)
self.fig.canvas.draw()
else:
self.active_render_figure = True
self.fig = plt.figure()
self.axtask = self.fig.add_subplot(self.figure_layout[0])
self.axtask.set_title("Task State")
self.axuser = self.fig.add_subplot(self.figure_layout[1])
self.axuser.set_title("User State")
self.axassistant = self.fig.add_subplot(self.figure_layout[2])
self.axassistant.set_title("Assistant State")
self.task.render(
ax_task=self.axtask,
ax_user=self.axuser,
ax_assistant=self.axassistant,
mode="plot",
**kwargs,
)
self.user.render(
ax_task=self.axtask,
ax_user=self.axuser,
ax_assistant=self.axassistant,
mode="plot",
**kwargs,
)
self.assistant.render(
ax_task=self.axtask,
ax_user=self.axuser,
ax_assistant=self.axassistant,
mode="plot",
**kwargs,
)
self.fig.show()
plt.tight_layout()
if not ("plot" in mode or "text" in mode):
self.task.render(None, mode=mode, *args, **kwargs)
self.user.render(None, mode=mode, *args, **kwargs)
self.assistant.render(None, mode=mode, *args, **kwargs)
def close(self):
"""close
Close the bundle once the game is finished.
"""
if self.active_render_figure:
plt.close(self.fig)
# self.active_render_figure = None
def _user_first_half_step(self):
"""_user_first_half_step
Turn 1, where the user observes the game state and updates its state via inference.
:return: user observation and inference reward
:rtype: tuple(float, float)
"""
if not self.kwargs.get("onreset_deterministic_first_half_step"):
user_obs_reward, user_infer_reward = self.user._agent_step()
else:
# Store the probabilistic rules
store = self.user.observation_engine.extraprobabilisticrules
# Remove the probabilistic rules
self.user.observation_engine.extraprobabilisticrules = {}
# Generate an observation without generating an inference
user_obs_reward, user_infer_reward = self.user._agent_step(infer=False)
# Reposition the probabilistic rules, and reset mapping
self.user.observation_engine.extraprobabilisticrules = store
self.user.observation_engine.mapping = None
self.kwargs["onreset_deterministic_first_half_step"] = False
return user_obs_reward, user_infer_reward
def _user_second_half_step(self, user_action):
"""_user_second_half_step
        Turn 2, where the user takes an action.
        :param user_action: user action
        :type user_action: Any
:return: task reward, task done?
:rtype: tuple(float, boolean)
"""
# Play user's turn in the task
task_state, task_reward, is_done = self.task.base_on_user_action(
user_action=user_action
)
return task_reward, is_done
def _assistant_first_half_step(self):
"""_assistant_first_half_step
Turn 3, where the assistant observes the game state and updates its state via inference.
:return: assistant observation and inference reward
:rtype: tuple(float, float)
"""
(
assistant_obs_reward,
assistant_infer_reward,
) = self.assistant._agent_step()
return assistant_obs_reward, assistant_infer_reward
def _assistant_second_half_step(self, assistant_action):
"""_assistant_second_half_step
Turn 4, where the assistant takes an action.
:param user_action: assistant action
:param type: Any
:return: task reward, task done?
:rtype: tuple(float, boolean)
"""
# Play assistant's turn in the task
task_state, task_reward, is_done = self.task.base_on_assistant_action(
assistant_action=assistant_action
)
return task_reward, is_done
def _on_user_action(self, *args):
"""Turns 1 and 2
:param \*args: either provide the user action or not. If no action is provided the action is determined by the agent's policy using sample()
:param type: (None or list)
:return: user observation, inference, policy and task rewards, game is done flag
:return type: tuple(float, float, float, float, bool)
"""
user_obs_reward, user_infer_reward = self._user_first_half_step()
try:
# If human input is provided
user_action = args[0]
except IndexError:
# else sample from policy
user_action, user_policy_reward = self.user.take_action(
increment_turn=False
)
self.user.action = user_action
task_reward, is_done = self._user_second_half_step(user_action)
return (
user_obs_reward,
user_infer_reward,
user_policy_reward,
task_reward,
is_done,
)
def _on_assistant_action(self, *args):
"""Turns 3 and 4
:param \*args: either provide the assistant action or not. If no action is provided the action is determined by the agent's policy using sample()
:param type: (None or list)
:return: assistant observation, inference, policy and task rewards, game is done flag
:return type: tuple(float, float, float, float, bool)
"""
(
assistant_obs_reward,
assistant_infer_reward,
) = self._assistant_first_half_step()
try:
# If human input is provided
assistant_action = args[0]
except IndexError:
# else sample from policy
(
assistant_action,
assistant_policy_reward,
) = self.assistant.take_action(increment_turn=False)
self.assistant.action = assistant_action
task_reward, is_done = self._assistant_second_half_step(assistant_action)
return (
assistant_obs_reward,
assistant_infer_reward,
assistant_policy_reward,
task_reward,
is_done,
)
```
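A minimal sketch of the forced-reset dictionary described in ``reset()`` above. Only the dictionary is built here; the substate names ("targets", "goal") are hypothetical and depend on the concrete task and user, and ``bundle`` stands for an already constructed bundle instance.

```python
# Hypothetical substate names; adapt them to the task and user actually bundled.
reset_dic = {
    "task_state": {"targets": 5},
    "user_state": {"goal": 5},
}
# bundle.reset(dic=reset_dic, go_to="after_user_action")  # `bundle` is a placeholder
```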
#### File: examples/basic_examples/policy_examples.py
```python
from coopihc.policy.ExamplePolicy import ExamplePolicy
from coopihc.base.elements import cat_element
from coopihc.base.State import State
from coopihc.policy.ELLDiscretePolicy import ELLDiscretePolicy
## ==================== ExamplePolicy
ep = ExamplePolicy()
## ==================== ELLDiscretePolicy
# [start-elld-def-model]
# Define the likelihood model
def likelihood_model(self, action, observation, *args, **kwargs):
if action == 0:
return 1 / 7
elif action == 1:
return 1 / 7 + 0.05
elif action == 2:
return 1 / 7 - 0.05
elif action == 3:
return 1 / 7 + 0.1
elif action == 4:
return 1 / 7 - 0.1
elif action == 5:
return 1 / 7 + 0.075
elif action == 6:
return 1 / 7 - 0.075
else:
raise RuntimeError(
"warning, unable to compute likelihood. You may have not covered all cases in the likelihood definition"
)
# [end-elld-def-model]
# [start-elld-attach]
_seed = 123
se = cat_element(init=1, N=7)
action_state = State(**{"action": se})
policy = ELLDiscretePolicy(action_state, seed=_seed)
# Attach the model
policy.attach_likelihood_function(likelihood_model)
# [end-elld-attach]
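# Sanity check on the likelihood model defined above (plain Python, no CoopIHC
# objects needed): the seven action likelihoods sum to 1.
_probs = [likelihood_model(None, _action, None) for _action in range(7)]
assert abs(sum(_probs) - 1) < 1e-9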
```
#### File: examples/simplepointing/assistants.py
```python
import coopihc
from coopihc.agents.BaseAgent import BaseAgent
from coopihc.policy.BIGDiscretePolicy import BIGDiscretePolicy
from coopihc.inference.GoalInferenceWithUserPolicyGiven import (
GoalInferenceWithUserPolicyGiven,
)
from coopihc.base.Space import Space
from coopihc.base.StateElement import StateElement
from coopihc.base.elements import discrete_array_element, array_element, cat_element
import numpy
import copy
class ConstantCDGain(BaseAgent):
"""A Constant CD Gain transfer function.
Here the assistant just picks a fixed modulation.
:param gain: (float) constant CD gain.
:meta public:
"""
def __init__(self, gain):
self.gain = gain
super().__init__("assistant")
def finit(self):
self.policy.action_state["action"] = array_element(
init=self.gain,
low=numpy.full((1, self.bundle.task.dim), self.gain),
high=numpy.full((1, self.bundle.task.dim), self.gain),
)
class BIGGain(BaseAgent):
def __init__(self):
super().__init__(
"assistant", agent_inference_engine=GoalInferenceWithUserPolicyGiven() #
)
def finit(self):
action_state = self.bundle.game_state["assistant_action"]
action_state["action"] = discrete_array_element(
init=0, low=0, high=self.bundle.task.gridsize, out_of_bounds_mode="error"
)
user_policy_model = copy.deepcopy(self.bundle.user.policy)
agent_policy = BIGDiscretePolicy(action_state, user_policy_model)
self.attach_policy(agent_policy)
self.inference_engine.attach_policy(user_policy_model)
self.state["beliefs"] = array_element(
init=1 / self.bundle.task.number_of_targets,
low=numpy.zeros((self.bundle.task.number_of_targets,)),
high=numpy.ones((self.bundle.task.number_of_targets,)),
            out_of_bounds_mode="error",
)
def reset(self, dic=None):
self.state["beliefs"][...] = numpy.array(
[
1 / self.bundle.task.number_of_targets
for i in range(self.bundle.task.number_of_targets)
]
)
# change theta for inference engine
set_theta = [
{
("user_state", "goal"): discrete_array_element(
init=t, low=0, high=self.bundle.task.gridsize
)
}
for t in self.bundle.task.state["targets"]
]
self.inference_engine.attach_set_theta(set_theta)
self.policy.attach_set_theta(set_theta)
def transition_function(assistant_action, observation):
"""What future observation will the user see due to assistant action"""
# always do this
observation["assistant_action"]["action"] = assistant_action
# specific to BIGpointer
observation["task_state"]["position"] = assistant_action
return observation
self.policy.attach_transition_function(transition_function)
def render(self, *args, **kwargs):
mode = kwargs.get("mode")
if mode is None:
mode = "text"
try:
axtask, axuser, axassistant = args
self.inference_engine.render(axassistant, mode=mode)
except ValueError:
self.inference_engine.render(mode=mode)
```
#### File: coopihc/inference/BaseInferenceEngine.py
```python
from collections import OrderedDict
# Base Inference Engine: does nothing but return the same state. Any new inference method can subclass InferenceEngine to have a buffer and add_observation method (required by the bundle)
class BaseInferenceEngine:
"""BaseInferenceEngine
The base Inference Engine from which other engines can be defined. This engine does nothing but return the same state. Any new inference method can subclass ``InferenceEngine`` to have a buffer and ``add_observation`` method (required by the bundle)
:param buffer_depth: number of observations that are stored, defaults to 1
:type buffer_depth: int, optional
"""
""""""
def __init__(self, *args, buffer_depth=1, **kwargs):
self.buffer = None
self.buffer_depth = buffer_depth
self.render_flag = None
self.ax = None
self._host = None
@property
def host(self):
return self._host
@host.setter
def host(self, value):
self._host = value
@property
def role(self):
return self.host.role
def __content__(self):
"""__content__
Custom class representation
:return: representation
:rtype: string
"""
return self.__class__.__name__
@property
def observation(self):
"""observation
The last observation.
:return: last observation
:rtype: :py:class:`State<coopihc.base.State.State>`
"""
try:
return self.buffer[-1]
except TypeError:
return None
@property
def state(self):
"""state
The current agent state
:return: agent state
:rtype: :py:class:`State<coopihc.base.State.State>`
"""
try:
return self.buffer[-1]["{}_state".format(self.host.role)]
except AttributeError:
            raise AttributeError(
"This agent is not capable of observing its internal state. Think about changing your observation engine."
)
@property
def action(self):
"""action
The agent's last action
:return: agent action
:rtype: :py:class:`State<coopihc.base.State.State>`
"""
try:
return self.host.action
except AttributeError:
return None
@property
def unwrapped(self):
return self
def add_observation(self, observation):
"""add observation
Add an observation to a buffer. If the buffer does not exist, create a naive buffer. The buffer has a size given by buffer length
:param observation: observation produced by an engine
:type observation: :py:class:`State<coopihc.base.State.State>`
"""
if self.buffer is None:
self.buffer = []
if len(self.buffer) < self.buffer_depth:
self.buffer.append(observation)
else:
self.buffer = self.buffer[1:] + [observation]
# https://stackoverflow.com/questions/1015307/python-bind-an-unbound-method
def bind(self, func, as_name=None):
"""bind
Bind function to the engine with a given name. If as_name is None, then the func name is used.
:param func: function to bind
:type func: function
:param as_name: name of resulting method, defaults to None
:type as_name: string, optional
:return: bound method
:rtype: method
"""
if as_name is None:
as_name = func.__name__
bound_method = func.__get__(self, self.__class__)
setattr(self, as_name, bound_method)
return bound_method
def default_value(func):
"""Apply this decorator to use self.agent_observation as default value to infer from if agent_observation = None"""
def wrapper_default_value(self, agent_observation=None):
if agent_observation is None:
agent_observation = self.observation
return func(self, agent_observation=agent_observation)
return wrapper_default_value
@default_value
def infer(self, agent_observation=None):
"""infer
The main method of this class. Return the new value of the internal state of the agent, as well as the reward associated with inferring the state. By default, this inference engine does nothing, and just returns the state with a null reward.
:return: (new internal state, reward)
:rtype: tuple(:py:class:`State<coopihc.base.State.State>`, float)
"""
# do something with information inside buffer
if self.host.role == "user":
try:
return agent_observation["user_state"], 0
except KeyError:
return {}, 0
else:
try:
return agent_observation["assistant_state"], 0
except KeyError:
return {}, 0
def reset(self, random=True):
"""reset _summary_
Empty the buffer
:param random: whether to randomize parameters internal to the inference engine. This is provided in case of subclass the BaseInferenceEngine, defaults to True.
:type random: bool, optional
"""
self.buffer = None
def render(self, mode="text", ax_user=None, ax_assistant=None, ax_task=None):
render_flag = False
for r in self.render_tag:
if r in mode:
render_flag = True
if render_flag:
if "plot" in mode:
if self.ax is not None:
pass
else:
if ax_user is not None:
self.ax = ax_user
elif ax_assistant is not None:
self.ax = ax_assistant
else:
self.ax = ax_task
self.ax.set_title(type(self).__name__)
if "text" in mode:
print(type(self).__name__)
```
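A small sketch of the buffer mechanism documented in ``add_observation`` above. Plain dictionaries stand in for observation States, and the import follows the path used by the other files.

```python
from coopihc.inference.BaseInferenceEngine import BaseInferenceEngine

engine = BaseInferenceEngine(buffer_depth=2)
engine.add_observation({"user_state": {"x": 0}})
engine.add_observation({"user_state": {"x": 1}})
engine.add_observation({"user_state": {"x": 2}})  # oldest observation is dropped

assert len(engine.buffer) == 2
assert engine.observation == {"user_state": {"x": 2}}  # last observation
```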
#### File: coopihc/inference/DualInferenceEngine.py
```python
from coopihc.inference.BaseInferenceEngine import BaseInferenceEngine
# Base Inference Engine: does nothing but return the same state. Any new inference method can subclass InferenceEngine to have a buffer and add_observation method (required by the bundle)
class DualInferenceEngine(BaseInferenceEngine):
"""BaseInferenceEngine
The base Inference Engine from which other engines can be defined. This engine does nothing but return the same state. Any new inference method can subclass ``InferenceEngine`` to have a buffer and ``add_observation`` method (required by the bundle)
:param buffer_depth: number of observations that are stored, defaults to 1
:type buffer_depth: int, optional
"""
""""""
def __init__(
self,
primary_inference_engine,
dual_inference_engine,
primary_kwargs={},
dual_kwargs={},
order="primary-first",
**kwargs
):
self.order = order
self._mode = "primary"
if type(primary_inference_engine).__name__ == "type":
self.primary_engine = primary_inference_engine(**primary_kwargs)
else:
self.primary_engine = primary_inference_engine
if type(dual_inference_engine).__name__ == "type":
self.dual_engine = dual_inference_engine(**dual_kwargs)
else:
self.dual_engine = dual_inference_engine
super().__init__(**kwargs)
@property
def host(self):
return self._host
@host.setter
def host(self, value):
self._host = value
self.primary_engine.host = value
self.dual_engine.host = value
# Set mode to read-only
@property
def mode(self):
return self._mode
@property
def buffer(self):
if self.mode == "primary":
return self.primary_engine.buffer
else:
return self.dual_engine.buffer
@buffer.setter
def buffer(self, value):
if self.mode == "primary":
self.primary_engine.buffer = value
else:
self.dual_engine.buffer = value
@BaseInferenceEngine.default_value
def infer(self, agent_observation=None):
if self._mode == "primary":
state, primary_reward = self.primary_engine.infer(
agent_observation=agent_observation
)
return state, primary_reward
else:
state, dual_reward = self.dual_engine.infer(
agent_observation=agent_observation
)
return state, dual_reward
```
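A construction sketch for the dual engine: classes are instantiated internally (as the ``type`` check in ``__init__`` shows) and the engine starts in "primary" mode. The import path mirrors the file layout above.

```python
from coopihc.inference.BaseInferenceEngine import BaseInferenceEngine
from coopihc.inference.DualInferenceEngine import DualInferenceEngine

engine = DualInferenceEngine(
    primary_inference_engine=BaseInferenceEngine,  # passed as classes ...
    dual_inference_engine=BaseInferenceEngine,
    primary_kwargs={"buffer_depth": 1},  # ... instantiated with these kwargs
    dual_kwargs={"buffer_depth": 5},
)
assert engine.mode == "primary"
assert engine.dual_engine.buffer_depth == 5
```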
#### File: coopihc/inference/ExampleInferenceEngine.py
```python
from coopihc.inference.BaseInferenceEngine import BaseInferenceEngine
from coopihc.bundle.Simulator import Simulator
import numpy
import copy
# [start-infeng-subclass]
class ExampleInferenceEngine(BaseInferenceEngine):
"""ExampleInferenceEngine
Example class
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def infer(self, agent_observation=None):
"""infer
Do nothing. Same behavior as parent ``BaseInferenceEngine``
:return: (new internal state, reward)
:rtype: tuple(:py:class:`State<coopihc.base.State.State>`, float)
"""
        if agent_observation is None:
            agent_observation = self.observation
        agent_state = self.state
        reward = 0
# Do something
# agent_state = ..
# reward = ...
return agent_state, reward
ExampleInferenceEngine(buffer_depth=5)
# [end-infeng-subclass]
class CoordinatedInferenceEngine(BaseInferenceEngine):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def simulation_bundle(self):
return self.host.simulation_bundle
@BaseInferenceEngine.default_value
def infer(self, agent_observation=None):
agent_state = getattr(agent_observation, f"{self.role}_state")
# Parameter Inference is naive on purpose here
while True:
# Prediction using user model (can sample directly from the policy in this case, because it already does a single-shot prediction)
usermodel_action, _ = self.host.take_action(
agent_observation=agent_observation, agent_state=self.state
)
# actual observation
user_action = agent_observation.user_action.action
# Compare prediction with observation
if user_action != usermodel_action:
# If different, increment parameter by 1 and apply modulo 10. This works because we assumed we knew everything except the value of this parameter.
agent_state["user_p0"][...] = (agent_state["user_p0"] + 1) % 10
else:
break
reward = 0
return self.state, reward
class RolloutCoordinatedInferenceEngine(BaseInferenceEngine):
def __init__(self, task_model, user_model, assistant, **kwargs):
super().__init__(**kwargs)
self.task_model = task_model
self.user_model = user_model
self.assistant = assistant
self._simulator = None
self.__inference_count = 0
# define the simulator here. Simulator is called like a Bundle, but it will use the dual version of objects if available.
@property
def simulator(self):
if self._simulator is None:
self._simulator = Simulator(
task_model=self.task_model,
user_model=self.user_model,
assistant=self.assistant,
)
return self._simulator
@BaseInferenceEngine.default_value
def infer(self, agent_observation=None):
if (
self.__inference_count > 0
): # If it is the first time there is inference, continue, else just perform a BaseInference. We can do this because we know the parameter p[0] will not evolve over time.
return super().infer(agent_observation=agent_observation)
self.__inference_count += 1
agent_state = getattr(agent_observation, f"{self.role}_state")
mem_state = copy.deepcopy(
agent_state
) # agent state will be altered in the simulator, so keep a copy of it for reference.
# For the 10 possible values, completely simulate them. The right parameter is the one that leads to the maximum rewards
rew = [0 for i in range(10)]
for i in range(10): # Exhaustively try out all cases
# load the simulation with the right parameters
reset_dic = copy.deepcopy(agent_observation)
# try out a new state
del reset_dic["assistant_state"]
reset_dic = {
**reset_dic,
**{
"assistant_state": {
"p0": i,
"p1": mem_state.p1,
"p2": mem_state.p2,
}
},
}
self.simulator.reset(go_to=0, dic=reset_dic)
while True:
state, rewards, is_done = self.simulator.step()
rew[i] += sum(rewards.values())
if is_done:
break
# Don't forget to close the simulator when you are finished.
self.simulator.close()
index = numpy.argmax(rew)
self.state["user_p0"] = index
return mem_state, 0
```
#### File: coopihc/interactiontask/ClassicControlTask.py
```python
import numpy
import copy
from coopihc.helpers import flatten
from coopihc.base.State import State
from coopihc.base.elements import discrete_array_element, array_element, cat_element
from coopihc.interactiontask.InteractionTask import InteractionTask
class ClassicControlTask(InteractionTask):
"""ClassicControlTask
A task used for a classic control setting with signal dependent and independent noise. You can account for control-dependent noise with an appropriate noise model in the policy or the observation engine.
The task has a state x(.) which evolves according to
.. math ::
\\begin{align}
x(+.) = Ax(.) + Bu(.) + Fx(.).d\\beta + G.d\\omega + Hu(.)d\\gamma \\\\
\\end{align}
for "timespace=discrete" and
.. math ::
\\begin{align}
x(+.) = (Ax(.) + Bu(.))dt + Fx(.).d\\beta + G.d\\omega + Hu(.)d\\gamma \\\\
\\end{align}
for "timespace=continuous".
    where :math:`u(.)` is the user action and :math:`\\beta, \\omega \\sim \\mathcal{N}(0, \\sqrt{dt})` are Wiener process increments.
    The task is finished when the state is close enough to 0; currently this is implemented as the element-wise condition ``(abs(x) <= 0.01).all()``.
A and B may represent continuous or discrete dynamics. A conversion is implictly made following the value of discrete_dynamics keyword:
.. math ::
\\begin{align}
A_c = \\frac{1}{dt} (A - I) \\\\
B_c = B \\frac{1}{dt}
\\end{align}
.. math ::
\\begin{align}
        A_d = I + dt \\cdot A \\\\
        B_d = dt \\cdot B
\\end{align}
:param timestep: dt
:type timestep: float
:param A: Passive dynamics
:type A: numpy.ndarray
:param B: Response to command
:type B: numpy.ndarray
:param F: signal dependent noise, defaults to None
:type F: numpy.ndarray, optional
:param G: independent noise, defaults to None
:type G: numpy.ndarray, optional
:param H: control-dependent noise, defaults to None
:type H: numpy.ndarray, optional
:param discrete_dynamics: whether A and B are continuous or discrete, defaults to True
:type discrete_dynamics: bool, optional
:param noise: whether to include noise, defaults to "on"
:type noise: str, optional
:param timespace: if the task is modeled as discrete or continuous, defaults to "discrete"
:type noise: str, optional
"""
@property
def user_action(self):
return super().user_action[0]
def __init__(
self,
timestep,
A,
B,
*args,
F=None,
G=None,
H=None,
discrete_dynamics=True,
noise="on",
timespace="discrete",
end="standard",
**kwargs
):
super().__init__(*args, **kwargs)
self.dim = max(A.shape)
self.state = State()
self.state["x"] = array_element(
low=numpy.full((self.dim, 1), -numpy.inf),
high=numpy.full((self.dim, 1), numpy.inf),
)
self.state_last_x = copy.copy(self.state["x"])
self.timestep = timestep
if F is None:
self.F = numpy.zeros(A.shape)
else:
self.F = F
if G is None:
self.G = numpy.zeros(A.shape)
else:
self.G = G
if H is None:
self.H = numpy.zeros(B.shape)
else:
self.H = H
# Convert dynamics between discrete and continuous.
if discrete_dynamics:
self.A_d = A
self.B_d = B
# Euler method
self.A_c = 1 / timestep * (A - numpy.eye(A.shape[0]))
self.B_c = B / timestep
else:
self.A_c = A
self.B_c = B
# Euler Method
self.A_d = numpy.eye(A.shape[0]) + timestep * A
self.B_d = timestep * B
self.noise = noise
self.timespace = timespace
if end == "standard":
self.end = numpy.full((self.dim, 1), 0.01)
else:
self.end = end
self.state_last_x = None
def finit(self):
"""finit
Define whether to use continuous or discrete representation for A and B
"""
if self.timespace == "continuous":
self.A = self.A_c
self.B = self.B_c
else:
self.A = self.A_d
self.B = self.B_d
def reset(self, dic=None):
"""Force all substates except the first to be null.
Force all substates except the first to be null. Also stores the last state as an attribute (for rendering).
:param dic: reset_dic, see :py:class:``InteractionTask <coopihc.interactiontask.InteractionTask.InteractionTask>``, defaults to None
:type dic: dictionnary, optional
"""
# Force zero velocity
self.state["x"][0, 0] = 1
self.state["x"][1:, 0] = 0
def on_user_action(self, *args, user_action=None, **kwargs):
"""user step
Takes the state from x(.) to x(+.) according to
.. math ::
\\begin{align}
x(+.) = Ax(.) + Bu(.) + Fx(.).\\beta + G.\\omega \\\\
\\end{align}
"""
# Call super for counters
# For readability
A, B, F, G, H = self.A, self.B, self.F, self.G, self.H
_u = self.user_action.view(numpy.ndarray)
_x = self.state["x"].view(numpy.ndarray)
# Generate noise samples
if self.noise == "on":
beta, gamma = numpy.random.normal(0, numpy.sqrt(self.timestep), (2, 1))
omega = numpy.random.normal(0, numpy.sqrt(self.timestep), (self.dim, 1))
else:
beta, gamma = numpy.random.normal(0, 0, (2, 1))
omega = numpy.random.normal(0, 0, (self.dim, 1))
# Store last_x for render
self.state_last_x = copy.copy(_x)
# Deterministic update + State dependent noise + independent noise
if self.timespace == "discrete":
_x = (A @ _x + B * _u) + F @ _x * beta + G @ omega + H * _u * gamma
else:
_x += (
(A @ _x + B * _u) * self.timestep
+ F @ _x * beta
+ G @ omega
+ H * _u * gamma
)
self.state["x"] = _x
is_done = self.stopping_condition()
return self.state, 0, is_done
def stopping_condition(self):
_x = self.state["x"]
if (abs(_x[:]) <= self.end).all():
return True
return False
def on_assistant_action(self, *args, **kwargs):
"""on_assistant_action"""
return self.state, 0, False
def render(self, mode="text", ax_user=None, ax_assistant=None, ax_task=None):
"""render
Text mode: print task state
plot mode: Dynamically update axes with state trajectories.
"""
if mode is None:
mode = "text"
if "text" in mode:
print("state")
print(self.state["x"])
if "plot" in mode:
if self.ax is not None:
self.draw()
if self.turn_number == 3:
self.ax.legend(
handles=[self.axes[i].lines[0] for i in range(self.dim)]
)
else:
self.color = ["b", "g", "r", "c", "m", "y", "k"]
self.labels = ["x[{:d}]".format(i) for i in range(self.dim)]
self.axes = [ax_task]
self.ax = ax_task
for i in range(self.dim - 1):
self.axes.append(self.ax.twinx())
for i, ax in enumerate(self.axes):
# ax.yaxis.label.set_color(self.color[i])
ax.tick_params(axis="y", colors=self.color[i])
self.draw()
def draw(self):
        if self.state_last_x is None or (self.state_last_x == self.state["x"]).all():
pass
else:
for i in range(self.dim):
self.axes[i].plot(
[
((self.round_number - 1)) * self.timestep,
(self.round_number) * self.timestep,
],
flatten(
[
self.state_last_x[i, 0].tolist(),
self.state["x"][i, 0].tolist(),
]
),
"-",
color=self.color[i],
label=self.labels[i],
)
return
```
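A construction sketch for the control task above, using discrete double-integrator dynamics. The import path follows the file layout, and noise is switched off so that the update in ``on_user_action`` is purely deterministic.

```python
import numpy

from coopihc.interactiontask.ClassicControlTask import ClassicControlTask

timestep = 0.01
A = numpy.array([[1.0, timestep], [0.0, 1.0]])  # discrete double integrator
B = numpy.array([[0.0], [timestep]])

task = ClassicControlTask(timestep, A, B, discrete_dynamics=True, noise="off")
task.finit()  # timespace="discrete" by default, so the A_d / B_d pair is selected
assert (task.A == A).all() and (task.B == B).all()
```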
#### File: coopihc/interactiontask/InteractionTask.py
```python
from abc import ABC, abstractmethod
from coopihc.base.State import State
from coopihc.base.StateElement import StateElement
import numpy
"""
The main API methods for this class are:
__init__
finit
reset
on_user_action
on_assistant_action
render
:meta public:
"""
class InteractionTask(ABC):
"""InteractionTask
The class that defines an Interaction Task. Subclass this task when
creating a new task to ensure compatibility with CoopIHC. When doing so,
make sure to call ``super()`` in ``__init__()``.
"""
def __init__(self, *args, **kwargs):
self._state = State()
self.bundle = None
self.timestep = 0.1
self._parameters = {}
# Render
self.ax = None
def finit(self):
return
@property
def turn_number(self):
"""Turn number.
The turn number of the game
:return: turn number
:rtype: numpy.ndarray
"""
if self.bundle is None:
no_bundle_specified = "turn_number accesses the bundle's turn number. self.bundle was None. Is this task part of a bundle?"
raise Exception(no_bundle_specified)
return self.bundle.turn_number
@property
def round_number(self):
if self.bundle is None:
no_bundle_specified = "turn_number accesses the bundle's turn number. self.bundle was None. Is this task part of a bundle?"
raise Exception(no_bundle_specified)
return self.bundle.round_number
@property
def state(self):
"""state
The current state of the task.
:return: task state
:rtype: :py:class:`State<coopihc.base.State.State>`
"""
return self._state
@state.setter
def state(self, value):
self._state = value
@property
def user_action(self):
"""user action
The last action input by the user.
:return: user action
:rtype: :py:class:`State<coopihc.base.State.State>`
"""
try:
return self.bundle.user.action
except AttributeError:
raise AttributeError("This task has not been connected to a user yet")
@property
def assistant_action(self):
"""assistant action
The last action input by the assistant.
:return: assistant action
:rtype: :py:class:`State<coopihc.base.State.State>`
"""
try:
return self.bundle.assistant.action
except AttributeError:
raise AttributeError("This task has not been connected to an assistant yet")
# def __getattr__(self, value):
# try:
# return self.parameters.__getitem__(value)
# except:
# raise AttributeError(
# f"'{self.__class__.__name__}' object has no attribute '{value}'"
# )
@property
def parameters(self):
if self.bundle:
return self.bundle.parameters
return self._parameters
def __content__(self):
"""Custom class representation.
A custom class representation.
:return: custom repr
:rtype: dictionnary
"""
return {
"Name": self.__class__.__name__,
"State": self.state.__content__(),
}
"""Describe how the task state should be reset. This method has to be
redefined when subclassing this class.
:param args: (OrderedDict) state to which the task should be reset,
if provided.
:return: state (OrderedDict) of the task.
:meta public:
"""
def _base_reset(self, dic=None, random=True):
"""base reset
Method that wraps the user defined reset() method. Takes care of the
dictionary reset mechanism and updates rounds.
:param dic: reset dictionary (passed by bundle),
:type dic: dictionary, optional
:param random: whether to randomize task states, defaults to True
:type random: boolean, optional
"""
if random:
# Reset everything randomly before starting
self.state.reset(dic={})
# Apply end-user defined reset
self.reset(dic=dic)
if not dic:
return
# forced reset with dic
for key in list(self.state.keys()):
value = dic.get(key)
if isinstance(value, StateElement):
self.state[key] = value
continue
elif isinstance(value, numpy.ndarray):
self.state[key][...] = value
elif value is None:
continue
else:
raise NotImplementedError(
"Values in the reset dictionnary should be of type StateElement or numpy.ndarray, but you provided values of type {} ({})".format(
value.__class__.__name__, str(value)
)
)
def base_on_user_action(self, *args, **kwargs):
"""base user step
Wraps the user defined on_user_action() method. For now does little but
provide default values, may be useful later.
:return: (task state, task reward, is_done flag, metadata):
:rtype: tuple(:py:class:`State<coopihc.base.State.State>`, float, boolean, dictionnary)
"""
ret = self.on_user_action(*args, **kwargs)
if ret is None:
return self.state, -1 / 2, False, {}
else:
return ret
def base_on_assistant_action(self, *args, **kwargs):
"""base assistant step
Wraps the assistant defined on_assistant_action() method. For now does
little but provide default values, may be useful later.
:return: (task state, task reward, is_done flag, metadata):
:rtype: tuple(:py:class:`State<coopihc.base.State.State>`, float, boolean, dictionnary)
"""
ret = self.on_assistant_action(*args, **kwargs)
if ret is None:
return self.state, -1 / 2, False, {}
else:
return ret
@abstractmethod
def on_user_action(self, *args, user_action=None, **kwargs):
"""on_user_action
Redefine this to specify the task state transitions and rewards issued.
:return: (task state, task reward, is_done flag, {})
:rtype: tuple(:py:class:`State<coopihc.base.State.State>`, float, boolean, dictionnary)
"""
return None
@abstractmethod
def on_assistant_action(self, *args, assistant_action=None, **kwargs):
"""on_assistant_action
Redefine this to specify the task state transitions and rewards issued.
:return: (task state, task reward, is_done flag, {})
:rtype: tuple(:py:class:`State<coopihc.base.State.State>`, float, boolean, dictionnary)
"""
return None
@abstractmethod
def reset(self):
"""reset
Redefine this to specify how to reinitialize the task before each new
game.
"""
return None
def render(self, mode="text", ax_user=None, ax_assistant=None, ax_task=None):
"""Render the task on the main plot.
:param mode: (str) text or plot
:param args: (list) list of axis in order axtask, axuser, axassistant
"""
if mode is None:
mode = "text"
if "text" in mode:
print(self.state)
else:
pass
```
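A minimal subclassing sketch for the abstract task above. The state element helper and the (state, reward, is_done) return convention are borrowed from ClassicControlTask; the "counter" substate is purely illustrative.

```python
from coopihc.base.elements import discrete_array_element
from coopihc.interactiontask.InteractionTask import InteractionTask


class CountingTask(InteractionTask):
    """Illustrative task: ends once the user has acted ten times."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.state["counter"] = discrete_array_element(init=0, low=0, high=10)

    def reset(self, dic=None):
        self.state["counter"][...] = 0

    def on_user_action(self, *args, user_action=None, **kwargs):
        self.state["counter"][...] = self.state["counter"] + 1
        is_done = bool(self.state["counter"] >= 10)
        return self.state, 0, is_done

    def on_assistant_action(self, *args, assistant_action=None, **kwargs):
        return self.state, 0, False
```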
#### File: coopihc/interactiontask/PipeTaskWrapper.py
```python
from abc import ABC, abstractmethod
from coopihc.interactiontask.InteractionTask import InteractionTask
class PipeTaskWrapper(InteractionTask, ABC):
"""PipeTaskWrapper
A Wrapper for tasks so that messages are passed through a pipe. Subclass this task to use tasks defined externally (e.g. that pass messages via websockets to a server which forwards the message to a task via a pipe)
.. note::
Need to explain interface here
:param task: task to wrap
    :type task: :py:class:`InteractionTask<coopihc.interactiontask.InteractionTask.InteractionTask>`
:param pipe: pipe
:type pipe: subprocess.Pipe
"""
def __init__(self, task, pipe):
self.__dict__ = task.__dict__
self.task = task
self.pipe = pipe
self.pipe.send({"type": "init", "parameters": self.parameters})
is_done = False
while True:
self.pipe.poll(None)
received_state = self.pipe.recv()
# This assumes that the final message sent by the client is a task_state message. Below should be changed to remove that assumption (i.e. client can send whatever order)
if received_state["type"] == "task_state":
is_done = True
self.update_state(received_state)
if is_done:
break
def __getattr__(self, attr):
if self.__dict__:
return getattr(self.__dict__["task"], attr)
else:
# should never happen
pass
def __setattr__(self, name, value):
if name == "__dict__" or name == "task":
super().__setattr__(name, value)
return
if self.__dict__:
setattr(self.__dict__["task"], name, value)
def update_state(self, state):
"""update_state
Remove the 'type' entry from the state dictionnary
:param state: state received via pipe
:type state: dictionnary
"""
if state["type"] == "task_state":
del state["type"]
self.update_task_state(state)
elif state["type"] == "user_state":
del state["type"]
self.update_user_state(state)
@abstractmethod
def update_task_state(self, state):
"""update_task_state
Redefine this. Example `here <https://jgori-ouistiti.github.io/CoopIHC-zoo/_modules/coopihczoo/pointing/envs.html#DiscretePointingTaskPipeWrapper>`_
:param state: state received via pipe
:type state: dictionnary
"""
pass
@abstractmethod
def update_user_state(self, state):
"""update_user_state
See update_task_state
:param state: state received via pipe
:type state: dictionnary
"""
pass
def on_user_action(self, *args, **kwargs):
"""on_user_action
1. Transform user action into dictionnary with appropriate interface
2. Send message over pipe
3. Wait for pipe message
4. Update state and return
:return: (task state, task reward, is_done flag, {})
:rtype: tuple(:py:class:`State<coopihc.base.State.State>`, float, boolean, dictionnary)
"""
super().on_user_action(*args, **kwargs)
user_action_msg = {
"type": "user_action",
"value": self.bundle.game_state["user_action"]["action"].serialize(),
}
self.pipe.send(user_action_msg)
self.pipe.poll(None)
received_dic = self.pipe.recv()
received_state = received_dic["state"]
self.update_state(received_state)
return self.state, received_dic["reward"], received_dic["is_done"], {}
def on_assistant_action(self, *args, **kwargs):
"""on_assistant_action
Same as on_user_action
:return: (task state, task reward, is_done flag, {})
:rtype: tuple(:py:class:`State<coopihc.base.State.State>`, float, boolean, dictionnary)
"""
super().on_assistant_action(*args, **kwargs)
assistant_action_msg = {
"type": "assistant_action",
"value": self.bundle.game_state["assistant_action"]["action"].serialize(),
}
self.pipe.send(assistant_action_msg)
self.pipe.poll(None)
received_dic = self.pipe.recv()
received_state = received_dic["state"]
self.update_state(received_state)
return self.state, received_dic["reward"], received_dic["is_done"], {}
def reset(self, dic=None):
"""reset
1. Send reset dic over pipe
2. Wait for pipe message
3. Update state and return
.. note ::
verify the dic=None signature
:param dic: reset dic, defaults to None
:type dic: dictionnary, optional
:return: Task state
:rtype: :py:class:`State<coopihc.base.State.State>`
"""
super().reset(dic=dic)
reset_msg = {"type": "reset", "reset_dic": dic}
self.pipe.send(reset_msg)
self.pipe.poll(None)
received_state = self.pipe.recv()
self.update_state(received_state)
self.bundle.reset(task=False)
return self.state
```
#### File: coopihc/interactiontask/TaskWrapper.py
```python
from coopihc.interactiontask.InteractionTask import InteractionTask
class TaskWrapper(InteractionTask):
"""TaskWrapper
Unused ?
"""
def __init__(self, task):
self.task = task
self.__dict__.update(task.__dict__)
def on_user_action(self, *args, **kwargs):
return self.task.on_user_action(*args, **kwargs)
def on_assistant_action(self, *args, **kwargs):
return self.task.on_assistant_action(*args, **kwargs)
def reset(self, dic=None):
return self.task.reset(dic=dic)
def render(self, *args, **kwargs):
return self.task.render(*args, **kwargs)
@property
def unwrapped(self):
return self.task.unwrapped
```
#### File: coopihc/observation/BaseObservationEngine.py
```python
import copy
import numpy
from coopihc.base.State import State
class BaseObservationEngine:
"""Base Class for Observation Engine.
Does nothing but specify a type for the observation engine and return the full game state.
All Observation Engines are subclassed from this main class, but you are really not inheriting much... This is mostly here for potential future changes.
"""
def __init__(self, *args, seed=None, **kwargs):
self.rng = numpy.random.default_rng(seed)
def __content__(self):
"""__content__
Custom class representation
:return: custom repr
:rtype: string
"""
return self.__class__.__name__
@property
def observation(self):
"""observation
returns the last observation
:return: last observation
:rtype: :py:class:`State <coopihc.base.State.State>`
"""
try:
return self.host.inference_engine.buffer[-1]
except AttributeError:
return None
@property
def bundle(self):
try:
return self.host.bundle
except AttributeError:
raise AttributeError(
"You haven't connected the observation to a user that is connected to a bundle yet."
)
@property
def action(self):
"""action
returns the last action
:return: last action
:rtype: :py:class:`State<coopihc.base.State.State>`
"""
try:
return self.host.action
except AttributeError:
return None
@property
def unwrapped(self):
return self
def observe_from_substates(
self,
game_info={},
task_state={},
user_state={},
assistant_state={},
user_action={},
assistant_action={},
):
game_state = State(
**{
"game_info": game_info,
"task_state": task_state,
"user_state": user_state,
"assistant_state": assistant_state,
"user_action": user_action,
"assistant_action": assistant_action,
}
)
return self.observe(game_state=game_state)
def default_value(func):
"""Apply this decorator to use bundle.game_state as default value to observe if game_state = None"""
def wrapper_default_value(self, game_state=None):
if game_state is None:
game_state = self.host.bundle.game_state
return func(self, game_state=game_state)
return wrapper_default_value
@default_value
def observe(self, game_state=None):
"""observe
Redefine this
.. warning::
    the deepcopy mechanism is extremely slow
:param game_state: game state
:type game_state: :py:class:`State<coopihc.base.State.State>`
:return: observation, obs reward
:rtype: tuple(:py:class:`State<coopihc.base.State.State>`, float)
"""
return copy.deepcopy(game_state), 0
def reset(self, random=True):
"""reset _summary_
Empty by default.
:param random: whether states internal to the observation engine are reset randomly, defaults to True. Useful in case of subclassing the Observation Engine.
:type random: bool, optional
"""
return
# To be able to inherit these decorators
# get_params = staticmethod(get_params)
default_value = staticmethod(default_value)
```
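Usage note (not part of the file above): because `default_value` is re-exposed as a staticmethod at the end of the class, subclasses can reuse it on their own `observe`. A minimal, hypothetical sketch:
```python
import copy

from coopihc.observation.BaseObservationEngine import BaseObservationEngine


class PassThroughEngine(BaseObservationEngine):
    """Hypothetical engine: returns a copy of the full game state."""

    @BaseObservationEngine.default_value
    def observe(self, game_state=None):
        # if the caller passes game_state=None, the decorator substitutes
        # self.host.bundle.game_state before this body runs
        return copy.deepcopy(game_state), 0
```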
#### File: coopihc/observation/CascadedObservationEngine.py
```python
from coopihc.observation.BaseObservationEngine import BaseObservationEngine
import copy
class CascadedObservationEngine(BaseObservationEngine):
"""CascadedObservationEngine
Cascades (serially) several observation engines.
Gamestate --> Engine1 --> Engine2 --> ... --> EngineN --> Observation
:param engine_list: list of observation engines
:type engine_list: list(:py:mod:`Observation Engine<coopihc.observation>`)
"""
def __init__(self, engine_list, *args, **kwargs):
super().__init__(*args, **kwargs)
self.engine_list = engine_list
def __content__(self):
"""__content__
Custom class repr
:return: custom repr
:rtype: dictionary
"""
return {
self.__class__.__name__: {
"Engine{}".format(ni): i.__content__()
for ni, i in enumerate(self.engine_list)
}
}
def observe(self, game_state):
"""observe
Serial observations (i.e. the output of one engine becomes the input of the next one)
:param game_state: game state
:type game_state: :py:class:`State<coopihc.base.State.State>`
:return: (observation, obs reward)
:rtype: tuple(:py:class:`State<coopihc.base.State.State>`, float)
"""
game_state = copy.deepcopy(game_state)
rewards = 0
for engine in self.engine_list:
new_obs, new_reward = engine.observe(game_state)
game_state.update(new_obs)
rewards += new_reward
return game_state, rewards
```
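As an illustration only (two base engines make the cascade a no-op apart from the copy and the summed zero rewards), chaining engines could look like this:
```python
from coopihc.base.elements import example_game_state
from coopihc.observation.BaseObservationEngine import BaseObservationEngine
from coopihc.observation.CascadedObservationEngine import CascadedObservationEngine

# Engines are applied serially: the output of the first becomes the input
# of the second, and the observation rewards are summed.
engine = CascadedObservationEngine([BaseObservationEngine(), BaseObservationEngine()])
observation, reward = engine.observe(example_game_state())
```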
#### File: coopihc/policy/WrapAsPolicy.py
```python
from abc import ABC, abstractmethod
from coopihc.policy.BasePolicy import BasePolicy
# ============== General Policies ===============
class WrapAsPolicy(BasePolicy, ABC):
"""WrapAsPolicy
Wrap a bundle as a policy.
:param action_bundle: bundle to wrap
:type action_bundle: :py:class:`Bundle<coopihc.bundle>`
:param action_state: which part of the bundle game state should serve as the action state
:type action_state: :py:class:`State<coopihc.base.State.State>`
"""
def __init__(self, action_bundle, action_state, *args, **kwargs):
super().__init__(*args, action_state=action_state, **kwargs)
self.bundle = action_bundle
def __content__(self):
return {
"Name": self.__class__.__name__,
"Bundle": self.bundle.__content__(),
}
@property
def unwrapped(self):
return self.bundle.unwrapped
@property
def game_state(self):
return self.bundle.game_state
def reset(self, *args, **kwargs):
return self.bundle.reset(*args, **kwargs)
def step(self, *args, **kwargs):
return self.bundle.step(*args, **kwargs)
@abstractmethod
def sample(self, agent_observation=None, agent_state=None):
pass
# Do something
# return action, rewards
def __str__(self):
return "{} <[ {} ]>".format(self.__class__.__name__, self.bundle.__str__())
def __repr__(self):
return self.__str__()
```
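Since `sample` is abstract, `WrapAsPolicy` must be subclassed. The sketch below is hypothetical: the substate name ("assistant_action") and the reward aggregation are assumptions made for illustration.
```python
from coopihc.policy.WrapAsPolicy import WrapAsPolicy


class OneStepBundlePolicy(WrapAsPolicy):
    """Hypothetical subclass: advances the wrapped bundle by one step and
    returns the action found in its game state with the summed rewards."""

    def sample(self, agent_observation=None, agent_state=None):
        # run the wrapped bundle for one step (it computes its own actions)
        state, rewards, is_done = self.step()
        # read the action out of the bundle's game state (assumed substate name)
        action = self.game_state["assistant_action"]["action"]
        return action, sum(rewards.values())
```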
#### File: components/agents/test_subclass_baseagent.py
```python
from coopihc.agents.BaseAgent import BaseAgent
from coopihc.base.State import State
from coopihc.base.elements import discrete_array_element, array_element, cat_element
from coopihc.policy.ExamplePolicy import ExamplePolicy
from coopihc.policy.BasePolicy import BasePolicy
from coopihc.inference.ExampleInferenceEngine import ExampleInferenceEngine
from coopihc.inference.BaseInferenceEngine import BaseInferenceEngine
from coopihc.observation.BaseObservationEngine import BaseObservationEngine
from coopihc.observation.ExampleObservationEngine import ExampleObservationEngine
import numpy
class MinimalAgent(BaseAgent):
"""Non-functional minimal subclass to use in tests."""
class NonMinimalAgent(BaseAgent):
def __init__(self, *args, **kwargs):
# custom policy
action_state = State()
action_state["action"] = discrete_array_element(init=2, low=1, high=3)
policy = ExamplePolicy(action_state=action_state)
# custom state
state = State(**{"substate_1": cat_element(N=2, init=1)})
# custom inference_engine
inference_engine = ExampleInferenceEngine(buffer_depth=7)
# custom observation_engine
observation_engine = ExampleObservationEngine("substate_1")
super().__init__(
"user",
*args,
agent_state=state,
agent_policy=policy,
agent_inference_engine=inference_engine,
agent_observation_engine=observation_engine,
**kwargs
)
def test_imports():
"""Tests the different import ways for the BaseAgent."""
from coopihc import BaseAgent
from coopihc.agents import BaseAgent
from coopihc.agents.BaseAgent import BaseAgent
def test_example():
"""Tries to import and create the example user."""
from coopihc.agents.ExampleUser import ExampleUser
user = ExampleUser()
return True
def test_init():
"""Tries to initialize an BaseAgent and checks the expected
properties and methods."""
test_properties()
test_methods()
def test_properties():
"""Tests the expected properties for a minimal BaseAgent."""
user = MinimalAgent("user")
# Direct attributes
assert hasattr(user, "bundle")
assert hasattr(user, "ax")
assert hasattr(user, "role")
# components
assert hasattr(user, "state")
assert hasattr(user, "policy")
assert hasattr(user, "observation_engine")
assert hasattr(user, "inference_engine")
# properties
assert hasattr(user, "observation")
assert hasattr(user, "action")
def test_methods():
"""Tests the expected methods for a minimal Interactionuser."""
user = MinimalAgent("user")
# Public methods
assert hasattr(user, "finit")
assert hasattr(user, "_attach_policy")
assert hasattr(user, "_attach_observation_engine")
assert hasattr(user, "_attach_inference_engine")
assert hasattr(user, "reset")
assert hasattr(user, "render")
assert hasattr(user, "observe")
# Private methods
assert hasattr(user, "__content__")
assert hasattr(user, "_base_reset")
assert hasattr(user, "_override_components")
assert hasattr(user, "take_action")
assert hasattr(user, "_agent_step")
def test_minimalagent():
"""Tests the methods provided by the BaseAgent class."""
test_imports()
test_example()
test_init()
def test_nonminimalagent():
test_state()
test_policy()
test_inference_engine()
test_observation_engine()
def test_state():
agent = NonMinimalAgent()
assert agent.state["substate_1"] == cat_element(N=2, init=1)
def test_policy():
agent = NonMinimalAgent()
assert isinstance(agent.policy, ExamplePolicy)
assert agent.action == discrete_array_element(init=2, low=1, high=3)
def test_inference_engine():
agent = NonMinimalAgent()
assert isinstance(agent.inference_engine, ExampleInferenceEngine)
assert agent.inference_engine.buffer_depth == 7
def test_observation_engine():
agent = NonMinimalAgent()
assert isinstance(agent.observation_engine, ExampleObservationEngine)
assert agent.observation_engine.observable_state == "substate_1"
def test_override_components():
test_override_components_args()
test_override_components_kwargs()
def test_override_components_args():
test_override_state()
test_override_policy()
test_override_obseng()
test_override_infeng()
def test_override_components_kwargs():
# test the mechanism when kwargs are provided
return True
def test_override_state():
state = State()
agent = NonMinimalAgent(override_state=state)
assert agent.state == State()
def test_override_policy():
policy = BasePolicy()
agent = NonMinimalAgent(override_policy=(policy, {}))
assert isinstance(agent.policy, BasePolicy)
assert agent.action == cat_element(N=2)
def test_override_obseng():
obseng = BaseObservationEngine()
agent = NonMinimalAgent(override_observation_engine=(obseng, {}))
assert isinstance(agent.observation_engine, BaseObservationEngine)
def test_override_infeng():
infeng = BaseInferenceEngine()
agent = NonMinimalAgent(override_inference_engine=(infeng, {}))
assert isinstance(agent.inference_engine, BaseInferenceEngine)
assert agent.inference_engine.buffer_depth == 1
# +----------------------+
# + MAIN +
# +----------------------+
if __name__ == "__main__":
test_minimalagent()
test_nonminimalagent()
test_override_components()
```
#### File: base/stateelement/test_statelement.py
```python
from coopihc.base.StateElement import StateElement
from coopihc.base.utils import (
StateNotContainedError,
StateNotContainedWarning,
)
from coopihc.base.elements import integer_set, box_space
import numpy
import pytest
import json
import copy
from tabulate import tabulate
def test_array_init_integer():
x = StateElement(2, integer_set(3))
assert hasattr(x, "space")
assert x.shape == ()
assert x == 2
def test_array_init_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="error"
)
assert hasattr(x, "space")
assert x.shape == (2, 2)
assert (x == numpy.zeros((2, 2))).all()
def test_array_init():
test_array_init_integer()
test_array_init_numeric()
def test_array_init_error_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="error")
with pytest.raises(StateNotContainedError):
x = StateElement(4, integer_set(3), out_of_bounds_mode="error")
with pytest.raises(StateNotContainedError):
x = StateElement(-3, integer_set(3), out_of_bounds_mode="error")
def test_array_init_error_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="error"
)
with pytest.raises(StateNotContainedError):
x = StateElement(
2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="error",
)
with pytest.raises(StateNotContainedError):
x = StateElement(
-2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="error",
)
with pytest.raises(StateNotContainedError):
x = StateElement(
numpy.array([[0, 0], [-2, 0]]),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="error",
)
def test_array_init_error():
test_array_init_error_integer()
test_array_init_error_numeric()
def test_array_init_warning_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="warning")
with pytest.warns(StateNotContainedWarning):
x = StateElement(4, integer_set(3), out_of_bounds_mode="warning")
with pytest.warns(StateNotContainedWarning):
x = StateElement(-3, integer_set(3), out_of_bounds_mode="warning")
def test_array_init_warning_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="warning"
)
with pytest.warns(StateNotContainedWarning):
x = StateElement(
2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
with pytest.warns(StateNotContainedWarning):
x = StateElement(
-2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
with pytest.warns(StateNotContainedWarning):
x = StateElement(
numpy.array([[0, 0], [-2, 0]]),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
def test_array_init_warning():
test_array_init_warning_integer()
test_array_init_warning_numeric()
def test_array_init_clip_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="clip")
assert x == numpy.array([2])
x = StateElement(4, integer_set(3), out_of_bounds_mode="clip")
assert x == numpy.array([2])
x = StateElement(-3, integer_set(3), out_of_bounds_mode="clip")
assert x == numpy.array([0])
def test_array_init_clip_numeric():
x = StateElement(
numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))), out_of_bounds_mode="clip"
)
assert (x == numpy.zeros((2, 2))).all()
x = StateElement(
2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="clip",
)
assert (x == numpy.ones((2, 2))).all()
x = StateElement(
-2 * numpy.ones((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="clip",
)
assert (x == -1.0 * numpy.ones((2, 2))).all()
def test_array_init_clip():
test_array_init_clip_integer()
test_array_init_clip_numeric()
def test_array_init_dtype_integer():
x = StateElement(2, integer_set(3), out_of_bounds_mode="warning")
assert x.dtype == numpy.int64
x = StateElement(2, integer_set(3, dtype=numpy.int16), out_of_bounds_mode="warning")
assert x.dtype == numpy.int16
def test_array_init_dtype_numeric():
x = StateElement(
numpy.zeros((2, 2)),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="warning",
)
assert x.dtype == numpy.float64
x = StateElement(
numpy.zeros((2, 2)),
box_space(numpy.ones((2, 2), dtype=numpy.float32)),
out_of_bounds_mode="warning",
)
assert x.dtype == numpy.float32
x = StateElement(
numpy.zeros((2, 2)),
box_space(numpy.ones((2, 2), dtype=numpy.int8)),
out_of_bounds_mode="warning",
)
assert x.dtype == numpy.int8
def test_array_init_dtype():
test_array_init_dtype_integer()
test_array_init_dtype_numeric()
# def test__array_ufunc__discrete():
# # Simple arithmetic
# global discr_space
# x = StateElement(2, discr_space, out_of_bounds_mode="error")
# assert x + numpy.array(1) == 3
# assert x + 1 == 3
# assert x - 1 == 1
# assert 3 - x == 1
# assert x - numpy.array(1) == 1
# assert numpy.array(3) - x == 1
# assert 1 + x == 3
# x += 1
# y = x - 1
# assert y.out_of_bounds_mode == "error"
# with pytest.raises(StateNotContainedError):
# 1 - x
# with pytest.raises(StateNotContainedError):
# x + 2
# with pytest.raises(StateNotContainedError):
# x += 5
# def test__array_ufunc__continuous():
# # some matrix operations
# global cont_space
# x = StateElement(numpy.zeros((2, 2)), cont_space, out_of_bounds_mode="error")
# assert (x + numpy.ones((2, 2)) == numpy.ones((2, 2))).all()
# assert (x + 1 == numpy.ones((2, 2))).all()
# assert (1 + x == numpy.ones((2, 2))).all()
# assert (x - 1 == -numpy.ones((2, 2))).all()
# assert (1 - x == numpy.ones((2, 2))).all()
# assert ((1 + x) * 0.5 == 0.5 * numpy.ones((2, 2))).all()
# assert (0.5 * (1 + x) @ numpy.ones((2, 2)) == numpy.ones((2, 2))).all()
# def test__array_ufunc__multidiscrete():
# global multidiscr_space
# x = StateElement([1, 1, 8], multidiscr_space, out_of_bounds_mode="error")
# assert (x + numpy.array([[1], [1], [-3]]) == numpy.array([[2], [2], [5]])).all()
# with pytest.raises(StateNotContainedError):
# x + numpy.array([[1], [1], [1]])
# def test__array_ufunc__comparisons():
# global discr_space
# x = StateElement(2, discr_space, out_of_bounds_mode="error")
# assert x > 1 == True
# global cont_space
# x = StateElement(numpy.zeros((2, 2)), cont_space, out_of_bounds_mode="error")
# assert (x < 0).all() == False
# global multidiscr_space
# x = StateElement(
# numpy.array([[1], [1], [1]]), multidiscr_space, out_of_bounds_mode="error"
# )
# assert (x >= numpy.array([[1], [0], [1]])).all() == True
# assert (x >= numpy.array([[1], [5], [1]])).all() == False
# comp = x >= numpy.array([[1], [5], [1]])
# assert (comp == numpy.array([[True], [False], [True]])).all()
# def test__array_ufunc__trigonometry():
# global cont_space
# x = StateElement(numpy.zeros((2, 2)), cont_space, out_of_bounds_mode="error")
# assert (numpy.cos(x) == numpy.ones((2, 2))).all()
# def test__array_ufunc__floating():
# global cont_space
# x = StateElement(
# numpy.array([[0.2, 0.3], [1, 0.95]]), cont_space, out_of_bounds_mode="error"
# )
# assert numpy.isfinite(x).all() == True
# def test__array_ufunc__out_of_bounds_mode():
# x = StateElement(
# numpy.array([[0.2, 0.3], [1, 0.95]]), cont_space, out_of_bounds_mode="error"
# )
# y = StateElement(
# numpy.array([[-0.2, -0.3], [-1, -0.95]]),
# cont_space,
# out_of_bounds_mode="warning",
# )
# z = StateElement(
# numpy.array([[0.0, 0.0], [0.0, 0.0]]),
# cont_space,
# out_of_bounds_mode="silent",
# )
# u = x + y
# assert u.out_of_bounds_mode == "error"
# u = y + x
# assert u.out_of_bounds_mode == "error"
# u = z + x
# assert u.out_of_bounds_mode == "error"
# u = y + z
# assert u.out_of_bounds_mode == "warning"
# u = z + 0
# assert u.out_of_bounds_mode == "silent"
# def test__array_ufunc__():
# test__array_ufunc__discrete()
# test__array_ufunc__continuous()
# test__array_ufunc__multidiscrete()
# test__array_ufunc__comparisons()
# test__array_ufunc__trigonometry()
# test__array_ufunc__floating()
# test__array_ufunc__out_of_bounds_mode()
# def test_amax_nothandled():
# StateElement.HANDLED_FUNCTIONS = {}
# cont_space = autospace(
# [[-1, -1], [-1, -1]], [[1, 1], [1, 1]], dtype=numpy.float64
# ) # Here the
# x = StateElement(
# numpy.array([[0, 0.1], [-0.5, 0.8]], dtype=numpy.float64),
# cont_space,
# out_of_bounds_mode="warning",
# )
# # Without handled function
# with pytest.warns(NumpyFunctionNotHandledWarning):
# y = numpy.max(x)
# assert isinstance(y, numpy.ndarray)
# assert not isinstance(y, StateElement)
# assert y == 0.8
# assert not hasattr(y, "space")
# assert not hasattr(y, "out_of_bounds_mode")
# def test_amax_implements_decorator():
# cont_space = autospace([[-1, -1], [-1, -2]], [[1, 1], [1, 3]], dtype=numpy.float64)
# x = StateElement(
# numpy.array([[0, 0.1], [-0.5, 0.8]], dtype=numpy.float64),
# cont_space,
# out_of_bounds_mode="warning",
# )
# @StateElement.implements(numpy.amax)
# def amax(arr, **keywordargs):
# space, out_of_bounds_mode, kwargs = (
# arr.space,
# arr.out_of_bounds_mode,
# arr.kwargs,
# )
# obj = arr.view(numpy.ndarray)
# argmax = numpy.argmax(obj, **keywordargs)
# index = numpy.unravel_index(argmax, arr.space.shape)
# obj = numpy.amax(obj, **keywordargs)
# obj = numpy.asarray(obj).view(StateElement)
# if arr.space.space_type == "continuous":
# obj.space = autospace(
# numpy.atleast_2d(arr.space.low[index[0], index[1]]),
# numpy.atleast_2d(arr.space.high[index[0], index[1]]),
# )
# else:
# raise NotImplementedError
# obj.out_of_bounds_mode = arr.out_of_bounds_mode
# obj.kwargs = arr.kwargs
# return obj
# y = numpy.amax(x)
# assert isinstance(y, StateElement)
# assert StateElement.HANDLED_FUNCTIONS.get(numpy.amax) is not None
# assert x.HANDLED_FUNCTIONS.get(numpy.amax) is not None
# assert y.shape == ()
# assert y == 0.8
# assert y.space.space_type == "continuous"
# assert y.space.shape == (1, 1)
# assert y.space.low == numpy.array([[-2]])
# assert y.space.high == numpy.array([[3]])
# def test_array_function_simple():
# test_amax_nothandled()
# test_amax_implements_decorator()
# def test__array_function__():
# test_array_function_simple()
def test_equals_integer():
int_space = integer_set(3)
other_int_space = integer_set(4)
x = StateElement(numpy.array(1), int_space)
y = StateElement(numpy.array(1), other_int_space)
assert x.equals(y)
assert not x.equals(y, mode="hard")
z = StateElement(numpy.array(2), int_space)
assert not x.equals(z)
def test_equals_numeric():
numeric_space = box_space(numpy.ones((2, 2)))
other_numeric_space = box_space(
low=numpy.array([[-1, -1], [-1, -2]]), high=numpy.array([[1, 2], [1, 1]])
)
x = StateElement(numpy.zeros((2, 2)), numeric_space)
y = StateElement(numpy.zeros((2, 2)), other_numeric_space)
assert (x.equals(y)).all()
assert not (x.equals(y, mode="hard")).all()
z = StateElement(numpy.eye(2), numeric_space)
assert not (x.equals(z)).all()
def test_equals():
test_equals_integer()
test_equals_numeric()
def test__iter__integer():
x = StateElement([2], integer_set(3))
with pytest.raises(TypeError):
next(iter(x))
def test__iter__numeric():
x = StateElement(
numpy.array([[0.2, 0.3], [0.4, 0.5]]), box_space(numpy.ones((2, 2)))
)
for i, _x in enumerate(x):
if i == 0:
assert (
_x == StateElement(numpy.array([0.2, 0.3]), box_space(numpy.ones((2,))))
).all()
if i == 1:
assert (
_x == StateElement(numpy.array([0.4, 0.5]), box_space(numpy.ones((2,))))
).all()
for j, _xx in enumerate(_x):
print(i, j)
if i == 0 and j == 0:
assert _xx == StateElement(
numpy.array(0.2), box_space(numpy.float64(1))
)
elif i == 0 and j == 1:
assert _xx == StateElement(
numpy.array(0.3), box_space(numpy.float64(1))
)
elif i == 1 and j == 0:
assert _xx == StateElement(
numpy.array(0.4), box_space(numpy.float64(1))
)
elif i == 1 and j == 1:
assert _xx == StateElement(
numpy.array(0.5), box_space(numpy.float64(1))
)
def test__iter__():
test__iter__integer()
test__iter__numeric()
def test__repr__integer():
x = StateElement(2, integer_set(3))
assert x.__repr__() == "StateElement(array(2), CatSet([0 1 2]), 'warning')"
def test__repr__numeric():
x = StateElement(numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))))
x.__repr__()
def test__repr__():
test__repr__integer()
test__repr__numeric()
def test_serialize_integer():
x = StateElement(numpy.array([2]), integer_set(3))
assert x.serialize() == {
"values": 2,
"space": {
"space": "CatSet",
"seed": None,
"array": [0, 1, 2],
"dtype": "dtype[int64]",
},
}
def test_serialize_numeric():
x = StateElement(numpy.zeros((2, 2)), box_space(numpy.ones((2, 2))))
assert x.serialize() == {
"values": [[0.0, 0.0], [0.0, 0.0]],
"space": {
"space": "Numeric",
"seed": None,
"low,high": [[[-1.0, -1.0], [-1.0, -1.0]], [[1.0, 1.0], [1.0, 1.0]]],
"shape": (2, 2),
"dtype": "dtype[float64]",
},
}
def test_serialize():
test_serialize_integer()
test_serialize_numeric()
def test__getitem__integer():
x = StateElement(1, integer_set(3))
assert x[..., {"space": True}] == x
assert x[..., {"space": True}] is x
assert x[...] == x
def test__getitem__numeric():
x = StateElement(
numpy.array([[0.0, 0.1], [0.2, 0.3]]), box_space(numpy.ones((2, 2)))
)
assert x[0, 0] == 0.0
assert x[0, 0, {"space": True}] == StateElement(0.0, box_space(numpy.float64(1)))
assert x[0, 1, {"space": True}] == StateElement(0.1, box_space(numpy.float64(1)))
assert x[1, 0, {"space": True}] == StateElement(0.2, box_space(numpy.float64(1)))
assert x[1, 1, {"space": True}] == StateElement(0.3, box_space(numpy.float64(1)))
assert (x[:, 1] == numpy.array([0.1, 0.3])).all()
assert (
x[:, 1, {"space": True}]
== StateElement(numpy.array([0.1, 0.3]), box_space(numpy.ones((2,))))
).all()
x = StateElement(numpy.array(0), box_space(low=-1, high=1))
from coopihc import State
s = State()
s["x"] = x
fd = {"x": ...}
a = s.filter(mode="stateelement", filterdict=fd)
def test__getitem__():
test__getitem__integer()
test__getitem__numeric()
def test__setitem__integer():
x = StateElement(1, integer_set(3))
x[...] = 2
assert x == StateElement(2, integer_set(3))
with pytest.warns(StateNotContainedWarning):
x[...] = 4
def test__setitem__numeric():
x = StateElement(
numpy.array([[0.0, 0.1], [0.2, 0.3]]), box_space(numpy.ones((2, 2)))
)
x[0, 0] = 0.5
x[0, 1] = 0.6
x[1, 0] = 0.7
x[1, 1] = 0.8
assert (
x
== StateElement(
numpy.array([[0.5, 0.6], [0.7, 0.8]]), box_space(numpy.ones((2, 2)))
)
).all()
with pytest.warns(StateNotContainedWarning):
x[0, 0] = 1.3
x = StateElement(
numpy.array([[0.0, 0.1], [0.2, 0.3]]),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="clip",
)
x[:, 0] = numpy.array([0.9, 0.9])
x[0, :] = numpy.array([1.2, 0.2])
x[1, 1] = 0.5
assert (
x
== StateElement(
numpy.array([[1, 0.2], [0.9, 0.5]]),
box_space(numpy.ones((2, 2))),
out_of_bounds_mode="clip",
)
).all()
def test__setitem__():
test__setitem__integer()
test__setitem__numeric()
def test_reset_integer():
x = StateElement(numpy.array([2]), integer_set(3), out_of_bounds_mode="error")
xset = {}
for i in range(1000):
x.reset()
_x = x.squeeze().tolist()
xset.update({str(_x): _x})
assert sorted(xset.values()) == [0, 1, 2]
# forced reset:
x.reset(value=0)
assert x == StateElement(0, integer_set(3), out_of_bounds_mode="error")
with pytest.raises(StateNotContainedError):
x.reset(value=5)
x.out_of_bounds_mode = "clip"
x.reset(value=5)
assert x == StateElement(
numpy.array([2]), integer_set(3), out_of_bounds_mode="clip"
)
def test_reset_numeric():
x = StateElement(numpy.ones((2, 2)), box_space(numpy.ones((2, 2))))
for i in range(1000):
x.reset()
x.reset(0.59 * numpy.ones((2, 2)))
assert (
x == StateElement(0.59 * numpy.ones((2, 2)), box_space(numpy.ones((2, 2))))
).all()
def test_reset():
test_reset_integer()
test_reset_numeric()
def test_tabulate_integer():
x = StateElement(1, integer_set(3))
x._tabulate()
tabulate(x._tabulate()[0])
def test_tabulate_numeric():
x = StateElement(numpy.zeros((3, 3)), box_space(numpy.ones((3, 3))))
x._tabulate()
tabulate(x._tabulate()[0])
def test_tabulate():
test_tabulate_integer()
test_tabulate_numeric()
def test_cast_discrete_to_cont():
discr_box_space = box_space(low=numpy.int8(1), high=numpy.int8(3))
cont_box_space = box_space(low=numpy.float64(-1.5), high=numpy.float64(1.5))
x = StateElement(1, discr_box_space)
ret_stateElem = x.cast(cont_box_space, mode="edges")
assert ret_stateElem == StateElement(-1.5, cont_box_space)
ret_stateElem = x.cast(cont_box_space, mode="center")
assert ret_stateElem == StateElement(-1, cont_box_space)
x = StateElement(2, discr_box_space)
ret_stateElem = x.cast(cont_box_space, mode="edges")
assert ret_stateElem == StateElement(0, cont_box_space)
ret_stateElem = x.cast(cont_box_space, mode="center")
assert ret_stateElem == StateElement(0, cont_box_space)
x = StateElement(3, discr_box_space)
ret_stateElem = x.cast(cont_box_space, mode="edges")
assert ret_stateElem == StateElement(1.5, cont_box_space)
ret_stateElem = x.cast(cont_box_space, mode="center")
assert ret_stateElem == StateElement(1, cont_box_space)
def test_cast_cont_to_discrete():
cont_box_space = box_space(low=numpy.float64(-1.5), high=numpy.float64(1.5))
discr_box_space = box_space(low=numpy.int8(1), high=numpy.int8(3))
x = StateElement(0, cont_box_space)
ret_stateElem = x.cast(discr_box_space, mode="center")
assert ret_stateElem == StateElement(2, discr_box_space)
ret_stateElem = x.cast(discr_box_space, mode="edges")
assert ret_stateElem == StateElement(2, discr_box_space)
center = []
edges = []
for i in numpy.linspace(-1.5, 1.5, 100):
x = StateElement(i, cont_box_space)
ret_stateElem = x.cast(discr_box_space, mode="center")
if i < -0.75:
assert ret_stateElem == StateElement(1, discr_box_space)
if i > -0.75 and i < 0.75:
assert ret_stateElem == StateElement(2, discr_box_space)
if i > 0.75:
assert ret_stateElem == StateElement(3, discr_box_space)
center.append(ret_stateElem.tolist())
ret_stateElem = x.cast(discr_box_space, mode="edges")
if i < -0.5:
assert ret_stateElem == StateElement(1, discr_box_space)
if i > -0.5 and i < 0.5:
assert ret_stateElem == StateElement(2, discr_box_space)
if i > 0.5:
assert ret_stateElem == StateElement(3, discr_box_space)
edges.append(ret_stateElem.tolist())
# import matplotlib.pyplot as plt
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.plot(
# numpy.linspace(-1.5, 1.5, 100), numpy.array(center) - 0.05, "+", label="center"
# )
# ax.plot(
# numpy.linspace(-1.5, 1.5, 100), numpy.array(edges) + 0.05, "o", label="edges"
# )
# ax.legend()
# plt.show()
def test_cast_cont_to_cont():
cont_space = box_space(numpy.full((2, 2), 1), dtype=numpy.float32)
other_cont_space = box_space(
low=numpy.full((2, 2), 0), high=numpy.full((2, 2), 4), dtype=numpy.float32
)
for i in numpy.linspace(-1, 1, 100):
x = StateElement(numpy.full((2, 2), i), cont_space)
ret_stateElement = x.cast(other_cont_space)
assert (ret_stateElement == (x + 1) * 2).all()
def test_cast_discr_to_discr():
discr_box_space = box_space(low=numpy.int8(1), high=numpy.int8(4))
other_discr_box_space = box_space(low=numpy.int8(11), high=numpy.int8(14))
for i in [1, 2, 3, 4]:
x = StateElement(i, discr_box_space)
ret_stateElement = x.cast(other_discr_box_space)
assert ret_stateElement == x + 10
def test_cast():
test_cast_discrete_to_cont()
test_cast_cont_to_discrete()
test_cast_cont_to_cont()
test_cast_discr_to_discr()
if __name__ == "__main__":
test_array_init()
test_array_init_error()
test_array_init_warning()
test_array_init_clip()
test_array_init_dtype()
# test__array_ufunc__() # kept here just in case
# test__array_function__() # kept here just in case
test_equals()
test__iter__()
test__repr__()
test_serialize()
test__setitem__()
test__getitem__()
test_reset()
test_tabulate()
test_cast()
```
#### File: base/state/test_state.py
```python
import coopihc
from coopihc.base.elements import (
discrete_array_element,
array_element,
cat_element,
integer_space,
box_space,
)
from coopihc.base.State import State
from coopihc.base.Space import Space
from coopihc.base.utils import StateNotContainedError
from coopihc.base.elements import example_game_state
import numpy
from tabulate import tabulate
s = 0
def test__init__():
global s
x = discrete_array_element(init=1, low=1, high=3)
s = State()
s["x"] = x
assert State(x=x) == s
s["y"] = array_element(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)))
assert "x" in s.keys()
assert "y" in s.keys()
def test_reset_small():
global s
s.reset()
assert s["x"] in integer_space(start=1, stop=3)
assert s["y"] in box_space(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)))
reset_dic = {"x": 3, "y": numpy.ones((2, 2))}
s.reset(dic=reset_dic)
assert s["x"] == 3
assert (s["y"] == numpy.ones((2, 2))).all()
state = State()
substate = State()
substate["x1"] = discrete_array_element(init=1, low=1, high=3)
substate["x3"] = array_element(
init=1.5 * numpy.ones((2, 2)), low=numpy.ones((2, 2)), high=2 * numpy.ones((2, 2))
)
substate2 = State()
substate2["y1"] = discrete_array_element(init=1, low=1, high=3)
state["sub1"] = substate
state["sub2"] = substate2
filterdict = dict(
{
"sub1": dict({"x1": ..., "x3": slice(0, 1)}),
"sub2": dict({"y1": ...}),
}
)
# def test_filter():
# global filterdict, state
# f_state = state.filter(mode="space", filterdict=filterdict)
# assert f_state == {
# "sub1": {
# "x1": Space(low=1, high=3, dtype=numpy.int64),
# "x3": Space(low=numpy.ones((2, 2)), high=2 * numpy.ones((2, 2))),
# },
# "sub2": {"y1": Space(low=1, high=3, dtype=numpy.int64)},
# }
#
# f_state = state.filter(mode="array", filterdict=filterdict)
# # print(f_state)
# f_state = state.filter(mode="stateelement", filterdict=filterdict)
# # print(f_state)
# f_state = state.filter(mode="space")
# # print(f_state)
# f_state = state.filter(mode="array")
# # print(f_state)
# f_state = state.filter(mode="array-Gym")
# # print(f_state)
def test_serialize():
global state
state.serialize()
def test_reset_full():
reset_dic = {
"sub1": {"x1": 3},
"sub2": {"y1": 3},
}
state.reset(dic=reset_dic)
assert state["sub1"]["x1"] == 3
assert state["sub2"]["y1"] == 3
def test_tabulate_small():
x = discrete_array_element(init=1, low=1, high=3)
s = State()
s["x"] = x
s["y"] = array_element(low=-numpy.ones((2, 2)), high=numpy.ones((2, 2)))
print(s._tabulate())
print(tabulate(s._tabulate()[0]))
def test_tabulate_full():
global state
state["sub3"] = cat_element(N=3)
print(state._tabulate())
print(tabulate(state._tabulate()[0]))
def test_tabulate():
test_tabulate_small()
test_tabulate_full()
def test_equals_soft():
_example_state = example_game_state()
obs = {
"game_info": {"turn_index": numpy.array(0), "round_index": numpy.array(0)},
"task_state": {"position": numpy.array(2), "targets": numpy.array([0, 1])},
"user_action": {"action": numpy.array(0)},
"assistant_action": {"action": numpy.array(2)},
}
del _example_state["user_state"]
del _example_state["assistant_state"]
assert _example_state == obs
assert _example_state.equals(obs, mode="soft")
def test_equals_hard():
_example_state = example_game_state()
obs = {
"game_info": {"turn_index": numpy.array(0), "round_index": numpy.array(0)},
"task_state": {"position": numpy.array(2), "targets": numpy.array([0, 1])},
"user_action": {"action": numpy.array(0)},
"assistant_action": {"action": numpy.array(2)},
}
del _example_state["user_state"]
del _example_state["assistant_state"]
assert not _example_state.equals(obs, mode="hard")
def test_equals():
test_equals_soft()
test_equals_hard()
def test_assign_after():
S = State()
S["x"] = discrete_array_element(N=4)
S["x"] = 0.0
assert numpy.issubdtype(S["x"].dtype, numpy.integer)
S["x"] = 1 + array_element(init=1)
assert S["x"] == 2
assert numpy.issubdtype(S["x"].dtype, numpy.integer)
if __name__ == "__main__":
test__init__()
# test_filter()
test_serialize()
test_reset_small()
test_reset_full()
test_tabulate()
test_equals()
```
#### File: base/utils/test_space_shortcuts.py
```python
import numpy
# from coopihc.base.elements import lin_space
from coopihc.base.elements import box_space, integer_space, integer_set
from coopihc.base.Space import CatSet, Numeric
def test_lin_space():
# space = lin_space(0, 10, num=11, dtype=numpy.int16)
# assert space.dtype == numpy.int16
# assert space.low == 0
# assert space.high == 10
# assert space.N == 11
# space = lin_space(-5, 5, num=11)
# assert space.dtype == numpy.int64
# assert space.low == -5
# assert space.high == 5
# assert space.N == 11
pass
def test_integer_space():
space = integer_space(10, dtype=numpy.int16)
assert isinstance(space, Numeric)
assert space.dtype == numpy.int16
assert space.low == 0
assert space.high == 9
assert space.N == 10
space = integer_space(N=3, start=-1)
assert isinstance(space, Numeric)
assert space.dtype == numpy.int64
assert space.low == -1
assert space.high == 1
assert space.N == 3
def test_integer_set():
space = integer_set(10, dtype=numpy.int16)
assert isinstance(space, CatSet)
assert space.dtype == numpy.int16
assert space.low == 0
def test_box():
space = box_space(numpy.ones((3, 3)))
assert isinstance(space, Numeric)
assert space.dtype == numpy.float64
assert (space.low == numpy.full((3, 3), -1)).all()
assert (space.high == numpy.full((3, 3), 1)).all()
space = box_space(low=-2 * numpy.ones((2, 2)), high=numpy.ones((2, 2)))
assert isinstance(space, Numeric)
assert space.dtype == numpy.float64
assert (space.low == numpy.full((2, 2), -2)).all()
assert (space.high == numpy.full((2, 2), 1)).all()
def test_base_init():
test_lin_space()
test_integer_space()
test_integer_set()
test_box()
if __name__ == "__main__":
test_base_init()
```
#### File: components/interaction_task/test_classiccontroltask.py
```python
# """This module provides tests for the ClassicControlTask class of the
# coopihc package."""
# from coopihc.interactiontask.ClassicControlTask import ClassicControlTask
# from coopihc.base.elements import array_element
# import numpy
# import copy
# task = None
# def test_init_no_kwargs():
# global task
# timestep = 0.01
# A = numpy.eye(2)
# B = numpy.array([[1], [1]])
# task = ClassicControlTask(timestep, A, B)
# assert (task.A_d == A).all()
# assert (task.B_d == B).all()
# def test_finit_no_kwargs():
# task.finit()
# assert (task.A == task.A_d).all()
# assert (task.B == task.B_d).all()
# def test_reset_no_kwargs():
# task.reset()
# x = task.state["x"]
# assert (x[1:] == numpy.zeros(x[1:].shape)).all()
# def test_on_user_action():
# u = array_element(init=1, low=-1, high=1)
# old_x = copy.copy(task.state["x"])
# new_state, reward, is_done = task.on_user_action(user_action=u)
# assert (new_state["x"] == old_x + numpy.array([1, 1])).all()
# def test_no_kwargs():
# test_init_no_kwargs()
# test_finit_no_kwargs()
# test_reset_no_kwargs()
# test_on_user_action()
# def test_init_no_kwargs_A():
# global task
# timestep = 0.01
# A = numpy.array([[1, 1], [1, 1]])
# B = numpy.array([[1], [1]])
# task = ClassicControlTask(timestep, A, B)
# task.finit()
# task.state["x"][...] = numpy.array([1, 1])
# def test_on_user_action_A():
# u = array_element(init=1, low=-1, high=1)
# old_x = copy.copy(task.state["x"])
# new_state, reward, is_done = task.on_user_action(user_action=u)
# assert (new_state["x"] == numpy.full((2, 2), 1) @ old_x + numpy.array([1, 1])).all()
# def test_no_kwargs_A():
# test_init_no_kwargs_A()
# test_on_user_action_A()
# def test_noise_F():
# # Incomplete test, could test the std of the new_state['x'] and see if it corresponds to Gaussian noise with std = F*sqrt(timestep)
# global task
# timestep = 0.01
# A = numpy.array([[1, 1], [1, 1]])
# B = numpy.array([[1], [1]])
# F = numpy.eye(2)
# task = ClassicControlTask(timestep, A, B, F=F)
# task.finit()
# u = array_element(init=1, low=-1, high=1)
# noise_sample = []
# for i in range(10000):
# task.state["x"][:] = numpy.array([1, 1])
# new_state, reward, is_done = task.on_user_action(user_action=u)
# noise_sample.append(new_state["x"].tolist() - 3)
# mean = numpy.mean(numpy.array(noise_sample))
# assert abs(mean) <= 0.01
# assert 0 < abs(mean)
# def test_noise_G():
# # incomplete test
# pass
# def test_noise_H():
# # Incomplete test
# pass
# def test_noise_selector():
# global task
# timestep = 0.01
# A = numpy.array([[1, 1], [1, 1]])
# B = numpy.array([[1], [1]])
# F = numpy.eye(2)
# task = ClassicControlTask(timestep, A, B, F=F, noise="off")
# task.finit()
# u = array_element(init=1, low=-1, high=1)
# noise_sample = []
# for i in range(1000):
# task.state["x"][...] = numpy.array([1, 1])
# new_state, reward, is_done = task.on_user_action(user_action=u)
# noise_sample.append(new_state["x"].tolist() - 3)
# mean = numpy.mean(numpy.array(noise_sample))
# assert 0 == abs(mean)
# def test_noise():
# test_noise_F()
# test_noise_G()
# test_noise_H()
# test_noise_selector()
# def test_discrete_dynamics_discrete_timespace():
# global task
# timestep = 0.01
# A = numpy.array([[1, 1], [1, 1]])
# B = numpy.array([[1], [1]])
# F = numpy.eye(2)
# task = ClassicControlTask(
# timestep, A, B, discrete_dynamics=True, timespace="discrete"
# )
# task.finit()
# assert (task.A_d == A).all()
# assert (task.B_d == B).all()
# assert (task.A_c == (A - numpy.eye(2)) / timestep).all()
# assert (task.B_c == 1 / timestep * B).all()
# assert (task.A == task.A_d).all()
# assert (task.B_d == task.B).all()
# def test_discrete_dynamics_continuous_timespace():
# global task
# timestep = 0.01
# A = numpy.array([[1, 1], [1, 1]])
# B = numpy.array([[1], [1]])
# F = numpy.eye(2)
# task = ClassicControlTask(
# timestep, A, B, discrete_dynamics=True, timespace="continuous"
# )
# task.finit()
# assert (task.A_d == A).all()
# assert (task.B_d == B).all()
# assert (task.A_c == (A - numpy.eye(2)) / timestep).all()
# assert (task.B_c == 1 / timestep * B).all()
# assert (task.A == task.A_c).all()
# assert (task.B_c == task.B).all()
# def test_continuous_dynamics_discrete_timespace():
# global task
# timestep = 0.01
# A = numpy.array([[1, 1], [1, 1]])
# B = numpy.array([[1], [1]])
# F = numpy.eye(2)
# task = ClassicControlTask(
# timestep, A, B, discrete_dynamics=False, timespace="discrete"
# )
# task.finit()
# assert (task.A_c == A).all()
# assert (task.B_c == B).all()
# assert (task.A_d == numpy.eye(2) + timestep * A).all()
# assert (task.B_d == timestep * B).all()
# assert (task.A == task.A_d).all()
# assert (task.B_d == task.B).all()
# def test_continuous_dynamics_continuous_timespace():
# global task
# timestep = 0.01
# A = numpy.array([[1, 1], [1, 1]])
# B = numpy.array([[1], [1]])
# F = numpy.eye(2)
# task = ClassicControlTask(
# timestep, A, B, discrete_dynamics=False, timespace="continuous"
# )
# task.finit()
# assert (task.A_c == A).all()
# assert (task.B_c == B).all()
# assert (task.A_d == numpy.eye(2) + timestep * A).all()
# assert (task.B_d == timestep * B).all()
# assert (task.A == task.A_c).all()
# assert (task.B_c == task.B).all()
# def test_dynamics_timespace():
# test_discrete_dynamics_discrete_timespace()
# test_discrete_dynamics_continuous_timespace()
# test_continuous_dynamics_discrete_timespace()
# test_continuous_dynamics_continuous_timespace()
# if __name__ == "__main__":
# test_no_kwargs()
# test_no_kwargs_A()
# test_noise()
# test_dynamics_timespace()
```
#### File: components/interaction_task/test_interactiontask.py
```python
import numpy
from coopihc import InteractionTask, StateElement
from coopihc.base.elements import array_element, cat_element, discrete_array_element
from coopihc.base.utils import StateNotContainedWarning, StateNotContainedError
import pytest
class MinimalTask(InteractionTask):
"""Non-functional minimal subclass to use in tests."""
def on_user_action(self):
pass
def on_assistant_action(self):
pass
def reset(self, dic=None):
pass
class MinimalTaskWithState(MinimalTask):
"""Non-functional minimal subclass including a state to use
in tests."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.state["x"] = discrete_array_element(low=-1, high=1, init=0)
class MinimalTaskWithStateAugmented(MinimalTask):
"""Non-functional minimal subclass including a more complex
state to use in tests."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.state["x"] = discrete_array_element(low=-1, high=1, init=0)
self.state["y"] = discrete_array_element(
low=2, high=5, init=2, out_of_bounds_mode="error"
)
self.state["z"] = discrete_array_element(
low=0, high=9, init=0, out_of_bounds_mode="clip"
)
class MinimalTaskWithStateAndDirectReset(MinimalTaskWithState):
"""Non-functional minimal subclass including a state and reset method
to use in tests. This class resets the state element directly."""
def reset(self, dic=None):
reset_value = -1
self.state["x"][...] = reset_value
class MinimalTaskWithStateAndResetViaState(MinimalTaskWithState):
"""Non-functional minimal subclass including a state and reset method
to use in tests. This class resets the state element via the state
property."""
def reset(self, dic=None):
reset_value = -1
self.state.reset(dic={"x": reset_value})
class MinimalTaskWithStateAndResetViaStateElement(MinimalTaskWithState):
"""Non-functional minimal subclass including a state and reset method
to use in tests. This class resets the state element via the
StateElement's reset method."""
def reset(self, dic=None):
reset_value = -1
self.state["x"].reset(value=reset_value)
def test_imports():
"""Tests the different import ways for the InteractionTask."""
from coopihc import InteractionTask
from coopihc.interactiontask import InteractionTask
from coopihc.interactiontask.InteractionTask import InteractionTask
def test_example():
"""Tries to import and create the example task."""
from coopihc import ExampleTask
ExampleTask()
def test_init():
"""Tries to initialize an InteractionTask and checks the expected
properties and methods."""
assert empty_init_fails()
assert cant_be_subclassed_without_necessary_overrides()
assert can_be_subclassed_with_minimal_overrides()
test_properties()
test_methods()
def test_properties():
"""Tests the expected properties for a minimal InteractionTask."""
task = MinimalTask()
# Direct attributes
assert hasattr(task, "_state")
assert hasattr(task, "bundle")
assert hasattr(task, "timestep")
assert hasattr(task, "ax")
# Property functions
assert hasattr(task, "state")
def test_methods():
"""Tests the expected methods for a minimal InteractionTask."""
task = MinimalTask()
# Public methods
assert hasattr(task, "finit")
assert hasattr(task, "base_on_user_action")
assert hasattr(task, "base_on_assistant_action")
assert hasattr(task, "on_user_action")
assert hasattr(task, "on_assistant_action")
assert hasattr(task, "reset")
assert hasattr(task, "render")
# Private methods
assert hasattr(task, "__content__")
assert hasattr(task, "_base_reset")
def can_be_subclassed_with_minimal_overrides():
"""Returns True if trying to subclass an InteractionTask with
only overrides for on_user_action, on_assistant_action and reset succeeds."""
MinimalTask()
return True
def cant_be_subclassed_without_necessary_overrides():
"""Returns True if trying to subclass an InteractionTask without
the necessary method overrides fails."""
assert cant_be_subclassed_without_on_user_action()
assert cant_be_subclassed_without_assistent_step()
assert cant_be_subclassed_without_reset()
return True
def cant_be_subclassed_without_reset():
"""Returns True if trying to subclass an InteractionTask without
a reset method override fails."""
class TaskWithoutReset(InteractionTask):
def on_assistant_action(self):
pass
def on_user_action(self):
pass
try:
TaskWithoutReset()
except TypeError:
return True
def cant_be_subclassed_without_on_user_action():
"""Returns True if trying to subclass an InteractionTask without
an on_user_action method override fails."""
class TaskWithoutAsssistentStep(InteractionTask):
def on_assistant_action(self):
pass
def reset(self):
pass
try:
TaskWithoutAsssistentStep()
except TypeError:
return True
def cant_be_subclassed_without_assistent_step():
"""Returns True if trying to subclass an InteractionTask without
an on_assistant_action method override fails."""
class TaskWithoutUserStep(InteractionTask):
def on_user_action(self):
pass
def reset(self):
pass
try:
TaskWithoutUserStep()
except TypeError:
return True
def empty_init_fails():
"""Returns True if trying to initialize an InteractionTask
without any arguments fails."""
try:
InteractionTask()
except TypeError:
return True
def test_double_base_reset_without_dic():
"""Creates a minimal task and calls base reset on it twice."""
task = MinimalTaskWithState()
task._base_reset()
task._base_reset()
def test_base_reset_randomness():
"""Tests that state value is set to random value within space when
no dic is supplied."""
task = MinimalTaskWithState()
# Reset task state (should be random)
possible_values = [-1, 0, 1]
counter = {value: 0 for value in possible_values}
for _ in range(1000):
task._base_reset()
value = task.state["x"].squeeze().tolist()
counter[value] += 1
for value in possible_values:
assert counter[value] > 0
def test_base_reset_without_dic():
"""Tests the reset method when no dic is provided."""
test_double_base_reset_without_dic()
test_base_reset_randomness()
def test_base_reset_with_full_dic():
task = MinimalTaskWithState()
reset_dic = {"x": numpy.array([0])}
task._base_reset(dic=reset_dic)
assert isinstance(task.state["x"], StateElement)
assert task.state["x"] == 0
reset_dic = {"x": numpy.array([1])}
task._base_reset(reset_dic)
assert isinstance(task.state["x"], StateElement)
assert task.state["x"] == 1
reset_dic = {"x": numpy.array([-1])}
task._base_reset(reset_dic)
assert isinstance(task.state["x"], StateElement)
assert task.state["x"] == -1
reset_dic = {"x": numpy.array([-2])}
with pytest.warns(StateNotContainedWarning):
task._base_reset(reset_dic)
assert isinstance(task.state["x"], StateElement)
assert task.state["x"] == -2
reset_dic = {"x": numpy.array([2])}
with pytest.warns(StateNotContainedWarning):
task._base_reset(reset_dic)
assert task.state["x"] == 2
assert isinstance(task.state["x"], StateElement)
task = MinimalTaskWithStateAugmented()
reset_dic = {"x": numpy.array([0]), "y": numpy.array([5]), "z": numpy.array([1])}
task._base_reset(dic=reset_dic)
assert task.state["x"] == 0
assert isinstance(task.state["x"], StateElement)
assert task.state["y"] == 5
assert isinstance(task.state["y"], StateElement)
assert task.state["z"] == 1
assert isinstance(task.state["z"], StateElement)
reset_dic = {"x": numpy.array([0]), "y": numpy.array([6]), "z": numpy.array([1])}
with pytest.raises(StateNotContainedError):
task._base_reset(dic=reset_dic)
reset_dic = {"x": numpy.array([0]), "y": numpy.array([5]), "z": numpy.array([-8])}
task._base_reset(dic=reset_dic)
assert task.state["z"] == 0
def test_base_reset_with_partial_dic():
task = MinimalTaskWithStateAugmented()
reset_dic = {"x": numpy.array([0]), "y": numpy.array([2])}
task._base_reset(reset_dic)
assert task.state["x"] == 0
assert isinstance(task.state["x"], StateElement)
assert task.state["y"] == 2
assert isinstance(task.state["y"], StateElement)
set_z = {}
for i in range(100):
task._base_reset(reset_dic)
set_z[str(task.state["z"])] = task.state["z"].tolist()
assert sorted(list(set_z.values())) == [i for i in range(10)]
def test_base_reset_with_overwritten_reset():
"""Tests the _base_reset method if the subclassed InteractionTask has
implemented a custom reset method."""
for task_class in [
MinimalTaskWithStateAndDirectReset,
MinimalTaskWithStateAndResetViaState,
MinimalTaskWithStateAndResetViaStateElement,
]:
task = task_class()
assert task.state["x"] == 0
assert isinstance(task.state["x"], StateElement)
task._base_reset()
assert task.state["x"] == -1
assert isinstance(task.state["x"], StateElement)
def test_base_reset():
"""Tests the forced reset mechanism provided by the _base_reset method"""
test_base_reset_without_dic()
test_base_reset_with_full_dic()
test_base_reset_with_partial_dic()
test_base_reset_with_overwritten_reset()
def test_interactiontask():
"""Tests the methods provided by the InteractionTask class."""
test_imports()
test_example()
test_init()
test_base_reset()
# +----------------------+
# + MAIN +
# +----------------------+
if __name__ == "__main__":
test_interactiontask()
```
#### File: test/examples/test_examples.py
```python
import pytest
def test_basic_examples():
import coopihc.examples.basic_examples.space_examples
import coopihc.examples.basic_examples.stateelement_examples
import coopihc.examples.basic_examples.state_examples
import coopihc.examples.basic_examples.observation_examples
import coopihc.examples.basic_examples.policy_examples
import coopihc.examples.basic_examples.agents_examples
import coopihc.examples.basic_examples.interactiontask_examples
def test_simple_examples():
import coopihc.examples.simple_examples.lqr_example
import coopihc.examples.simple_examples.lqg_example
import coopihc.examples.simple_examples.assistant_has_user_model
import coopihc.examples.simple_examples.rl_sb3
import coopihc.examples.simple_examples.exploit_rlnet
@pytest.mark.timeout(3)
def test_bundle_examples():
import coopihc.examples.basic_examples.bundle_examples
def test_all_examples():
test_basic_examples()
test_simple_examples()
test_bundle_examples()
if __name__ == "__main__":
test_all_examples()
```
|
{
"source": "jgori-ouistiti/CoopIHC-zoo",
"score": 2
}
|
#### File: coopihczoo/biggraph/assistants.py
```python
from coopihc import (
BaseAgent,
GoalInferenceWithUserPolicyGiven,
BIGDiscretePolicy,
State,
StateElement,
Space,
autospace,
discrete_space,
)
import copy
import numpy
class B(BaseAgent):
def __init__(self, N=8):
self.N = N
super().__init__(
"assistant", agent_inference_engine=GoalInferenceWithUserPolicyGiven() #
)
def finit(self):
action_state = self.bundle.game_state["assistant_action"]
action_state["action"] = StateElement(
0, autospace([i for i in range(2 ** self.N)]), out_of_bounds_mode="error"
)
user_policy_model = copy.deepcopy(self.bundle.user.policy)
agent_policy = BIGDiscretePolicy(action_state, user_policy_model)
self.attach_policy(agent_policy)
self.inference_engine.attach_policy(user_policy_model)
self.state["beliefs"] = StateElement(
numpy.array([1 / self.N for i in range(self.N)]).reshape(-1, 1),
autospace(
numpy.zeros((1, self.N)),
numpy.ones((1, self.N)),
),
out_of_bounds_mode="error",
)
def reset(self, dic=None):
self.state["beliefs"][:] = numpy.array(
[1 / self.N for i in range(self.N)]
).reshape(1, -1)
# change theta for inference engine
set_theta = [
{
("user_state", "goal"): StateElement(
t,
discrete_space(numpy.array(list(range(self.bundle.task.gridsize)))),
)
}
for t in self.bundle.task.state["targets"]
]
self.inference_engine.attach_set_theta(set_theta)
self.policy.attach_set_theta(set_theta)
def transition_function(assistant_action, observation):
"""What future observation will the user see due to assistant action"""
# always do this
observation["assistant_action"]["action"] = assistant_action
# specific to BIGpointer
observation["task_state"]["position"] = assistant_action
return observation
self.policy.attach_transition_function(transition_function)
```
#### File: coopihczoo/eye/users.py
```python
from coopihc.agents.BaseAgent import BaseAgent
from coopihc.observation.RuleObservationEngine import RuleObservationEngine
from coopihc.observation.utils import base_user_engine_specification
from coopihc.policy.LinearFeedback import LinearFeedback
from coopihc.space.State import State
from coopihc.space.StateElement import StateElement
from coopihc.space.Space import Space
from coopihc.inference.LinearGaussianContinuous import LinearGaussianContinuous
from coopihc.inference.BaseInferenceEngine import BaseInferenceEngine
from coopihc.inference.CascadedInferenceEngine import CascadedInferenceEngine
from .utils import ProvideLikelihoodInferenceEngine, eccentric_noise
from scipy.linalg import toeplitz
import numpy
import copy
class ChenEye(BaseAgent):
"""Model based on that of Chen, Xiuli, et al. "An Adaptive Model of Gaze-based Selection" Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems. 2021., --> with a real 2D implementation and a couple changes"""
@staticmethod
def eccentric_motor_noise(action, observation, oculomotornoise, *args, **kwargs):
noise_covariance_matrix = eccentric_noise(
action, observation["task_state"]["fixation"], oculomotornoise
)
noise = numpy.random.multivariate_normal(
numpy.zeros(shape=action.reshape(-1).shape), noise_covariance_matrix
)
return noise, noise_covariance_matrix
@staticmethod
def eccentric_perceptual_noise(_obs, game_state, perceptualnoise, *args):
target = game_state["task_state"]["target"]
position = game_state["task_state"]["fixation"]
# Def of eccentric_noise in utils
Sigma = eccentric_noise(target, position, perceptualnoise)
noise = numpy.random.multivariate_normal(
numpy.zeros(shape=target.reshape(-1).shape), Sigma
).reshape(-1, 1)
print("\n============ here")
print(noise, target)
return _obs + noise # noise clipped automatically by StateElement behavior
def __init__(self, perceptualnoise, oculomotornoise, dimension=2, *args, **kwargs):
self.dimension = dimension
self.perceptualnoise = perceptualnoise
self.oculomotornoise = oculomotornoise
# ============= Define Policy
action_state = State()
action_state["action"] = StateElement(
numpy.zeros((dimension, 1), dtype=numpy.float32),
Space(
[
-numpy.ones((dimension, 1), dtype=numpy.float32),
numpy.ones((dimension, 1), dtype=numpy.float32),
],
"continuous",
),
out_of_bounds_mode="warning",
)
def noise_function(action, observation, oculomotornoise):
noise_obs = State()
noise_obs["task_state"] = State()
noise_obs["task_state"]["target"] = action
noise_obs["task_state"]["fixation"] = observation["task_state"]["fixation"]
noise = self.eccentric_motor_noise(action, noise_obs, oculomotornoise)[0]
return action + noise.reshape((-1, 1))
agent_policy = LinearFeedback(
action_state,
("user_state", "belief-mu"),
noise_function=noise_function,
noise_func_args=(self.oculomotornoise,),
)
# ============ Define observation Engine
extraprobabilisticrules = {
("task_state", "target"): (
self.eccentric_perceptual_noise,
(self.perceptualnoise,),
)
}
observation_engine = RuleObservationEngine(
deterministic_specification=base_user_engine_specification,
extraprobabilisticrules=extraprobabilisticrules,
)
# =============== Define inference Engine
first_inference_engine = ProvideLikelihoodInferenceEngine(perceptualnoise)
second_inference_engine = LinearGaussianContinuous()
inference_engine = CascadedInferenceEngine(
[first_inference_engine, second_inference_engine]
)
# ============= Define State
belief_mu = StateElement(
numpy.zeros((self.dimension, 1)),
Space(
[
-numpy.ones((self.dimension, 1), dtype=numpy.float32),
numpy.ones((self.dimension, 1), dtype=numpy.float32),
],
"continuous",
),
out_of_bounds_mode="warning",
)
belief_sigma = StateElement(
numpy.full((self.dimension, self.dimension), numpy.inf),
Space(
[
-numpy.inf
* numpy.ones((self.dimension, self.dimension), dtype=numpy.float32),
numpy.inf
* numpy.ones((self.dimension, self.dimension), dtype=numpy.float32),
],
"continuous",
),
out_of_bounds_mode="warning",
)
state = State()
state["belief-mu"] = belief_mu
state["belief-sigma"] = belief_sigma
state["y"] = copy.deepcopy(belief_mu)
state["Sigma_0"] = copy.deepcopy(belief_sigma)
super().__init__(
"user",
agent_state=state,
agent_policy=agent_policy,
agent_observation_engine=observation_engine,
agent_inference_engine=inference_engine,
)
def finit(self):
pass
def reset(self, dic=None):
"""Reset the fixation at the center (0;0), reset the prior belief
:meta public:
"""
# Initialize here the start position of the eye as well as initial uncertainty
observation = State()
observation["task_state"] = State()
observation["task_state"]["target"] = self.bundle.task.state["target"]
observation["task_state"]["fixation"] = self.bundle.task.state["fixation"]
# Initialize with a huge Gaussian noise so that the first observation massively outweighs the prior. Put more weight on the pure variance components to ensure that it will behave well.
Sigma = toeplitz([1000] + [100 for i in range(self.dimension - 1)])
self.state["belief-mu"][:] = numpy.array([0 for i in range(self.dimension)])
self.state["belief-sigma"][:, :] = Sigma
self.state["y"][:] = numpy.array([0 for i in range(self.dimension)])
self.state["Sigma_0"][:] = Sigma
def render(self, *args, **kwargs):
mode = kwargs.get("mode")
if mode is None:
mode = "text"
try:
axtask, axuser, axassistant = args
self.inference_engine.render(axtask, axuser, axassistant, mode=mode)
except ValueError:
self.inference_engine.render(mode=mode)
```
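As a side note on the reset above: the prior covariance is a symmetric Toeplitz matrix whose first column puts large values on the diagonal and smaller ones off-diagonal, so the prior is both invertible and easily overridden by the first observation. A minimal standalone sketch of that construction (only `scipy.linalg.toeplitz` is assumed):
```python
from scipy.linalg import toeplitz

dimension = 2  # illustrative value
Sigma = toeplitz([1000] + [100 for _ in range(dimension - 1)])
print(Sigma)
# [[1000  100]
#  [ 100 1000]]
```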
#### File: examples/composition/carefulpointer_with_cheneye.py
```python
from coopihczoo.pointing.envs import SimplePointingTask
import copy
# Add a state to the SimplePointingTask to memorize the old position
class oldpositionMemorizedSimplePointingTask(SimplePointingTask):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.memorized = None
def reset(self, dic={}):
super().reset(dic=dic)
self.state["oldposition"] = copy.deepcopy(self.state["position"])
def user_step(self, *args, **kwargs):
self.memorized = copy.deepcopy(self.state["position"])
obs, rewards, is_done = super().user_step(*args, **kwargs)
obs["oldposition"] = self.memorized
return obs, rewards, is_done
def assistant_step(self, *args, **kwargs):
self.memorized = copy.deepcopy(self.state["position"])
obs, rewards, is_done = super().assistant_step(*args, **kwargs)
obs["oldposition"] = self.memorized
return obs, rewards, is_done
pointing_task = oldpositionMemorizedSimplePointingTask(
gridsize=31, number_of_targets=8, mode="position"
)
from coopihczoo.eye.envs import ChenEyePointingTask
from coopihczoo.eye.users import ChenEye
from coopihc.bundle.Bundle import Bundle
fitts_W = 4e-2
fitts_D = 0.8
perceptualnoise = 0.2
oculomotornoise = 0.2
task = ChenEyePointingTask(fitts_W, fitts_D, dimension=1)
user = ChenEye(perceptualnoise, oculomotornoise, dimension=1)
obs_bundle = Bundle(task=task, user=user)
from coopihc.observation.WrapAsObservationEngine import WrapAsObservationEngine
class ChenEyeObservationEngineWrapper(WrapAsObservationEngine):
def __init__(self, obs_bundle):
super().__init__(obs_bundle)
def observe(self, game_state):
# Deal with the case where the cursor is in the same position as the target. This is needed to have a non-singular matrix (i.e. a matrix that can be inverted).
if (
game_state["task_state"]["position"]
== game_state["task_state"]["oldposition"]
):
return game_state, -1
# set observation bundle to the right state and cast it to the right space
target = game_state["task_state"]["position"].cast(
self.game_state["task_state"]["target"]
)
fixation = game_state["task_state"]["oldposition"].cast(
self.game_state["task_state"]["fixation"]
)
reset_dic = {"task_state": {"target": target, "fixation": fixation}}
self.reset(dic=reset_dic, turn=0)
# perform the run
is_done = False
rewards = 0
while True:
obs, reward_dic, is_done = self.step()
rewards += sum(reward_dic.values())
if is_done:
break
# cast back to initial space and return
obs["task_state"]["fixation"].cast(game_state["task_state"]["oldposition"])
obs["task_state"]["target"].cast(game_state["task_state"]["position"])
return game_state, rewards
from coopihc.observation.RuleObservationEngine import RuleObservationEngine
from coopihc.observation.CascadedObservationEngine import CascadedObservationEngine
from coopihc.observation.utils import base_user_engine_specification
# Define cascaded observation engine
cursor_tracker = ChenEyeObservationEngineWrapper(obs_bundle)
default_observation_engine = RuleObservationEngine(
deterministic_specification=base_user_engine_specification,
)
observation_engine = CascadedObservationEngine(
[cursor_tracker, default_observation_engine]
)
from coopihczoo.pointing.users import CarefulPointer
from coopihczoo.pointing.assistants import BIGGain
binary_user = CarefulPointer(override_observation_engine=(observation_engine, {}))
BIGpointer = BIGGain()
bundle = Bundle(task=pointing_task, user=binary_user, assistant=BIGpointer)
game_state = bundle.reset(turn=1)
bundle.render("plotext")
reward_list = []
while True:
obs, rewards, is_done = bundle.step()
reward_list.append(rewards)
bundle.render("plotext")
if is_done:
break
```
|
{
"source": "jgori-ouistiti/interaction-agents",
"score": 3
}
|
#### File: coopihc/agents/GoalDrivenDiscreteUser.py
```python
from coopihc.agents.BaseAgent import BaseAgent
from coopihc.space.StateElement import StateElement
import copy
### Goal could be defined as a target state of the task, in a more general description.
class GoalDrivenDiscreteUser(BaseAgent):
"""Discrete actionspace, state includes a 'goal' state and expects a task with a 'targets' state.
A user whose behavior (policy) is driven by the value of a 'goal' state. The policy uses discrete actions. It expects to be used with a task that has a substate named 'targets'. The goal state's value may be either one of the task's 'targets'.
"""
def finit(self):
"""finit.
Appends a 'goal' substate to the agent's internal state, whose values are either one of the task's 'targets'.
:meta public:
"""
target_space = self.bundle.task.state["targets"]["spaces"][0]
self.state["goal"] = StateElement(values=None, spaces=copy.copy(target_space))
return
def render(self, *args, **kwargs):
"""render.
Similar to BaseAgent's render, but displays the "goal" state in addition.
:param args: (list) list of axes used in the bundle render, in order: axtask, axuser, axassistant
:param mode: (str) currently supports either 'plot' or 'text'. Both modes can be combined by having both modes in the same string e.g. 'plottext' or 'plotext'.
:meta public:
"""
mode = kwargs.get("mode")
if mode is None:
mode = "text"
if "plot" in mode:
axtask, axuser, axassistant = args[:3]
if self.ax is not None:
pass
else:
self.ax = axuser
self.ax.text(0, 0, "Goal: {}".format(self.state["goal"][0][0]))
self.ax.set_xlim([-0.5, 0.5])
self.ax.set_ylim([-0.5, 0.5])
self.ax.axis("off")
self.ax.set_title(type(self).__name__ + " Goal")
if "text" in mode:
print(type(self).__name__ + " Goal")
print(self.state["Goal"][0][0])
```
#### File: agents/lqrcontrollers/FHDT_LQRController.py
```python
from coopihc.agents.lqrcontrollers.LQRController import LQRController
import scipy.linalg
# Finite Horizon Discrete Time Controller
# Outdated
class FHDT_LQRController(LQRController):
"""Finite Horizon Discrete Time LQR
A Finite Horizon (i.e. planning for N steps) Discrete Time implementation of the LQR controller.
:param N: Horizon (steps)
:type N: int
:param role: "user" or "assistant"
:type role: string
:param Q: see :py:class:`LQRController <coopihc.agents.lqrcontrollers.LQRController.LQRController>`
:type Q: numpy.ndarray
:param R: see :py:class:`LQRController <coopihc.agents.lqrcontrollers.LQRController.LQRController>`
:type R: numpy.ndarray
:param Gamma: see :py:class:`LQRController <coopihc.agents.lqrcontrollers.LQRController.LQRController>`
:type Gamma: float
"""
def __init__(self, N, role, Q, R, Gamma):
self.N = N
self.i = 0
super().__init__(role, Q, R, gamma=Gamma)
self.timespace = "discrete"
# untested, old version below
def reset(self):
"""reset"""
self.i = 0
# def reset(self, dic=None):
#
# self.i = 0
# super().reset(dic)
def finit(self):
"""finit
Compute feedback gain from A, B, Q, R matrices.
"""
self.K = []
task = self.bundle.task
A, B = task.A, task.B
# Compute P(k) matrix for k in (N:-1:1)
self.P = [self.Q]
for k in range(self.N - 1, 0, -1):
Pcurrent = self.P[0]
invPart = scipy.linalg.inv((self.R + B.T @ Pcurrent @ B))
Pnext = (
self.Q
+ A.T @ Pcurrent @ A
- A.T @ Pcurrent @ B @ invPart @ B.T @ Pcurrent @ A
)
self.P.insert(0, Pnext)
# Compute the feedback gains
for Pcurrent in self.P:
invPart = scipy.linalg.inv((self.R + B.T @ Pcurrent @ B))
K = -invPart @ B.T @ Pcurrent @ A
self.K.append(K)
```
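For readers unfamiliar with the recursion in `finit` above: it is the standard backward Riccati recursion for finite-horizon discrete-time LQR, P_k = Q + Aᵀ P_{k+1} A − Aᵀ P_{k+1} B (R + Bᵀ P_{k+1} B)⁻¹ Bᵀ P_{k+1} A, with gains K_k = −(R + Bᵀ P_{k+1} B)⁻¹ Bᵀ P_{k+1} A. A self-contained numerical sketch on a toy double integrator (independent of coopihc; the matrices are illustrative):
```python
import numpy
import scipy.linalg

# Toy discrete-time double integrator (illustrative values)
A = numpy.array([[1.0, 0.1], [0.0, 1.0]])
B = numpy.array([[0.0], [0.1]])
Q = numpy.eye(2)
R = numpy.array([[0.1]])
N = 5  # horizon

P = [Q]  # P_N = Q
for _ in range(N - 1):
    Pc = P[0]
    inv_part = scipy.linalg.inv(R + B.T @ Pc @ B)
    P.insert(0, Q + A.T @ Pc @ A - A.T @ Pc @ B @ inv_part @ B.T @ Pc @ A)

# Feedback gains: the control at step i is u_i = K[i] @ x_i
K = [-scipy.linalg.inv(R + B.T @ Pc @ B) @ B.T @ Pc @ A for Pc in P]
print(K[0])
```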
#### File: coopihc/bundle/Bundle.py
```python
from coopihc.bundle._Bundle import _Bundle
from coopihc.agents.BaseAgent import BaseAgent
class Bundle(_Bundle):
"""Bundle
Modifies the interface of the _Bundle class.
A bundle combines a task with a user and an assistant. The bundle creates the ``game_state`` by combining the task, user and assistant states with the turn index and both agents' actions.
The bundle takes care of all the messaging between classes, making sure the game state and all individual states are synchronized at all times.
The bundle implements a forced reset mechanism, where each state of the bundle can be forced to a particular state via a dictionary mechanism (see :py:func:reset)
The bundle also takes care of rendering each of the three components in a single place.
:param task: A task that inherits from ``InteractionTask``, defaults to None
:type task: :py:class:`coopihc.interactiontask.InteractionTask.InteractionTask`, optional
:param user: a user which inherits from ``BaseAgent``, defaults to None
:type user: :py:class:`coopihc.agents.BaseAgent.BaseAgent`, optional
:param assistant: an assistant which inherits from ``BaseAgent``, defaults to None
:type assistant: :py:class:`coopihc.agents.BaseAgent.BaseAgent`, optional
"""
def __init__(self, *args, task=None, user=None, assistant=None, **kwargs):
if task is None:
task_bit = "0"
raise NotImplementedError
else:
task_bit = "1"
if user is None:
user = BaseAgent("user")
user_bit = "0"
else:
user_bit = "1"
if assistant is None:
assistant = BaseAgent("assistant")
assistant_bit = "0"
else:
assistant_bit = "1"
self.bundle_bits = task_bit + user_bit + assistant_bit
if user_bit + assistant_bit == "00":
name = "no-user--no-assistant"
elif user_bit + assistant_bit == "01":
name = "no-user"
elif user_bit + assistant_bit == "10":
name = "no-assistant"
else:
name = "full"
super().__init__(task, user, assistant, *args, name=name, **kwargs)
```
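A short usage note on the defaults handled above: omitting the user or assistant argument substitutes a plain `BaseAgent`, and the bundle name records the configuration. A minimal sketch reusing the example classes from this repository (construction only; behaviour depends on the task):
```python
from coopihc.bundle.Bundle import Bundle
from coopihc.interactiontask import ExampleTask
from coopihc.agents.ExampleUser import ExampleUser

# No assistant passed: a default BaseAgent("assistant") is created internally,
# bundle_bits == "110" and the bundle is named "no-assistant".
bundle = Bundle(task=ExampleTask(), user=ExampleUser())
```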
#### File: coopihc/bundle/_Bundle.py
```python
from coopihc.space.Space import Space
from coopihc.space.State import State
from coopihc.space.StateElement import StateElement
import numpy
import yaml
from collections import OrderedDict
import matplotlib.pyplot as plt
class _Bundle:
"""Main class for bundles.
Main class for bundles. This class is subclassed by Bundle, which defines the interface with which to interact.
A bundle combines a task with a user and an assistant. The bundle creates the ``game_state`` by combining the task, user and assistant states with the turn index and both agents' actions.
The bundle takes care of all the messaging between classes, making sure the game state and all individual states are synchronized at all times.
The bundle implements a forced reset mechanism, where each state of the bundle can be forced to a particular state via a dictionary mechanism (see :py:func:reset)
The bundle also takes care of rendering each of the three components in a single place.
:param task: (:py:class:`coopihc.interactiontask.InteractionTask.InteractionTask`) A task that inherits from ``InteractionTask``
:param user: (:py:class:`coopihc.agents.BaseAgent.BaseAgent`) a user which inherits from ``BaseAgent``
:param assistant: (:py:class:`coopihc.agents.BaseAgent.BaseAgent`) an assistant which inherits from ``BaseAgent``
:meta public:
"""
def __init__(self, task, user, assistant, *args, **kwargs):
self.kwargs = kwargs
self.task = task
self.task.bundle = self
self.user = user
self.user.bundle = self
self.assistant = assistant
self.assistant.bundle = self
# Form complete game state
self.game_state = State()
turn_index = StateElement(
values=[0],
spaces=Space([numpy.array([0, 1, 2, 3])], dtype=numpy.int8),
)
self.game_state["turn_index"] = turn_index
self.game_state["task_state"] = task.state
self.game_state["user_state"] = user.state
self.game_state["assistant_state"] = assistant.state
if user.policy is not None:
self.game_state["user_action"] = user.policy.action_state
else:
self.game_state["user_action"] = State()
self.game_state["user_action"]["action"] = StateElement()
if assistant.policy is not None:
self.game_state["assistant_action"] = assistant.policy.action_state
else:
self.game_state["assistant_action"] = State()
self.game_state["assistant_action"]["action"] = StateElement()
self.task.finit()
self.user.finit()
self.assistant.finit()
self.round_number = 0
# Needed for render
self.active_render_figure = None
self.figure_layout = [211, 223, 224]
self.rendered_mode = None
self.render_perm = False
self.playspeed = 0.1
def __repr__(self):
"""__repr__
Pretty representation for Bundles.
:return: pretty bundle print
:rtype: string
"""
return "{}\n".format(self.__class__.__name__) + yaml.safe_dump(
self.__content__()
)
def __content__(self):
"""__content__
Custom class representation
:return: class repr
:rtype: dictionary
"""
return {
"Task": self.task.__content__(),
"User": self.user.__content__(),
"Assistant": self.assistant.__content__(),
}
@property
def turn_number(self):
"""turn_number
The turn number in the game (0 to 3)
:return: turn number
:rtype: numpy.ndarray
"""
return self.game_state["turn_index"]["values"][0]
@turn_number.setter
def turn_number(self, value):
self._turn_number = value
self.game_state["turn_index"]["values"] = numpy.array(value)
def reset(self, turn=0, task=True, user=True, assistant=True, dic={}):
"""Reset bundle.
1. Reset the game and start at a specific turn number.
2. select which components to reset
3. forced reset mechanism using dictionaries
Example:
.. code-block:: python
new_target_value = self.game_state["task_state"]["targets"]
new_fixation_value = self.game_state["task_state"]["fixation"]
)
reset_dic = {"task_state": {"targets": new_target_value, "fixation": new_fixation_value}}
self.reset(dic=reset_dic, turn = 1)
Will set the substates "targets" and "fixation" of state "task_state" to some value.
.. note ::
If subclassing _Bundle, make sure to call super().reset() in the new reset method.
:param turn: game turn number, defaults to 0
:type turn: int, optional
:param task: reset task?, defaults to True
:type task: bool, optional
:param user: reset user?, defaults to True
:type user: bool, optional
:param assistant: reset assistant?, defaults to True
:type assistant: bool, optional
:param dic: reset_dic, defaults to {}
:type dic: dict, optional
:return: new game state
:rtype: :py:class:`State<coopihc.space.State.State>`
"""
if task:
task_dic = dic.get("task_state")
task_state = self.task._base_reset(dic=task_dic)
if user:
user_dic = dic.get("user_state")
user_state = self.user._base_reset(dic=user_dic)
if assistant:
assistant_dic = dic.get("assistant_state")
assistant_state = self.assistant._base_reset(dic=assistant_dic)
self.turn_number = turn
if turn == 0:
return self.game_state
if turn >= 1:
self._user_first_half_step()
if turn >= 2:
user_action, _ = self.user.take_action()
self.broadcast_action("user", user_action)
self._user_second_half_step(user_action)
if turn >= 3:
self._assistant_first_half_step()
return self.game_state
def step(self, user_action=None, assistant_action=None, go_to_turn=None, **kwargs):
"""Play a round
Play a round of the game. A round consists of 4 turns. If go_to_turn is not None, the round is only played until that turn.
If a user action and an assistant action are passed as arguments, then these are used as actions to play the round. Otherwise, these actions are sampled from each agent's policy.
:param user_action: user action
:type user_action: any
:param assistant_action: assistant action
:type assistant_action: any
:param go_to_turn: turn at which round stops, defaults to None
:type go_to_turn: int, optional
:return: gamestate, reward, game finished flag
:rtype: tuple(:py:class:`State<coopihc.space.State.State>`, collections.OrderedDict, boolean)
"""
# step() was called
# if not args:
# user_action, assistant_action = None, None
# elif len(args) == 1:
# if self.kwargs.get("name") == "no-assistant":
# user_action, assistant_action = args[0], None
# elif self.kwargs.get("name") == "no-user":
# user_action, assistant_action = None, args[0]
# else:
# raise AttributeError(
# "Passing a single action is only allowed when the game is played with a single agent."
# )
# step(user_action, None) or step(None, assistant_action) or step(user_action, assistant_action) was called
# else:
# user_action, assistant_action = args
if go_to_turn is None:
go_to_turn = self.turn_number
_started = False
rewards = {}
rewards["user_observation_reward"] = 0
rewards["user_inference_reward"] = 0
rewards["first_task_reward"] = 0
rewards["assistant_observation_reward"] = 0
rewards["assistant_inference_reward"] = 0
rewards["second_task_reward"] = 0
while self.turn_number != go_to_turn or (not _started):
_started = True
# User observes and infers
if self.turn_number == 0:
(
user_obs_reward,
user_infer_reward,
) = self._user_first_half_step()
(
rewards["user_observation_reward"],
rewards["user_inference_reward"],
) = (user_obs_reward, user_infer_reward)
# User takes action and receives reward from task
elif self.turn_number == 1:
if user_action is None:
user_action, user_policy_reward = self.user._take_action()
else:
user_policy_reward = 0
self.broadcast_action("user", user_action)
task_reward, is_done = self._user_second_half_step(user_action)
rewards["first_task_reward"] = task_reward
if is_done:
return self.game_state, rewards, is_done
# Assistant observes and infers
elif (
self.turn_number == 2 and not self.kwargs.get("name") == "no-assistant"
):
(
assistant_obs_reward,
assistant_infer_reward,
) = self._assistant_first_half_step()
(
rewards["assistant_observation_reward"],
rewards["assistant_inference_reward"],
) = (assistant_obs_reward, assistant_infer_reward)
# Assistant takes action and receives reward from task
elif (
self.turn_number == 3 and not self.kwargs.get("name") == "no-assistant"
):
if assistant_action is None:
(
assistant_action,
assistant_policy_reward,
) = self.assistant._take_action()
else:
assistant_policy_reward = 0
self.broadcast_action("assistant", assistant_action)
task_reward, is_done = self._assistant_second_half_step(
assistant_action
)
rewards["second_task_reward"] = task_reward
if is_done:
return self.game_state, rewards, is_done
self.turn_number = (self.turn_number + 1) % 4
self.round_number += 1
self.task.round += 1
return self.game_state, rewards, False
def render(self, mode, *args, **kwargs):
"""render
Combines all render methods.
:param mode: "text" or "plot"
:param type: string
:meta public:
"""
self.rendered_mode = mode
if "text" in mode:
print("Task Render")
self.task.render(mode="text", *args, **kwargs)
print("User Render")
self.user.render(mode="text", *args, **kwargs)
print("Assistant Render")
self.assistant.render(mode="text", *args, **kwargs)
if "log" in mode:
self.task.render(mode="log", *args, **kwargs)
self.user.render(mode="log", *args, **kwargs)
self.assistant.render(mode="log", *args, **kwargs)
if "plot" in mode:
if self.active_render_figure:
plt.pause(self.playspeed)
self.task.render(
self.axtask,
self.axuser,
self.axassistant,
mode=mode,
*args,
**kwargs,
)
self.user.render(
self.axtask,
self.axuser,
self.axassistant,
mode="plot",
*args,
**kwargs,
)
self.assistant.render(
self.axtask,
self.axuser,
self.axassistant,
mode="plot",
*args,
**kwargs,
)
self.fig.canvas.draw()
else:
self.active_render_figure = True
self.fig = plt.figure()
self.axtask = self.fig.add_subplot(self.figure_layout[0])
self.axtask.set_title("Task State")
self.axuser = self.fig.add_subplot(self.figure_layout[1])
self.axuser.set_title("User State")
self.axassistant = self.fig.add_subplot(self.figure_layout[2])
self.axassistant.set_title("Assistant State")
self.task.render(
self.axtask,
self.axuser,
self.axassistant,
mode="plot",
*args,
**kwargs,
)
self.user.render(
self.axtask,
self.axuser,
self.axassistant,
*args,
mode="plot",
**kwargs,
)
self.assistant.render(
self.axtask,
self.axuser,
self.axassistant,
*args,
mode="plot",
**kwargs,
)
self.fig.show()
plt.tight_layout()
if not ("plot" in mode or "text" in mode):
self.task.render(None, mode=mode, *args, **kwargs)
self.user.render(None, mode=mode, *args, **kwargs)
self.assistant.render(None, mode=mode, *args, **kwargs)
def close(self):
"""close
Close the bundle once the game is finished.
"""
if self.active_render_figure:
plt.close(self.fig)
self.active_render_figure = None
def _user_first_half_step(self):
"""_user_first_half_step
Turn 1, where the user observes the game state and updates its state via inference.
:return: user observation and inference reward
:rtype: tuple(float, float)
"""
if not self.kwargs.get("onreset_deterministic_first_half_step"):
user_obs_reward, user_infer_reward = self.user._agent_step()
else:
# Store the probabilistic rules
store = self.user.observation_engine.extraprobabilisticrules
# Remove the probabilistic rules
self.user.observation_engine.extraprobabilisticrules = {}
# Generate an observation without generating an inference
user_obs_reward, user_infer_reward = self.user._agent_step(infer=False)
# Reposition the probabilistic rules, and reset mapping
self.user.observation_engine.extraprobabilisticrules = store
self.user.observation_engine.mapping = None
self.kwargs["onreset_deterministic_first_half_step"] = False
return user_obs_reward, user_infer_reward
def _user_second_half_step(self, user_action):
"""_user_second_half_step
Turn 2, where the user takes an action.
:param user_action: user action
:param type: Any
:return: task reward, task done?
:rtype: tuple(float, boolean)
"""
# Play user's turn in the task
task_state, task_reward, is_done, _ = self.task.base_user_step(user_action)
# update task state (likely not needed, remove ?)
self.broadcast_state("user", "task_state", task_state)
return task_reward, is_done
def _assistant_first_half_step(self):
"""_assistant_first_half_step
Turn 3, where the assistant observes the game state and updates its state via inference.
:return: assistant observation and inference reward
:rtype: tuple(float, float)
"""
(
assistant_obs_reward,
assistant_infer_reward,
) = self.assistant._agent_step()
return assistant_obs_reward, assistant_infer_reward
def _assistant_second_half_step(self, assistant_action):
"""_assistant_second_half_step
Turn 4, where the assistant takes an action.
:param user_action: assistant action
:param type: Any
:return: task reward, task done?
:rtype: tuple(float, boolean)
"""
# update action_state
# Play assistant's turn in the task
task_state, task_reward, is_done, _ = self.task.base_assistant_step(
assistant_action
)
# update task state
self.broadcast_state("assistant", "task_state", task_state)
return task_reward, is_done
def _user_step(self, *args):
"""Turns 1 and 2
:param \*args: either provide the user action or not. If no action is provided the action is determined by the agent's policy using sample()
:param type: (None or list)
:return: user observation, inference, policy and task rewards, game is done flag
:return type: tuple(float, float, float, float, bool)
"""
user_obs_reward, user_infer_reward = self._user_first_half_step()
try:
# If human input is provided
user_action = args[0]
except IndexError:
# else sample from policy
user_action, user_policy_reward = self.user.take_action()
self.broadcast_action("user", user_action)
task_reward, is_done = self._user_second_half_step(user_action)
return (
user_obs_reward,
user_infer_reward,
user_policy_reward,
task_reward,
is_done,
)
def _assistant_step(self, *args):
"""Turns 3 and 4
:param \*args: either provide the assistant action or not. If no action is provided the action is determined by the agent's policy using sample()
:param type: (None or list)
:return: assistant observation, inference, policy and task rewards, game is done flag
:return type: tuple(float, float, float, float, bool)
"""
(
assistant_obs_reward,
assistant_infer_reward,
) = self._assistant_first_half_step()
try:
# If human input is provided
assistant_action = args[0]
except IndexError:
# else sample from policy
(
assistant_action,
assistant_policy_reward,
) = self.assistant.take_action()
self.broadcast_action("assistant", assistant_action)
task_reward, is_done = self._assistant_second_half_step(assistant_action)
return (
assistant_obs_reward,
assistant_infer_reward,
assistant_policy_reward,
task_reward,
is_done,
)
def broadcast_state(self, role, state_key, state):
"""broadcast state
Broadcast a state value to the gamestate and update the agent's observation.
:param role: "user" or "assistant"
:type role: string
:param state_key: state key in gamestate
:type state_key: string
:param state: new state value
:type state: :py:class:`State<coopihc.space.State.State>`
"""
self.game_state[state_key] = state
getattr(self, role).observation[state_key] = state
def broadcast_action(self, role, action):
"""broadcast action
Broadcast an action to the gamestate and update the agent's policy.
:param role: "user" or "assistant"
:type role: string
:param action: action
:type action: Any
"""
# update game state and observations
if isinstance(action, StateElement):
getattr(self, role).policy.action_state["action"] = action
getattr(self, role).observation["{}_action".format(role)]["action"] = action
else:
getattr(self, role).policy.action_state["action"]["values"] = action
getattr(self, role).observation["{}_action".format(role)]["action"][
"values"
] = action
```
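To make the turn machinery above concrete: a round is made of turns 0 (user observation and inference), 1 (user action), 2 (assistant observation and inference) and 3 (assistant action); `reset(turn=...)` starts inside a round and `step(..., go_to_turn=...)` stops inside one. A minimal sketch, assuming `bundle` has already been constructed as in the examples elsewhere in this document:
```python
# Sketch only: `bundle` is an already constructed Bundle.
game_state = bundle.reset(turn=1)  # stop right after the user has observed and inferred
while True:
    # With no arguments, step() plays a full round (back to the current turn),
    # sampling user and assistant actions from their policies.
    state, rewards, is_done = bundle.step()
    print(rewards["first_task_reward"], rewards["second_task_reward"])
    if is_done:
        break
```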
#### File: coopihc/bundle/_PlayAssistant.py
```python
from coopihc.bundle._Bundle import _Bundle
class PlayAssistant(_Bundle):
"""A bundle which samples oeprator actions directly from the user but uses assistant actions provided externally in the step() method.
:param task: (coopihc.interactiontask.InteractionTask) A task, which is a subclass of InteractionTask
:param user: (coopihc.agents.BaseAgent) a user, which is a subclass of BaseAgent
:param assistant: (coopihc.agents.BaseAgent) an assistant, which is a subclass of BaseAgent
:meta public:
"""
def __init__(self, task, user, assistant, **kwargs):
super().__init__(task, user, assistant, **kwargs)
self.action_space = self.assistant.policy.action_state["action"]["spaces"]
# assistant.policy.action_state['action'] = StateElement(
# values = None,
# spaces = [gym.spaces.Box(low = -numpy.inf, high = numpy.inf, shape = (1,)) for i in range(len(assistant.policy.action_state['action']))],
# possible_values = None
# )
# def reset(self, dic = {}, **kwargs):
# """ Reset the bundle. A first user step and assistant observation and inference is performed.
#
# :param args: see Bundle
#
# :meta public:
# """
# full_obs = super().reset(dic = dic, **kwargs)
# self._user_step()
# self._assistant_first_half_step()
# return self.assistant.inference_engine.buffer[-1]
def step(self, assistant_action):
"""Play a step, user actions are obtained by sampling the agent's policy and assistant actions are given externally in the step() method.
:param assistant_action: (list) assistant action
:return: observation, sum_rewards (float), is_done (bool), rewards (list). Returns the latest observation, the sum of all intermediate rewards, the is_done flag to indicate whether or not the task has finished, and the list of intermediate rewards.
:meta public:
"""
super().step(assistant_action)
self.broadcast_action("assistant", assistant_action, key="values")
second_task_reward, is_done = self._assistant_second_half_step(assistant_action)
if is_done:
return (
self.assistant.inference_engine.buffer[-1],
second_task_reward,
is_done,
[second_task_reward],
)
(
user_obs_reward,
user_infer_reward,
user_policy_reward,
first_task_reward,
is_done,
) = self._user_step()
(
assistant_obs_reward,
assistant_infer_reward,
) = self._assistant_first_half_step()
return (
self.assistant.inference_engine.buffer[-1],
sum(
[
user_obs_reward,
user_infer_reward,
user_policy_reward,
first_task_reward,
assistant_obs_reward,
assistant_infer_reward,
second_task_reward,
]
),
is_done,
[
user_obs_reward,
user_infer_reward,
user_policy_reward,
first_task_reward,
assistant_obs_reward,
assistant_infer_reward,
second_task_reward,
],
)
```
#### File: coopihc/bundle/_PlayUser.py
```python
from coopihc.bundle._Bundle import _Bundle
import copy
class PlayUser(_Bundle):
"""A bundle which samples assistant actions directly from the assistant but uses user actions provided externally in the step() method.
:param task: (coopihc.interactiontask.InteractionTask) A task, which is a subclass of InteractionTask
:param user: (coopihc.agents.BaseAgent) a user, which is a subclass of BaseAgent
:param assistant: (coopihc.agents.BaseAgent) an assistant, which is a subclass of BaseAgent
:meta public:
"""
def __init__(self, task, user, assistant, **kwargs):
super().__init__(task, user, assistant, **kwargs)
self.action_space = copy.copy(self.user.policy.action_state["action"]["spaces"])
# def reset(self, dic = {}, **kwargs):
# """ Reset the bundle. A first observation and inference is performed.
#
# :param args: see Bundle
#
# :meta public:
# """
# full_obs = super().reset(dic = dic, **kwargs)
# self._user_first_half_step()
# return self.user.observation
# # return self.user.inference_engine.buffer[-1]
def step(self, user_action):
"""Play a step, assistant actions are obtained by sampling the agent's policy and user actions are given externally in the step() method.
:param user_action: (list) user action
:return: observation, sum_rewards (float), is_done (bool), rewards (list). Returns the latest observation, the sum of all intermediate rewards, the is_done flag to indicate whether or not the task has finished, and the list of intermediate rewards.
:meta public:
"""
super().step(user_action)
self.broadcast_action("user", user_action, key="values")
first_task_reward, is_done = self._user_second_half_step(user_action)
if is_done:
return (
self.user.inference_engine.buffer[-1],
first_task_reward,
is_done,
[first_task_reward],
)
(
assistant_obs_reward,
assistant_infer_reward,
assistant_policy_reward,
second_task_reward,
is_done,
) = self._assistant_step()
user_obs_reward, user_infer_reward = self._user_first_half_step()
return (
self.user.inference_engine.buffer[-1],
sum(
[
user_obs_reward,
user_infer_reward,
first_task_reward,
assistant_obs_reward,
assistant_infer_reward,
assistant_policy_reward,
second_task_reward,
]
),
is_done,
[
user_obs_reward,
user_infer_reward,
first_task_reward,
assistant_obs_reward,
assistant_infer_reward,
assistant_policy_reward,
second_task_reward,
],
)
```
#### File: coopihc/bundle/_SinglePlayUser.py
```python
from coopihc.bundle._Bundle import _Bundle
class SinglePlayUser(_Bundle):
"""A bundle without assistant. This is used e.g. to model psychophysical tasks such as perception, where there is no real interaction loop with a computing device.
:param task: (coopihc.interactiontask.InteractionTask) A task, which is a subclass of InteractionTask
:param user: (coopihc.agents.BaseAgent) a user, which is a subclass of BaseAgent
:meta public:
"""
def __init__(self, task, user, **kwargs):
super().__init__(task=task, user=user, **kwargs)
@property
def observation(self):
return self.user.observation
def reset(self, dic={}, **kwargs):
"""Reset the bundle. A first observation and inference is performed.
:param args: see Bundle
:meta public:
"""
full_obs = super().reset(dic=dic, **kwargs)
self._user_first_half_step()
return self.observation
def step(self, user_action):
"""Play a step, user actions are given externally in the step() method.
:param user_action: (list) user action
:return: observation, sum_rewards (float), is_done (bool), rewards (list). Returns the latest observation, the sum of all intermediate rewards, the is_done flag to indicate whether or not the task has finished, and the list of intermediate rewards.
:meta public:
"""
super().step(user_action)
self.broadcast_action("user", user_action)
first_task_reward, is_done = self._user_second_half_step(user_action)
if is_done:
return (
self.user.inference_engine.buffer[-1],
first_task_reward,
is_done,
[first_task_reward],
)
self.task.base_assistant_step([None])
user_obs_reward, user_infer_reward = self._user_first_half_step()
return (
self.user.inference_engine.buffer[-1],
sum([user_obs_reward, user_infer_reward, first_task_reward]),
is_done,
[user_obs_reward, user_infer_reward, first_task_reward],
)
```
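A hedged sketch of the intended use of `SinglePlayUser`: user actions are supplied externally at each step, which suits psychophysics-style tasks with no assistant. The `task` and `user` objects below are placeholders; only the reset/step calls mirror the class above.
```python
# Sketch -- `task` and `user` are assumed to be valid coopihc objects.
bundle = SinglePlayUser(task, user)
obs = bundle.reset()  # also performs the user's first half-step
while True:
    user_action = user.policy.sample()[0]  # or any externally supplied action
    obs, sum_reward, is_done, rewards = bundle.step(user_action)
    if is_done:
        break
```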
#### File: bundle/wrappers/Train.py
```python
from coopihc.helpers import hard_flatten
from coopihc.space.utils import GymConvertor, GymForceConvertor
import gym
import numpy
class Train:
"""Generic Wrapper to make bundles compatibles with gym.Env
This is a generic Wrapper to make bundles compatibles with gym.Env. It is mainly here to be subclassed by other wrappers.
Depending on the spaces you are using, you might need to provide a wrapper to accustom the fact that coopihc spaces can take any values whereas e.g. gym discrete spaces have to be unit-spaced values.
.. note::
Experimental: This class automatically build wrappers to account for the transformation between a bundle and an env, but we offer no guarantees that it will work in all cases. It might also likely be faster (computationnally) to hard code your own wrappers.
:param bundle: bundle to wrap
:type bundle: `Bundle<coopihc.bundle.Bundle.Bundle`
:param train_user: whether to train the user, defaults to True
:type train_user: bool, optional
:param train_assistant: whether to train the assistant, defaults to True
:type train_assistant: bool, optional
:param API: API with which the bundle will be made compatible, defaults to "gym-force". In gym-force, a limited gym-compatible environment is created, which casts everything to float32 and boxes.
:type API: str, optional
:param observation_dict: to filter out observations, you can apply a dictionary, defaults to None. e.g.:
.. code-block:: python
filterdict = OrderedDict(
{
"user_state": OrderedDict({"goal": 0}),
"task_state": OrderedDict({"x": 0}),
}
)
:type observation_dict: collections.OrderedDict, optional
:param reset_dic: During training, the bundle will be repeatedly reset. Pass the reset_dic here (see bundle reset mechanism), defaults to {}
:type reset_dic: dict, optional
:param reset_turn: During training, the bundle will be repeatedly reset. Pass the reset_turn here (see bundle reset_turn mechanism), defaults to 0
:type reset_turn: int, optional
"""
def __init__(
self,
bundle,
*args,
train_user=True,
train_assistant=True,
API="gym-force",
observation_dict=None,
reset_dic={},
reset_turn=0,
**kwargs
):
self.bundle = bundle
self.train_user = train_user
self.train_assistant = train_assistant
self.observation_dict = observation_dict
self.reset_dic = reset_dic
self.reset_turn = reset_turn
self._convertor = None
if API == "gym":
self._convertor = GymConvertor
elif API == "gym-force":
self._convertor = GymForceConvertor
else:
raise NotImplementedError
(
self.action_space,
self.action_wrappers,
) = self._get_action_spaces_and_wrappers()
(
self.observation_space,
self.observation_wrappers,
) = self._get_observation_spaces_and_wrappers()
def _get_observation_spaces_and_wrappers(self):
"""_get_observation_spaces_and_wrappers
Get obs spaces and wrappers by querying an observation from the bundle and calling the convertor."""
obs = self.bundle.reset()
if self.observation_dict is None:
filter = obs
else:
filter = self.observation_dict
spaces = hard_flatten(obs.filter("spaces", filter))
return self.convertor.get_spaces_and_wrappers(spaces, "observation")[:2]
def _get_action_spaces_and_wrappers(self):
"""_get_action_spaces_and_wrappers [summary]
Get action spaces and wrappers. Checks who should be trained, and calls the convertor
"""
action_spaces = []
if self.train_user:
user_action_space = self.bundle.game_state["user_action"]["action"][
"spaces"
]
action_spaces.extend(user_action_space)
else:
user_action_space = None
if self.train_assistant:
assistant_action_space = self.bundle.game_state["assistant_action"][
"action"
]["spaces"]
action_spaces.extend(assistant_action_space)
else:
assistant_action_space = None
self.bundle_user_action_space = user_action_space
self.bundle_assistant_action_space = assistant_action_space
self.convertor = self._convertor(bundle_action_spaces=action_spaces)
return self.convertor.get_spaces_and_wrappers(action_spaces, "action")[
:2
] # no wrapper flags returned
def _convert_observation(self, observation):
"""_convert_observation
Hard flattens the bundle observation and casts to int or array
"""
if isinstance(self.observation_space, gym.spaces.Discrete):
return int(
hard_flatten(observation.filter("values", self.observation_dict))[0]
)
else:
return numpy.array(
hard_flatten(observation.filter("values", self.observation_dict))
)
class TrainGym(Train, gym.Env):
"""Generic Wrapper to make bundles compatibles with gym.Env
This is a generic Wrapper to make bundles compatibles with gym.Env. Read more on the Train class.
:param bundle: bundle to wrap
:type bundle: `Bundle<coopihc.bundle.Bundle.Bundle`
:param train_user: whether to train the user, defaults to True
:type train_user: bool, optional
:param train_assistant: whether to train the assistant, defaults to True
:type train_assistant: bool, optional
:param observation_dict: to filter out observations, you can apply a dictionary, defaults to None. e.g.:
.. code-block:: python
filterdict = OrderedDict(
{
"user_state": OrderedDict({"goal": 0}),
"task_state": OrderedDict({"x": 0}),
}
)
:type observation_dict: collections.OrderedDict, optional
:param reset_dic: During training, the bundle will be repeatedly reset. Pass the reset_dic here (see bundle reset mechanism), defaults to {}
:type reset_dic: dict, optional
:param reset_turn: During training, the bundle will be repeatedly reset. Pass the reset_turn here (see bundle reset_turn mechanism), defaults to 0
:type reset_turn: int, optional
"""
def __init__(
self,
bundle,
*args,
train_user=True,
train_assistant=True,
observation_dict=None,
reset_dic={},
reset_turn=0,
force=False,
**kwargs
):
if force == True:
api = "gym-force"
else:
api = "gym"
super().__init__(
bundle,
*args,
train_user=train_user,
train_assistant=train_assistant,
API=api,
observation_dict=observation_dict,
reset_dic=reset_dic,
reset_turn=reset_turn,
**kwargs,
)
def reset(self):
"""Reset the environment.
:return: observation (numpy.ndarray) of the flattened game_state --> see gym API.
"""
obs = self.bundle.reset(turn=self.reset_turn, dic=self.reset_dic)
return self._convert_observation(obs)
def step(self, action):
"""Perform a step of the environment.
:param action: (list, numpy.ndarray) Action (or joint action for PlayBoth)
:return: observation, reward, is_done, rewards --> see gym API. rewards is a dictionary which gives all elementary rewards for this step.
:meta public:
"""
user_action = action[: len(self.bundle_user_action_space)]
assistant_action = action[len(self.bundle_user_action_space) :]
obs, rewards, is_done = self.bundle.step(user_action, assistant_action)
return (
self._convert_observation(obs),
float(sum(rewards.values())),
is_done,
rewards,
)
def render(self, mode):
"""See Bundle and gym API
:meta public:
"""
self.bundle.render(mode)
def close(self):
"""See Bundle and gym API
:meta public:
"""
self.bundle.close()
```
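A minimal sketch of wrapping a bundle with the `TrainGym` class above. The bundle construction is assumed from the other examples, and the filter dictionary follows the structure documented in the docstring; `check_env` from stable-baselines3 is used to verify gym compatibility, as in the worked-out training example further down in this document.
```python
from collections import OrderedDict
from stable_baselines3.common.env_checker import check_env

# Sketch -- `bundle` is an already constructed Bundle; substate names are illustrative.
filterdict = OrderedDict(
    {
        "user_state": OrderedDict({"goal": 0}),
        "task_state": OrderedDict({"position": 0}),
    }
)
env = TrainGym(
    bundle,
    train_user=True,
    train_assistant=False,
    observation_dict=filterdict,
    reset_turn=1,
)
check_env(env)  # sanity-check the gym interface before training
```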
#### File: examples/simple_examples/bundle_examples.py
```python
import sys
from pathlib import Path
file = Path(__file__).resolve()
root = file.parents[3]
sys.path.append(str(root))
import numpy
from coopihc.interactiontask import ExampleTask
from coopihc.space.State import State
from coopihc.space.Space import Space
from coopihc.space.StateElement import StateElement
from coopihc.bundle.Bundle import Bundle
from coopihc.agents.BaseAgent import BaseAgent
from coopihc.policy.BasePolicy import BasePolicy
from coopihc.agents.ExampleUser import ExampleUser
# [start-check-task]
# Define agent action states (what actions they can take)
user_action_state = State()
user_action_state["action"] = StateElement(
values=None,
spaces=[Space([numpy.array([-1, 0, 1], dtype=numpy.int16)])],
)
assistant_action_state = State()
assistant_action_state["action"] = StateElement(
values=None,
spaces=[Space([numpy.array([-1, 0, 1], dtype=numpy.int16)])],
)
# Bundle a task together with two BaseAgents
bundle = Bundle(
task=ExampleTask(),
user=BaseAgent("user", override_agent_policy=BasePolicy(user_action_state)),
assistant=BaseAgent(
"assistant",
override_agent_policy=BasePolicy(assistant_action_state),
),
)
# Reset the task, plot the state.
bundle.reset(turn=1)
print(bundle.game_state)
bundle.step(numpy.array([1]), numpy.array([1]))
print(bundle.game_state)
# Test simple input
bundle.step(numpy.array([1]), numpy.array([1]))
# Test with input sampled from the agent policies
bundle.reset()
while True:
task_state, rewards, is_done = bundle.step(
bundle.user.policy.sample()[0], bundle.assistant.policy.sample()[0]
)
print(task_state)
if is_done:
break
# [end-check-task]
# [start-check-taskuser]
class ExampleTaskWithoutAssistant(ExampleTask):
def assistant_step(self, *args, **kwargs):
return self.state, 0, False, {}
example_task = ExampleTaskWithoutAssistant()
example_user = ExampleUser()
bundle = Bundle(task=example_task, user=example_user)
bundle.reset(turn=1)
while 1:
state, rewards, is_done = bundle.step(bundle.user.policy.sample()[0])
print(state, rewards, is_done)
if is_done:
break
# [end-check-taskuser]
# [start-highlevel-code]
# Define a task
example_task = ExampleTask()
# Define a user
example_user = ExampleUser()
# Define an assistant
example_assistant = BaseAgent("assistant")
# Bundle them together
bundle = Bundle(task=example_task, user=example_user)
# Reset the bundle (i.e. initialize it to a random or prescribed state)
bundle.reset(turn=1)
# Step through the bundle (i.e. play a full round)
while 1:
state, rewards, is_done = bundle.step(bundle.user.policy.sample()[0])
print(state, rewards, is_done)
if is_done:
break
# [end-highlevel-code]
```
#### File: examples/worked_out_examples/train-and-execute.py
```python
from pointing.envs import SimplePointingTask
from pointing.users import CarefulPointer
from pointing.assistants import ConstantCDGain
from coopihc.policy import BasePolicy, RLPolicy
import coopihc
from coopihc.bundle import PlayUser, PlayNone, Train
# other imports
from collections import OrderedDict
import gym
import numpy
import matplotlib.pyplot as plt
# stable baselines 2 seems much faster for some reason.
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import SubprocVecEnv
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.utils import set_random_seed
import loguru
loguru.logger.remove()
# Pointing task
task = SimplePointingTask(gridsize=31, number_of_targets=8)
# Unit cd gain assistant
unitcdgain = ConstantCDGain(1)
# The policy defines the action set that we are going to use for the
# user. The BasePolicy randomly samples actions from the action set.
# But that is okay because we don't sample from the policy during
# learning
policy = BasePolicy(
action_space=[coopihc.space.Discrete(10)],
action_set=[[-5 + i for i in range(5)] + [i + 1 for i in range(5)]],
action_values=None,
)
# Re-use the previously defined user model called CarefulPointer, but
# override its policy
user = CarefulPointer(agent_policy=policy)
# Bundle the pointing task, the user model and the assistant in a POMDP, where assistant actions are queried from its policy. The bundle expects to be fed user actions
bundle = PlayUser(task, user, unitcdgain)
# Initialize the bundle to some random state
observation = bundle.reset()
# Only observe the position (task state) and goal (user state) states.
# The rest is uninformative and will slow down training.
observation_dict = OrderedDict(
{"task_state": OrderedDict({"position": 0}), "user_state": OrderedDict({"goal": 0})}
)
# We are going to use PPO to solve the POMDP, which works with
# continuous action spaces, but our policy has discrete actions. So we introduce a wrapper to pass between continuous and discrete spaces. This is classical in RL, and does not depend on Coopihc
class ThisActionWrapper(gym.ActionWrapper):
def __init__(self, env):
super().__init__(env)
self.N = env.action_space[0].n
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(1,))
def action(self, action):
return int(numpy.round((action * (self.N - 1e-3) / 2) + (self.N - 1) / 2)[0])
# First wrap the bundle in a train environment to make it compatible
# with gym, then wrap it up in a gym wrapper:
# ActionWrapper < Train < bundle > >
md_env = ThisActionWrapper(
Train(bundle, observation_mode="multidiscrete", observation_dict=observation_dict)
)
# Verify that the environment is compatible with stable_baselines
from stable_baselines3.common.env_checker import check_env
check_env(md_env)
# Wrap everything above in a make_env function, used to parallelize
# the environments (to sample faster from the environments).
# Everything above is just to explain, only the code inside this
# function is needed to define envs.
def make_env(rank, seed=0):
def _init():
task = SimplePointingTask(gridsize=31, number_of_targets=8)
unitcdgain = ConstantCDGain(1)
policy = BasePolicy(
action_space=[coopihc.space.Discrete(10)],
action_set=[[-5 + i for i in range(5)] + [i + 1 for i in range(5)]],
action_values=None,
)
user = CarefulPointer(agent_policy=policy)
bundle = PlayUser(task, user, unitcdgain)
observation_dict = OrderedDict(
{
"task_state": OrderedDict({"position": 0}),
"user_state": OrderedDict({"goal": 0}),
}
)
env = ThisActionWrapper(
Train(
bundle,
observation_mode="multidiscrete",
observation_dict=observation_dict,
)
)
env.seed(seed + rank)
return env
set_random_seed(seed)
return _init
if __name__ == "__main__":
# parallelize the environments
env = SubprocVecEnv([make_env(i) for i in range(4)])
# define the learning algorithm
model = PPO("MlpPolicy", env, verbose=1, tensorboard_log="./tb/")
# train the algorithm
print("start training")
model.learn(total_timesteps=6000)
model.save("saved_model")
# Trained policy is now saved
# Reusing the trained and saved policy:
# ================ Recreate the training environment ()
class ThisActionWrapper(gym.ActionWrapper):
def __init__(self, env):
super().__init__(env)
self.N = env.action_space[0].n
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(1,))
def action(self, action):
return int(
numpy.round((action * (self.N - 1e-3) / 2) + (self.N - 1) / 2)[0]
)
task = SimplePointingTask(gridsize=31, number_of_targets=8)
unitcdgain = ConstantCDGain(1)
policy = BasePolicy(
action_space=[coopihc.space.Discrete(10)],
action_set=[[-5 + i for i in range(5)] + [i + 1 for i in range(5)]],
action_values=None,
)
user = CarefulPointer(agent_policy=policy)
bundle = PlayUser(task, user, unitcdgain)
observation_dict = OrderedDict(
{
"task_state": OrderedDict({"position": 0}),
"user_state": OrderedDict({"goal": 0}),
}
)
training_env = ThisActionWrapper(
Train(
bundle, observation_mode="multidiscrete", observation_dict=observation_dict
)
)
# ================ Training environment finished creating
task = SimplePointingTask(gridsize=31, number_of_targets=8)
unitcdgain = ConstantCDGain(1)
# # specifying user policy
# observation_dict = OrderedDict({'task_state': OrderedDict({'position': 0}), 'user_state': OrderedDict({'goal': 0})})
action_wrappers = OrderedDict()
action_wrappers["ThisActionWrapper"] = (ThisActionWrapper, ())
# Define an RL policy, by giving the model path, the library and algorithm used to train it, as well as potential wrappers used during training.
trained_policy = RLPolicy(
"user",
model_path="saved_model",
learning_algorithm="PPO",
library="stable_baselines3",
training_env=training_env,
wrappers={"actionwrappers": action_wrappers, "observation_wrappers": {}},
)
# Override the old policy with the new policy
user = CarefulPointer(agent_policy=trained_policy)
# Evaluate the trained policy
bundle = PlayNone(task, user, unitcdgain)
game_state = bundle.reset()
bundle.render("plotext")
plt.tight_layout()
while True:
observation, sum_reward, is_done, rewards = bundle.step()
bundle.render("plotext")
if is_done:
break
```
#### File: interaction-agents/coopihc/helpers.py
```python
import numpy
import collections
def hard_flatten(l):
# hack for circular import
from coopihc.space.Space import Space
out = []
# tentative
# if isinstance(l, (Space)):
# l = l.spaces
if isinstance(l, (collections.OrderedDict, dict)):
l = list(l.values())
for item in l:
if isinstance(item, (list, tuple)):
out.extend(hard_flatten(item))
else:
if isinstance(item, numpy.ndarray):
out.extend(hard_flatten(item.tolist()))
elif isinstance(item, collections.OrderedDict):
out.extend(hard_flatten(list(item.values())))
# tentative
# elif isinstance(item, (Space)):
# out.extend(hard_flatten(item.spaces))
else:
out.append(item)
return out
def flatten(l):
out = []
try:
for item in l:
if isinstance(item, (list, tuple)):
out.extend(flatten(item))
else:
out.append(item)
except TypeError:
return flatten([l])
return out
def sort_two_lists(list1, list2, *args, **kwargs):
try:
key = args[0]
sortedlist1, sortedlist2 = [
list(u) for u in zip(*sorted(zip(list1, list2), key=key, **kwargs))
]
except IndexError:
sortedlist1, sortedlist2 = [
list(u) for u in zip(*sorted(zip(list1, list2), **kwargs))
]
return sortedlist1, sortedlist2
def isdefined(obj):
if None not in flatten(obj):
return True
return False
```
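A quick illustration of the two helpers above: `flatten` only recurses into lists and tuples, while `hard_flatten` also unpacks dicts, OrderedDicts and numpy arrays.
```python
import numpy
from collections import OrderedDict
from coopihc.helpers import flatten, hard_flatten

nested = OrderedDict({"a": [1, (2, 3)], "b": numpy.array([[4, 5]])})
print(hard_flatten(nested))             # [1, 2, 3, 4, 5]
print(flatten([1, (2, 3), [4, [5]]]))   # [1, 2, 3, 4, 5]
```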
#### File: coopihc/inference/GoalInferenceWithUserPolicyGiven.py
```python
import numpy
import copy
from coopihc.space.State import State
from coopihc.helpers import hard_flatten
from coopihc.inference.BaseInferenceEngine import BaseInferenceEngine
# The user model is not updated with this assistant
class GoalInferenceWithUserPolicyGiven(BaseInferenceEngine):
"""GoalInferenceWithUserPolicyGiven
An inference Engine used by an assistant to infer the 'goal' of a user.
The inference is based on a model of the user policy, which has to be provided to this engine.
:param \*args: policy model
:type \*args: :py:mod`Policy<coopihc.policy>`
"""
def __init__(self, *args, user_policy_model=None, **kwargs):
super().__init__(*args, **kwargs)
self.attach_policy(user_policy_model)
self.render_tag = ["plot", "text"]
def attach_policy(self, policy):
"""attach_policy
Attach a policy to the engine from which it can sample.
:param policy: a policy
:type policy: :py:mod`Policy<coopihc.policy>`
"""
if policy is None:
self.user_policy_model = None
return
if not policy.explicit_likelihood:
print(
"Warning: This inference engine requires a policy defined by an explicit likelihood"
)
print("Attached policy {} to {}".format(policy, self.__class__.__name__))
self.user_policy_model = policy
def attach_set_theta(self, set_theta):
"""attach_set_theta
The set of candidate 'goal' values, provided as an iterable of dictionaries whose keys are (substate, key) tuples (see :py:meth:`infer`).
:param set_theta: candidate goal set
:type set_theta: iterable of dictionaries
"""
self.set_theta = set_theta
def render(self, *args, **kwargs):
mode = kwargs.get("mode")
render_flag = False
for r in self.render_tag:
if r in mode:
render_flag = True
## ----------------------------- Begin Helper functions
def set_box(
ax,
pos,
draw="k",
fill=None,
symbol=None,
symbol_color=None,
shortcut=None,
box_width=1,
boxheight=1,
boxbottom=0,
):
if shortcut == "void":
draw = "k"
fill = "#aaaaaa"
symbol = None
elif shortcut == "target":
draw = "#96006c"
fill = "#913979"
symbol = "1"
symbol_color = "k"
elif shortcut == "goal":
draw = "#009c08"
fill = "#349439"
symbol = "X"
symbol_color = "k"
elif shortcut == "position":
draw = "#00189c"
fill = "#6573bf"
symbol = "X"
symbol_color = "k"
BOX_HW = box_width / 2
_x = [pos - BOX_HW, pos + BOX_HW, pos + BOX_HW, pos - BOX_HW]
_y = [
boxbottom,
boxbottom,
boxbottom + boxheight,
boxbottom + boxheight,
]
x_cycle = _x + [_x[0]]
y_cycle = _y + [_y[0]]
if fill is not None:
fill = ax.fill_between(_x[:2], _y[:2], _y[2:], color=fill)
(draw,) = ax.plot(x_cycle, y_cycle, "-", color=draw, lw=2)
symbol = None
if symbol is not None:
symbol = ax.plot(
pos, 0, color=symbol_color, marker=symbol, markersize=100
)
return draw, fill, symbol
def draw_beliefs(ax):
beliefs = hard_flatten(self.host.state["beliefs"]["values"])
ticks = []
ticklabels = []
for i, b in enumerate(beliefs):
draw, fill, symbol = set_box(ax, 2 * i, shortcut="target", boxheight=b)
ticks.append(2 * i)
ticklabels.append(i)
self.ax.set_xticks(ticks)
self.ax.set_xticklabels(ticklabels, rotation=90)
## -------------------------- End Helper functions
if "plot" in mode:
ax = args[0]
if self.ax is not None:
title = self.ax.get_title()
self.ax.clear()
draw_beliefs(ax)
ax.set_title(title)
else:
self.ax = ax
draw_beliefs(ax)
self.ax.set_title(type(self).__name__ + " beliefs")
if "text" in mode:
beliefs = hard_flatten(self.host.state["beliefs"]["values"])
print("beliefs", beliefs)
def infer(self):
"""infer
Update the substate 'beliefs' from the internal state. Generate candidate observations for each potential target, evaluate its likelihood and update the prior to form the posterior. Normalize the posterior and return the new state.
:return: (new internal state, reward)
:rtype: tuple(:py:class:`State<coopihc.space.State.State>`, float)
"""
if self.user_policy_model is None:
raise RuntimeError(
"This inference engine requires a likelihood-based model of an user policy to function."
)
observation = self.buffer[-1]
state = observation["assistant_state"]
old_beliefs = state["beliefs"]["values"][0].squeeze().tolist()
user_action = observation["user_action"]["action"]
print(old_beliefs)
for nt, t in enumerate(self.set_theta):
print(nt, t)
# candidate_observation = copy.copy(observation)
candidate_observation = copy.deepcopy(observation)
for key, value in t.items():
try:
candidate_observation[key[0]][key[1]] = value
except KeyError: # key[0] is not in observation
_state = State()
_state[key[1]] = value
candidate_observation[key[0]] = _state
old_beliefs[nt] *= self.user_policy_model.compute_likelihood(
user_action, candidate_observation
)
if sum(old_beliefs) == 0:
print(
"warning: beliefs sum up to 0 after updating. I'm resetting to uniform to continue behavior. You should check if the behavior model makes sense. Here are the latest results from the model"
)
old_beliefs = [1 for i in old_beliefs]
new_beliefs = [i / sum(old_beliefs) for i in old_beliefs]
state["beliefs"]["values"] = numpy.array(new_beliefs)
return state, 0
```
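The `infer` loop above expects `set_theta` to be an iterable of dictionaries, each mapping a `(substate, key)` tuple to a candidate value, one entry per possible goal. The sketch below shows one plausible way to build such a set; the ("user_state", "goal") key, the `targets` StateElement and the `user_policy_model` name are assumptions for illustration (the `values=`/`spaces=` StateElement style follows the older API used elsewhere in this repository).
```python
import copy
from coopihc.space.StateElement import StateElement

# Sketch: `targets` is assumed to be the task's "targets" StateElement and
# `user_policy_model` a policy with an explicit likelihood.
set_theta = [
    {
        ("user_state", "goal"): StateElement(
            values=[t],
            spaces=copy.deepcopy(targets["spaces"][0]),
        )
    }
    for t in targets["values"]
]
engine = GoalInferenceWithUserPolicyGiven(user_policy_model=user_policy_model)
engine.attach_set_theta(set_theta)
```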
#### File: coopihc/policy/RLPolicy.py
```python
import copy
from coopihc.space.State import State
from coopihc.policy.BasePolicy import BasePolicy
# ======================= RL Policy
class RLPolicy(BasePolicy):
"""RLPolicy [summary]
A policy object compatible with CoopIHC that wraps a policy trained via Reinforcement learning.
arguments to pass:
* role
kw arguments to pass:
* model_path
* learning_algorithm
* library
* training env
* wrappers
.. note ::
Currently only supports policies obtained via stable baselines 3.
.. note ::
Code works as proof of concept, but should be tested and augmented to deal with arbitrary wrappers. Possibly the wrapper class should be augmented with a reverse method, or something like that.
:param BasePolicy: [description]
:type BasePolicy: [type]
"""
def __init__(self, *args, **kwargs):
self.role = args[0]
model_path = kwargs.get("model_path")
learning_algorithm = kwargs.get("learning_algorithm")
library = kwargs.get("library")
self.training_env = kwargs.get("training_env")
self.wrappers = kwargs.get("wrappers")
if library != "stable_baselines3":
raise NotImplementedError(
"The Reinforcement Learning Policy currently only supports policies obtained via stables baselines 3."
)
import stable_baselines3
learning_algorithm = getattr(stable_baselines3, learning_algorithm)
self.model = learning_algorithm.load(model_path)
# Recovering action space
action_state = State()
action_state["action"] = copy.deepcopy(
getattr(
getattr(
getattr(self.training_env.unwrapped.bundle, "user"),
"policy",
),
"action_state",
)["action"]
)
super().__init__(*args, action_state=action_state, **kwargs)
def sample(self, observation=None):
"""sample
Get action by using model.predict(), and apply actionwrappers.
:return: action, reward
:rtype: tuple(`StateElement<coopihc.space.StateElement.StateElement>`, float)
"""
if observation is None:
observation = self.observation
nn_obs = self.training_env.unwrapped.convert_observation(observation)
_action = self.model.predict(nn_obs)[0]
for wrappers_name, (_cls, _args) in reversed(
self.wrappers["actionwrappers"].items()
):
aw = _cls(self.training_env.unwrapped, *_args)
_action = aw.action(_action)
action = self.action_state["action"]
action["values"] = _action
return action, 0
```
#### File: coopihc/space/State.py
```python
from collections import OrderedDict
import copy
import json
from tabulate import tabulate
from coopihc.helpers import flatten
from coopihc.space.StateElement import StateElement
class State(OrderedDict):
"""The container that defines states.
:param \*args: Same as collections.OrderedDict
:param \*\*kwargs: Same as collections.OrderedDict
:return: A state Object
:rtype: State
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __bool__(self):
return bool(self.items())
def reset(self, dic={}):
"""Initialize the state. See StateElement"""
for key, value in self.items():
reset_dic = dic.get(key)
if reset_dic is None:
reset_dic = {}
value.reset(reset_dic)
def _flat(self):
values = []
spaces = []
labels = []
l, k = list(self.values()), list(self.keys())
for n, item in enumerate(l):
_values, _spaces, _labels = item._flat()
values.extend(_values)
spaces.extend(_spaces)
labels.extend([k[n] + "|" + label for label in _labels])
return values, spaces, labels
def filter(self, mode, filterdict=None):
"""Retain only parts of the state.
An example for filterdict's structure is as follows:
.. code-block:: python
ordereddict = OrderedDict(
{"substate1": OrderedDict({"substate_x": 0, "substate_w": 0})}
)
        will keep only the first component (index 0) of substate_x and substate_w inside substate1, and drop everything else.
:param str mode: 'values' or 'spaces'
:param collections.OrderedDict filterdict: The OrderedDict which specifies which substates to keep and which to leave out.
:return: The filtered state
:rtype: collections.OrderedDict
"""
new_state = OrderedDict()
if filterdict is None:
filterdict = self
for key, values in filterdict.items():
if isinstance(self[key], State):
new_state[key] = self[key].filter(mode, values)
elif isinstance(self[key], StateElement):
# to make S.filter("values", S) possible.
# Warning: Contrary to what one would expect values != self[key]
if isinstance(values, StateElement):
values = slice(0, len(values), 1)
if mode == "spaces":
new_state[key] = flatten([self[key][mode][values]])
else:
new_state[key] = self[key][mode][values]
else:
new_state[key] = self[key]
return new_state
def __content__(self):
return list(self.keys())
    # We override copy and deepcopy because the default deepcopy implementation seemed to carry some overhead. The measured gain turned out to be almost none, but this is kept as a reminder that deepcopy needs speeding up. Adapted from the StateElement code.
def __copy__(self):
cls = self.__class__
copy_object = cls.__new__(cls)
copy_object.__dict__.update(self.__dict__)
copy_object.update(self)
return copy_object
def __deepcopy__(self, memodict={}):
cls = self.__class__
deepcopy_object = cls.__new__(cls)
memodict[id(self)] = deepcopy_object
deepcopy_object.__dict__.update(self.__dict__)
for k, v in self.items():
deepcopy_object[k] = copy.deepcopy(v, memodict)
return deepcopy_object
def serialize(self):
"""Serialize state --> JSON output.
:return: JSON-like blob
:rtype: dict
"""
ret_dict = {}
for key, value in dict(self).items():
try:
value_ = json.dumps(value)
except TypeError:
try:
value_ = value.serialize()
except AttributeError:
print(
"warning: I don't know how to serialize {}. I'm sending the whole internal dictionnary of the object. Consider adding a serialize() method to your custom object".format(
value.__str__()
)
)
value_ = value.__dict__
ret_dict[key] = value_
return ret_dict
def __str__(self):
"""Print out the game_state and the name of each substate with according indices."""
table_header = ["Index", "Label", "Value", "Space", "Possible Value"]
table_rows = []
for i, (v, s, l) in enumerate(zip(*self._flat())):
table_rows.append([str(i), l, str(v), str(s)])
_str = tabulate(table_rows, table_header)
return _str
```
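As a complement to the `filter` docstring, a minimal call sketch (here `game_state` is a hypothetical, already-populated State instance and the filterdict mirrors the docstring example):
```python
from collections import OrderedDict

# Hypothetical: game_state is an already-populated State instance
filterdict = OrderedDict(
    {"substate1": OrderedDict({"substate_x": 0, "substate_w": 0})}
)
values_only = game_state.filter("values", filterdict)   # keeps index 0 of each listed substate
spaces_only = game_state.filter("spaces", filterdict)   # same selection, applied to the spaces
everything = game_state.filter("values", game_state)    # self-filter shortcut noted in the code
```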
#### File: interaction-agents/guide/train_operator_ppo_sb.py
```python
from pointing.envs import SimplePointingTask
from pointing.assistants import ConstantCDGain
from pointing.users import CarefulPointer
from coopihc.policy import Policy
from coopihc.bundle import PlayUser, Train
from gym.wrappers import FlattenObservation
from coopihc.helpers import hard_flatten
from collections import OrderedDict
import gym
import numpy
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import SubprocVecEnv
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.utils import set_random_seed
class ThisActionWrapper(gym.ActionWrapper):
def __init__(self, env):
super().__init__(env)
self.N = env.action_space[0].n
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(1,))
def action(self, action):
return int(numpy.round((action * (self.N - 1e-3) / 2) + (self.N - 1) / 2)[0])
def make_env(rank, seed=0):
def _init():
task = SimplePointingTask(gridsize=31, number_of_targets=8)
unitcdgain = ConstantCDGain(1)
policy = Policy(
action_space=[gym.spaces.Discrete(10)],
action_set=[-5 + i for i in range(5)] + [i + 1 for i in range(5)],
action_values=None,
)
user = CarefulPointer(agent_policy=policy)
bundle = PlayUser(task, user, unitcdgain)
observation_dict = OrderedDict(
{
"task_state": OrderedDict({"Position": 0}),
"user_state": OrderedDict({"Goal": 0}),
}
)
env = ThisActionWrapper(
Train(
bundle,
observation_mode="multidiscrete",
observation_dict=observation_dict,
)
)
env.seed(seed + rank)
return env
set_random_seed(seed)
return _init
if __name__ == "__main__":
task = SimplePointingTask(gridsize=31, number_of_targets=8)
unitcdgain = ConstantCDGain(1)
policy = Policy(
action_space=[gym.spaces.Discrete(10)],
action_set=[-5 + i for i in range(5)] + [i + 1 for i in range(5)],
action_values=None,
)
user = CarefulPointer(agent_policy=policy)
bundle = PlayUser(task, user, unitcdgain)
observation_dict = OrderedDict(
{
"task_state": OrderedDict({"Position": 0}),
"user_state": OrderedDict({"Goal": 0}),
}
)
md_env = ThisActionWrapper(
Train(
bundle, observation_mode="multidiscrete", observation_dict=observation_dict
)
)
from stable_baselines3.common.env_checker import check_env
check_env(md_env)
num_cpu = 3
env = SubprocVecEnv([make_env(i) for i in range(num_cpu)])
model = PPO("MlpPolicy", env, verbose=1)
model.learn(total_timesteps=100000)
model.save("guide/models/basic_pointer_ppo")
obs = md_env.reset()
while True:
action, _states = model.predict(obs, deterministic=True)
obs, reward, done, info = md_env.step(action)
md_env.render("plotext")
if done:
break
```
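The rounding formula inside `ThisActionWrapper.action` maps the continuous Box action in [-1, 1] onto the N discrete actions. A quick stand-alone check of that arithmetic, independent of the bundle:
```python
import numpy

def to_discrete(action, N):
    # Same formula as ThisActionWrapper.action: scale [-1, 1] onto [0, N - 1] and round
    return int(numpy.round((action * (N - 1e-3) / 2) + (N - 1) / 2)[0])

N = 10
print(to_discrete(numpy.array([-1.0]), N))  # 0, the leftmost discrete action
print(to_discrete(numpy.array([0.0]), N))   # 4 (numpy rounds 4.5 to the nearest even integer)
print(to_discrete(numpy.array([1.0]), N))   # 9, the rightmost discrete action
```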
#### File: unit/space/test_space.py
```python
from coopihc.space.Space import Space
from coopihc.space.utils import discrete_space, continuous_space, multidiscrete_space
from coopihc.helpers import flatten
import numpy
import copy
def test_init():
# ===================== 1D int
s = Space([numpy.array([1, 2, 3], dtype=numpy.int16)])
# prop and attributes
assert s.dtype == numpy.int16
assert s.continuous == False
assert s.shape == (1, 1)
assert s.N == 3
assert s.high == [3]
assert s.low == [1]
assert (s.range == numpy.atleast_2d(numpy.array([1, 2, 3]))).all()
# __ methods
# __len__
assert len(s) == 1
# __contains__
assert numpy.array([1]) in s
assert numpy.array([2]) in s
assert numpy.array([3]) in s
assert numpy.array([[2]]).reshape(1, -1) in s
assert numpy.array([[2]]).reshape(-1, 1) in s
assert numpy.array([2.0]) not in s
assert numpy.array([2], dtype=numpy.float32) not in s
# __iter__ and __eq__ ---> here iter quasi-idempotent (== object, but not identical)
for _s in s:
assert _s == s
q = Space([numpy.array([1, 1, 3], dtype=numpy.int16)])
assert q != s
r = Space([numpy.array([1, 2, 3], dtype=numpy.float32)])
assert s != r
# ========================= single 2D float
s = Space(
[
-numpy.ones((2, 2), dtype=numpy.float32),
numpy.ones((2, 2), dtype=numpy.float32),
]
)
# prop and attributes
assert s.dtype == numpy.float32
assert s.continuous == True
assert s.shape == (2, 2)
assert s.N == None
assert (s.high == numpy.ones((2, 2), numpy.float32)).all()
assert (s.low == -numpy.ones((2, 2), numpy.float32)).all()
assert (s.range[0] == -numpy.ones((2, 2), dtype=numpy.float32)).all()
assert (s.range[1] == numpy.ones((2, 2), dtype=numpy.float32)).all()
# __ methods
# __len__
assert len(s) == 2
# __contains__
assert -1.0 * numpy.eye(2, 2) in s
assert 1.0 * numpy.eye(2, 2) in s
assert 0 * numpy.eye(2, 2) in s
assert 1 * numpy.eye(2, 2) in s
assert -1 * numpy.eye(2, 2) in s
assert 2 * numpy.eye(2, 2) not in s
# __eq__
ss = Space(
[
-1.0 * numpy.ones((2, 2), dtype=numpy.float32),
1.0 * numpy.ones((2, 2), dtype=numpy.float32),
]
)
assert ss == s
sss = Space(
[
-1.0 * numpy.ones((2, 2)),
1.0 * numpy.ones((2, 2)),
]
)
assert sss != s
q = Space(
[
-numpy.ones((2, 2), dtype=numpy.int16),
numpy.ones((2, 2), dtype=numpy.int16),
]
)
r = Space(
[
-2 * numpy.ones((2, 2), dtype=numpy.float32),
numpy.ones((2, 2), dtype=numpy.float32),
]
)
assert q != s
assert r != s
# __iter__
for _s in s:
assert 0.5 * numpy.eye(1, 2) in _s
for _ss in _s:
assert numpy.array([[0.5]]) in _ss
# ====================== multi 1D int
gridsize = (31, 31)
s = Space(
[
numpy.array([i for i in range(gridsize[0])], dtype=numpy.int16),
numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16),
]
)
# prop and attributes
assert s.dtype == numpy.int16
assert s.continuous == False
assert s.shape == (2, 1)
assert s.N == None
assert s.high == [30, 30]
assert s.low == [0, 0]
assert (s.range[0] == numpy.array([[i for i in range(31)]])).all()
assert (s.range[1] == numpy.array([[i for i in range(31)]])).all()
# __ methods
# __len__
assert len(s) == 2
# __contains__
assert numpy.array([1, 2]) in s
assert numpy.array([-2, 5]) not in s
assert numpy.array([1, 35]) not in s
# __eq__
ss = Space(
[
numpy.array([i for i in range(gridsize[0])], dtype=numpy.int16),
numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16),
]
)
assert ss == s
sss = Space(
[
numpy.array([i for i in range(29)], dtype=numpy.int16),
numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16),
]
)
assert sss != s
ssss = Space(
[
numpy.array([i for i in range(31)], dtype=numpy.int16),
numpy.array([i for i in range(5)], dtype=numpy.int16),
]
)
assert ssss != s
q = Space(
[
numpy.array([i - 4 for i in range(31)], dtype=numpy.int16),
numpy.array([i for i in range(31)], dtype=numpy.int16),
]
)
r = Space(
[
numpy.array([i for i in range(31)], dtype=numpy.int16),
numpy.array([i + 1 for i in range(31)], dtype=numpy.int16),
]
)
assert q != s
assert r != s
# __iter__
for _s in s:
assert _s == Space(
[numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16)]
)
# ========= multi int 2D
number_of_targets = 3
s = Space(
flatten(
[
[
numpy.array([i for i in range(gridsize[0])], dtype=numpy.int16),
numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16),
]
for j in range(number_of_targets)
]
)
)
# prop and attributes
assert s.dtype == numpy.int16
assert s.continuous == False
assert s.shape == (6, 1)
assert s.N == None
assert s.high == [30, 30, 30, 30, 30, 30]
assert s.low == [0, 0, 0, 0, 0, 0]
for i in range(s.shape[0]):
assert (s.range[i] == numpy.array([[i for i in range(31)]])).all()
# __ methods
# __len__
assert len(s) == 6
# __contains__
assert numpy.array([1, 2, 4, 5, 3, 2]) in s
assert numpy.array([-2, 5, 1, 1, 1, 1]) not in s
assert numpy.array([1, 35, 1, 1, 1, 1]) not in s
assert numpy.array([1, 35, 1, 1]) not in s
# __eq__
ss = Space(
[numpy.array([i for i in range(31)], dtype=numpy.int16) for j in range(6)]
)
assert ss == s
sss = Space(
[
numpy.array([i for i in range(gridsize[0])], dtype=numpy.int16),
numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16),
]
)
assert sss != s
ssss = Space(
flatten(
[
[
numpy.array([i for i in range(31)], dtype=numpy.int16)
for j in range(5)
],
[numpy.array([i for i in range(5)], dtype=numpy.int16)],
]
)
)
assert ssss != s
q = Space(
[numpy.array([i - j for i in range(31)], dtype=numpy.int16) for j in range(6)]
)
r = Space(
[numpy.array([i + j for i in range(31)], dtype=numpy.int16) for j in range(6)]
)
assert q != s
assert r != s
# __iter__
for n, _s in enumerate(s):
assert _s == Space(
[numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16)]
)
assert n == 5
# =========== None single space
s = Space([numpy.array([None], dtype=object)])
# prop and attributes
assert s.dtype == object
assert s.continuous == False
assert s.shape == (1, 1)
assert s.range == [None]
assert s.high == [None]
assert s.low == [None]
assert s.N == None
# __ methods
# __len__
assert len(s) == 1
def test_sample():
# ================== Discrete
number_of_targets = 3
gridsize = [5, 5]
s = Space(
flatten(
[
[
numpy.array([i for i in range(gridsize[0])], dtype=numpy.int16),
numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16),
]
for j in range(number_of_targets)
]
),
seed=123,
)
# -------- Check if samples are valid
for i in range(100):
assert s.sample() in s
# --------- Check two samples are different
assert (s.sample() != s.sample()).any()
# --------- Check that seeding works
ss = copy.deepcopy(s)
assert (ss.sample() == s.sample()).all()
sss = Space(
flatten(
[
[
numpy.array([i for i in range(gridsize[0])], dtype=numpy.int16),
numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16),
]
for j in range(number_of_targets)
]
),
seed=123,
)
ssss = Space(
flatten(
[
[
numpy.array([i for i in range(gridsize[0])], dtype=numpy.int16),
numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16),
]
for j in range(number_of_targets)
]
),
seed=123,
)
sfive = Space(
flatten(
[
[
numpy.array([i for i in range(gridsize[0])], dtype=numpy.int16),
numpy.array([i for i in range(gridsize[1])], dtype=numpy.int16),
]
for j in range(number_of_targets)
]
),
seed=13,
)
a1 = sss.sample()
a2 = ssss.sample()
a3 = sfive.sample()
assert (a1 == a2).all()
assert (a1 != a3).any()
# =============== Continuous
s = Space(
[
-numpy.ones((2, 2), dtype=numpy.float32),
numpy.ones((2, 2), dtype=numpy.float32),
]
)
# -------- Check if samples are valid
for i in range(100):
assert s.sample() in s
# --------- Check two samples are different
assert (s.sample() != s.sample()).any()
# --------- Check that seeding works
ss = copy.deepcopy(s)
assert (ss.sample() == s.sample()).all()
def test_eq():
s = Space(
[
-numpy.ones((2, 2), dtype=numpy.float32),
numpy.ones((2, 2), dtype=numpy.float32),
]
)
v = Space(
[
-numpy.ones((2, 2), dtype=numpy.float32),
numpy.ones((2, 2), dtype=numpy.float32),
]
)
w = Space(
[
-numpy.ones((2, 2), dtype=numpy.float64),
numpy.ones((2, 2), dtype=numpy.float64),
]
)
assert s == v
assert s != w
def test_getitem():
# discrete
s = Space(
[
numpy.array([1, 2, 3], dtype=numpy.int16),
]
)
assert s[0] == s
# continuous
print("\n================\n")
s = Space(
[
-numpy.ones((2, 2), dtype=numpy.float32),
numpy.ones((2, 2), dtype=numpy.float32),
]
)
assert s[0] == s
# multidiscrete
s = Space(
[
numpy.array([3, 4, 5], dtype=numpy.int16),
numpy.array([1, 2, 3], dtype=numpy.int16),
]
)
assert s[0] == Space(
[
numpy.array([3, 4, 5], dtype=numpy.int16),
]
)
assert s[1] == Space(
[
numpy.array([1, 2, 3], dtype=numpy.int16),
]
)
def test_shortcuts():
space = discrete_space([1, 2, 3])
space = continuous_space(-numpy.eye(2, 2), numpy.eye(2, 2))
space = multidiscrete_space([[1, 2, 3], [4, 5, 6]])
```
|
{
"source": "jgorman3691/PyQt5Tutorials",
"score": 3
}
|
#### File: Jan Bodnar - zetcode/Painting/bezier_curve.py
```python
import sys, PyQt5
from PyQt5.QtGui import QPainter, QPainterPath
from PyQt5.QtWidgets import QWidget, QApplication
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 380, 250)
self.setWindowTitle('Bézier Curve')
self.show()
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
qp.setRenderHint(QPainter.Antialiasing)
self.drawBezierCurve(qp)
qp.end()
def drawBezierCurve(self, qp):
path = QPainterPath()
path.moveTo(30, 30)
path.cubicTo(30, 30, 200, 350, 350, 30)
qp.drawPath(path)
def main():
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
```
#### File: Jan Bodnar - zetcode/Painting/draw_text.py
```python
import sys, PyQt5
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QColor, QFont
from PyQt5.QtCore import Qt
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.text = "I am a powerful, loving, trusting, confident man."
self.setGeometry(300, 300, 350, 300)
self.setWindowTitle('Drawing Text')
self.show()
def paintEvent(self, event):
qp = QPainter()
qp.begin(self)
self.drawText(event, qp)
qp.end()
def drawText(self, event, qp):
qp.setPen(QColor(168, 34, 3))
qp.setFont(QFont('Decorative', 10))
qp.drawText(event.rect(), Qt.AlignCenter, self.text)
def main():
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
```
#### File: Jan Bodnar - zetcode/Windows/submenu.py
```python
import sys, PyQt5
from PyQt5.QtWidgets import QMainWindow, QAction, QMenu, QApplication
class Example(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
menubar = self.menuBar()
fileMenu = menubar.addMenu('File')
impMenu = QMenu('Import', self)
impAct = QAction('Import Mail', self)
impMenu.addAction(impAct)
newAct = QAction('New', self)
fileMenu.addAction(newAct)
fileMenu.addMenu(impMenu)
self.setGeometry(300, 300, 300, 200)
self.setWindowTitle('Submenu')
self.show()
def main():
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
```
|
{
"source": "jgorman3/Wireless-Communications",
"score": 4
}
|
#### File: Wireless-Communications/Fourier/FourierSeries.py
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
# Enter a periodic signal and output its Fourier series representation
class FourierSeries:
    def __init__(self, function, period, startval):
        # Making the assumption this is all I need
        self.function = function
        self.period = period
        self.startval = startval
        self.frequency = 1 / period
        self.twopi = 2 * np.pi
        self.omega = self.twopi * self.frequency
    def TrigForm(self, intervals, x):
        """Evaluate the truncated trigonometric Fourier series at x."""
        # a0: average value of the signal over one period
        a0 = self.frequency * integrate.quad(self.function, 0, self.period)[0]
        series = a0
        for n in range(1, intervals + 1):
            # an: cosine coefficient, bn: sine coefficient of harmonic n
            cosfunction = lambda t, n=n: self.function(t) * np.cos(n * self.omega * t)
            sinfunction = lambda t, n=n: self.function(t) * np.sin(n * self.omega * t)
            an = (2 / self.period) * integrate.quad(cosfunction, 0, self.period)[0]
            bn = (2 / self.period) * integrate.quad(sinfunction, 0, self.period)[0]
            series += an * np.cos(n * self.omega * x) + bn * np.sin(n * self.omega * x)
        return series
    def CompactTrigForm(self, function, period):
        return 0
    def ExponentialForm(self, function, period):
        return 0
    def FormConversions(self, function, period):
        return 0
plt.plot([1, 2, 3, 4])
plt.ylabel('some numbers')
plt.show()
```
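As a sanity check of the coefficient formulas used in `TrigForm`, the snippet below computes a0, an and bn for an odd square wave directly with `scipy.integrate.quad`; a0 and every an should come out numerically zero, and bn should approach 4/(n*pi) for odd n (quad may warn about the discontinuity, which is harmless here).
```python
import numpy as np
import scipy.integrate as integrate

T = 2.0                                                # period of the test signal
omega = 2 * np.pi / T
square = lambda t: 1.0 if (t % T) < T / 2 else -1.0    # odd square wave, amplitude 1

a0 = (1 / T) * integrate.quad(square, 0, T)[0]
print("a0 =", round(a0, 6))
for n in range(1, 6):
    an = (2 / T) * integrate.quad(lambda t: square(t) * np.cos(n * omega * t), 0, T)[0]
    bn = (2 / T) * integrate.quad(lambda t: square(t) * np.sin(n * omega * t), 0, T)[0]
    expected = 4 / (n * np.pi) if n % 2 else 0.0
    print(n, round(an, 6), round(bn, 6), "expected bn:", round(expected, 6))
```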
|
{
"source": "jgornet/NeuroTorch",
"score": 3
}
|
#### File: neurotorch/augmentations/blur.py
```python
from neurotorch.augmentations.augmentation import Augmentation
from neurotorch.datasets.dataset import Data
from scipy.ndimage.filters import gaussian_filter
from numpy.random import normal
from scipy.ndimage.filters import median_filter
from random import random
class Blur(Augmentation):
def __init__(self, volume, max_blur=(0.66, 4, 4), **kwargs):
self.setMaxBlur(max_blur)
super().__init__(volume, **kwargs)
def augment(self, bounding_box):
raw, label = self.getParent().get(bounding_box)
augmented_raw, augmented_label = self.blur(raw, label, self.max_blur)
return (augmented_raw, augmented_label)
def setFrequency(self, frequency):
self.frequency = frequency
def setMaxBlur(self, max_blur):
self.max_blur = max_blur
def blur(self, raw_data, label_data, max_blur):
raw = raw_data.getArray().copy()
gaussian_raw = gaussian_filter(raw, sigma=max_blur)
noise = raw - median_filter(raw, size=(3, 3, 3))
gaussian_raw = gaussian_raw + noise
gaussian_raw = gaussian_raw.astype(raw.dtype)
augmented_raw_data = Data(gaussian_raw, raw_data.getBoundingBox())
return augmented_raw_data, label_data
```
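The augmentation above blurs the raw volume but adds back the high-frequency residual (raw minus a 3x3x3 median filter), so noise-like detail survives the Gaussian smoothing. A minimal stand-alone sketch of that idea on a random array:
```python
import numpy as np
from scipy.ndimage.filters import gaussian_filter, median_filter

raw = np.random.randint(0, 255, size=(8, 64, 64)).astype(np.float32)

blurred = gaussian_filter(raw, sigma=(0.66, 4, 4))   # same default max_blur as Blur above
noise = raw - median_filter(raw, size=(3, 3, 3))     # high-frequency residual of the raw data
augmented = (blurred + noise).astype(raw.dtype)
```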
#### File: neurotorch/datasets/filetypes.py
```python
from neurotorch.datasets.dataset import Volume, Array, Data
from neurotorch.datasets.datatypes import BoundingBox, Vector
from abc import abstractmethod
import fnmatch
import os.path
import h5py
import numpy as np
import tifffile as tif
class TiffVolume(Volume):
def __init__(self, tiff_file, bounding_box: BoundingBox,
iteration_size: BoundingBox=BoundingBox(Vector(0, 0, 0),
Vector(128, 128, 32)),
stride: Vector=Vector(64, 64, 16)):
"""
Loads a TIFF stack file or a directory of TIFF files and creates a
corresponding three-dimensional volume dataset
:param tiff_file: Either a TIFF stack file or a directory
containing TIFF files
:param chunk_size: Dimensions of the sample subvolume
"""
# Set TIFF file and bounding box
self.setFile(tiff_file)
super().__init__(bounding_box, iteration_size, stride)
def setFile(self, tiff_file):
if os.path.isfile(tiff_file) or os.path.isdir(tiff_file):
self.tiff_file = tiff_file
else:
raise IOError("{} was not found".format(tiff_file))
def getFile(self):
return self.tiff_file
def get(self, bounding_box):
return self.getArray().get(bounding_box)
def __enter__(self):
if os.path.isfile(self.getFile()):
try:
print("Opening {}".format(self.getFile()))
array = tif.imread(self.getFile())
except IOError:
raise IOError("TIFF file {} could not be " +
"opened".format(self.getFile()))
elif os.path.isdir(self.getFile()):
tiff_list = os.listdir(self.getFile())
tiff_list = filter(lambda f: fnmatch.fnmatch(f, '*.tif'),
tiff_list)
if tiff_list:
array = tif.TiffSequence(tiff_list).asarray()
else:
raise IOError("{} was not found".format(self.getFile()))
array = Array(array, bounding_box=self.getBoundingBox(),
iteration_size=self.getIterationSize(),
stride=self.getStride())
self.setArray(array)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.setArray(None)
def _indexToBoundingBox(self, idx):
return self.getArray()._indexToBoundingBox(idx)
class Hdf5Volume(Volume):
def __init__(self, hdf5_file, dataset, bounding_box: BoundingBox,
iteration_size: BoundingBox=BoundingBox(Vector(0, 0, 0),
Vector(128, 128, 20)),
stride: Vector=Vector(64, 64, 10)):
"""
Loads a HDF5 dataset and creates a corresponding three-dimensional
volume dataset
:param hdf5_file: A HDF5 file path
:param dataset: A HDF5 dataset name
:param chunk_size: Dimensions of the sample subvolume
"""
self.setFile(hdf5_file)
self.setDataset(dataset)
super().__init__(bounding_box, iteration_size, stride)
def setFile(self, hdf5_file):
self.hdf5_file = hdf5_file
def getFile(self):
return self.hdf5_file
def setDataset(self, hdf5_dataset):
self.hdf5_dataset = hdf5_dataset
def getDataset(self):
return self.hdf5_dataset
def __enter__(self):
if os.path.isfile(self.getFile()):
with h5py.File(self.getFile(), 'r') as f:
array = f[self.getDataset()].value
array = Array(array, bounding_box=self.getBoundingBox(),
iteration_size=self.getIterationSize(),
stride=self.getStride())
        self.setArray(array)
        return self
def __exit__(self, exc_type, exc_value, traceback):
self.setArray(None)
```
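Both volume classes are intended to be used as context managers, so the backing file only lives in memory inside the `with` block. A usage sketch (the TIFF path and bounding-box extents are placeholders):
```python
from neurotorch.datasets.filetypes import TiffVolume
from neurotorch.datasets.datatypes import BoundingBox, Vector

# "inputs.tif" is a placeholder path
with TiffVolume("inputs.tif",
                BoundingBox(Vector(0, 0, 0), Vector(1024, 512, 50))) as volume:
    sample = volume.get(BoundingBox(Vector(0, 0, 0), Vector(128, 128, 20)))
    print(sample.getArray().shape)
```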
#### File: neurotorch/loss/SimplePointWeighting.py
```python
import torch
from torch.nn import BCEWithLogitsLoss, Module
import numpy as np
from scipy.ndimage.measurements import label
class SimplePointBCEWithLogitsLoss(Module):
"""
Weights the binomial cross-entropy loss by the non-simple points
"""
    def __init__(self, simple_weight=1, non_simple_weight=1):
        super().__init__()
        # Stored under private names so they do not shadow the simple_weight() method below
        self._simple_weight = simple_weight
        self._non_simple_weight = non_simple_weight
        self.bce = BCEWithLogitsLoss(reduction='none')
    def forward(self, prediction, label):
        simple_weight = self._simple_weight
        non_simple_weight = self._non_simple_weight
prediction_weights = self.simple_weight(
prediction, simple_weight=0, non_simple_weight=1,
)
label_weights = self.simple_weight(
label, simple_weight=0, non_simple_weight=1,
)
weight = (prediction_weights + label_weights) > 0
weight = (weight.float() * non_simple_weight) + \
((~weight).float() * simple_weight)
cost = self.bce(prediction, label)
cost = weight * cost
return cost.mean()
def simple_weight(self, tensor, simple_weight=1, non_simple_weight=1):
non_simple_points = self.label_nonsimple_points(tensor)
simple_points = tensor.new_ones(tensor.size()).to(tensor.get_device()) - \
non_simple_points
inputs_weights = non_simple_weight * non_simple_points + \
simple_weight * simple_points
return inputs_weights
def label_nonsimple_points(self, tensor, threshold=0):
"""
Labels every non-simple point in a tensor
:param tensor: A PyTorch tensor
:param threshold: The threshold to binarize the tensor
"""
try:
device = tensor.get_device()
except RuntimeError:
raise RuntimeError("simple point weighting currently only works" +
" for GPUs")
array = tensor.to("cpu")
array = array.data.numpy()
array = (array > threshold)
labeled_array, num_features = label(array)
size = labeled_array.shape
padded_array = np.pad(labeled_array, (1,), 'edge')
result = np.zeros(size)
for k in range(0, size[0]):
for j in range(0, size[1]):
for i in range(0, size[2]):
if self._is_nonsimple_point(padded_array[k:k+3,
j:j+3,
i:i+3]):
result[k, j, i] = 1
result = torch.from_numpy(result).to(device).type(type(tensor))
return result
def _is_nonsimple_point(self, neighborhood):
"""
Determines whether the center voxel in a labeled 3x3 neighborhood is simple
:param neighborhood: A labeled 3x3 Numpy array
"""
# Skip if the point is background
if (neighborhood[1, 1, 1] == 0).any():
return False
# Setup neighborhood
result = np.copy(neighborhood)
center_point_label = result[1, 1, 1]
# Create 18-neighborhood structure
s = np.zeros((3, 3, 3))
s[0, :, :] = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
s[1, :, :] = np.array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
s[2, :, :] = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
# Calculates the topological number of the cavity
result[result == 0] = -1
labeled_array, num_features = label(result != center_point_label,
structure=s)
if num_features != 1:
return True
# Calculates the topological number of the component
result = (result == center_point_label)
result[1, 1, 1] = 0
labeled_array, num_features = label(result,
structure=np.ones((3, 3, 3)))
if num_features != 1:
return True
# If the prior conditions are not satisfied, the point is simple
return False
```
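The simple-point test above boils down to two connected-component counts on a 3x3x3 neighborhood: one on the complement with the 18-connectivity structure, one on the component itself with full 26-connectivity. A small stand-alone illustration of the second count (the example neighborhood is made up):
```python
import numpy as np
from scipy.ndimage.measurements import label

# Hypothetical binary neighborhood: three voxels on the main diagonal, so the
# outer two are connected only through the center voxel.
neighborhood = np.zeros((3, 3, 3), dtype=int)
neighborhood[0, 0, 0] = neighborhood[1, 1, 1] = neighborhood[2, 2, 2] = 1

component = neighborhood.copy()
component[1, 1, 1] = 0                                 # drop the center, as in _is_nonsimple_point
_, num_features = label(component, structure=np.ones((3, 3, 3)))
print(num_features)   # 2 -> removing the center splits the component, so the center is non-simple
```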
#### File: neurotorch/training/logging.py
```python
from neurotorch.core.trainer import TrainerDecorator
import tensorboardX
import os
import logging
import time
import numpy as np
class LossWriter(TrainerDecorator):
"""
Logs the loss at each iteration to a Tensorboard log
"""
def __init__(self, trainer, logger_dir, experiment_name):
"""
Initializes the Tensorboard writer
:param trainer: Trainer object that the class wraps
:param logger_dir: Directory to save Tensorboard logs
:param experiment_name: The name to mark the experiment
"""
if not os.path.isdir(logger_dir):
raise IOError("{} is not a valid directory".format(logger_dir))
super().__init__(trainer)
experiment_dir = os.path.join(logger_dir, experiment_name)
os.makedirs(experiment_dir, exist_ok=True)
self.train_writer = tensorboardX.SummaryWriter(os.path.join(experiment_dir,
"train_log"))
self.validation_writer = tensorboardX.SummaryWriter(os.path.join(experiment_dir,
"validation_log"))
self.iteration = 0
def log_loss(self, loss: float, duration: float, iteration: int):
"""
Writes the loss onto the Tensorboard log
:param loss: The training loss of the model
:param duration: The time elapsed by the current iteration
:param iteration: The current iteration of the model
"""
self.train_writer.add_scalar("Time", duration, iteration)
self.train_writer.add_scalar("Loss", loss, iteration)
def evaluate(self, batch):
start = time.time()
loss, accuracy, output = super().evaluate(batch)
end = time.time()
duration = end - start
self.validation_writer.add_scalar("Time", duration, self.iteration)
self.validation_writer.add_scalar("Loss", loss, self.iteration)
self.validation_writer.add_scalar("Accuracy", accuracy*100, self.iteration)
return loss, accuracy, output
def run_epoch(self, sample_batch):
"""
Runs an epoch and saves the parameters onto the Tensorboard log
:param sample_batch: A batch of input/label samples for training
"""
start = time.time()
loss = super().run_epoch(sample_batch)
end = time.time()
duration = end - start
self.iteration += 1
if self.iteration % 10 == 0:
self.log_loss(loss, duration, self.iteration)
return loss
class TrainingLogger(TrainerDecorator):
"""
Logs the iteration parameters onto a plain text log file
"""
def __init__(self, trainer, logger_dir=None):
"""
Initializes the Python Logger
:param trainer: Trainer object that the class wraps
:param logger_dir: The directory to save logs
"""
if logger_dir is not None and not os.path.isdir(logger_dir):
raise IOError("{} is not a valid directory".format(logger_dir))
super().__init__(trainer)
self.logger = logging.getLogger("Trainer")
self.logger.setLevel(logging.INFO)
if logger_dir:
file_handler = logging.FileHandler(os.path.join(logger_dir,
"training.log"))
self.logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
self.logger.addHandler(console_handler)
self.iteration = 0
def evaluate(self, batch):
start = time.time()
loss, accuracy, output = super().evaluate(batch)
end = time.time()
duration = end - start
self.logger.info("Iteration: {}, Accuracy: {}, Loss: {}, Time: {}".format(self.iteration,
accuracy * 100,
loss,
duration))
return loss, accuracy, output
def run_epoch(self, sample_batch):
"""
Runs an epoch and saves the parameters in a log
:param sample_batch: A batch of input/label samples for training
"""
start = time.time()
loss = super().run_epoch(sample_batch)
end = time.time()
duration = end - start
self.iteration += 1
self.logger.info("Iteration: {}, Loss: {}, Time: {}".format(self.iteration,
loss,
duration))
return loss
class ImageWriter(TrainerDecorator):
"""
Write the image of each validation to a Tensorboard log
"""
def __init__(self, trainer, logger_dir, experiment_name):
"""
Initializes the Tensorboard writer
:param trainer: Trainer object that the class wraps
:param logger_dir: Directory to save Tensorboard logs
:param experiment_name: The name to mark the experiment
"""
if not os.path.isdir(logger_dir):
raise IOError("{} is not a valid directory".format(logger_dir))
super().__init__(trainer)
experiment_dir = os.path.join(logger_dir, experiment_name)
os.makedirs(experiment_dir, exist_ok=True)
self.image_writer = tensorboardX.SummaryWriter(os.path.join(experiment_dir,
"validation_image"))
self.iteration = 0
def evaluate(self, batch):
loss, accuracy, output = super().evaluate(batch)
inputs = np.amax(batch[0].cpu().numpy(), axis=2).astype(np.float)
inputs = (inputs + 200.0) * 0.50 / (np.max(inputs) - np.min(inputs))
inputs = np.concatenate(list(inputs), axis=2)
labels = np.amax(batch[1].cpu().numpy(), axis=2).astype(np.float) * 0.9
labels = np.concatenate(list(labels), axis=2)
prediction = np.amax(1/(1 + np.exp(-output[0])), axis=2)
prediction = np.concatenate(list(prediction), axis=2)
self.image_writer.add_image("input_image", inputs, self.iteration)
self.image_writer.add_image("label_image", labels, self.iteration)
self.image_writer.add_image("prediction_image", prediction, self.iteration)
return loss, accuracy, output
def run_epoch(self, sample_batch):
"""
Runs an epoch and saves the parameters in a log
:param sample_batch: A batch of input/label samples for training
"""
loss = super().run_epoch(sample_batch)
self.iteration += 1
return loss
```
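All three writers are `TrainerDecorator` subclasses, so they compose by wrapping each other around the base trainer; each `run_epoch`/`evaluate` call then logs before delegating. A stacking sketch (the base `trainer` object, the batch and the log directory are placeholders):
```python
from neurotorch.training.logging import LossWriter, TrainingLogger, ImageWriter

trainer = ...                 # placeholder: a NeuroTorch trainer instance
trainer = TrainingLogger(trainer, logger_dir="./logs")
trainer = LossWriter(trainer, "./logs", "experiment-1")
trainer = ImageWriter(trainer, "./logs", "experiment-1")

loss = trainer.run_epoch(sample_batch)   # sample_batch: placeholder input/label batch
```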
#### File: NeuroTorch/tests/test_dataset.py
```python
from neurotorch.datasets.dataset import (AlignedVolume, Array, PooledVolume)
from neurotorch.datasets.filetypes import (TiffVolume, Hdf5Volume)
from neurotorch.datasets.specification import JsonSpec
import numpy as np
import unittest
import tifffile as tif
import os.path
import pytest
from os import getpid
from psutil import Process
from neurotorch.datasets.datatypes import BoundingBox, Vector
import time
IMAGE_PATH = "./tests/images/"
class TestDataset(unittest.TestCase):
def test_torch_dataset(self):
input_dataset = TiffVolume(os.path.join(IMAGE_PATH,
"inputs.tif"),
BoundingBox(Vector(0, 0, 0),
Vector(1024, 512, 50)))
label_dataset = TiffVolume(os.path.join(IMAGE_PATH,
"labels.tif"),
BoundingBox(Vector(0, 0, 0),
Vector(1024, 512, 50)))
input_dataset.__enter__()
label_dataset.__enter__()
training_dataset = AlignedVolume((input_dataset, label_dataset),
iteration_size=BoundingBox(Vector(0, 0, 0), Vector(128, 128, 20)),
stride=Vector(128, 128, 20))
tif.imsave(os.path.join(IMAGE_PATH, "test_input.tif"),
training_dataset[10][0].getArray())
tif.imsave(os.path.join(IMAGE_PATH, "test_label.tif"),
training_dataset[10][1].getArray()*255)
def test_tiff_dataset(self):
# Test that TiffVolume opens a TIFF stack
testDataset = TiffVolume(os.path.join(IMAGE_PATH,
"inputs.tif"),
BoundingBox(Vector(0, 0, 0),
Vector(1024, 512, 50)),
iteration_size=BoundingBox(Vector(0, 0, 0), Vector(128, 128, 20)),
stride=Vector(128, 128, 20))
testDataset.__enter__()
# Test that TiffVolume has the correct length
self.assertEqual(64, len(testDataset),
"TIFF dataset size does not match correct size")
# Test that TiffVolume outputs the correct samples
self.assertTrue((tif.imread(os.path.join(IMAGE_PATH,
"test_sample.tif"))
                         == testDataset[10].getArray()).all(),
"TIFF dataset value does not match correct value")
# Test that TiffVolume can read and write consistent samples
tif.imsave(os.path.join(IMAGE_PATH,
"test_write.tif"), testDataset[10].getArray())
self.assertTrue((tif.imread(os.path.join(IMAGE_PATH,
"test_write.tif"))
                         == testDataset[10].getArray()).all(),
"TIFF dataset output does not match written output")
def test_stitcher(self):
# Stitch a test TIFF dataset
inputDataset = TiffVolume(os.path.join(IMAGE_PATH,
"inputs.tif"),
BoundingBox(Vector(0, 0, 0),
Vector(1024, 512, 50)))
outputDataset = Array(np.zeros(inputDataset
.getBoundingBox()
.getNumpyDim()))
inputDataset.__enter__()
for data in inputDataset:
outputDataset.blend(data)
self.assertTrue((inputDataset[20].getArray()
                         == outputDataset[20].getArray()).all(),
"Blending output does not match input")
tif.imsave(os.path.join(IMAGE_PATH,
"test_stitch.tif"),
outputDataset[100]
.getArray()
.astype(np.uint16))
def test_memory_free(self):
process = Process(getpid())
initial_memory = process.memory_info().rss
start = time.perf_counter()
with TiffVolume(os.path.join(IMAGE_PATH, "inputs.tif"),
BoundingBox(Vector(0, 0, 0),
Vector(1024, 512, 50))) as v:
volume_memory = process.memory_info().rss
end = time.perf_counter()
print("Load time: {} secs".format(end-start))
final_memory = process.memory_info().rss
self.assertAlmostEqual(initial_memory, final_memory,
delta=initial_memory*0.2,
msg=("memory leakage: final memory usage is " +
"larger than the initial memory usage"))
self.assertLess(initial_memory, volume_memory,
msg=("volume loading error: volume memory usage is " +
"not less than the initial memory usage"))
def test_pooled_volume(self):
pooled_volume = PooledVolume(stack_size=5)
pooled_volume.add(TiffVolume(os.path.join(IMAGE_PATH,
"inputs.tif"),
BoundingBox(Vector(0, 0, 0),
Vector(1024, 512, 50))))
pooled_volume.add(TiffVolume(os.path.join(IMAGE_PATH,
"inputs.tif"),
BoundingBox(Vector(0, 0, 50),
Vector(1024, 512, 100))))
output = pooled_volume.get(BoundingBox(Vector(0, 0, 40),
Vector(128, 128, 60)))
self.assertTrue((tif.imread(os.path.join(IMAGE_PATH,
"test_pooled_volume.tif"))
                         == output.getArray()).all(),
"PooledVolume output does not match test case")
def test_hdf5_volume(self):
pooled_volume = PooledVolume(stack_size=5)
pooled_volume.add(Hdf5Volume(os.path.join(IMAGE_PATH,
"inputs.h5"),
"input-1",
BoundingBox(Vector(0, 0, 0),
Vector(1024, 512, 50))))
pooled_volume.add(Hdf5Volume(os.path.join(IMAGE_PATH,
"inputs.h5"),
"input-2",
BoundingBox(Vector(0, 0, 50),
Vector(1024, 512, 100))))
output = pooled_volume.get(BoundingBox(Vector(0, 0, 40),
Vector(128, 128, 60)))
self.assertTrue((tif.imread(os.path.join(IMAGE_PATH,
"test_pooled_volume.tif"))
                         == output.getArray()).all(),
"PooledVolume output does not match test case")
def test_json_spec(self):
# Tests the JSON volume specification
json_spec = JsonSpec()
pooled_volume = json_spec.open(os.path.join(IMAGE_PATH,
"inputs_spec.json"))
output = pooled_volume.get(BoundingBox(Vector(0, 0, 40),
Vector(128, 128, 60)))
self.assertTrue((tif.imread(os.path.join(IMAGE_PATH,
"test_pooled_volume.tif"))
                         == output.getArray()).all(),
"JsonSpec output does not match test case")
```
|
{
"source": "jgorset/django-respite",
"score": 2
}
|
#### File: django-respite/tests/monkeys.py
```python
from urllib import urlencode
from urlparse import urlparse
from django.test.client import Client, FakePayload
from utils import monkeypatch_method
@monkeypatch_method(Client)
def patch(self, path, data, content_type, **extra):
"""Construct a PATCH request."""
if isinstance(data, dict):
data = urlencode(data)
parsed_url = urlparse(path)
environment = {
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': content_type,
'PATH_INFO': parsed_url[2],
'QUERY_STRING': parsed_url[4],
'REQUEST_METHOD': 'PATCH',
'wsgi.input': FakePayload(data)
}
environment.update(extra)
return self.request(**environment)
```
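Once the module is imported, Django's test Client gains a `patch` method alongside its built-in `post` and `put`. A usage sketch (assumes Django settings are configured, as in the rest of the test suite; the URL is a placeholder):
```python
from django.test.client import Client
import monkeys  # importing applies the Client.patch monkeypatch

client = Client()
response = client.patch(
    '/articles/1/',                                    # placeholder URL
    data={'title': 'Updated title'},
    content_type='application/x-www-form-urlencoded',
)
```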
#### File: tests/project/views.py
```python
from django.http import HttpResponse
def home(request):
return HttpResponse(request.method)
```
#### File: django-respite/tests/test_middleware.py
```python
import json
from nose.tools import *
from urllib import urlencode
from django.test.client import Client, RequestFactory
from respite.middleware import *
client = Client()
def test_json_middleware():
request = RequestFactory().post(
path = '/',
data = json.dumps({
'foo': 'foo',
'bar': 'bar',
'baz': 'baz',
'hogera': [
{'hoge': 'hoge'},
{'fuga': 'fuga'}
]
}),
content_type = 'application/json'
)
JsonMiddleware().process_request(request)
assert_equal(request.POST, {
'foo': ['foo'],
'bar': ['bar'],
'baz': ['baz'],
'hogera': [
{'hoge': ['hoge']},
{'fuga': ['fuga']}
]
})
def test_http_method_override_middleware():
request = RequestFactory().post(
path = '/',
data = {
'foo': 'bar',
'_method': 'PUT'
}
)
HttpMethodOverrideMiddleware().process_request(request)
assert_equal(request.method, 'PUT')
assert_equal(request.POST, {})
def test_http_put_middleware():
request = RequestFactory().put(
path = '/',
data = urlencode({
'foo': 'bar'
}),
content_type = "application/x-www-form-urlencoded"
)
HttpPutMiddleware().process_request(request)
assert_equal(request.PUT, {
'foo': ['bar']
})
```
|
{
"source": "Jgorsick/Advocacy_Angular",
"score": 3
}
|
#### File: openstates/ak/events.py
```python
import re
import datetime
from billy.scrape.events import Event, EventScraper
from openstates.utils import LXMLMixin
import pytz
class AKEventScraper(EventScraper, LXMLMixin):
jurisdiction = 'ak'
_TZ = pytz.timezone('US/Alaska')
_DATETIME_FORMAT = '%m/%d/%Y %I:%M %p'
def scrape(self, session, chambers):
EVENTS_URL = 'http://www.akleg.gov/basis/Meeting/Find'
events = self.lxmlize(EVENTS_URL).xpath(
'//ul[@id="meetingResults"]/li')
for info in events:
event_url = info.xpath('span[@class="col04"]/a/@href')[0]
doc = self.lxmlize(event_url)
# Skip events that are placeholders or tentative
# Also skip whole-chamber events
if any(x.strip().startswith("No Meeting") for x in
doc.xpath('//div[@class="schedule"]//text()')) \
or "session" in \
info.xpath('span[@class="col01"]/text()')[0].lower():
continue
event = Event(
session=session,
when=self._TZ.localize(datetime.datetime.strptime(
info.xpath('span[@class="col02"]/text()')[0],
self._DATETIME_FORMAT
)),
type='committee:meeting',
description=" ".join(x.strip() for x
in doc.xpath('//div[@class="schedule"]//text()')
if x.strip()),
location=doc.xpath(
'//div[@class="heading-container"]/span/text()')
[0].title()
)
event.add_participant(
type='host',
participant=info.xpath(
'span[@class="col01"]/text()')[0].title(),
participant_type='committee'
)
for document in doc.xpath('//td[@data-label="Document"]/a'):
event.add_document(
name=document.xpath('text()')[0],
url=document.xpath('@href')[0]
)
event.add_source(EVENTS_URL)
event.add_source(event_url.replace(" ", "%20"))
self.save_event(event)
```
#### File: openstates/az/bills.py
```python
import re
from billy.scrape import NoDataForPeriod
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
from . import utils
from .action_utils import get_action_type, get_verbose_action
from lxml import html
BASE_URL = 'http://www.azleg.gov/'
# map to type and official_type (maybe we can figure out PB/PZ and add names)
SPONSOR_TYPES = {'P': ('primary', 'P'),
'C': ('cosponsor', 'C'),
'PB': ('primary', 'PB'),
'PZ': ('primary', 'PZ'),
'CZ': ('cosponsor', 'CZ')}
# This string of hot garbage appears when a document hasn't been posted yet.
hot_garbage_404_fail = ('The Requested Document Has Not Been '
'Posted To The Web Site Yet.'
'|There Are No Documents For [A-Z\d]+'
'|The page cannot be displayed because an internal server error has occurred.')
class AZBillScraper(BillScraper):
def accept_response(self, response):
normal = super(AZBillScraper, self).accept_response(response)
return normal or response.status_code == 500
"""
Arizona Bill Scraper.
"""
jurisdiction = 'az'
def get_session_id(self, session):
"""
returns the session id for a given session
"""
return self.metadata['session_details'][session]['session_id']
def scrape_bill(self, chamber, session, bill_id):
"""
Scrapes documents, actions, vote counts and votes for
a given bill.
"""
session_id = self.get_session_id(session)
url = BASE_URL + 'DocumentsForBill.asp?Bill_Number=%s&Session_ID=%s' % (
bill_id.replace(' ', ''), session_id)
docs_for_bill = self.get(url).text
if re.search(hot_garbage_404_fail, docs_for_bill):
# Bailing here will prevent the bill from being saved, which
# occurs in the scrape_actions method below.
return
root = html.fromstring(docs_for_bill)
bill_title = root.xpath(
'//div[@class="ContentPageTitle"]')[1].text.strip()
b_type = utils.get_bill_type(bill_id)
bill = Bill(session, chamber, bill_id, bill_title, type=b_type)
bill.add_source(url)
path = '//tr[contains(td/font/text(), "%s")]'
link_path = '//tr[contains(td/a/@href, "%s")]'
link_path2 = '//tr[contains(td/font/a/@href, "%s")]'
# versions
for href in root.xpath("//a[contains(@href, 'pdf')]"):
version_url = href.attrib['href']
if "bills" in version_url.lower():
name = list(href.getparent().getparent().getparent())
name = name[1].text_content()
bill.add_version(href.text_content(), version_url,
on_duplicate='use_old',
mimetype='application/pdf')
#fact sheets and summary
rows = root.xpath(link_path2 % '/summary/')
for row in rows:
tds = row.xpath('td')
fact_sheet = tds[1].text_content().strip()
fact_sheet_url = tds[1].xpath('string(font/a/@href)') or \
tds[2].xpath('string(font/a/@href)')
bill.add_document(fact_sheet, fact_sheet_url, type="summary")
#agendas
# skipping revised, cancelled, date, time and room from agendas
# but how to get the agenda type cleanly? meaning whether it is
# house or senate?
rows = root.xpath(link_path % '/agendas')
for row in rows:
tds = row.xpath('td')
agenda_committee = tds[0].text_content().strip()
agenda_html = tds[7].xpath('string(a/@href)').strip()
if agenda_html == '':
agenda_html = tds[6].xpath('string(a/@href)').strip()
bill.add_document(agenda_committee, agenda_html)
# House Calendars
# skipping calendar number, modified, date
rows = root.xpath(link_path % '/calendar/h')
for row in rows:
tds = row.xpath('td')
calendar_name = tds[0].text_content().strip()
calendar_html = tds[5].xpath('string(a/@href)')
bill.add_document(calendar_name, calendar_html,
type='house calendar')
# Senate Calendars
# skipping calendar number, modified, date
rows = root.xpath(link_path % '/calendar/s')
for row in rows:
tds = row.xpath('td')
calendar_name = tds[0].text_content().strip()
calendar_html = tds[5].xpath('string(a/@href)')
bill.add_document(calendar_name, calendar_html,
type='senate calendar')
# amendments
rows = root.xpath(path % 'AMENDMENT:')
for row in rows:
tds = row.xpath('td')
amendment_title = tds[1].text_content().strip()
amendment_link = tds[2].xpath('string(font/a/@href)')
if amendment_link == "": #if there's no html link, take the pdf one which is next
amendment_link = tds[3].xpath('string(font/a/@href)')
if amendment_link:
bill.add_document(amendment_title, amendment_link,
type='amendment')
# videos
# http://azleg.granicus.com/MediaPlayer.php?view_id=13&clip_id=7684
rows = root.xpath(link_path % '&clip_id')
for row in rows:
tds = row.xpath('td')
video_title = tds[1].text_content().strip()
video_link = tds[2].xpath('string(a/@href)')
video_date = tds[0].text_content().strip()
bill.add_document(video_title, video_link, date=video_date,
type='video')
self.scrape_actions(chamber, session, bill)
def scrape_actions(self, chamber, session, bill):
"""
Scrape the actions for a given bill
"""
ses_num = utils.legislature_to_number(session)
bill_id = bill['bill_id'].replace(' ', '')
action_url = BASE_URL + 'FormatDocument.asp?inDoc=/legtext/%s/bills/%so.asp' % (ses_num, bill_id.lower())
action_page = self.get(action_url).text
if re.search(hot_garbage_404_fail, action_page):
# This bill has no actions yet, but that
# happened frequently with pre-filed bills
# before the 2013 session, so skipping probably
# isn't the thing to do.
self.save_bill(bill)
return
bill.add_source(action_url)
root = html.fromstring(action_page)
base_table = root.xpath('//table[@class="ContentAreaBackground"]')[0]
# common xpaths
table_path = '//table[contains(tr/td/b/text(), "%s")]'
#sponsors
sponsors = base_table.xpath('//sponsor')
for sponsor in sponsors:
name = sponsor.text.strip()
# sponsor.xpath('string(ancestor::td[1]/following-sibling::td[1]/text())').strip()
s_type = sponsor.getparent().getparent().getnext().text_content().strip()
s_type, o_type = SPONSOR_TYPES[s_type]
bill.add_sponsor(s_type, name, official_type=o_type)
#titles
table = base_table.xpath(table_path % 'TITLE')
if table:
for row in table[0].iterchildren('tr'):
title = row[1].text_content().strip()
if title != bill['title']:
bill.add_title(title)
for table in base_table.xpath('tr/td/table'):
action = table.xpath('string(tr[1]/td[1])').strip()
if action == '':
action = table.xpath('string(tr[1])').strip()
if (action.endswith('FIRST READ:') or
action.endswith('SECOND READ:') or 'WAIVED' in action):
rows = table.xpath('tr')
for row in rows:
action = row[0].text_content().strip()[:-1]
actor = 'lower' if action.startswith('H') else 'upper'
date = utils.get_date(row[1])
# bill:introduced
if (action.endswith('FIRST READ') or
action.endswith('FIRST WAIVED')):
if actor == chamber:
a_type = ['bill:introduced', 'bill:reading:1']
else:
a_type = 'bill:reading:1'
bill.add_action(actor, action, date, type=a_type)
else:
a_type = 'bill:reading:2'
bill.add_action(actor, action, date, type=a_type)
continue
elif action == 'COMMITTEES:':
# committee assignments
rows = table.xpath('tr')[1:]
for row in rows:
# First add the committee assigned action
meta_tag = row.xpath('.//meta')[0]
h_or_s = meta_tag.get('name')[0] # @name is HCOMMITTEE OR SCOMMITTEE
committee = meta_tag.get('content') # @content is committee abbrv
#actor is house or senate referring the bill to committee
actor = 'lower' if h_or_s.lower() == 'h' else 'upper'
act = 'assigned to committee: ' + \
utils.get_committee_name(committee, actor)
date = utils.get_date(row[1])
bill.add_action(actor, act, date, type='committee:referred')
# now lets see if there is a vote
vote_url = row[0].xpath('string(a/@href)')
if vote_url:
date = utils.get_date(row[3])
try:
act = row[5].text_content().strip()
except IndexError:
#not sure what to do if action is not specified
#skipping and throwing a warning for now
self.logger.warning("Vote has no action, skipping.")
continue
a_type = get_action_type(act, 'COMMITTEES:')
act = get_verbose_action(act)
bill.add_action(actor,
utils.get_committee_name(committee, actor) + ":" + act,
date, type=a_type, abbrv=committee)
self.scrape_votes(actor, vote_url, bill, date,
motion='committee: ' + act,
committees=committee,
type='other')
elif len(row) == 5:
# probably senate rules committee
date = utils.get_date(row[3])
if date == '':
date = utils.get_date(row[1])
act = row[4].text_content().strip()
a_type = get_action_type(act, 'COMMITTEES:')
act = get_verbose_action(act)
bill.add_action(actor,
utils.get_committee_name(
committee, actor) +
":" + act, date,
type=a_type, abbrv=committee)
continue
elif 'CAUCUS' in action:
rows = table.xpath('tr')[0:2]
for row in rows:
actor = utils.get_actor(row, chamber)
action = row[0].text_content().strip()
if action.endswith(':'):
action = action[:-1]
if len(row) != 3:
self.warning('skipping row: %s' %
row.text_content())
continue
result = row[2].text_content().strip()
# majority caucus Y|N
action = action + " recommends to concur: " + result
date = utils.get_date(row[1])
bill.add_action(actor, action, date, concur=result,
type='other')
continue
# transmit to house or senate
elif 'TRANSMIT TO' in action:
rows = table.xpath('tr')
for row in rows:
action = row[0].text_content().strip()[:-1]
actor = 'upper' if action.endswith('HOUSE') else 'lower'
date = utils.get_date(row[1])
bill.add_action(actor, action, date, type='other')
continue
# Committee of the whole actions
elif 'COW ACTION' in action:
rows = table.xpath('tr')
actor = utils.get_actor(rows[0], chamber)
if 'SIT COW ACTION' in action:
act = rows[0][-1].text_content().strip()
date = utils.get_date(rows[0][1])
else:
act = rows[1][2].text_content().strip()
date = utils.get_date(rows[1][1])
action = action + " " + get_verbose_action(act) # COW ACTION 1 DPA
bill.add_action(actor, action, date, type='other')
if rows[1][0].text_content().strip() == 'Vote Detail':
vote_url = rows[1][0].xpath('string(a/@href)')
self.scrape_votes(actor, vote_url, bill, date,
motion=action, type='other',
extra=act)
continue
# AMENDMENTS
elif 'AMENDMENTS' in action:
rows = table.xpath('tr')[1:]
for row in rows:
act = row.text_content().strip()
if act == '':
continue
if 'passed' in act or 'adopted' in act:
a_type = 'amendment:passed'
elif 'failed' in act:
a_type = 'amendment:failed'
elif 'withdrawn' in act:
a_type = 'amendment:withdrawn'
else:
a_type = 'other'
# actor and date will same as previous action
bill.add_action(actor, act, date, type=a_type)
continue
# CONFERENCE COMMITTEE
# http://www.azleg.gov/FormatDocument.asp?inDoc=/legtext/49Leg/2r/bills/hb2083o.asp
# MISCELLANEOUS MOTION
# MOTION TO RECONSIDER
elif action == 'MOTION TO RECONSIDER:':
date = utils.get_date(table[1][1])
if date:
if table[1][0].text_content().strip() == 'Vote Detail':
vote_url = table[1][0].xpath('string(a/@href)')
bill.add_action(actor, action, date, type=a_type)
self.scrape_votes(actor, vote_url, bill, date,
motion='motion to reconsider',
type='other')
else:
action = table[-1][1].text_content().strip()
bill.add_action(actor, action, date, type='other')
continue
elif (action.endswith('FINAL READ:') or
action.endswith('THIRD READ:')):
# house|senate final and third read
rows = table.xpath('tr')
# need to find out if third read took place in house or senate
# if an ancestor table contains 'TRANSMIT TO' then the action
# is taking place in that chamber, else it is in chamber
actor = utils.get_actor(rows[0], chamber)
# get a dict of keys from the header and values from the row
k_rows = utils.get_rows(rows[1:], rows[0])
action = rows[0][0].text_content().strip()
for row in k_rows:
a_type = [get_action_type(action, 'Generic')]
if row[action].text_content().strip() == 'Vote Detail':
vote_url = row.pop(action).xpath('string(a/@href)')
vote_date = utils.get_date(row.pop('DATE'))
try:
passed = row.pop('RESULT').text_content().strip()
except KeyError:
passed = row.pop('2/3 VOTE').text_content().strip()
                        # leaves vote counts, amended, emergency, two-thirds
                        # and possibly rfe left in k_rows. get the vote counts
                        # from scrape_votes and pass amended and emergency
                        # as kwargs to sort them in scrape_votes
pass_fail = {'PASSED': 'bill:passed',
'FAILED': 'bill:failed'}[passed]
a_type.append(pass_fail)
bill.add_action(actor, action, vote_date,
type=a_type)
row['type'] = 'passage'
self.scrape_votes(actor, vote_url, bill, vote_date,
passed=passed, motion=action,
**row)
else:
date = utils.get_date(row.pop('DATE'))
if date:
bill.add_action(actor, action, date, type=a_type)
continue
elif 'TRANSMITTED TO' in action:
# transmitted to Governor or secretary of the state
# SoS if it goes to voters as a proposition and memorials, etc
rows = table.xpath('tr')
actor = utils.get_actor(rows[0], chamber)
# actor is the actor from the previous statement because it is
# never transmitted to G or S without third or final read
sent_to = rows[0][1].text_content().strip()
date = utils.get_date(rows[0][2])
a_type = 'governor:received' if sent_to[0] == 'G' else 'other'
bill.add_action(actor, "TRANSMITTED TO " + sent_to, date,
type=a_type)
# See if the actor is the governor and whether he signed
# the bill or vetoed it
act, date, chapter, version = '', '', '', ''
for row in rows[1:]:
if row[0].text_content().strip() == 'ACTION:':
act = row[1].text_content().strip()
date = utils.get_date(row[2])
elif row[0].text_content().strip() == 'CHAPTER:':
chapter = row[1].text_content().strip()
elif row[0].text_content().strip() == 'CHAPTERED VERSION:':
version = row[1].text_content().strip()
elif row[0].text_content().strip() == 'TRANSMITTED VERSION:':
version = row[1].text_content().strip()
if act and sent_to == 'GOVERNOR':
a_type = 'governor:signed' if act == 'SIGNED' else 'governor:vetoed'
if chapter:
bill.add_action(sent_to.lower(), act, date,
type=a_type, chapter=chapter,
chaptered_version=version)
else:
bill.add_action(sent_to.lower(), act, date,
type=a_type)
continue
# this is probably only important for historical legislation
elif 'FINAL DISPOSITION' in action:
rows = table.xpath('tr')
if rows:
disposition = rows[0][1].text_content().strip()
bill['final_disposition'] = disposition
bill = self.sort_bill_actions(bill)
self.save_bill(bill)
def scrape(self, chamber, session):
try:
session_id = self.get_session_id(session)
except KeyError:
raise NoDataForPeriod(session)
view = {'lower':'allhouse', 'upper':'allsenate'}[chamber]
url = BASE_URL + 'Bills.asp?view=%s&Session_ID=%s' % (view, session_id)
bills_index = self.get(url).text
root = html.fromstring(bills_index)
bill_links = root.xpath('//div/table/tr[3]/td[4]/table/tr/td/' +
'table[2]/tr[2]/td/table/tr/td[2]/table/tr/td//a')
for link in bill_links:
bill_id = link.text.strip()
bill_id = " ".join(re.split('([A-Z]*)([0-9]*)', bill_id)).strip()
self.scrape_bill(chamber, session, bill_id)
def scrape_votes(self, chamber, url, bill, date, **kwargs):
"""
Scrapes the votes from a vote detail page with the legislator's names
this handles all of the votes and expects the following keyword
arguments: motion
an Arizona Vote object will have the following additional fields:
additional vote counts:
+not_voting, +excused, +absent, +present
additional vote lists
+NV, +EX, +AB, +P
this depends on the chamber and the committee
"""
o_args = {}
passed = '' # to test if we need to compare vote counts later
v_type = kwargs.pop('type')
if 'passed' in kwargs:
passed = {'PASSED': True, 'FAILED': False}[kwargs.pop('passed')]
if 'AMEND' in kwargs:
o_args['amended'] = kwargs.pop('AMEND').text_content().strip()
if 'motion' in kwargs:
motion = kwargs.pop('motion')
if 'EMER' in kwargs and kwargs['EMER'].text_content().strip():
o_args['EMER'] = kwargs.pop('EMER').text_content().strip()
if '2/3 VOTE' in kwargs and kwargs['2/3 VOTE'].text_content().strip():
o_args['2/3 VOTE'] = kwargs.pop('2/3 VOTE').text_content().strip()
if 'committee' in kwargs:
o_args['committee'] = utils.get_committee_name(kwargs.pop('committee'),
chamber)
if 'committees' in kwargs:
o_args['committee'] = utils.get_committee_name(kwargs.pop('committees'),
chamber)
vote_page = self.get(url).text
root = html.fromstring(vote_page)
vote_table = root.xpath('/html/body/div/table/tr[3]/td[4]/table/tr/td/table/tr/td/table')[0]
vote_count = vote_table.xpath('following-sibling::p/following-sibling::text()')
vote_string = vote_count[0].replace(u'\xa0', '').strip()
v_count = re.compile(r'\b[A-Z]*\s*[A-z]*:\s\d*')
v_list = v_count.findall(vote_string)
o_count = 0
for x in v_list:
k, v = x.split(':')
# make NOT VOTING not_voting
k = k.strip().replace(' ', '_').lower()
v = int(v.strip())
if k == 'ayes':
yes_count = int(v)
elif k == 'nays':
no_count = int(v)
else:
o_args.update({str(k):v})
o_count += int(v)
if passed == '':
passed = yes_count > no_count
if ('committee' not in o_args) and ('committees' not in o_args):
if chamber == 'upper' and passed:
if 'EMER' in o_args or '2/3 VOTE' in o_args:
passed = yes_count > 20
else:
passed = yes_count > 16
elif chamber == 'lower' and passed:
if 'EMER' in o_args or '2/3 VOTE' in o_args:
passed = yes_count > 40
else:
passed = yes_count > 31
vote = Vote(chamber, date, motion, passed, yes_count, no_count,
o_count, type=v_type, **o_args)
vote.add_source(url)
# grab all the tables descendant tds
tds = vote_table.xpath('descendant::td')
# pair 'em up
matched = [ tds[y:y+2] for y in range(0, len(tds), 2) ]
for name, v in iter(matched):
v = v.text_content().strip()
name = name.text_content().strip()
if name == '<NAME>':
continue
if v == 'Y':
vote.yes(name)
elif v == 'N':
vote.no(name)
else:
if v in vote:
vote[v].append(name)
else:
vote[v] = [name]
vote.other(name)
# Warn if the stated other_vote count doesn't add up.
if vote['other_count'] != len(vote['other_votes']):
self.warning("Other votes count on webpage didn't match "
"len(other_votes)...using length instead.")
vote['other_count'] = len(vote['other_votes'])
bill.add_vote(vote)
def sort_bill_actions(self, bill):
actions = bill['actions']
actions_list = []
out_of_order = []
new_list = []
if not actions:
return bill
action_date = actions[0]['date']
actions[0]['action'] = actions[0]['action'].lower()
actions_list.append(actions[0])
        # separate the actions that are out of order
for action in actions[1:]:
if action['date'] < action_date:
out_of_order.append(action)
else:
actions_list.append(action)
action_date = action['date']
action['action'] = action['action'].lower()
action_date = actions_list[0]['date']
for action in actions_list:
            # this takes care of the actions in between
for act in out_of_order:
if act['date'] < action_date:
o_index = out_of_order.index(act)
new_list.append(out_of_order.pop(o_index))
if act['date'] >= action_date and act['date'] < action['date']:
o_index = out_of_order.index(act)
new_list.append(out_of_order.pop(o_index))
new_list.append(action)
for act in out_of_order:
if act['date'] == action['date']:
o_index = out_of_order.index(act)
new_list.append(out_of_order.pop(o_index))
if out_of_order != []:
self.log("Unable to sort " + bill['bill_id'])
return bill
else:
bill['actions'] = new_list
return bill
```
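The vote-count parsing above pulls labelled tallies such as `AYES: 31` out of a single text blob with a regex and lower-cases the labels. A minimal sketch of that step in isolation; the vote string below is invented, only shaped like what the scraper strips off the page:
```python
import re

vote_string = "AYES: 31 NAYS: 27 NOT VOTING: 2 EXCUSED: 0"

v_count = re.compile(r'\b[A-Z]*\s*[A-z]*:\s\d*')
counts = {}
for chunk in v_count.findall(vote_string):
    label, value = chunk.split(':')
    # normalize "NOT VOTING" -> "not_voting", mirroring the scraper
    counts[label.strip().replace(' ', '_').lower()] = int(value.strip())

print(counts)  # e.g. {'ayes': 31, 'nays': 27, 'not_voting': 2, 'excused': 0}
```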
#### File: openstates/az/utils.py
```python
import re, datetime
doc_for_bills_url = 'http://www.azleg.gov/DocumentsForBill.asp?Bill_Number=%s&Session_ID=%s'
base_url = 'http://www.azleg.gov/'
select_session_url = 'http://www.azleg/SelectSession.asp.html'
def parse_link_id(link):
"""
extracts the div[@id] from the links on the DocumentsForBill pages
"""
return link.get('href')[link.get('href').find("'") + 1 : link.get('href').rfind("'")]
def get_bill_type(bill_id):
"""
bill_id = 'SJR2204'
get_bill_type(bill_id) --> 'joint resolution'
"""
prefix = re.match('([a-z]*)', bill_id.lower()).group()
if prefix in bill_types:
return bill_types[prefix]
else:
return 'bill'
def legislature_to_number(leg):
"""
Takes a full session and splits it down to the values for
FormatDocument.asp.
session = '49th-1st-regular'
legislature_to_number(session) --> '49Leg/1s'
"""
l = leg.lower().split('-')
return '%sLeg/%s%s' % (l[0][0:2], l[1][0], l[2][0])
def get_date(elem):
"""
    Returns a datetime object, or an empty string if the cell can't be parsed.
    Silly, but it saves some typing since a table cell might hold a date or
    might be empty.
"""
try:
return_date = datetime.datetime.strptime(elem.text_content().strip(), '%m/%d/%y')
except ValueError:
return_date = ''
return return_date
def img_check(elem):
"""
    Checks if the cell contains an image (or any text) and returns 'Y' or 'N';
    used to see if a calendar was modified, revised, or cancelled.
"""
img = elem.xpath('img')
if img:
return 'Y'
else:
text = elem.text_content().strip()
if text:
return 'Y'
else:
return 'N'
def get_rows(rows, header):
"""
takes the rows and header and returns a dict for each row with { key : <td> }
"""
header = [x.text_content().strip() for x in header]
keyed_rows = []
for r in rows:
dict_row = {}
for k,v in zip(header, r.xpath('td')):
dict_row.update({k:v})
keyed_rows.append(dict_row)
return keyed_rows
def get_actor(tr, chamber):
"""
gets the actor of a given action based on presence of a 'TRANSMIT TO' action
"""
actor = tr[0].text_content().strip()
if actor.startswith('H') or actor.startswith('S'):
actor = actor[0]
return {'H': 'lower', 'S': 'upper'}[actor]
else:
h_or_s = tr.xpath('ancestor::table[1]/preceding-sibling::' +
'table/tr/td/b[contains(text(), "TRANSMIT TO")]')
if h_or_s:
# actor is the last B element
h_or_s = h_or_s[-1].text_content().strip()
actor = 'upper' if h_or_s.endswith('SENATE:') else 'lower'
else:
actor = chamber
return actor
def get_committee_name(abbrv, chamber):
try:
return com_names[chamber][abbrv]
except KeyError:
return abbrv
com_names = {
'lower': {'APPROP': 'Appropriations',
'AW': 'Agriculture and Water',
'BI': 'Banking and Insurance',
'COM': 'Commerce',
'ED': 'Education',
'ENR': 'Energy and Natural Resources',
'ENV': 'Environment',
'ERA': 'Employment and Regulatory Affairs',
'GOV': 'Government',
'HEIR': 'Higher Education, Innovation and Reform',
'HHS': 'Health and Human Services',
'JUD': 'Judiciary',
'MAPS': 'Military Affairs and Public Safety',
'RULES': 'Rules',
'TI': 'Technology and Infrastructure',
'TRANS': 'Transportation',
'WM': 'Ways and Means'},
'upper': {'APPROP': 'Appropriations',
'BI': 'Banking and Insurance',
'BSFSS': 'Border Security, Federalism and States Sovereignty',
'CE': 'Commerce and Energy',
'ED': 'Education',
'EDJC': 'Economic Development and Jobs Creation',
'FIN': 'Finance',
'GR': 'Government Reform',
'HMLR': 'Healthcare and Medical Liability Reform',
'JUD': 'Judiciary',
'NRT': 'Natural Resources and Transportation',
'PSHS': 'Public Safety and Human Services',
'RULES': 'Rules',
'SUB APPROP HW': 'Appropriations',
'SUB APPROP RIEN': 'Appropriations',
'SUB APPROP TCJ': 'Appropriations',
'VMA': 'Veterans and Military Affairs',
'WLRD': 'Water, Land Use and Rural Development'}}
bill_types = {
'sb': 'bill',
'sm': 'memorial',
'sr': 'resolution',
'scr': 'concurrent resolution',
'scm': 'concurrent memorial',
    'sjr': 'joint resolution',
'hb': 'bill',
'hm': 'memorial',
'hr': 'resolution',
'hcr': 'concurrent resolution',
'hcm': 'concurrent memorial',
'hjr': 'joint resolution',
'mis': 'miscellaneous'
}
```
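A quick usage sketch for two of the helpers above. The `href` is a hypothetical JavaScript-style link of the shape `parse_link_id` slices, and the prefix map is a trimmed copy of `bill_types`, not the full table:
```python
import re

bill_types = {'hb': 'bill', 'hcr': 'concurrent resolution', 'hjr': 'joint resolution'}

def get_bill_type(bill_id):
    prefix = re.match('([a-z]*)', bill_id.lower()).group()
    return bill_types.get(prefix, 'bill')

print(get_bill_type('HJR2001'))  # joint resolution
print(get_bill_type('XYZ100'))   # bill (unknown prefixes fall back to 'bill')

# parse_link_id keeps the text between the first and last single quote of an href.
href = "javascript:ShowDocumentWindow('SomeDivId')"
print(href[href.find("'") + 1:href.rfind("'")])  # SomeDivId
```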
#### File: openstates/ca/__init__.py
```python
import datetime
import lxml.html
from .bills import CABillScraper
from .legislators import CALegislatorScraper
from .committees import CACommitteeScraper
from .events import CAEventScraper
settings = dict(SCRAPELIB_RPM=30)
metadata = dict(
name='California',
abbreviation='ca',
capitol_timezone='America/Los_Angeles',
legislature_name='California State Legislature',
legislature_url='http://www.legislature.ca.gov/',
chambers = {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'Assembly', 'title': 'Assemblymember'},
},
terms=[
{'name': '20092010',
'sessions': [
'20092010',
'20092010 Special Session 1',
'20092010 Special Session 2',
'20092010 Special Session 3',
'20092010 Special Session 4',
'20092010 Special Session 5',
'20092010 Special Session 6',
'20092010 Special Session 7',
'20092010 Special Session 8',
],
'start_year': 2009, 'end_year': 2010,
'start_date': datetime.date(2008, 12, 1),
},
{'name': '20112012',
'sessions': ['20112012 Special Session 1', '20112012'],
'start_year': 2011, 'end_year': 2012,
'start_date': datetime.date(2010, 12, 6),
},
{'name': '20132014',
'sessions': [
'20132014 Special Session 1', '20132014',
'20132014 Special Session 2'],
'start_year': 2013, 'end_year': 2014,
# 'start_date': datetime.date(2013, ??, ?),
},
{'name': '20152016',
'sessions': [
'20152016 Special Session 1',
'20152016 Special Session 2',
'20152016'],
'start_year': 2015, 'end_year': 2016,
},
],
session_details={
'20092010': {
'start_date': datetime.date(2008, 12, 1),
'display_name': '2009-2010 Regular Session',
'type': 'primary'
},
'20092010 Special Session 1': {
'type': 'special',
'display_name': '2009-2010, 1st Special Session',
},
'20092010 Special Session 2': {
'type': 'special',
'display_name': '2009-2010, 2nd Special Session',
},
'20092010 Special Session 3': {
'type': 'special',
'display_name': '2009-2010, 3rd Special Session',
},
'20092010 Special Session 4': {
'type': 'special',
'display_name': '2009-2010, 4th Special Session',
},
'20092010 Special Session 5': {
'type': 'special',
'display_name': '2009-2010, 5th Special Session',
},
'20092010 Special Session 6': {
'type': 'special',
'display_name': '2009-2010, 6th Special Session',
},
'20092010 Special Session 7': {
'type': 'special',
'display_name': '2009-2010, 7th Special Session',
},
'20092010 Special Session 8': {
'type': 'special',
'display_name': '2009-2010, 8th Special Session',
},
'20112012 Special Session 1': {
'type': 'special',
'display_name': '2011-2012, 1st Special Session',
},
'20112012': {
'start_date': datetime.date(2010, 12, 6),
'display_name': '2011-2012 Regular Session',
'type': 'primary'
},
'20132014': {
# 'start_date': datetime.date(2013, ?, ?),
'display_name': '2013-2014 Regular Session',
'type': 'primary'
},
'20132014 Special Session 1': {
# 'start_date': datetime.date(2013, ?, ?),
'display_name': '2013-2014, 1st Special Session',
'type': 'special'
},
'20132014 Special Session 2': {
# 'start_date': datetime.date(2013, ?, ?),
'display_name': '2013-2014, 2nd Special Session',
'type': 'special'
},
'20152016': {
# 'start_date': datetime.date(2013, ?, ?),
'display_name': '2015-2016 Regular Session',
'_scraped_name': "2015-2016",
'type': 'primary'
},
'20152016 Special Session 1': {
'display_name': '2015-2016, 1st Special Session',
'type': 'special'
},
'20152016 Special Session 2': {
'display_name': '2015-2016, 2nd Special Session',
'type': 'special'
},
},
feature_flags=['subjects', 'influenceexplorer'],
_ignored_scraped_sessions = [
'2013-2014',
'2011-2012',
'2009-2010',
'2007-2008',
'2005-2006',
'2003-2004',
'2001-2002',
'1999-2000',
'1997-1998',
'1995-1996',
'1993-1994'
]
)
def session_list():
from billy.scrape.utils import url_xpath
import re
sessions = url_xpath('http://www.leginfo.ca.gov/bilinfo.html',
"//select[@name='sess']/option/text()")
sessions = [
re.findall('\(.*\)', session)[0][1:-1] \
for session in sessions
]
return sessions
def extract_text(doc, data):
doc = lxml.html.fromstring(data)
divs_to_try = ['//div[@id="bill"]', '//div[@id="bill_all"]']
for xpath in divs_to_try:
div = doc.xpath(xpath)
if div:
return div[0].text_content()
```
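`extract_text` just tries a couple of known div ids and returns the first one's text. A minimal sketch against an inline HTML string instead of real bill data (the markup is invented, and the unused first argument of the original is dropped here):
```python
import lxml.html

def extract_text(data):
    doc = lxml.html.fromstring(data)
    for xpath in ('//div[@id="bill"]', '//div[@id="bill_all"]'):
        div = doc.xpath(xpath)
        if div:
            return div[0].text_content()

html = '<html><body><div id="bill"><p>An act relating to examples.</p></div></body></html>'
print(extract_text(html))  # An act relating to examples.
```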
#### File: openstates/ca/legislators.py
```python
import re
import collections
from operator import methodcaller
import lxml.html
from billy.scrape.legislators import LegislatorScraper, Legislator
def parse_address(s, split=re.compile(r'[;,]\s{,3}').split):
'''
Extract address fields from text.
'''
# If the address isn't formatted correctly, skip for now.
if ';' not in s:
return []
fields = 'city state_zip phone'.split()
vals = split(s)
res = []
while True:
try:
_field = fields.pop()
_value = vals.pop()
except IndexError:
break
else:
if _value.strip():
res.append((_field, _value))
if vals:
res.append(('street', ', '.join(vals)))
return res
class CALegislatorScraper(LegislatorScraper):
jurisdiction = 'ca'
urls = {'upper': 'http://senate.ca.gov/senators',
'lower': 'http://assembly.ca.gov/assemblymembers'}
def scrape(self, chamber, term):
url = self.urls[chamber]
html = self.get(url).text
doc = lxml.html.fromstring(html)
if chamber == 'lower':
rows = doc.xpath('//table/tbody/tr')
parse = self.parse_assembly
else:
rows = doc.xpath('//div[contains(@class, "views-row")]')
parse = self.parse_senate
for tr in rows:
legislator = parse(tr, term, chamber)
if legislator is None:
continue
if 'Vacant' in legislator['full_name']:
continue
legislator.add_source(url)
legislator['full_name'] = legislator['full_name'].strip()
self.save_legislator(legislator)
def parse_senate(self, div, term, chamber):
name = div.xpath('.//h3/text()')[0]
if name.endswith(' (R)'):
party = 'Republican'
elif name.endswith(' (D)'):
party = 'Democratic'
else:
self.warning('skipping ' + name)
return None
name = name.split(' (')[0]
district = div.xpath(
'.//div[contains(@class, "senator-district")]/div/text()'
)[0].strip().lstrip('0')
photo_url = div.xpath('.//img/@src')[0]
url = div.xpath('.//a/@href')[0]
leg = Legislator(term, chamber, full_name=name, party=party, district=district,
photo_url=photo_url, url=url)
for addr in div.xpath('.//div[contains(@class, "views-field-field-senator-capitol-office")]//p'):
addr, phone = addr.text_content().split('; ')
leg.add_office(
'capitol', 'Senate Office',
address=addr.strip(), phone=phone.strip())
for addr in div.xpath('.//div[contains(@class, "views-field-field-senator-district-office")]//p'):
for addr in addr.text_content().strip().splitlines():
try:
addr, phone = addr.strip().replace(u'\xa0', ' ').split('; ')
leg.add_office(
'district', 'District Office',
address=addr.strip(), phone=phone.strip())
except ValueError:
addr = addr.strip().replace(u'\xa0', ' ')
leg.add_office('district', 'District Office', address=addr)
return leg
def parse_assembly(self, tr, term, chamber):
'''
Given a tr element, get specific data from it.
'''
strip = methodcaller('strip')
xpath = 'td[contains(@class, "views-field-field-%s-%s")]%s'
xp = {
'url': [('lname-sort', '/a[not(contains(text(), "edit"))]/@href')],
'district': [('district', '/text()')],
'party': [('party', '/text()')],
'full_name': [('office-information', '/a[not(contains(text(), "edit"))]/text()')],
'address': [('office-information', '/h3/following-sibling::text()'),
('office-information', '/p/text()')]
}
titles = {'upper': 'senator', 'lower': 'member'}
funcs = {
'full_name': lambda s: re.sub( # "Assembly" is misspelled once
r'Contact Assembl?y Member', '', s).strip(),
'address': parse_address,
}
rubberstamp = lambda _: _
tr_xpath = tr.xpath
res = collections.defaultdict(list)
for k, xpath_info in xp.items():
for vals in xpath_info:
f = funcs.get(k, rubberstamp)
vals = (titles[chamber],) + vals
vals = map(f, map(strip, tr_xpath(xpath % vals)))
res[k].extend(vals)
# Photo.
try:
res['photo_url'] = tr_xpath('td/p/img/@src')[0]
except IndexError:
pass
# Addresses.
addresses = res['address']
try:
addresses = map(dict, filter(None, addresses))
except ValueError:
# Sometimes legislators only have one address, in which
# case this awful hack is helpful.
addresses = map(dict, filter(None, [addresses]))
for address in addresses[:]:
# Toss results that don't have required keys.
if not set(['street', 'city', 'state_zip']) < set(address):
if address in addresses:
addresses.remove(address)
# Re-key the addresses
offices = []
if addresses:
# <NAME>'s addresses wouldn't parse correctly as of
# 3/23/2013, so here we're forced to test whether any
# addresses were even found.
addresses[0].update(type='capitol', name='Capitol Office')
offices.append(addresses[0])
for office in addresses[1:]:
office.update(type='district', name='District Office')
offices.append(office)
for office in offices:
street = office['street']
state_zip = re.sub(r'\s+', ' ', office['state_zip'])
street = '%s\n%s, %s' % (street, office['city'], state_zip)
office['address'] = street
office['fax'] = None
office['email'] = None
del office['street'], office['city'], office['state_zip']
res['offices'] = offices
del res['address']
# Remove junk from assembly member names.
junk = 'Contact Assembly Member '
try:
res['full_name'] = res['full_name'].pop().replace(junk, '')
except IndexError:
return
# Normalize party.
for party in res['party'][:]:
if party:
if party == 'Democrat':
party = 'Democratic'
res['party'] = party
break
else:
res['party'] = None
# <NAME> also didn't have a url that lxml would parse
# as of 3/22/2013.
if res['url']:
res['url'] = res['url'].pop()
else:
del res['url']
# strip leading zero
res['district'] = str(int(res['district'].pop()))
# Add a source for the url.
leg = Legislator(term, chamber, **res)
leg.update(**res)
return leg
```
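`parse_address` works backwards from the end of the semicolon/comma separated string, assigning `phone`, `state_zip`, and `city` first and collapsing whatever is left into `street`. A small sketch with a fabricated address string; the function is a condensed, behaviorally equivalent restatement of the one above so the snippet runs standalone:
```python
import re

def parse_address(s, split=re.compile(r'[;,]\s{,3}').split):
    if ';' not in s:
        return []
    fields = 'city state_zip phone'.split()
    vals = split(s)
    res = []
    while fields and vals:
        field, value = fields.pop(), vals.pop()
        if value.strip():
            res.append((field, value))
    if vals:
        res.append(('street', ', '.join(vals)))
    return res

print(parse_address('P.O. Box 942849, Room 101; Sacramento, CA 94249-0001; (916) 555-0100'))
# [('phone', '(916) 555-0100'), ('state_zip', 'CA 94249-0001'),
#  ('city', 'Sacramento'), ('street', 'P.O. Box 942849, Room 101')]
```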
#### File: openstates/co/actions.py
```python
import re
from billy.scrape.actions import Rule, BaseCategorizer
committees = [
u'Agriculture, Livestock (?:and|&) Natural Resources',
u'Finance',
u'Joint Budget Committee',
u'Appropriations',
u'Health (?:and|&) Environment',
u'Transportation',
u'Education',
u'Agriculture, Livestock, (?:and|&) Natural Resources',
u'Judiciary',
u'Legal Services',
u'State, Veterans (?:and|&) Military Affairs',
u'Economic (?:and|&) Business Development',
u'Local Government',
u'Congressional Redistricting',
u'Legislative Council',
u'State Veterans, (?:and|&) Military Affairs',
u'Health (?:and|&) Environment',
u'Legislative Audit',
u'Capital Development',
u'State, Veterans, (?:and|&) Military Affairs',
u'State, Veterans, (?:and|&) Military Affairs',
u'Executive Committee of Legislative Council',
u'Health (?:and|&) Environment',
u'Finance',
u'Appropriations',
u'Agriculture, Natural Resources (?:and|&) Energy',
u'Judiciary',
u'Business, Labor (?:and|&) Technology',
u'Health (?:and|&) Human Services',
u'State, Veterans (?:and|&) Military Affairs',
u'Local Government',
u'Legislative Audit',
u'Executive Committee of Legislative Council',
u'Transportation',
u'Health (?:and|&) Human Services',
u'Education',
u'Legislative Council',
u'Legal Services',
u'Capital Development',
u'Transportation (?:and|&) Energy',
u'Joint Budget Committee',
u'Business, Labor, (?:and|&) Technology',
u'State, Veterans, (?:and|&) Military Affairs'
]
rules = (
Rule('^House', actor='lower'),
Rule('^Senate', actor='upper'),
Rule('^Introduced in Senate', actor='upper'),
Rule('^Introduced in House', actor='lower'),
Rule('^Governor', actor='governor'),
Rule('Governor Action - Partial Veto', 'governor:vetoed:line-item'),
Rule('Sent to the Governor', 'governor:received'),
Rule('Governor Action - Signed', 'governor:signed'),
Rule('Governor Signed', 'governor:signed'),
Rule('Governor Action - Vetoed', 'governor:vetoed'),
Rule(r'^Introduced', 'bill:introduced'),
Rule(r'Assigned to (?P<committees>.+)'),
Rule(u'(?i)refer (un)?amended to (?P<committees>.+)',
[u'committee:referred']),
Rule(u'(?i)\S+ Committee on (?P<committees>.+?) Refer (un)amended'),
Rule(u'Second Reading Passed', [u'bill:reading:2']),
Rule(u'Third Reading Passed', ['bill:reading:3', 'bill:passed'])
)
committees_rgx = '(%s)' % '|'.join(
sorted(committees, key=len, reverse=True))
class Categorizer(BaseCategorizer):
rules = rules
def categorize(self, text):
'''Wrap categorize and add boilerplate committees.
'''
attrs = BaseCategorizer.categorize(self, text)
if 'committees' in attrs:
committees = attrs['committees']
for committee in re.findall(committees_rgx, text, re.I):
if committee not in committees:
committees.append(committee)
return attrs
def post_categorize(self, attrs):
res = set()
if 'legislators' in attrs:
for text in attrs['legislators']:
rgx = r'(,\s+(?![a-z]\.)|\s+and\s+)'
legs = re.split(rgx, text)
legs = filter(lambda x: x not in [', ', ' and '], legs)
res |= set(legs)
attrs['legislators'] = list(res)
res = set()
if 'committees' in attrs:
for text in attrs['committees']:
for committee in text.split(' + '):
res.add(committee.strip())
attrs['committees'] = list(res)
return attrs
```
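`committees_rgx` is just an alternation of the known committee names, sorted longest-first so that longer names win. A small sketch of how it pulls committee mentions out of an action string; the action text and the three-entry committee list are invented:
```python
import re

committees = [u'Appropriations', u'Finance', u'Health (?:and|&) Human Services']
committees_rgx = '(%s)' % '|'.join(sorted(committees, key=len, reverse=True))

action = 'Introduced In House - Assigned to Health & Human Services + Appropriations'
print(re.findall(committees_rgx, action, re.I))
# ['Health & Human Services', 'Appropriations']
```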
#### File: openstates/ct/utils.py
```python
import re
import datetime
import collections
import chardet
import unicodecsv
try:
import cStringIO as StringIO
except ImportError:
import StringIO
def open_csv(data):
char_encoding = chardet.detect(data.content)['encoding']
return unicodecsv.DictReader(StringIO.StringIO(data.content),
encoding=char_encoding)
Listing = collections.namedtuple('Listing', 'mtime size filename')
def parse_directory_listing(text):
files = []
dir_re = r'^(\d\d-\d\d-\d\d\s+\d\d:\d\d(AM|PM))\s+(\d+)\s+(.*\.htm)\s+$'
for match in re.finditer(dir_re, text, re.MULTILINE):
mtime = datetime.datetime.strptime(match.group(1),
"%m-%d-%y %I:%M%p")
files.append(Listing(mtime=mtime, size=int(match.group(3)),
filename=match.group(4)))
return files
```
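`parse_directory_listing` expects plain-text directory listings with a timestamp, a size, and an `.htm` filename per line. A sketch against a single fabricated listing line:
```python
import re
import datetime
import collections

Listing = collections.namedtuple('Listing', 'mtime size filename')

def parse_directory_listing(text):
    files = []
    dir_re = r'^(\d\d-\d\d-\d\d\s+\d\d:\d\d(AM|PM))\s+(\d+)\s+(.*\.htm)\s+$'
    for match in re.finditer(dir_re, text, re.MULTILINE):
        mtime = datetime.datetime.strptime(match.group(1), "%m-%d-%y %I:%M%p")
        files.append(Listing(mtime=mtime, size=int(match.group(3)),
                             filename=match.group(4)))
    return files

listing = "01-05-15  10:32AM       12345 example-bill-text.htm \n"
print(parse_directory_listing(listing))
# [Listing(mtime=datetime.datetime(2015, 1, 5, 10, 32), size=12345, filename='example-bill-text.htm')]
```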
#### File: openstates/de/actions.py
```python
import re
from billy.scrape.actions import Rule, BaseCategorizer
rules = (
Rule([u'Amendment (?P<bills>.+?) -\s+Laid On Table'], ['amendment:tabled']),
Rule([u'Favorable'], ['committee:passed:favorable']),
Rule([u'(?i)Amendment (?P<bills>.+?) defeated'], ['amendment:failed']),
Rule([u'(?i)introduced and adopted in lieu of (?P<bills>.+)'],
['bill:introduced']),
Rule([u'(?i)assigned to (?P<committees>.+?) Committee in'],
['committee:referred', 'bill:introduced']),
Rule([u'Signed by Governor'], ['governor:signed']),
Rule([u'(?i)Amendment (?P<bills>[\w\s]+?) Introduced'],
['amendment:introduced']),
Rule([u'Amendment (?P<bills>.+?) - Passed'], ['amendment:passed']),
Rule([u'^Passed by'], ['bill:passed']),
Rule([u'^Defeated'], ['bill:failed']),
Rule([u'(?i)unfavorable'], ['committee:passed:unfavorable']),
Rule([u'Reported Out of Committee \((?P<committees>.+?)\)'],
['committee:passed']),
Rule([u'Vetoed by Governor'], ['governor:vetoed']),
Rule([u'(?i)Amendment (?P<bills>.+?)\s+-\s+Introduced'],
['amendment:introduced']),
Rule([u'(?i)Amendment (?P<bills>[\w\s]+?) Passed'], ['amendment:passed']),
Rule([u'Amendment (?P<bills>.+?) - Defeated by House of .+?\. Votes: Defeated'],
['amendment:failed']),
Rule([u'^Introduced'], ['bill:introduced']),
Rule([u'Amendment (?P<bills>.+?) - Defeated in House'], ['amendment:failed']),
Rule([u'^Passed in House'], ['bill:passed'])
)
class Categorizer(BaseCategorizer):
rules = rules
def post_categorize(self, attrs):
res = set()
if 'legislators' in attrs:
for text in attrs['legislators']:
rgx = r'(,\s+(?![a-z]\.)|\s+and\s+)'
legs = re.split(rgx, text)
legs = filter(lambda x: x not in [', ', ' and '], legs)
res |= set(legs)
attrs['legislators'] = list(res)
res = set()
if 'committees' in attrs:
for text in attrs['committees']:
text = text.strip()
res.add(text)
attrs['committees'] = list(res)
return attrs
def get_actor(action_text, chamber, rgxs=(
(re.compile(r'(in|by) senate', re.I), 'upper'),
(re.compile(r'(in|by) house', re.I), 'lower'),
(re.compile(r'by governor', re.I), 'governor'),
)):
'''Guess the actor for a particular action.
'''
for r, actor in rgxs:
m = r.search(action_text)
if m:
return actor
return chamber
```
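`get_actor` above just walks a list of compiled patterns and falls back to the bill's own chamber when nothing matches. A tiny sketch with invented action strings:
```python
import re

def get_actor(action_text, chamber, rgxs=(
        (re.compile(r'(in|by) senate', re.I), 'upper'),
        (re.compile(r'(in|by) house', re.I), 'lower'),
        (re.compile(r'by governor', re.I), 'governor'))):
    for rgx, actor in rgxs:
        if rgx.search(action_text):
            return actor
    return chamber

print(get_actor('Passed by Senate. Votes: 21 YES 0 NO', 'lower'))  # upper
print(get_actor('Signed by Governor', 'lower'))                    # governor
print(get_actor('Stricken from agenda', 'lower'))                  # lower (fallback)
```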
#### File: openstates/de/legislators.py
```python
import re
import lxml.html
from openstates.utils import LXMLMixin
from billy.scrape.legislators import LegislatorScraper, Legislator
class DELegislatorScraper(LegislatorScraper,LXMLMixin):
jurisdiction = 'de'
def scrape(self, chamber, term):
url = {
'upper': 'http://legis.delaware.gov/legislature.nsf/sen?openview',
'lower': 'http://legis.delaware.gov/Legislature.nsf/Reps?openview',
}[chamber]
doc = self.lxmlize(url)
if chamber == "upper":
#for the senate, it's the same table
#but the html is hard-coded in js.
table_js = doc.xpath('.//script')[-1].text_content()
table = None
for line in table_js.split("\n"):
if line.strip().startswith("var") and "sen=" in line:
table = line.replace("var","")
table = table.replace('sen="<','<')
table = table.replace('>";','>')
break
assert table is not None, "Senate table could not be found"
table = lxml.html.fromstring(table)
table.make_links_absolute(url)
trs = table.xpath('//tr')
else:
#same table for the house, but kindly in actual html
trs = doc.xpath('//tr')
base_url = "http://legis.delaware.gov"
for tr in trs:
name_and_url = tr.xpath('.//a')[0]
bio_url = name_and_url.attrib["href"]
bio_url = bio_url.replace("JavaScript:window.top.location.href=","")
bio_url = bio_url.replace('"','')
name = name_and_url.text_content()
if name.strip() == "." or name.strip() == "":
continue
if name.strip().lower().startswith("vacant"):
continue
re_spaces=re.compile(r'\s{1,5}')
name = ' '.join(re_spaces.split(name))
district = tr.xpath('.//td')[2].text_content()
district = district.replace("District:","").strip()
leg = self.scrape_bio(term, chamber, district, name, bio_url)
leg.add_source(bio_url, page="legislator detail page")
leg.add_source(url, page="legislator list page")
self.save_legislator(leg)
def scrape_bio(self, term, chamber, district, name, url):
# this opens the committee section without having to do another request
url += '&TableRow=1.5.5'
frame_doc = self.lxmlize(url)
actual_url = frame_doc.xpath("//frame[@name='right']/@src")[0]
doc = self.lxmlize(actual_url)
# party is in one of these
party = doc.xpath('//div[@id="page_header"]')[0].text.strip()[-3:]
if '(D)' in party:
party = 'Democratic'
elif '(R)' in party:
party = 'Republican'
else:
raise AssertionError("No party found for {name}".format(name=name))
leg = Legislator(term, chamber, district, name, party=party)
photo_url = doc.xpath('//img[contains(@src, "jpg")]/@src')
if photo_url:
leg['photo_url'] = photo_url[0]
contact_info = self.scrape_contact_info(doc)
leg.update(contact_info)
return leg
def scrape_contact_info(self, doc):
# Email
email = doc.xpath(".//a[contains(@href,'mailto')]")
email = email[0].text_content().strip()
leg_email = None
dist_email = None
try:
emails = email.split(";")
except AttributeError:
pass
else:
for e in emails:
e = e.strip()
if e:
if "state.de.us" in e:
leg_email = e
else:
dist_email = e
# Offices
leg_office = dict(name="Capitol Office", type="capitol",
phone=None, fax=None, email=leg_email, address=None)
dist_office = dict(name="Outside Office", type="capitol",
phone=None,fax=None, email=dist_email, address=None)
#this is enormously painful, DE.
office_list = doc.xpath("//tr")
for office in office_list:
title_td = 0
#in some trs the photo is the first td
if len(office.xpath("./td/img")) > 0:
title_td = 1
try:
title_text = office.xpath("./td")[title_td].text_content().lower()
content = office.xpath("./td")[title_td+1].text_content()
except IndexError:
continue
leg_office = self.add_contact("legislative",
title_text,content,leg_office)
dist_office = self.add_contact("outside",
title_text,content,dist_office)
offices = [o for o in [leg_office,dist_office] if o["address"]]
assert len(offices) > 0, "No offices with addresses found "\
"make sure we're not losing any data."
return {"offices":offices}
def add_contact(self,office_type,
title_text,content,office):
#office type is the name of the office
#either "legislative" or "outside"
if "{} office".format(office_type) in title_text:
office["address"] = content.strip()
if "{} phone".format(office_type) in title_text:
phones = content.lower().split("\n")
if len(phones) == 1:
phone = self.clean_phone(phones[0])
if phone:
office["phone"] = phone
else:
for line in phones:
if "phone" in line:
phone = self.clean_phone(line)
if phone:
office["phone"] = phone
elif "fax" in line:
phone = self.clean_phone(line)
if phone:
office["fax"] = phone
return office
def clean_phone(self,phone):
if not phone.strip():
return
if not re.search("\d",phone):
return
        if ":" not in phone:
return phone
return phone.split(":")[1].strip()
```
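`clean_phone` is defensive about the junk that shows up in the contact cells: empty or digit-free strings are dropped, and a `phone:` style label is stripped when present. A quick sketch with made-up inputs, restated as a standalone function:
```python
import re

def clean_phone(phone):
    if not phone.strip():
        return None
    if not re.search(r"\d", phone):
        return None
    if ":" not in phone:
        return phone
    return phone.split(":")[1].strip()

print(clean_phone("phone: 302-555-0143"))  # 302-555-0143
print(clean_phone("302-555-0143"))         # 302-555-0143 (nothing to strip)
print(clean_phone("phone: tbd"))           # None (no digits)
```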
#### File: openstates/fl/committees.py
```python
import re
from itertools import chain
from billy.scrape import NoDataForPeriod
from billy.scrape.committees import CommitteeScraper, Committee
import lxml.html
class FLCommitteeScraper(CommitteeScraper):
jurisdiction = 'fl'
def scrape(self, chamber, term):
self.validate_term(term, latest_only=True)
if chamber == 'upper':
self.scrape_upper_committees()
else:
self.scrape_lower_committees()
def scrape_upper_committees(self):
url = "http://flsenate.gov/Committees/"
page = self.get(url).text
page = lxml.html.fromstring(page)
page.make_links_absolute(url)
path = "//a[contains(@href, 'Committees/Show')]"
for link in page.xpath(path):
comm_name = link.text.strip()
if comm_name.startswith('Joint'):
continue
if 'Subcommittee on' in comm_name:
comm_name, sub_name = comm_name.split(' Subcommittee on ')
else:
comm_name, sub_name = comm_name, None
comm = Committee('upper', comm_name, sub_name)
self.scrape_upper_committee(comm, link.attrib['href'])
if comm['members']:
self.save_committee(comm)
def scrape_upper_committee(self, comm, url):
page = self.get(url).text
page = lxml.html.fromstring(page)
comm.add_source(url)
path = "//a[contains(@href, 'Senators')]/name"
seen = set()
for name in page.xpath(path):
dt = name.xpath("../../preceding-sibling::dt")
if dt:
mtype = dt[0].text.strip(': \r\n\t').lower()
else:
mtype = 'member'
member = re.sub(r'\s+', ' ', name.text.strip())
if (member, mtype) not in seen:
comm.add_member(member, mtype)
seen.add((member, mtype))
def scrape_lower_committees(self):
url = ("http://www.myfloridahouse.gov/Sections/Committees/"
"committees.aspx")
page = self.get(url).text
page = lxml.html.fromstring(page)
page.make_links_absolute(url)
for link in page.xpath("//a[contains(@href, 'CommitteeId')]"):
comm_name = link.text.strip()
if 'Committee' in comm_name:
parent = re.sub(r'Committee$', '', comm_name).strip()
sub = None
else:
sub = re.sub(r'Subcommittee$', '', comm_name).strip()
comm = Committee('lower', parent, sub)
self.scrape_lower_committee(comm, link.get('href'))
if comm['members']:
self.save_committee(comm)
for link in page.xpath('//a[contains(@href, "committees/joint")]/@href'):
self.scrape_joint_committee(link)
def scrape_joint_committee(self, url):
html = self.get(url).text
doc = lxml.html.fromstring(html)
name = doc.xpath('//h1/text()') or doc.xpath('//h2/text()')
name = name[0].strip()
comm = Committee('joint', name)
comm.add_source(url)
members = chain(doc.xpath('//a[contains(@href, "MemberId")]'),
doc.xpath('//a[contains(@href, "Senators")]'))
seen = set()
for a in members:
parent_content = a.getparent().text_content()
if ':' in parent_content:
title = parent_content.split(':')[0].strip()
else:
title = 'member'
name = a.text.split(' (')[0].strip()
if (name, title) not in seen:
comm.add_member(name, title)
seen.add((name, title))
if comm['members']:
self.save_committee(comm)
def scrape_lower_committee(self, comm, url):
page = self.get(url).text
page = lxml.html.fromstring(page)
comm.add_source(url)
for link in page.xpath("//p[@class='committeelinks']/a[contains(@href, 'MemberId')]"):
# strip off spaces and everything in [R/D]
name = link.text.strip().split(' [')[0]
# membership type span follows link
mtype = link.getnext().text_content().strip()
if not mtype:
mtype = 'member'
comm.add_member(name, mtype)
```
#### File: openstates/fl/events.py
```python
import re
import datetime
from billy.scrape.events import EventScraper, Event
import feedparser
class FLEventScraper(EventScraper):
jurisdiction = 'fl'
def scrape(self, chamber, session):
self.scrape_upper_events(session)
def scrape_upper_events(self, session):
url = "https://www.flsenate.gov/Tracker/RSS/DailyCalendar"
page = self.get(url).text
feed = feedparser.parse(page)
for entry in feed['entries']:
#The feed breaks the RSS standard by making the pubdate the actual event's date, not the RSS item publish date
when = datetime.datetime(*entry['published_parsed'][:6])
desc = entry['summary'].split(' - ')[0]
location = entry['summary'].split(' - ')[1]
event = Event(session, when, 'committee:meeting',
desc, location)
event.add_source(entry['link'])
self.save_event(event)
```
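As the comment in `scrape_upper_events` notes, the feed abuses `pubDate` to carry the event's own date, so the scraper builds the datetime straight from `published_parsed` and splits the summary on " - ". A sketch of that conversion on a hand-written feed entry rather than the live RSS:
```python
import datetime
import feedparser

# Minimal invented RSS item standing in for the Senate daily calendar feed.
rss = """<rss version="2.0"><channel><title>Daily Calendar</title>
<item>
  <title>Committee Meeting</title>
  <link>http://www.flsenate.gov/example</link>
  <description>Appropriations - 412 Knott Building</description>
  <pubDate>Tue, 03 Mar 2015 09:00:00 EST</pubDate>
</item>
</channel></rss>"""

entry = feedparser.parse(rss)['entries'][0]
when = datetime.datetime(*entry['published_parsed'][:6])
desc, location = entry['summary'].split(' - ', 1)
print(when, desc, location)  # published_parsed is normalized to UTC by feedparser
```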
#### File: openstates/ga/bills.py
```python
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
from collections import defaultdict
from .util import get_client, get_url, backoff
# Methods (7):
# GetLegislationDetail(xs:int LegislationId, )
#
# GetLegislationDetailByDescription(ns2:DocumentType DocumentType,
# xs:int Number, xs:int SessionId)
#
# GetLegislationForSession(xs:int SessionId, )
#
# GetLegislationRange(ns2:LegislationIndexRangeSet Range, )
#
# GetLegislationRanges(xs:int SessionId,
# ns2:DocumentType DocumentType, xs:int RangeSize, )
#
# GetLegislationSearchResultsPaged(ns2:LegislationSearchConstraints
# Constraints, xs:int PageSize,
# xs:int StartIndex, )
# GetTitles()
member_cache = {}
SOURCE_URL = "http://www.legis.ga.gov/Legislation/en-US/display/{session}/{bid}"
class GABillScraper(BillScraper):
jurisdiction = 'ga'
lservice = get_client("Legislation").service
vservice = get_client("Votes").service
mservice = get_client("Members").service
lsource = get_url("Legislation")
msource = get_url("Members")
vsource = get_url("Votes")
def get_member(self, member_id):
if member_id in member_cache:
return member_cache[member_id]
mem = backoff(self.mservice.GetMember, member_id)
member_cache[member_id] = mem
return mem
def scrape(self, session, chambers):
sid = self.metadata['session_details'][session]['_guid']
legislation = backoff(
self.lservice.GetLegislationForSession,
sid
)['LegislationIndex']
for leg in legislation:
lid = leg['Id']
instrument = backoff(self.lservice.GetLegislationDetail, lid)
history = [x for x in instrument['StatusHistory'][0]]
actions = reversed([{
"code": x['Code'],
"action": x['Description'],
"_guid": x['Id'],
"date": x['Date']
} for x in history])
guid = instrument['Id']
bill_type = instrument['DocumentType']
chamber = {
"H": "lower",
"S": "upper",
"J": "joint"
}[bill_type[0]] # XXX: This is a bit of a hack.
bill_id = "%s %s" % (
bill_type,
instrument['Number'],
)
if instrument['Suffix']:
bill_id += instrument['Suffix']
title = instrument['Caption']
description = instrument['Summary']
if title is None:
continue
bill = Bill(
session,
chamber,
bill_id,
title,
description=description,
_guid=guid
)
if instrument['Votes']:
for vote_ in instrument['Votes']:
_, vote_ = vote_
vote_ = backoff(self.vservice.GetVote, vote_[0]['VoteId'])
vote = Vote(
{"House": "lower", "Senate": "upper"}[vote_['Branch']],
vote_['Date'],
vote_['Caption'] or "Vote on Bill",
(vote_['Yeas'] > vote_['Nays']),
vote_['Yeas'],
vote_['Nays'],
(vote_['Excused'] + vote_['NotVoting']),
session=session,
bill_id=bill_id,
bill_chamber=chamber)
vote.add_source(self.vsource)
methods = {"Yea": vote.yes, "Nay": vote.no,}
for vdetail in vote_['Votes'][0]:
whom = vdetail['Member']
how = vdetail['MemberVoted']
try:
m = methods[how]
except KeyError:
m = vote.other
m(whom['Name'])
bill.add_vote(vote)
types = {
"HI": ["other"],
"SI": ["other"],
"HH": ["other"],
"SH": ["other"],
"HPF": ["bill:introduced"],
"HDSAS": ["other"],
"SPF": ["bill:introduced"],
"HSR": ["bill:reading:2"],
"SSR": ["bill:reading:2"],
"HFR": ["bill:reading:1"],
"SFR": ["bill:reading:1"],
"HRECM": ["bill:withdrawn", "committee:referred"],
"SRECM": ["bill:withdrawn", "committee:referred"],
"SW&C": ["bill:withdrawn", "committee:referred"],
"HW&C": ["bill:withdrawn", "committee:referred"],
"HRA": ["bill:passed"],
"SRA": ["bill:passed"],
"HPA": ["bill:passed"],
"HRECO": ["other"],
"SPA": ["bill:passed"],
"HTABL": ["other"], # "House Tabled" - what is this?
"SDHAS": ["other"],
"HCFR": ["committee:passed:favorable"],
"SCFR": ["committee:passed:favorable"],
"HRAR": ["committee:referred"],
"SRAR": ["committee:referred"],
"STR": ["bill:reading:3"],
"SAHAS": ["other"],
"SE": ["bill:passed"],
"SR": ["committee:referred"],
"HTRL": ["bill:reading:3", "bill:failed"],
"HTR": ["bill:reading:3"],
"S3RLT": ["bill:reading:3", "bill:failed"],
"HASAS": ["other"],
"S3RPP": ["other"],
"STAB": ["other"],
"SRECO": ["other"],
"SAPPT": ["other"],
"HCA": ["other"],
"HNOM": ["other"],
"HTT": ["other"],
"STT": ["other"],
"SRECP": ["other"],
"SCRA": ["other"],
"SNOM": ["other"],
"S2R": ["bill:reading:2"],
"H2R": ["bill:reading:2"],
"SENG": ["bill:passed"],
"HENG": ["bill:passed"],
"HPOST": ["other"],
"HCAP": ["other"],
"SDSG": ["governor:signed"],
"SSG": ["governor:received"],
"Signed Gov": ["governor:signed"],
"HDSG": ["governor:signed"],
"HSG": ["governor:received"],
"EFF": ["other"],
"HRP": ["other"],
"STH": ["other"],
"HTS": ["other"],
}
ccommittees = defaultdict(list)
committees = instrument['Committees']
if committees:
for committee in committees[0]:
ccommittees[{
"House": "lower",
"Senate": "upper",
}[committee['Type']]].append(committee['Name'])
for action in actions:
chamber = {
"H": "lower",
"S": "upper",
"E": "other", # Effective Date
}[action['code'][0]]
try:
_types = types[action['code']]
except KeyError:
self.debug(action)
_types = ["other"]
committees = []
if any(('committee' in x for x in _types)):
committees = [str(x) for x in ccommittees.get(chamber, [])]
bill.add_action(chamber, action['action'], action['date'], _types,
committees=committees,
_code=action['code'],
_code_id=action['_guid'])
sponsors = []
if instrument['Authors']:
sponsors = instrument['Authors']['Sponsorship']
if 'Sponsors' in instrument and instrument['Sponsors']:
sponsors += instrument['Sponsors']['Sponsorship']
sponsors = [
(x['Type'], self.get_member(x['MemberId'])) for x in sponsors
]
for typ, sponsor in sponsors:
name = "{First} {Last}".format(**dict(sponsor['Name']))
bill.add_sponsor(
                    'primary' if 'Author' in typ else 'secondary',
name
)
for version in instrument['Versions']['DocumentDescription']:
name, url, doc_id, version_id = [
version[x] for x in [
'Description',
'Url',
'Id',
'Version'
]
]
bill.add_version(
name,
url,
mimetype='application/pdf',
_internal_document_id=doc_id,
_version_id=version_id
)
versions = sorted(
bill['versions'],
key=lambda x: x['_internal_document_id']
)
bill['versions'] = versions
bill.add_source(self.msource)
bill.add_source(self.lsource)
bill.add_source(SOURCE_URL.format(**{
"session": session,
"bid": guid,
}))
self.save_bill(bill)
```
#### File: openstates/ga/committees.py
```python
import time
from billy.scrape.committees import CommitteeScraper, Committee
from .util import get_client, get_url, backoff
CTTIE_URL = ("http://www.house.ga.gov/COMMITTEES/en-US/committee.aspx?"
"Committee={cttie}&Session={sid}")
class GACommitteeScraper(CommitteeScraper):
jurisdiction = 'ga'
latest_only = True
cservice = get_client("Committees").service
csource = get_url("Committees")
ctty_cache = {}
def scrape_session(self, term, chambers, session):
sid = self.metadata['session_details'][session]['_guid']
committees = backoff(self.cservice.GetCommitteesBySession, sid)
#if committees.strip() == "":
# return # If we get here, it's a problem.
# Commenting this out for future debugging. - PRT
if str(committees).strip() == "":
raise ValueError("Error: No committee data for sid: %s" % (sid))
committees = committees['CommitteeListing']
for committee in committees:
cid = committee['Id']
committee = backoff(self.cservice.GetCommittee, cid)
subctty_cache = {}
comname, typ, guid, code, description = [committee[x] for x in [
'Name', 'Type', 'Id', 'Code', 'Description'
]]
chamber = {
"House": "lower",
"Senate": "upper",
"Joint": "joint"
}[typ]
ctty = None
if code in self.ctty_cache:
ctty = self.ctty_cache[code]
if (ctty['chamber'] != chamber) and (description and
'joint' in description.lower()):
ctty['chamber'] = 'joint'
else:
ctty = None
if ctty is None:
ctty = Committee(
chamber,
comname,
code=code,
_guid=guid,
description=description
)
self.ctty_cache[code] = ctty
members = committee['Members']['CommitteeMember']
for member in members:
name = "{First} {Last}".format(**dict(member['Member']['Name']))
role = member['Role']
ctty.add_member(name, role, _guid=member['Member']['Id'])
subcoms = member['SubCommittees']
                if subcoms is not None:
for subcom in subcoms:
subcom = subcom[1][0]
subguid = subcom['Id']
subcommittee = subcom['Name']
if subcommittee in subctty_cache:
# Add member to existing subcommittee.
subctty = subctty_cache[subcommittee]
else:
# Create subcommittee.
subctty = Committee(
chamber,
comname,
_guid=subguid,
subcommittee=subcommittee
)
subctty.add_source(self.csource)
subctty.add_source(CTTIE_URL.format(**{
"sid": sid,
"cttie": guid,
}))
subctty_cache[subcommittee] = subctty
subctty.add_member(
name, role, _guid=member['Member']['Id'])
for subctty in subctty_cache.values():
self.save_committee(subctty)
ctty.add_source(self.csource)
ctty.add_source(CTTIE_URL.format(**{
"sid": sid,
"cttie": guid,
}))
self.save_committee(ctty)
def scrape(self, term, chambers):
for t in self.metadata['terms']:
if t['name'] == term:
for session in t['sessions']:
self.scrape_session(term, chambers, session)
```
#### File: openstates/ga/util.py
```python
from suds.client import Client
import logging
import socket
import urllib2
import time
import suds
logging.getLogger('suds').setLevel(logging.WARNING)
log = logging.getLogger('billy')
url = 'http://webservices.legis.ga.gov/GGAServices/%s/Service.svc?wsdl'
def get_client(service):
client = backoff(Client, get_url(service), autoblend=True)
return client
def get_url(service):
return url % (service)
def backoff(function, *args, **kwargs):
retries = 5
nice = 0
def _():
time.sleep(1) # Seems like their server can't handle the load.
return function(*args, **kwargs)
for attempt in range(retries):
try:
return _()
except (socket.timeout, urllib2.URLError, suds.WebFault) as e:
if "This Roll Call Vote is not published." in e.message:
raise ValueError("Roll Call Vote isn't published")
backoff = ((attempt + 1) * 15)
log.warning(
"[attempt %s]: Connection broke. Backing off for %s seconds." % (
attempt,
backoff
)
)
log.info(str(e))
time.sleep(backoff)
raise ValueError(
"The server's not playing nice. We can't keep slamming it."
)
```
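`backoff` retries a callable a few times, sleeping longer after each failure, before giving up. A sketch of that retry shape using a fake flaky function instead of a suds call; the exception handling is collapsed to a bare `Exception` and the sleeps are shortened so the example runs quickly:
```python
import time
import random

def backoff(function, *args, **kwargs):
    retries = 5
    for attempt in range(retries):
        try:
            time.sleep(0.1)  # original sleeps 1s between calls to be gentle on the server
            return function(*args, **kwargs)
        except Exception as e:
            wait = (attempt + 1) * 0.1  # original backs off in 15-second steps
            print("[attempt %s]: %s. Backing off for %s seconds." % (attempt, e, wait))
            time.sleep(wait)
    raise ValueError("The server's not playing nice. We can't keep slamming it.")

def flaky_fetch(bill_id):
    # Stand-in for a suds service call that sometimes drops the connection.
    if random.random() < 0.3:
        raise IOError("Connection broke")
    return {"Id": bill_id, "Caption": "Example bill"}

print(backoff(flaky_fetch, 12345))
```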
#### File: openstates/hi/legislators.py
```python
from billy.scrape import ScrapeError, NoDataForPeriod
from billy.scrape.legislators import LegislatorScraper, Legislator
from billy.scrape.committees import Committee
import lxml.html
import re, contextlib
HI_BASE_URL = "http://capitol.hawaii.gov"
def get_legislator_listing_url(chamber):
chamber = {"lower": "H",
"upper": "S"}[chamber]
return "%s/members/legislators.aspx?chamber=%s" % (HI_BASE_URL, chamber)
class HILegislatorScraper(LegislatorScraper):
jurisdiction = 'hi'
def get_page( self, url ):
html = self.get(url).text
page = lxml.html.fromstring(html)
return page
def scrape_homepage( self, url ):
page = self.get_page( url )
ret = { "source" : url, 'ctty' : [] }
table = page.xpath(
"//table[@id='ctl00_ContentPlaceHolderCol1_GridViewMemberof']")
if len(table) > 0:
table = table[0]
else:
table = None
chamber = page.xpath("//span[contains(@id, 'LabelChamber')]")
if chamber == []:
raise Exception("Can't find the chamber label")
chamber = chamber[0].text_content()
ret['chamber'] = chamber
if table:
cttys = table.xpath( "./tr/td/a" )
for ctty in cttys:
ret['ctty'].append({
"name" : ctty.text,
"page" : "%s/%s" % (HI_BASE_URL, ctty.attrib['href']),
})
return ret
def scrape_leg_page( self, url ):
page = self.get_page(url)
people = page.xpath( \
"//table[@id='ctl00_ContentPlaceHolderCol1_GridView1']")[0]
people = people.xpath('./tr')[1:]
display_order = {
"image" : 0,
"contact" : 1,
"district" : 2
}
ret = []
for person in people:
image = person[display_order["image"]]
contact = person[display_order["contact"]]
district = person[display_order["district"]]
metainf = self.scrape_contact_info( contact )
district = self.scrape_district_info( district )
homepage = self.scrape_homepage( metainf['homepage'] )
image = "%s/%s" % (
HI_BASE_URL,
image.xpath("./*/img")[0].attrib['src']
)
pmeta = {
"image" : image,
"source" : [ url ],
"district" : district,
"chamber": None
}
if homepage != None:
pmeta['source'].append(homepage['source'])
pmeta['ctty'] = homepage['ctty']
pmeta['chamber'] = homepage['chamber']
if pmeta['chamber'] is None:
raise Exception("No chamber found.")
for meta in metainf:
pmeta[meta] = metainf[meta]
ret.append(pmeta)
return ret
def br_split( self, contact ):
cel = []
els = [ cel ]
# krufty HTML requires stupid hacks
elements = contact.xpath("./*")
for element in elements:
if element.tag == "br":
cel = []
els.append(cel)
else:
cel.append( element )
return els
def scrape_district_info( self, district ):
return district[2].text_content()
def scrape_contact_info( self, contact ):
homepage = "%s/%s" % ( # XXX: Dispatch a read on this page
HI_BASE_URL,
contact.xpath("./a")[0].attrib['href']
)
els = self.br_split( contact )
def _scrape_title( els ):
return els[0].text_content()
def _scrape_name( els ):
lName = els[0].text_content()
fName = els[2].text_content()
return "%s %s" % ( fName, lName )
def _scrape_party( els ):
party = {
"(D)" : "Democratic",
"(R)" : "Republican"
}
try:
return party[els[4].text_content()]
except KeyError:
return "Other"
def _scrape_addr( els ):
room_number = els[1].text_content()
slug = els[0].text_content()
return "%s %s" % ( slug, room_number )
def _scrape_room( els ):
return els[1].text_content()
def _scrape_phone( els ):
return els[1].text_content()
def _scrape_fax( els ):
return els[1].text_content()
def _scrape_email( els ):
return els[1].text_content()
contact_entries = {
"title" : ( 0, _scrape_title ),
"name" : ( 1, _scrape_name ),
"party" : ( 1, _scrape_party ),
"addr" : ( 2, _scrape_addr ),
"room" : ( 2, _scrape_room ),
"phone" : ( 3, _scrape_phone ),
"fax" : ( 4, _scrape_fax ),
"email" : ( 5, _scrape_email )
}
ret = {
"homepage" : homepage
}
for entry in contact_entries:
index, callback = contact_entries[entry]
ret[entry] = callback( els[index] )
return ret
def scrape(self, chamber, session):
metainf = self.scrape_leg_page(get_legislator_listing_url(chamber))
for leg in metainf:
try:
chamber = {"House": "lower",
"Senate": "upper"}[leg['chamber']]
except KeyError:
print("")
print(" ERROR: Bad Legislator page.")
print(" -> " + "\n -> ".join(leg['source']))
print("")
print(" Added this workaround because of a bad legislator")
print(" page, while they filled their info out.")
print("")
print(" Emailed webmaster. Told to wait.")
print(" - PRT, Jun 23, 2014")
print("")
continue
p = Legislator( session, chamber, leg['district'], leg['name'],
party=leg['party'],
# some additional things the website provides:
photo_url=leg['image'],
url=leg['homepage'])
p.add_office('capitol', 'Capitol Office', address=leg['addr'],
phone=leg['phone'], fax=leg['fax'] or None,
email=leg['email'])
for source in leg['source']:
p.add_source( source )
try:
for ctty in leg['ctty']:
flag='Joint Legislative'
if ctty['name'][:len(flag)] == flag:
ctty_chamber = "joint"
else:
ctty_chamber = chamber
p.add_role( 'committee member',
term=session,
chamber=ctty_chamber,
committee=ctty['name'],
position="member")
except KeyError:
                self.log( "XXX: Warning, %s has no scraped Committees" %
leg['name'] )
self.save_legislator( p )
```
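`br_split` chops a contact cell into groups of elements separated by `<br>` tags, which is what lets `scrape_contact_info` index into the title/name/address/phone lines positionally. A sketch on a hand-written fragment that is only shaped like the member table cells:
```python
import lxml.html

def br_split(contact):
    cel = []
    els = [cel]
    for element in contact.xpath('./*'):
        if element.tag == 'br':
            cel = []
            els.append(cel)
        else:
            cel.append(element)
    return els

cell = lxml.html.fromstring(
    '<div><b>Representative</b><br/>'
    '<span>Doe</span> <span>Jane</span> <span>(D)</span><br/>'
    '<span>Hawaii State Capitol, Room 435</span></div>')

for group in br_split(cell):
    print([el.text for el in group])
# ['Representative']
# ['Doe', 'Jane', '(D)']
# ['Hawaii State Capitol, Room 435']
```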
#### File: openstates/ia/events.py
```python
import re
import datetime
from billy.scrape.events import EventScraper, Event
from .scraper import InvalidHTTPSScraper
import lxml.html
class IAEventScraper(InvalidHTTPSScraper, EventScraper):
jurisdiction = 'ia'
def scrape(self, chamber, session):
if chamber == 'other':
return
today = datetime.date.today()
start_date = today - datetime.timedelta(days=10)
end_date = today + datetime.timedelta(days=10)
if chamber == 'upper':
chamber_abbrev = 'S'
else:
chamber_abbrev = 'H'
url = ("http://www.legis.iowa.gov/committees/meetings/meetingsList"
"Chamber?chamber=%s&bDate=%02d/%02d/"
"%d&eDate=%02d/%02d/%d" % (chamber_abbrev,
start_date.month,
start_date.day,
start_date.year,
end_date.month,
end_date.day,
end_date.year))
page = lxml.html.fromstring(self.get(url).text)
page.make_links_absolute(url)
for link in page.xpath("//div[contains(@class, 'meetings')]/table[1]/tbody/tr[not(contains(@class, 'hidden'))]"):
comm = link.xpath("string(./td[2]/a[1]/text())").strip()
desc = comm + " Committee Hearing"
location = link.xpath("string(./td[3]/text())").strip()
when = link.xpath("string(./td[1]/span[1]/text())").strip()
if 'cancelled' in when.lower() or "upon" in when.lower():
continue
if "To Be Determined" in when:
continue
if 'AM' in when:
when = when.split('AM')[0] + " AM"
else:
when = when.split('PM')[0] + " PM"
junk = ['Reception']
for key in junk:
when = when.replace(key, '')
when = re.sub("\s+", " ", when).strip()
if "tbd" in when.lower():
# OK. This is a partial date of some sort.
when = datetime.datetime.strptime(
when,
"%m/%d/%Y TIME - TBD %p"
)
else:
try:
when = datetime.datetime.strptime(when, "%m/%d/%Y %I:%M %p")
except ValueError:
when = datetime.datetime.strptime(when, "%m/%d/%Y %I %p")
event = Event(session, when, 'committee:meeting',
desc, location)
event.add_source(url)
event.add_participant('host', comm, 'committee', chamber=chamber)
self.save_event(event)
```
#### File: openstates/ia/__init__.py
```python
import re
import datetime
import lxml.html
import requests
from billy.utils.fulltext import text_after_line_numbers
from .bills import IABillScraper
from .legislators import IALegislatorScraper
from .events import IAEventScraper
from .votes import IAVoteScraper
# Silencing unverified HTTPS request warnings.
requests.packages.urllib3.disable_warnings()
settings = dict(SCRAPELIB_TIMEOUT=240)
metadata = dict(
name = 'Iowa',
abbreviation = 'ia',
capitol_timezone = 'America/Chicago',
legislature_name = 'Iowa General Assembly',
legislature_url = 'https://www.legis.iowa.gov/',
chambers = {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'House', 'title': 'Representative'},
},
terms = [
{
'name': '2011-2012',
'start_year': 2011,
'end_year': 2012,
'sessions': ['2011-2012'],
},
{
'name': '2013-2014',
'start_year': 2013,
'end_year': 2014,
'sessions': ['2013-2014'],
},
{
'name': '2015-2016',
'start_year': 2015,
'end_year': 2016,
'sessions': ['2015-2016'],
},
],
session_details = {
'2011-2012': {
'display_name': '2011-2012 Regular Session',
'_scraped_name': 'General Assembly: 84',
'number': '84',
'start_date': datetime.date(2011, 1, 10),
'end_date': datetime.date(2013, 1, 13),
},
'2013-2014': {
'display_name': '2013-2014 Regular Session',
'_scraped_name': 'General Assembly: 85',
'number': '85',
},
'2015-2016': {
'display_name': '2015-2016 Regular Session',
'_scraped_name': 'General Assembly: 86',
'number': '86',
},
},
feature_flags = ['events', 'influenceexplorer'],
_ignored_scraped_sessions = [
'Legislative Assembly: 86',
'General Assembly: 83',
'General Assembly: 82',
'General Assembly: 81',
'General Assembly: 80',
'General Assembly: 79',
'General Assembly: 79',
'General Assembly: 78',
'General Assembly: 78',
'General Assembly: 77',
'General Assembly: 77',
'General Assembly: 76',
]
)
def session_list():
def url_xpath(url, path):
doc = lxml.html.fromstring(requests.get(url, verify=False).text)
return doc.xpath(path)
sessions = url_xpath(
'https://www.legis.iowa.gov/legislation/findLegislation',
"//section[@class='grid_6']//li/a/text()[normalize-space()]"
)
sessions = [x[0] for x in filter(lambda x: x != [], [
re.findall(r'^.*Assembly: [0-9]+', session)
for session in sessions
])]
return sessions
def extract_text(doc, data):
doc = lxml.html.fromstring(data)
text = doc.xpath('//pre')[0].text_content()
# strip two sets of line numbers
return text_after_line_numbers(text_after_line_numbers(text))
```
#### File: openstates/id/legislators.py
```python
from billy.scrape.legislators import LegislatorScraper, Legislator
import re
import datetime
import lxml.html
_BASE_URL = 'http://legislature.idaho.gov/%s/membership.cfm'
_CHAMBERS = {'upper':'Senate', 'lower':'House'}
_PARTY = {
'(R)': 'Republican',
'(D)': 'Democratic',
}
_PHONE_NUMBERS = {'hom':'phone_number',
'bus':'business_phone',
'fax':'fax_number'}
class IDLegislatorScraper(LegislatorScraper):
"""Legislator data seems to be available for the current term only."""
jurisdiction = 'id'
def _extract_email(self, contact_form):
legislator_id = re.search(r'(\d+)', contact_form).group(1)
contact_page = self.get(contact_form).text
pattern = re.compile(r'legislators.email%s = "(.+?)";' % legislator_id)
email = pattern.search(contact_page).group(1)
return email
def scrape_sub(self, chamber, term, district, sub_url):
"Scrape basic info for a legislator's substitute."
page = self.get(sub_url).text
html = lxml.html.fromstring(page)
html.make_links_absolute(sub_url)
# substitute info div#MAINS35
div = html.xpath('//div[contains(@id, "MAINS")]')[0]
leg = {}
leg['img_url'] = div[0][0].get('src')
subfor = div[1][0].text.replace(u'\xa0', ' ').replace(': ', '')
full_name = div[1][2].text.replace(u'\xa0', ' ')
party = _PARTY[div[1][2].tail.strip()]
leg['contact_form'] = div[1][3].xpath('string(a/@href)')
leg = Legislator(term, chamber, district.strip(), full_name, party, **leg)
leg['roles'][0] = {'chamber': chamber, 'state': self.state,
'term': term, 'role':'substitute',
'legislator': subfor[subfor.rindex('for'):],
'district': district.replace('District', '').strip(),
'party': party,
'start_date':None, 'end_date':None}
leg.add_source(sub_url)
self.save_legislator(leg)
def scrape(self, chamber, term):
"""
Scrapes legislators for the current term only
"""
self.validate_term(term, latest_only=True)
url = _BASE_URL % _CHAMBERS[chamber].lower()
index = self.get(url).text
html = lxml.html.fromstring(index)
html.make_links_absolute(url)
base_table = html.xpath('body/table/tr/td[2]/table[2]')
district = None # keep track of district for substitutes
for row in base_table[0].xpath('tr'):
img_url = row.xpath('string(.//img/@src)')
contact_form, additional_info_url = row.xpath('.//a/@href')
email = self._extract_email(contact_form)
if "Substitute" in row.text_content():
# it seems like the sub always follows the person who he/she
# is filling in for.
# most sub info is provided at the additional info url
self.scrape_sub(chamber, term, district, additional_info_url)
continue
else:
full_name = " ".join(row[1][0].text_content().replace(u'\xa0', ' ').split())
party = _PARTY[row[1][0].tail.strip()]
pieces = [ x.strip() for x in row.itertext() if x ][6:]
# The parsed HTML will be something like:
# ['District 4', '2', 'nd', 'term', address, phone(s), profession, committees]
# Sometimes there's a leadership title before all that
if 'District ' in pieces[1]:
pieces.pop(0)
assert pieces[0].startswith('District '), "Improper district found: {}".format(pieces[0])
assert pieces[3] == 'term', "Improper term found: {}".format(pieces[3])
district = pieces[0]
district = district.replace('District', '').strip()
pieces = pieces[4:]
if pieces[0].startswith(u'(Served '):
pieces.pop(0)
address = re.sub(r'(\d{5})', r'ID \1', pieces.pop(0).strip())
assert re.match(r'.*\d{5}', address), "Address potentially invalid: {}".format(address)
phone = None
fax = None
for line in pieces:
if line.lower().startswith('home '):
phone = line[len('home '):]
elif not phone and line.lower().startswith('bus '):
phone = line[len('bus '):]
if line.lower().startswith('fax '):
fax = line[len('fax '):]
# After committees begin, no more contact information exists
if line == "Committees:":
break
leg = Legislator(term,
chamber,
district,
full_name,
party=party,
email=email)
leg.add_office('district',
'District Office',
address=address,
email=email,
fax=fax if fax else None,
phone=phone if phone else None)
leg.add_source(url)
leg['photo_url'] = img_url
leg['contact_form'] = contact_form
leg['url'] = additional_info_url
self.save_legislator(leg)
```
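`_extract_email` leans on two things: the contact-form URL carries the legislator's numeric id, and the contact page embeds the address in a `legislators.email<ID> = "...";` JavaScript assignment. A sketch of just those two regexes, run against invented strings rather than a fetched page:
```python
import re

contact_form = 'http://legislature.idaho.gov/house/contact.cfm?id=1234'
contact_page = 'var legislators = {}; legislators.email1234 = "jdoe@example.gov";'

legislator_id = re.search(r'(\d+)', contact_form).group(1)
pattern = re.compile(r'legislators.email%s = "(.+?)";' % legislator_id)
print(pattern.search(contact_page).group(1))  # jdoe@example.gov
```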
#### File: openstates/il/committees.py
```python
from billy.scrape.committees import CommitteeScraper, Committee
import lxml.html
class ILCommitteeScraper(CommitteeScraper):
jurisdiction = 'il'
def scrape_members(self, com, url):
data = self.get(url).text
if 'No members added' in data:
return
doc = lxml.html.fromstring(data)
for row in doc.xpath('//table[@cellpadding="3"]/tr')[1:]:
tds = row.xpath('td')
# remove colon and lowercase role
role = tds[0].text_content().replace(':','').strip().lower()
name = tds[1].text_content().strip()
com.add_member(name, role)
def scrape(self, chamber, term):
chamber_name = 'senate' if chamber == 'upper' else 'house'
url = 'http://ilga.gov/{0}/committees/default.asp'.format(chamber_name)
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
top_level_com = None
for a in doc.xpath('//a[contains(@href, "members.asp")]'):
name = a.text.strip()
code = a.getparent().getnext()
if code is None:
#committee doesn't have a code, maybe it's a taskforce?
com = Committee(chamber, name)
else:
code = code.text_content().strip()
if 'Sub' in name:
com = Committee(chamber, top_level_com, name, code=code)
else:
top_level_com = name
com = Committee(chamber, name, code=code)
com_url = a.get('href')
self.scrape_members(com, com_url)
com.add_source(com_url)
if not com['members']:
self.log('skipping empty committee on {0}'.format(com_url))
else:
self.save_committee(com)
```
#### File: openstates/in/apiclient.py
```python
import os
import time
import requests
import urlparse
import functools
from OpenSSL.SSL import SysCallError
"""
API key must be passed as a header. You need the following headers to get JSON:
Authorization = your_apikey
Accept = "application/json"
If you're trying to hit API links through your browser, you need to
install a header-modifying extension; on Firefox, for example:
https://addons.mozilla.org/en-US/firefox/addon/modify-headers/
"""
class BadApiResponse(Exception):
    '''Raised if the service returns a status code higher than 400,
    other than 429. Makes the response object available as exc.resp
'''
def __init__(self, resp, *args):
super(BadApiResponse, self).__init__(self, *args)
self.resp = resp
def check_response(method):
'''Decorated functions will run, and if they come back
with a 429 and retry-after header, will wait and try again.
'''
@functools.wraps(method)
def wrapped(self, *args, **kwargs):
resp = method(self, *args, **kwargs)
status = resp.status_code
if 400 < status:
if resp.status_code == 429:
self.handle_429(resp, *args, **kwargs)
return method(self, *args, **kwargs).json()
msg_args = (resp, resp.text, resp.headers)
msg = 'Bad api response: %r %r %r' % msg_args
raise BadApiResponse(resp, msg)
return resp.json()
return wrapped
class ApiClient(object):
'''
docs: http://docs.api.iga.in.gov/
'''
root = "https://api.iga.in.gov/"
resources = dict(
sessions='/sessions',
subjects='/{session}/subjects',
chambers='/{session}/chambers',
bills='/{session}/bills',
bill='/{session}/bills/{bill_id}',
chamber_bills='/{session}/chambers/{chamber}/bills',
rollcalls='/{session}/rollcalls/{rollcall_id}', #note that rollcall_id has to be pulled off the URL, it's NOT the rollcall_number
bill_actions='/{session}/bills/{bill_id}/actions',
committees='/{session}/committees',
committee='/{committee_link}',
legislators='/{session}/legislators',
legislator='/{session}/legislators/{legislator_id}',
chamber_legislators='/{session}/chambers/{chamber}/legislators',
bill_version = '/{session}/bills/{bill_id}/versions/{version_id}'
)
def __init__(self, scraper):
self.scraper = scraper
self.apikey = os.environ['INDIANA_API_KEY']
@check_response
def geturl(self, url):
headers = {}
headers['Authorization'] = self.apikey
headers['Accept'] = "application/json"
self.scraper.info('Api GET next page: %r, %r' % (url, headers))
return self.scraper.get(url, headers=headers, verify=False)
@check_response
def get_relurl(self, url):
headers = {}
headers['Authorization'] = self.apikey
headers['Accept'] = "application/json"
url = urlparse.urljoin(self.root, url)
self.scraper.info('Api GET: %r, %r' % (url, headers))
return self.scraper.get(url, headers=headers, verify=False)
def make_url(self, resource_name, **url_format_args):
# Build up the url.
url = self.resources[resource_name]
url = url.format(**url_format_args)
url = urlparse.urljoin(self.root, url)
return url
@check_response
def get(self, resource_name, requests_args=None,
requests_kwargs=None, **url_format_args):
'''Resource is a self.resources dict key.
'''
num_bad_packets_allowed = 10
url = self.make_url(resource_name, **url_format_args)
# Add in the api key.
requests_args = requests_args or ()
requests_kwargs = requests_kwargs or {}
requests_kwargs.update(verify=False)
headers = requests_kwargs.get('headers', {})
headers['Authorization'] = self.apikey
headers['Accept'] = "application/json"
requests_kwargs['headers'] = headers
args = (url, requests_args, requests_kwargs)
self.scraper.info('Api GET: %r, %r, %r' % args)
resp = None
tries = 0
while resp is None and tries < num_bad_packets_allowed:
try:
resp = self.scraper.get(url, *requests_args, **requests_kwargs)
except SysCallError as e:
err, string = e.args
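# errno 104 is ECONNRESET; retry dropped connections, re-raise anything else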
if err != 104:
raise
tries += 1
if tries >= num_bad_packets_allowed:
print err, string
raise RuntimeError("Got bad packet from API too many times, I give up")
return resp
def unpaginate(self, result):
for data in result['items']:
yield data
while True:
if 'nextLink' in result:
url = result['nextLink']
self.scraper.info('Api GET next page: %r' % url)
result = self.get_relurl(url)
if not result['items']:
return
for data in result['items']:
yield data
else:
return
def handle_429(self, resp, *args, **kwargs):
'''According to the docs:
"If the rate limit is exceeded, we will respond with a HTTP 429 Too Many
Requests response code and a body that details the reason for the rate
limiter kicking in. Further, the response will have a Retry-After
header that tells you for how many seconds to sleep before retrying.
You should anticipate this in your API client for the smoothest user
experience."
'''
seconds = int(resp.headers['retry-after'])
self.scraper.info('Got a 429: Sleeping %s seconds per retry-after header.' % seconds)
time.sleep(seconds)
```
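For orientation, a minimal usage sketch of the client above (hypothetical, not part of the repo): it assumes a billy scraper instance for `scraper`, the `INDIANA_API_KEY` environment variable, and the `get`/`unpaginate` behavior defined in apiclient.py. The committee and legislator scrapers below follow the same pattern.
```python
# Illustrative sketch only; `scraper` stands in for a billy Scraper instance.
from apiclient import ApiClient

client = ApiClient(scraper)                      # reads INDIANA_API_KEY from the environment
first_page = client.get("committees", session="2015")
for committee in client.unpaginate(first_page):  # follows 'nextLink' until 'items' is empty
    print committee["link"]
```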
#### File: openstates/in/committees.py
```python
import lxml.html
from billy.scrape.committees import CommitteeScraper, Committee
from apiclient import ApiClient
from .utils import get_with_increasing_timeout
from scrapelib import HTTPError
class INCommitteeScraper(CommitteeScraper):
jurisdiction = 'in'
def process_special_members(self,comm,comm_json,role_name):
role_dict = {"chair":"Chair",
"viceChair": "Vice Chair",
"rankingMinMember":"Ranking Minority Member"}
try:
mem = comm_json[role_name]
except KeyError:
return
if mem:
person = mem["firstName"]+" "+mem["lastName"]
comm.add_member(person,role=role_dict[role_name])
return person
return None
def get_subcommittee_info(self,session):
#api gives NO way of finding out who owns
#a subcommittee. It can be found based on the indentation(!)
#here: http://iga.in.gov/legislative/2015/committees/standing
#so we're going to hit that and make a dictionary. yuck
#but this is less important than some other stuff
#so we're going to be OK if we timeout.
link = "http://iga.in.gov/legislative/{}/committees/standing".format(session)
html = get_with_increasing_timeout(self,link,fail=False)
sc_dict = {}
if html:
doc = lxml.html.fromstring(html.text)
committees = doc.xpath("//li[@class='committee-item']")
for c in committees:
comm_name = c.xpath("./a")[0]
comm_name = comm_name.text_content().strip()
subcomms = c.xpath(".//li[@class='subcommittee-item']")
for s in subcomms:
subcom_name = s.text_content().strip()
sc_dict[subcom_name] = comm_name
return sc_dict
def scrape(self,term,chambers):
t = next((item for item in self.metadata["terms"] if item["name"] == term),None)
session = max(t["sessions"])
subcomms = self.get_subcommittee_info(session)
api_base_url = "https://api.iga.in.gov"
html_base_url = "http://iga.in.gov/legislative/{}/committees/".format(session)
client = ApiClient(self)
r = client.get("committees",session=session)
all_pages = client.unpaginate(r)
for comm_info in all_pages:
#this is kind of roundabout, but needed in order
#to take advantage of all of our machinery to make
#sure we're not overloading their api
comm_link = comm_info["link"]
comm_name = comm_link.split("/")[-1]
if "withdrawn" in comm_name or "conference" in comm_name:
continue
try:
comm_json = client.get("committee",committee_link=comm_link[1:])
except HTTPError:
self.logger.warning("Page does not exist")
continue
try:
chamber = comm_json["chamber"]["name"]
except KeyError:
chamber = 'joint'
else:
if chamber == "Senate":
chamber = "upper"
elif chamber == "House":
chamber = "lower"
else:
raise AssertionError("Unknown committee chamber {}".format(chamber))
name = comm_json["name"]
try:
owning_comm = subcomms[name]
except KeyError:
name = name.replace("Statutory Committee on","").strip()
comm = Committee(chamber,name)
else:
name = name.replace("Statutory Committee on","").replace("Subcommittee","").strip()
comm = Committee(chamber,owning_comm,subcommittee=name)
chair = self.process_special_members(comm,comm_json,"chair")
vicechair = self.process_special_members(comm,comm_json,"viceChair")
ranking = self.process_special_members(comm,comm_json,"rankingMinMember")
#leadership is also listed in membership
#so we have to make sure we haven't seen them yet
comm_members = [m for m in [chair,vicechair,ranking] if m]
for mem in comm_json["members"]:
mem_name = mem["firstName"]+" "+mem["lastName"]
if mem_name not in comm_members:
comm_members.append(mem_name)
comm.add_member(mem_name)
api_source = api_base_url + comm_link
if comm_name[:10] == "committee_":
html_source = html_base_url + comm_name[10:]
comm.add_source(html_source)
comm.add_source(api_source)
self.save_committee(comm)
```
#### File: openstates/in/legislators.py
```python
import lxml.html
from billy.scrape.legislators import LegislatorScraper, Legislator
from apiclient import ApiClient
from .utils import get_with_increasing_timeout
import scrapelib
class INLegislatorScraper(LegislatorScraper):
jurisdiction = 'in'
def scrape(self, chamber, term):
client = ApiClient(self)
t = next((item for item in self.metadata["terms"] if item["name"] == term),None)
session = max(t["sessions"])
base_url = "http://iga.in.gov/legislative"
api_base_url = "https://api.iga.in.gov"
chamber_name = "Senate" if chamber == "upper" else "House"
r = client.get("chamber_legislators",session=session,chamber=chamber_name)
all_pages = client.unpaginate(r)
for leg in all_pages:
firstname = leg["firstName"]
lastname = leg["lastName"]
party = leg["party"]
link = leg["link"]
api_link = api_base_url+link
html_link = base_url+link.replace("legislators/","legislators/legislator_")
try:
html = get_with_increasing_timeout(self,html_link,fail=True,kwargs={"verify":False})
except scrapelib.HTTPError:
self.logger.warning("Legislator's page is not available.")
continue
doc = lxml.html.fromstring(html.text)
doc.make_links_absolute(html_link)
address, phone = doc.xpath("//address")
address = address.text_content().strip()
address = "\n".join([l.strip() for l in address.split("\n")])
phone = phone.text_content().strip()
district = doc.xpath("//span[@class='district-heading']")[0].text.lower().replace("district","").strip()
image_link = base_url+link.replace("legislators/","portraits/legislator_")
legislator = Legislator(term,
chamber,
district,
" ".join([firstname,lastname]),
party=party,
photo_url = image_link)
legislator.add_office('capitol', 'Capitol Office', address=address,
phone=phone)
legislator.add_source(html_link)
legislator.add_source(api_link)
self.save_legislator(legislator)
```
#### File: openstates/ks/legislators.py
```python
from billy.scrape.legislators import LegislatorScraper, Legislator
from openstates.utils import LXMLMixin
from . import ksapi
import json
import scrapelib
class KSLegislatorScraper(LegislatorScraper, LXMLMixin):
jurisdiction = 'ks'
def scrape(self, term, chambers):
content = json.loads(self.get(ksapi.url + 'members/').text)['content']
if 'upper' in chambers:
for member in content['senate_members']:
self.get_member(term, 'upper', member['KPID'])
if 'lower' in chambers:
for member in content['house_members']:
self.get_member(term, 'lower', member['KPID'])
def get_member(self, term, chamber, kpid):
url = '%smembers/%s' % (ksapi.url, kpid)
content = json.loads(self.get(url).text)['content']
party = content['PARTY']
if party == 'Democrat':
party = 'Democratic'
slug = {'2013-2014': 'b2013_14',
'2015-2016': 'b2015_16'}[term]
leg_url = 'http://www.kslegislature.org/li/%s/members/%s/' % (slug, kpid)
try:
legislator_page = self.lxmlize(leg_url)
(photo_url, ) = legislator_page.xpath(
'//img[@class="profile-picture"]/@src')
except scrapelib.HTTPError:
self.warning("{}'s legislator bio page not found".format(content['FULLNAME']))
leg_url = ''
photo_url = ''
legislator = Legislator(term, chamber, str(content['DISTRICT']),
content['FULLNAME'],
party=party, url=leg_url, photo_url=photo_url,
occupation=content['OCCUPATION'],
)
address = ('Room %s\n'
'Kansas State Capitol Building\n'
'300 SW 10th St.\n'
'Topeka, KS 66612') % content['OFFICENUM']
legislator.add_office('capitol', 'Capitol Office',
phone=content['OFFPH'] or None,
address=address,
email=content['EMAIL'])
legislator.add_source(url)
self.save_legislator(legislator)
```
#### File: openstates/ky/legislators.py
```python
from collections import defaultdict
from billy.scrape.legislators import Legislator, LegislatorScraper
import lxml.html
class KYLegislatorScraper(LegislatorScraper):
jurisdiction = 'ky'
latest_only = True
def scrape(self, chamber, year):
if chamber == 'upper':
leg_list_url = 'http://www.lrc.ky.gov/senate/senmembers.htm'
else:
leg_list_url = 'http://www.lrc.ky.gov/house/hsemembers.htm'
page = self.get(leg_list_url).text
page = lxml.html.fromstring(page)
for link in page.xpath('//a[@onmouseout="hidePicture();"]'):
self.scrape_member(chamber, year, link.get('href'))
def scrape_office_info(self, url):
ret = {}
legislator_page = self.get(url).text
legislator_page = lxml.html.fromstring(legislator_page)
legislator_page.make_links_absolute(url)
info = legislator_page.xpath("//table//span")
for span in info:
elements = span.xpath("./*")
if len(elements) < 1:
continue
if elements[0].tag != "b":
continue
txt = elements[0].text_content().strip()
if txt == "Bio" or \
"committees" in txt.lower() or \
"service" in txt.lower() or \
txt == "":
continue
def _handle_phone(obj):
ret = defaultdict(list)
for x in obj.xpath(".//*")[:-1]:
phone = x.tail.strip()
obj = phone.split(":", 1)
if len(obj) != 2:
continue
typ, number = obj
typ, number = typ.strip(), number.strip()
ret[typ].append(number)
return ret
def _handle_address(obj):
addr = " ".join([x.tail or "" for x in obj.xpath(".//*")[1:]])
return [addr.strip()]
def _handle_emails(obj):
ret = []
emails = obj.xpath(".//a[contains(@href, 'mailto')]")
if len(emails) < 1:
return []
for email in emails:
_, efax = email.attrib['href'].split(":", 1)
ret.append(efax)
return ret
handlers = {
"Mailing Address": _handle_address,
"Frankfort Address(es)": _handle_address,
"Phone Number(s)": _handle_phone,
"Email Address(es)": _handle_emails
}
try:
handler = handlers[txt]
ret[txt] = handler(span)
except KeyError:
pass
return ret
def scrape_member(self, chamber, year, member_url):
member_page = self.get(member_url).text
doc = lxml.html.fromstring(member_page)
photo_url = doc.xpath('//div[@id="bioImage"]/img/@src')[0]
name_pieces = doc.xpath('//span[@id="name"]/text()')[0].split()
full_name = ' '.join(name_pieces[1:-1]).strip()
party = name_pieces[-1]
if party == '(R)':
party = 'Republican'
elif party == '(D)':
party = 'Democratic'
elif party == '(I)':
party = 'Independent'
district = doc.xpath('//span[@id="districtHeader"]/text()')[0].split()[-1]
leg = Legislator(year, chamber, district, full_name, party=party,
photo_url=photo_url, url=member_url)
leg.add_source(member_url)
address = '\n'.join(doc.xpath('//div[@id="FrankfortAddresses"]//span[@class="bioText"]/text()'))
phone = None
fax = None
phone_numbers = doc.xpath('//div[@id="PhoneNumbers"]//span[@class="bioText"]/text()')
for num in phone_numbers:
if num.startswith('Annex: '):
num = num.replace('Annex: ', '')
if num.endswith(' (fax)'):
fax = num.replace(' (fax)', '')
else:
phone = num
emails = doc.xpath(
'//div[@id="EmailAddresses"]//span[@class="bioText"]//a/text()'
)
email = reduce(
lambda match, address: address if '@<EMAIL>' in str(address) else match,
[None] + emails
)
if address.strip() == "":
self.warning("Missing Capitol Office!!")
else:
leg.add_office(
'capitol', 'Capitol Office',
address=address,
phone=phone,
fax=fax,
email=email
)
self.save_legislator(leg)
```
#### File: openstates/ky/votes.py
```python
import sys
from billy.scrape.votes import VoteScraper, Vote
#import Image
#import ImageChops
#def crop(image, threshold=0.99):
# """
# Crop the leftmost/topmost rows/cols with percentage of white pixel
# less than threshold.
# """
# bbox = [0, 0, image.size[0], image.size[1]]
# for x in xrange(0, image.size[0]):
# row = image.crop((x, 0, x + 1, image.size[1]))
# first = row.getcolors()[0]
# if first[1] == (255, 255, 255):
# if first[0] / float(image.size[1]) < threshold:
# bbox[0] = x
# break
# for y in xrange(0, image.size[1]):
# row = image.crop((0, y, image.size[0], y + 1))
# first = row.getcolors()[0]
# if first[1] == (255, 255, 255):
# if first[0] / float(image.size[0]) < threshold:
# bbox[1] = y
# break
# return image.crop(bbox)
#def get_rect_color(image, rect):
# box = image.crop(rect)
# colors = box.getcolors()
# if len(colors) > 1:
# raise ValueError("Not a solid color: %r" % colors)
# return colors[0][1]
#def parse_votes(filename):
# "Extract votes from roll call images from the KY Senate."
# image = Image.open(filename)
# # The vote pages have a variable amount of whitespace around the
# # top and left that we want to strip
# image = crop(image)
# votes = []
# cols = [365, 885, 1410]
# for col_x in cols:
# for row in xrange(0, 13):
# if col_x == 1410 and row == 12:
# Third column only has 11 entries
# continue
# y = 395 + 50 * row
# yes_rect = (col_x, y, col_x + 10, y + 15)
# if get_rect_color(image, yes_rect) == (0, 0, 0):
# yes = True
# else:
# yes = False
# no_rect = (col_x + 35, y, col_x + 45, y + 15)
# if get_rect_color(image, no_rect) == (0, 0, 0):
# no = True
# else:
# no = False
# if yes and no:
# raise ValueError("Double vote")
# if yes:
# votes.append('yes')
# elif no:
# votes.append('no')
# else:
# votes.append('other')
# return votes
class KYVoteScraper(VoteScraper):
jurisdiction = 'ky'
def scrape(self, chamber, session):
pass
```
#### File: openstates/la/legislators.py
```python
import re
from billy.scrape.legislators import LegislatorScraper, Legislator
from openstates.utils import LXMLMixin
def xpath_one(el, expr):
ret = el.xpath(expr)
if len(ret) != 1:
print(ret, expr)
raise Exception("xpath_one expected exactly one match for %r, got %d" % (expr, len(ret)))
return ret[0]
class LALegislatorScraper(LegislatorScraper, LXMLMixin):
jurisdiction = 'la'
latest_only = True
def scrape_upper_leg_page(self, term, url, who):
page = self.lxmlize(url)
(who, ) = [x for x in
page.xpath('//tr/td/font/text()') if
x.strip().startswith("Senator ")
]
who = re.search(r'(?u)^\s*Senator\s*(.*?)\s*$', who).group(1)
(district, ) = [x for x in
page.xpath('//tr/td/font/text()') if
x.strip().startswith("District - ")
]
district = re.search(
r'(?u)^\s*District\s*-\s*(.*?)\s*$', district).group(1)
info = [x.strip() for x in
page.xpath('//font[contains(text(), "Information:")]/'
'ancestor::table[1]//text()') if
x.strip()
]
parties = {
"Republican": "Republican",
"Democrat": "Democratic",
}
party_index = info.index("Party:") + 1
party = parties[info[party_index]]
phone_index = info.index("District Phone") + 1
phone = info[phone_index]
assert sum(c.isdigit() for c in phone) == 10, "Phone number is invalid: {}".format(phone)
# Address exists for all lines between party and phone
address = "\n".join(info[party_index + 2:phone_index - 1])
address = address.replace("\r", "")
if not address:
address = "No Address Found"
fax_index = info.index("Fax") + 1
fax = info[fax_index]
assert sum(c.isdigit() for c in fax) == 10, "Fax number is invalid: {}".format(fax)
email_index = info.index("E-mail Address") + 1
email = info[email_index]
assert "@" in email, "Email info is not valid: {}".format(email)
leg = Legislator(term,
'upper',
district,
who,
party=party)
leg.add_office('district',
'District Office',
address=address,
phone=phone,
fax=fax,
email=email)
leg.add_source(url)
self.save_legislator(leg)
def scrape_upper(self, chamber, term):
url = "http://senate.la.gov/Senators/"
page = self.lxmlize(url)
table = page.xpath("//table[@width='96%']")[0]
legs = table.xpath(".//tr//a[contains(@href, 'senate.la.gov')]")
for leg in legs:
who = leg.text_content().strip()
if who == "":
continue
self.scrape_upper_leg_page(term, leg.attrib['href'], who)
def scrape_lower_legislator(self, url, leg_info, term):
page = self.lxmlize(url)
name = page.xpath('//div[@class="FullName"]/text()')[0].strip()
if name.startswith("District ") or name.startswith("Vacant "):
self.warning("Seat is vacant: {}".format(name))
return
photo = xpath_one(page, '//a[@rel="lightbox"]').attrib['href']
infoblk = xpath_one(
page, '//td/b[contains(text(), "CAUCUS/DELEGATION MEMBERSHIP")]')
infoblk = infoblk.getparent()
cty = xpath_one(infoblk, "./b[contains(text(), 'ASSIGNMENTS')]")
cty = cty.getnext()
party_flags = {
"Democrat": "Democratic",
"Republican": "Republican",
"Independent": "Independent"
}
party_info = page.xpath(
'//div[@class="FullName"]//following-sibling::text()[1]')
(party_info, ) = [x.strip() for x in party_info if x.strip()]
party_info = party_info.split('-')[0].strip()
party = party_flags[party_info]
kwargs = {"url": url,
"party": party,
"photo_url": photo}
leg = Legislator(term,
'lower',
leg_info['dist'],
leg_info['name'],
**kwargs)
kwargs = {
"address": leg_info['office'],
"phone": leg_info['phone'],
"email": leg_info['email'],
}
for key in kwargs.keys():
if not kwargs[key].strip():
kwargs[key] = None
leg.add_office('district',
'District Office',
**kwargs)
leg.add_source(url)
self.save_legislator(leg)
def scrape_lower(self, chamber, term):
url = "http://house.louisiana.gov/H_Reps/H_Reps_FullInfo.asp"
page = self.lxmlize(url)
meta = ["name", "dist", "office", "phone", "email"]
for tr in page.xpath("//table[@id='table61']//tr"):
tds = tr.xpath("./td")
if tds == []:
continue
info = {}
for i in range(0, len(meta)):
info[meta[i]] = tds[i].text_content().strip()
hrp = tr.xpath(
".//a[contains(@href, 'H_Reps')]")[0].attrib['href']
self.scrape_lower_legislator(hrp, info, term)
def scrape(self, chamber, term):
if chamber == "upper":
return self.scrape_upper(chamber, term)
if chamber == "lower":
return self.scrape_lower(chamber, term)
```
#### File: openstates/md/bills.py
```python
import datetime
import re
import lxml.html
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
CHAMBERS = {
'upper': ('SB','SJ'),
'lower': ('HB','HJ'),
}
classifiers = {
r'Committee Amendment .+? Adopted': 'amendment:passed',
r'Favorable': 'committee:passed:favorable',
r'First Reading': 'committee:referred',
r'Floor (Committee )?Amendment\s?\(.+?\)$': 'amendment:introduced',
r'Floor Amendment .+? Rejected': 'amendment:failed',
r'Floor (Committee )?Amendment.+?Adopted': 'amendment:passed',
r'Floor Amendment.+? Withdrawn': 'amendment:withdrawn',
r'Pre\-filed': 'bill:introduced',
r'Re\-(referred|assigned)': 'committee:referred',
r'Recommit to Committee': 'committee:referred',
r'Referred': 'committee:referred',
r'Third Reading Passed': 'bill:passed',
r'Third Reading Failed': 'bill:failed',
r'Unfavorable': 'committee:passed:unfavorable',
r'Vetoed': 'governor:vetoed',
r'Approved by the Governor': 'governor:signed',
r'Conference Committee|Passed Enrolled|Special Order|Senate Concur|Motion|Laid Over|Hearing|Committee Amendment|Assigned a chapter|Second Reading|Returned Passed|House Concur|Chair ruled|Senate Refuses to Concur|Senate Requests': 'other',
}
vote_classifiers = {
r'third': 'passage',
r'fla|amend|amd': 'amendment',
}
def _classify_action(action):
if not action:
return None
ctty = None
for regex, type in classifiers.iteritems():
if re.match(regex, action):
if 'committee:referred' in type:
ctty = re.sub(regex, "", action).strip()
return ( type, ctty )
return ( None, ctty )
def _clean_sponsor(name):
if name.startswith('Delegate') or name.startswith('Senator'):
name = name.split(' ', 1)[1]
if ', District' in name:
name = name.rsplit(',', 1)[0]
return name.strip().strip('*')
def _get_td(doc, th_text):
td = doc.xpath('//th[text()="%s"]/following-sibling::td' % th_text)
if td:
return td[0]
td = doc.xpath('//th/span[text()="%s"]/../following-sibling::td' % th_text)
if td:
return td[0]
class MDBillScraper(BillScraper):
jurisdiction = 'md'
def parse_bill_sponsors(self, doc, bill):
sponsor_list = doc.xpath('//a[@name="Sponlst"]')
if sponsor_list:
# more than one bill sponsor exists
elems = sponsor_list[0].xpath('../../..//dd/a')
for elem in elems:
bill.add_sponsor('cosponsor',
_clean_sponsor(elem.text.strip()))
else:
# single bill sponsor
sponsor = doc.xpath('//a[@name="Sponsors"]/../../dd')[0].text_content()
bill.add_sponsor('primary', _clean_sponsor(sponsor))
def parse_bill_actions(self, doc, bill):
for h5 in doc.xpath('//h5'):
if h5.text == 'House Action':
chamber = 'lower'
elif h5.text == 'Senate Action':
chamber = 'upper'
elif h5.text.startswith('Action after passage'):
chamber = 'governor'
else:
break
dts = h5.getnext().xpath('dl/dt')
for dt in dts:
action_date = dt.text.strip()
if action_date and action_date != 'No Action':
year = int(bill['session'][:4])
action_date += ('/%s' % year)
action_date = datetime.datetime.strptime(action_date,
'%m/%d/%Y')
# the session has no actions after June; later months are pre-session dates from the prior calendar year
if action_date.month > 6:
year -= 1
action_date = action_date.replace(year)
# iterate over all dds following the dt
dcursor = dt
while (dcursor.getnext() is not None and
dcursor.getnext().tag == 'dd'):
dcursor = dcursor.getnext()
actions = dcursor.text_content().split('\r\n')
for act in actions:
act = act.strip()
if not act:
continue
atype, committee = _classify_action(act)
kwargs = {
"type": atype
}
if committee is not None:
kwargs['committees'] = committee
if atype:
bill.add_action(chamber, act, action_date,
**kwargs)
else:
self.log('unknown action: %s' % act)
def parse_bill_documents(self, doc, bill):
bill_text_b = doc.xpath('//b[contains(text(), "Bill Text")]')[0]
for sib in bill_text_b.itersiblings():
if sib.tag == 'a':
bill.add_version(sib.text.strip(','), sib.get('href'),
mimetype='application/pdf')
note_b = doc.xpath('//b[contains(text(), "Fiscal and Policy")]')[0]
for sib in note_b.itersiblings():
if sib.tag == 'a' and sib.text == 'Available':
bill.add_document('Fiscal and Policy Note', sib.get('href'))
def parse_bill_votes(self, doc, bill):
params = {
'chamber': None,
'date': None,
'motion': None,
'passed': None,
'yes_count': None,
'no_count': None,
'other_count': None,
}
elems = doc.xpath('//a')
# MD has a habit of listing votes twice
seen_votes = set()
for elem in elems:
href = elem.get('href')
if (href and "votes" in href and href.endswith('htm') and
href not in seen_votes):
seen_votes.add(href)
vote = self.parse_vote_page(href)
vote.add_source(href)
bill.add_vote(vote)
def parse_vote_page(self, vote_url):
vote_html = self.get(vote_url).text
doc = lxml.html.fromstring(vote_html)
# chamber
if 'senate' in vote_url:
chamber = 'upper'
else:
chamber = 'lower'
# date in the following format: Mar 23, 2009
date = doc.xpath('//td[starts-with(text(), "Legislative")]')[0].text
date = date.replace(u'\xa0', ' ')
date = datetime.datetime.strptime(date[18:], '%b %d, %Y')
# motion
motion = ''.join(x.text_content() for x in \
doc.xpath('//td[@colspan="23"]'))
if motion == '':
motion = "No motion given" # XXX: Double check this. See SJ 3.
motion = motion.replace(u'\xa0', ' ')
# totals
tot_class = doc.xpath('//td[contains(text(), "Yeas")]')[0].get('class')
totals = doc.xpath('//td[@class="%s"]/text()' % tot_class)[1:]
yes_count = int(totals[0].split()[-1])
no_count = int(totals[1].split()[-1])
other_count = int(totals[2].split()[-1])
other_count += int(totals[3].split()[-1])
other_count += int(totals[4].split()[-1])
passed = yes_count > no_count
vote = Vote(chamber=chamber, date=date, motion=motion,
yes_count=yes_count, no_count=no_count,
other_count=other_count, passed=passed)
# go through, find Voting Yea/Voting Nay/etc. and next tds are voters
func = None
for td in doc.xpath('//td/text()'):
td = td.replace(u'\xa0', ' ')
if td.startswith('Voting Yea'):
func = vote.yes
elif td.startswith('Voting Nay'):
func = vote.no
elif td.startswith('Not Voting'):
func = vote.other
elif td.startswith('Excused'):
func = vote.other
elif func:
func(td)
return vote
def scrape_bill_2012(self, chamber, session, bill_id, url):
""" Creates a bill object """
if len(session) == 4:
session_url = session+'rs'
else:
session_url = session
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
# find <a name="Title">, get parent dt, get parent dl, then the dd in that dl
title = doc.xpath('//a[@name="Title"][1]/../../dd[1]/text()')[0].strip()
summary = doc.xpath('//font[@size="3"]/p/text()')[0].strip()
if 'B' in bill_id:
_type = ['bill']
elif 'J' in bill_id:
_type = ['joint resolution']
bill = Bill(session, chamber, bill_id, title, type=_type,
summary=summary)
bill.add_source(url)
self.parse_bill_sponsors(doc, bill) # sponsors
self.parse_bill_actions(doc, bill) # actions
self.parse_bill_documents(doc, bill) # documents and versions
self.parse_bill_votes(doc, bill) # votes
# subjects
subjects = []
for subj in doc.xpath('//a[contains(@href, "/subjects/")]'):
subjects.append(subj.text.split('-see also-')[0])
bill['subjects'] = subjects
# add bill to collection
self.save_bill(bill)
def scrape_vote(self, bill, action_text, url):
doc = lxml.html.fromstring(self.get(url).text)
date = None
yes_count = no_count = other_count = None
# process action_text - might look like "Vote - Senate Floor - Third Reading Passed (46-0) - 01/16/12"
if action_text.startswith('Vote - Senate Floor - '):
action_text = action_text[22:]
chamber = 'upper'
elif action_text.startswith('Vote - House Floor - '):
action_text = action_text[21:]
chamber = 'lower'
motion, unused_date = action_text.rsplit(' - ', 1)
yes_count, no_count = re.findall('\((\d+)-(\d+)\)', motion)[0]
if 'Passed' in motion:
motion = motion.split(' Passed')[0]
passed = True
elif 'Adopted' in motion:
motion = motion.split(' Adopted')[0]
passed = True
elif 'Rejected' in motion:
motion = motion.split(' Rejected')[0]
passed = False
elif 'Failed' in motion:
motion = motion.split(' Failed')[0]
passed = False
elif 'Concur' in motion:
passed = True
elif 'Floor Amendment' in motion:
passed = int(yes_count) > int(no_count)
else:
raise Exception('unknown motion: %s' % motion)
vote = Vote(chamber=chamber, date=None, motion=motion,
yes_count=int(yes_count), no_count=int(no_count),
other_count=0, passed=passed)
vfunc = None
nobrs = doc.xpath('//nobr/text()')
for text in nobrs:
text = text.replace(u'\xa0', ' ')
if text.startswith('Calendar Date: '):
if vote['date']:
self.warning('two dates!, skipping rest of bill')
break
vote['date'] = datetime.datetime.strptime(text.split(': ', 1)[1], '%b %d, %Y %I:%M %p')
elif 'Yeas' in text and 'Nays' in text and 'Not Voting' in text:
yeas, nays, nv, exc, absent = re.match('(\d+) Yeas\s+(\d+) Nays\s+(\d+) Not Voting\s+(\d+) Excused \(Absent\)\s+(\d+) Absent', text).groups()
vote['yes_count'] = int(yeas)
vote['no_count'] = int(nays)
vote['other_count'] = int(nv) + int(exc) + int(absent)
elif 'Voting Yea' in text:
vfunc = vote.yes
elif 'Voting Nay' in text:
vfunc = vote.no
elif 'Not Voting' in text or 'Excused' in text:
vfunc = vote.other
elif vfunc:
if ' and ' in text:
a, b = text.split(' and ')
vfunc(a)
vfunc(b)
else:
vfunc(text)
vote.validate()
vote.add_source(url)
bill.add_vote(vote)
def scrape_bill(self, chamber, session, bill_id, url):
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
title = doc.xpath('//h3[@class="h3billright"]')[0].text_content()
# TODO: grab summary (none present at time of writing)
if 'B' in bill_id:
_type = ['bill']
elif 'J' in bill_id:
_type = ['joint resolution']
else:
raise ValueError('unknown bill type ' + bill_id)
bill = Bill(session, chamber, bill_id, title, type=_type)
bill.add_source(url)
# process sponsors
sponsors = _get_td(doc, 'All Sponsors:').text_content()
sponsors = sponsors.replace('Delegates ', '')
sponsors = sponsors.replace('Delegate ', '')
sponsors = sponsors.replace('Senator ', '')
sponsors = sponsors.replace('Senators ', '')
sponsor_type = 'primary'
for sponsor in re.split(', (?:and )?', sponsors):
sponsor = sponsor.strip()
if not sponsor:
continue
bill.add_sponsor(sponsor_type, sponsor)
sponsor_type = 'cosponsor'
# subjects
subject_list = []
for heading in ('Broad Subject(s):', 'Narrow Subject(s):'):
subjects = _get_td(doc, heading).xpath('a/text()')
subject_list += [s.split(' -see also-')[0] for s in subjects if s]
bill['subjects'] = subject_list
# documents
self.scrape_documents(bill, url.replace('stab=01', 'stab=02'))
# actions
self.scrape_actions(bill, url.replace('stab=01', 'stab=03'))
self.save_bill(bill)
def scrape_documents(self, bill, url):
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
for td in doc.xpath('//table[@class="billdocs"]//td'):
a = td.xpath('a')[0]
if a.text == 'Text':
bill.add_version('Bill Text', a.get('href'),
mimetype='application/pdf')
elif a.text == 'Analysis':
bill.add_document(a.tail.replace(' - ', ' ').strip(),
a.get('href'), mimetype='application/pdf')
elif a.text in ('Bond Bill Fact Sheet',
"Attorney General's Review Letter",
"Governor's Veto Letter",
):
bill.add_document(a.text, a.get('href'),
mimetype='application/pdf')
elif a.text in ('Amendments', 'Conference Committee Amendment',
'Conference Committee Report'):
bill.add_document(a.text + ' - ' + a.tail.strip(),
a.get('href'), mimetype='application/pdf')
elif a.text == 'Vote - Senate - Committee':
bill.add_document('Senate %s Committee Vote' %
a.tail.replace(' - ', ' ').strip(),
a.get('href'), mimetype='application/pdf')
elif a.text == 'Vote - House - Committee':
bill.add_document('House %s Committee Vote' %
a.tail.replace(' - ', ' ').strip(),
a.get('href'), mimetype='application/pdf')
elif a.text == 'Vote - Senate Floor':
self.scrape_vote(bill, td.text_content(), a.get('href'))
elif a.text == 'Vote - House Floor':
self.scrape_vote(bill, td.text_content(), a.get('href'))
else:
raise ValueError('unknown document type: %s' % a.text)
def scrape_actions(self, bill, url):
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
for row in doc.xpath('//table[@class="billgrid"]/tr')[1:]:
new_chamber, cal_date, leg_date, action, proceedings = row.xpath('td')
if new_chamber.text == 'Senate':
chamber = 'upper'
elif new_chamber.text == 'House':
chamber = 'lower'
elif new_chamber.text == 'Post Passage':
chamber = 'executive'
elif new_chamber.text is not None:
raise ValueError('unexpected chamber: ' + new_chamber.text)
action = action.text
if cal_date.text:
action_date = datetime.datetime.strptime(cal_date.text, '%m/%d/%Y')
atype, committee = _classify_action(action)
kwargs = { "type": atype }
if committee is not None:
kwargs['committees'] = committee
bill.add_action(chamber, action, action_date, **kwargs)
def scrape(self, chamber, session):
session_slug = session if 's' in session else session + 'rs'
main_page = 'http://mgaleg.maryland.gov/webmga/frmLegislation.aspx?pid=legisnpage&tab=subject3&ys=' + session_slug
chamber_prefix = 'S' if chamber == 'upper' else 'H'
html = self.get(main_page).text
doc = lxml.html.fromstring(html)
ranges = doc.xpath('//table[@class="box1leg"]//td/text()')
for range_text in ranges:
match = re.match('(\w{2})0*(\d+) - \wB0*(\d+)', range_text.strip())
if match:
prefix, begin, end = match.groups()
if prefix[0] == chamber_prefix:
self.debug('scraping %ss %s-%s', prefix, begin, end)
for number in range(int(begin), int(end)+1):
bill_id = prefix + str(number)
url = 'http://mgaleg.maryland.gov/webmga/frmMain.aspx?id=%s&stab=01&pid=billpage&tab=subject3&ys=%s' % (bill_id, session_slug)
if session < '2013':
self.scrape_bill_2012(chamber, session, bill_id, url)
else:
self.scrape_bill(chamber, session, bill_id, url)
```
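As a quick, hypothetical sanity check of `_classify_action` above (the action strings are invented; each matches exactly one pattern in `classifiers`, so the result does not depend on dict ordering):
```python
# Illustrative only -- invented action strings.
assert _classify_action('Favorable Report') == ('committee:passed:favorable', None)
assert _classify_action('Vetoed by the Governor') == ('governor:vetoed', None)
# For referrals, whatever text remains after the match is returned as the committee:
atype, committee = _classify_action('Referred Judicial Proceedings')
assert atype == 'committee:referred' and committee == 'Judicial Proceedings'
```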
#### File: openstates/md/events.py
```python
import datetime as dt
from openstates.utils import LXMLMixin
from billy.scrape.events import Event, EventScraper
import re
import pytz
import lxml.html
def last_space(string):
# this is a big hack.
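# despite its name, this returns the index of the first non-space character
# (i.e. the width of the leading indentation), or None if the line is all spaces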
for x in range(0, len(string)):
if string[x] != " ":
return x
return None
class MDEventScraper(EventScraper, LXMLMixin):
jurisdiction = 'md'
_tz = pytz.timezone('US/Eastern')
def scrape(self, chamber, session):
self.logger.warning("MD's events schedule is a blob of hand-indented text and has changed from last year. Skipping for now.")
return
if chamber != 'other':
return None # We're going to do it all in one shot.
if session[-2:] == "s1":
return None # Special sessions 404
url = "http://mlis.state.md.us/%s/hearsch/alladd.htm" % ( session )
page = self.lxmlize(url)
events = page.xpath("//pre")
for event in events:
ctty_name = [
x.strip() for x in
event.getparent().getprevious().text_content().split("-", 1)
]
ctty_name = ctty_name[0]
event_text = event.text_content()
if "This meeting has been cancelled." in event_text:
continue
# OK. In order to process this text-only notice, we have to resort
# to some major hackage. Just roll with it.
lines = event_text.split("\n")
# In order to get the key stuff, we need to figure out where the
# address "block" starts.
address_block = last_space(lines[4])
assert address_block is not None
# OK. Given the offset, we can "split" the time off the date block.
time_room = lines[3]
time = time_room[:address_block].strip()
if "TBD" in time:
continue # Nothing's set yet.
time = "%s %s" % (
lines[1],
time
)
time = re.sub("\s+", " ", time).strip()
trans = {
"P.M." : "PM",
"A.M." : "AM"
}
for transition in trans:
time = time.replace(transition, trans[transition])
when = dt.datetime.strptime(time, "%A %B %d, %Y %I:%M %p")
room = time_room[address_block:].strip()
place_block = lines[4:]
where = room + "\n"
done = False
offset = 4
for place in place_block:
if place.strip() == "":
done = True
if done:
continue
offset += 1
where += place.strip() + "\n"
where = where.strip()
# Now that the date's processed, we can move on.
moreinfo = lines[offset + 1:]
info = {}
key = "unattached_header"
for inf in moreinfo:
if ":" in inf:
key, value = inf.split(":", 1)
key = key.strip()
info[key] = value.strip()
else:
info[key] += " " + inf.strip()
# Alright. We should have enough now.
subject = info['Subject']
event = Event(session, when, 'committee:meeting',
subject, location=where)
event.add_source(url)
flags = {
"joint": "joint",
"house": "lower",
"senate": "upper"
}
chamber = "other"
for flag in flags:
if flag in ctty_name.lower():
chamber = flags[flag]
# Let's try and hack out some bill names.
trans = {
"SENATE": "S",
"HOUSE": "H",
"JOINT": "J",
"BILL": "B",
"RESOLUTION": "R",
}
_t_subject = subject.upper()
for t in trans:
regex = "%s(\s+)?" % t
_t_subject = re.sub(regex, trans[t], _t_subject)
print _t_subject
bills = re.findall("(S|H)(J)?(B|R|M)\s*(\d{4})", _t_subject)
for bill in bills:
name = bill[:3]
bid = bill[3]
bill_id = "%s %s" % ( ''.join(name), bid )
event.add_related_bill(bill_id,
description=subject,
type='consideration')
event.add_participant("host", ctty_name, 'committee',
chamber=chamber)
self.save_event(event)
```
#### File: openstates/mi/events.py
```python
from openstates.utils import LXMLMixin
import datetime as dt
import re
from billy.scrape.events import Event, EventScraper
import lxml.html
import pytz
mi_events = "http://legislature.mi.gov/doc.aspx?CommitteeMeetings"
class MIEventScraper(EventScraper, LXMLMixin):
jurisdiction = 'mi'
_tz = pytz.timezone('US/Eastern')
def scrape_event_page(self, url, chamber, session):
page = self.lxmlize(url)
trs = page.xpath("//table[@id='frg_committeemeeting_MeetingTable']/tr")
metainf = {}
for tr in trs:
tds = tr.xpath(".//td")
if len(tds) <= 1:
continue
key = tds[0].text_content().strip()
val = tds[1]
metainf[key] = {
"txt": val.text_content().strip(),
"obj": val
}
if metainf == {}:
return
# Wednesday, 5/16/2012 3:00 pm
datetime = "%s %s" % (
metainf['Date']['txt'],
metainf['Time']['txt'].replace(".","")
)
if "Cancelled" in datetime:
return
translate = {
"noon": " PM",
"a.m.": " AM",
"am": " AM", # This is due to a nasty line they had.
"a.m": "AM" #another weird one
}
for t in translate:
if t in datetime:
datetime = datetime.replace(t, translate[t])
datetime = re.sub("\s+", " ", datetime)
for text_to_remove in [
"or after committees are given leave",
"or later immediately after committees are given leave",
"or later after committees are given leave by the House to meet",
"**Please note time**"
]:
datetime = datetime.split(text_to_remove)[0].strip()
datetime = datetime.replace('p.m.', 'pm')
datetime = datetime.replace('Noon',"pm")
datetime = dt.datetime.strptime(datetime, "%A, %m/%d/%Y %I:%M %p")
where = metainf['Location']['txt']
title = metainf['Committee']['txt'] # XXX: Find a better title
if chamber == 'other':
chamber = 'joint'
event = Event(session, datetime, 'committee:meeting',
title, location=where)
event.add_source(url)
event.add_source(mi_events)
chair_name = metainf['Chair']['txt'].strip()
if chair_name:
event.add_participant('chair', chair_name, 'legislator', chamber=chamber)
else:
self.warning("No chair found for event '{}'".format(title))
event.add_participant('host', metainf['Committee']['txt'],
'committee',
chamber=chamber)
agenda = metainf['Agenda']['obj']
agendas = agenda.text_content().split("\r")
related_bills = agenda.xpath("//a[contains(@href, 'getObject')]")
for bill in related_bills:
description = agenda
for a in agendas:
if bill.text_content() in a:
description = a
event.add_related_bill(
bill.text_content(),
description=description,
type='consideration'
)
self.save_event(event)
def scrape(self, chamber, session):
page = self.lxmlize(mi_events)
xpaths = {
"lower": "//span[@id='frg_committeemeetings_HouseMeetingsList']",
"upper": "//span[@id='frg_committeemeetings_SenateMeetingsList']",
"other": "//span[@is='frg_committeemeetings_JointMeetingsList']"
}
span = page.xpath(xpaths[chamber])
if len(span) > 0:
span = span[0]
else:
return
events = span.xpath(".//a[contains(@href, 'committeemeeting')]")
for event in events:
url = event.attrib['href']
if 'doPostBack' in url:
continue
self.scrape_event_page(url, chamber, session)
```
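To make the date normalization above concrete, a small hypothetical walk-through (the input strings are invented to match the "# Wednesday, 5/16/2012 3:00 pm" comment in scrape_event_page):
```python
# Illustrative only: how a Date/Time pair becomes a datetime object.
import datetime as dt

date_txt = "Wednesday, 5/16/2012"
time_txt = "3:00 p.m."
combined = "%s %s" % (date_txt, time_txt.replace(".", ""))  # "Wednesday, 5/16/2012 3:00 pm"
when = dt.datetime.strptime(combined, "%A, %m/%d/%Y %I:%M %p")
assert when == dt.datetime(2012, 5, 16, 15, 0)
```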
#### File: openstates/mn/__init__.py
```python
import lxml.html
import gc
from .bills import MNBillScraper
from .legislators import MNLegislatorScraper
from .committees import MNCommitteeScraper
from .events import MNEventScraper
from .votes import MNVoteScraper
"""
Minnesota legislative data can be found at the Office of the Revisor
of Statutes:
https://www.revisor.mn.gov/
Votes:
There are not detailed vote data for Senate votes, simply yes and no counts.
Bill pages have vote counts and links to House details, so it makes more
sense to get vote data from the bill pages.
"""
metadata = dict(
name='Minnesota',
abbreviation='mn',
capitol_timezone='America/Chicago',
legislature_name='Minnesota State Legislature',
legislature_url='http://www.leg.state.mn.us/',
chambers = {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'House', 'title': 'Representative'},
},
terms=[
{
'name': '2009-2010',
'sessions': ['2009-2010', '2010 1st Special Session', '2010 2nd Special Session'],
'start_year': 2009,
'end_year': 2010,
'biennium': '86',
},
{
'name': '2011-2012',
'sessions': ['2011-2012', '2011s1', '2012s1'],
'start_year': 2011,
'end_year': 2012,
'biennium': '87',
},
{
'name': '2013-2014',
'sessions': ['2013-2014', '2013s1'],
'start_year': 2013,
'end_year': 2014,
'biennium': 88
},
{
'name': '2015-2016',
'sessions': ['2015s1', '2015-2016'],
'start_year': 2015,
'end_year': 2016,
'biennium': 89,
},
],
session_details={
'2009-2010': {
'site_id': '0862009', 'type':'primary',
'votes_url': 'http://www.house.leg.state.mn.us/votes/getVotesls86.asp',
'display_name': '2009-2010 Regular Session',
'_scraped_name': '86th Legislature, 2009-2010',
},
'2010 1st Special Session': {
'site_id': '1862010', 'type':'special',
'votes_url': 'http://www.house.leg.state.mn.us/votes/getVotesls8620101.asp',
'display_name': '2010, 1st Special Session',
'_scraped_name': '86th Legislature, 2010 1st Special Session',
},
'2010 2nd Special Session': {
'site_id': '2862010', 'type':'special',
'display_name': '2010, 2nd Special Session',
'_scraped_name': '86th Legislature, 2010 2nd Special Session',
},
'2011-2012': {
'site_id': '0872011', 'type':'primary',
'votes_url': 'http://www.house.leg.state.mn.us/votes/getVotesls87.asp',
'display_name': '2011-2012 Regular Session',
'_scraped_name': '87th Legislature, 2011-2012',
},
'2011s1': {
'site_id': '1872011', 'type': 'special',
'votes_url': 'http://www.house.leg.state.mn.us/votes/getVotesls8720111.asp',
'display_name': '2011, 1st Special Session',
'_scraped_name': '87th Legislature, 2011 1st Special Session',
},
'2012s1': {
'site_id': '1872012', 'type': 'special',
'votes_url': 'http://www.house.leg.state.mn.us/votes/getVotesls8720121.asp',
'display_name': '2012, 1st Special Session',
'_scraped_name': '87th Legislature, 2012 1st Special Session',
},
'2013-2014': {
'site_id': '0882013',
'type': "primary",
'display_name': '2013-2014 Regular Session',
'_scraped_name': '88th Legislature, 2013-2014',
'votes_url': 'http://www.house.leg.state.mn.us/votes/getVotesls88.asp',
},
'2013s1': {
'site_id': '1882013', 'type': 'special',
'votes_url': 'http://www.house.leg.state.mn.us/votes/getVotesls8820131.asp',
'display_name': '2013, 1st Special Session',
'_scraped_name': '88th Legislature, 2013 1st Special Session',
},
'2015-2016': {
'site_id': '0892015',
'type': "primary",
'display_name': '2015-2016 Regular Session',
'_scraped_name': '89th Legislature, 2015-2016',
'votes_url': 'http://www.house.leg.state.mn.us/votes/getVotesls89.asp',
},
'2015s1': {
'site_id': '1892015', 'type': 'special',
'votes_url': 'http://www.house.leg.state.mn.us/votes/getVotesls8920151.asp',
'display_name': '2015, 1st Special Session',
'_scraped_name': '89th Legislature, 2015 1st Special Session',
},
},
feature_flags=['subjects', 'events', 'influenceexplorer'],
_ignored_scraped_sessions=['85th Legislature, 2007-2008',
'85th Legislature, 2007 1st Special Session',
'84th Legislature, 2005-2006',
'84th Legislature, 2005 1st Special Session',
'83rd Legislature, 2003-2004',
'83rd Legislature, 2003 1st Special Session',
'82nd Legislature, 2001-2002',
'82nd Legislature, 2002 1st Special Session',
'82nd Legislature, 2001 1st Special Session',
'81st Legislature, 1999-2000',
'80th Legislature, 1997-1998',
'80th Legislature, 1998 1st Special Session',
'80th Legislature, 1997 3rd Special Session',
'80th Legislature, 1997 2nd Special Session',
'80th Legislature, 1997 1st Special Session',
'79th Legislature, 1995-1996',
'79th Legislature, 1995 1st Special Session',
'89th Legislature, 2015-2016',
]
)
def session_list():
from billy.scrape.utils import url_xpath
return url_xpath('https://www.revisor.mn.gov/revisor/pages/search_status/'
'status_search.php?body=House',
'//select[@name="session"]/option/text()')
def extract_text(doc, data):
doc = lxml.html.fromstring(data)
xtend = doc.xpath('//div[@class="xtend"]')[0].text_content()
for v in doc.xpath('.//var/text()'):
xtend = xtend.replace(v, '')
doc = None
gc.collect()
return xtend
```
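A small hypothetical example of what `extract_text` above does: the `<var>` elements in the revisor's HTML appear to carry line numbers, and their text is removed from the extracted bill text (the fragment below is invented).
```python
# Illustrative only -- a made-up fragment in the revisor's markup style.
data = '<div class="xtend">A bill for an act<var>1.1</var> relating to taxation.</div>'
assert extract_text(None, data) == 'A bill for an act relating to taxation.'
```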
#### File: openstates/ms/bills.py
```python
from .utils import chamber_name, parse_ftp_listing
from billy.scrape import NoDataForPeriod
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import VoteScraper, Vote
from billy.scrape.utils import convert_pdf
from datetime import datetime
import lxml.etree
import os
import re
def _combine_lines(lines):
newlines = []
lastline = '.'
for line in lines:
if lastline and lastline[-1] in '.,:' and not line.startswith('('):
newlines.append(line)
lastline = line
else:
lastline = newlines[-1] = newlines[-1] + ' ' + line
return newlines
class MSBillScraper(BillScraper):
jurisdiction = 'ms'
_action_types = (
('Died in Committee', 'committee:failed'),
('Enrolled Bill Signed', 'other'),
('Immediate Release', 'other'),
('Passed', 'bill:passed'),
('Adopted', 'bill:passed'),
('Amended', 'amendment:passed'),
('Failed', 'bill:failed'),
('Committee Substitute Adopted', 'bill:substituted'),
('Amendment Failed', 'amendment:failed'),
('Amendment Withdrawn', 'amendment:withdrawn'),
('Referred To', 'committee:referred'),
('Rereferred To', 'committee:referred'),
('Transmitted To', 'bill:introduced'),
('Approved by Governor', 'governor:signed'),
('Vetoed', 'governor:vetoed'),
('Partially Vetoed', 'governor:vetoed:line-item'),
('Title Suff Do', 'committee:passed'),
('Read the Third Time', 'bill:reading:3'),
)
def scrape(self, chamber, session):
self.save_errors=False
if int(session[0:4]) < 2008:
raise NoDataForPeriod(session)
self.scrape_bills(chamber, session)
def scrape_bills(self, chamber_to_scrape, session):
url = 'http://billstatus.ls.state.ms.us/%s/pdf/all_measures/allmsrs.xml' % session
bill_dir_page = self.get(url)
root = lxml.etree.fromstring(bill_dir_page.content)
for mr in root.xpath('//LASTACTION/MSRGROUP'):
bill_id = mr.xpath('string(MEASURE)').replace(" ", "")
if bill_id[0] == "S":
chamber = "upper"
else:
chamber = "lower"
bill_type = {'B':'bill', 'C': 'concurrent resolution',
'R': 'resolution', 'N': 'nomination'}[bill_id[1]]
# just skip past bills that are of the wrong chamber
if chamber != chamber_to_scrape:
continue
link = mr.xpath('string(ACTIONLINK)').replace("..", "")
main_doc = mr.xpath('string(MEASURELINK)').replace("../../../", "")
main_doc_url = 'http://billstatus.ls.state.ms.us/%s' % main_doc
bill_details_url = 'http://billstatus.ls.state.ms.us/%s/pdf/%s' % (session, link)
details_page = self.get(bill_details_url)
page = details_page.content.replace(chr(11), "")
# Some pages have the (invalid) byte 11 sitting around. Just drop
# them out. Might as well.
details_root = lxml.etree.fromstring(page)
title = details_root.xpath('string(//SHORTTITLE)')
longtitle = details_root.xpath('string(//LONGTITLE)')
bill = Bill(session, chamber, bill_id, title,
type=bill_type, summary=longtitle)
#sponsors
main_sponsor = details_root.xpath('string(//P_NAME)').split()
if main_sponsor:
main_sponsor = main_sponsor[0]
main_sponsor_link = details_root.xpath('string(//P_LINK)').replace(" ", "_")
main_sponsor_url = 'http://billstatus.ls.state.ms.us/%s/pdf/House_authors/%s.xml' % (session, main_sponsor_link)
type = "primary"
bill.add_sponsor(type, main_sponsor, main_sponsor_url = main_sponsor_url)
for author in details_root.xpath('//AUTHORS/ADDITIONAL'):
leg = author.xpath('string(CO_NAME)').replace(" ", "_")
if leg:
leg_url = 'http://billstatus.ls.state.ms.us/%s/pdf/House_authors/%s.xml' % (session, leg)
type = "cosponsor"
bill.add_sponsor(type, leg, leg_url=leg_url)
#Versions
curr_version = details_root.xpath('string(//CURRENT_OTHER)').replace("../../../../", "")
if curr_version != "":
curr_version_url = "http://billstatus.ls.state.ms.us/" \
+ curr_version
bill.add_version("Current version", curr_version_url,
on_duplicate='use_new',
mimetype='text/html')
intro_version = details_root.xpath('string(//INTRO_OTHER)').replace("../../../../", "")
if intro_version != "":
intro_version_url = "http://billstatus.ls.state.ms.us/"\
+ intro_version
bill.add_version("As Introduced", intro_version_url,
on_duplicate='use_new',
mimetype='text/html')
comm_version = details_root.xpath('string(//CMTESUB_OTHER)').replace("../../../../", "")
if comm_version.find("documents") != -1:
comm_version_url = "http://billstatus.ls.state.ms.us/" + comm_version
bill.add_version("Committee Substitute", comm_version_url,
on_duplicate='use_new',
mimetype='text/html')
passed_version = details_root.xpath('string(//PASSED_OTHER)').replace("../../../../", "")
if passed_version.find("documents") != -1:
passed_version_url = "http://billstatus.ls.state.ms.us/" + passed_version
title = "As Passed the " + chamber
bill.add_version(title, passed_version_url,
on_duplicate='use_new',
mimetype='text/html')
asg_version = details_root.xpath('string(//ASG_OTHER)').replace("../../../../", "")
if asg_version.find("documents") != -1:
asg_version_url = "http://billstatus.ls.state.ms.us/" + asg_version
bill.add_version("Approved by the Governor", asg_version_url,
on_duplicate='use_new',
mimetype='text/html')
# avoid duplicate votes
seen_votes = set()
#Actions
for action in details_root.xpath('//HISTORY/ACTION'):
action_num = action.xpath('string(ACT_NUMBER)').strip()
action_num = int(action_num)
act_vote = action.xpath('string(ACT_VOTE)').replace("../../../..", "")
action_desc = action.xpath('string(ACT_DESC)')
date, action_desc = action_desc.split(" ", 1)
date = date + "/" + session[0:4]
date = datetime.strptime(date, "%m/%d/%Y")
if action_desc.startswith("(H)"):
actor = "lower"
action = action_desc[4:]
elif action_desc.startswith("(S)"):
actor = "upper"
action = action_desc[4:]
else:
actor = "executive"
action = action_desc
if action.find("Veto") != -1:
version_path = details_root.xpath("string(//VETO_OTHER)")
version_path = version_path.replace("../../../../", "")
version_url = "http://billstatus.ls.state.ms.us/" + version_path
bill.add_document("Veto", version_url)
atype = 'other'
for prefix, prefix_type in self._action_types:
if action.startswith(prefix):
atype = prefix_type
break
bill.add_action(actor, action, date, type=atype,
action_num=action_num)
# use committee names as scraped subjects
subjects = details_root.xpath('//H_NAME/text()')
subjects += details_root.xpath('//S_NAME/text()')
bill['subjects'] = subjects
if act_vote:
vote_url = 'http://billstatus.ls.state.ms.us%s' % act_vote
if vote_url not in seen_votes:
seen_votes.add(vote_url)
vote = self.scrape_votes(vote_url, action,
date, actor)
vote.add_source(vote_url)
bill.add_vote(vote)
bill.add_source(bill_details_url)
self.save_bill(bill)
_vote_mapping = {
'Passed': ('Passage', True),
'Adopted': ('Passage', True),
'Failed': ('Passage', False),
'Passed As Amended': ('Passage as Amended', True),
'Adopted As Amended': ('Passage as Amended', True),
'Appointment Confirmed': ('Appointment Confirmation', True),
'Committee Substitute Adopted': ('Adopt Committee Substitute', True),
'Committee Substitute Failed': ('Adopt Committee Substitute', False),
'Conference Report Adopted': ('Adopt Conference Report', True),
'Conference Report Failed': ('Adopt Conference Report', False),
'Motion to Reconsider Tabled': ('Table Motion to Reconsider', True),
'Motion to Recnsdr Tabled Lost': ('Table Motion to Reconsider', False),
'Veto Overridden': ('Override Veto', True),
'Veto Sustained': ('Override Veto', False),
'Concurred in Amend From House': ('Concurrence in Amendment From House', True),
'Concurred in Amend From Senate': ('Concurrence in Amendment From Senate', True),
'Decline to Concur/Invite Conf': ('Decline to Concur', True),
'Decline Concur/Inv Conf Lost': ('Decline to Concur', False),
'Failed to Suspend Rules': ('Motion to Suspend Rules', False),
'Motion to Recommit Lost': ('Motion to Recommit', True),
'Reconsidered': ('Reconsideration', True),
'Motion to Concur Failed': ('Motion to Concur', False),
'Recommitted to Committee': ('Recommit to Committee', True),
}
def scrape_votes(self, url, motion, date, chamber):
vote_pdf, resp = self.urlretrieve(url)
text = convert_pdf(vote_pdf, 'text')
os.remove(vote_pdf)
# this way we get a key error on a missing vote type
motion, passed = self._vote_mapping[motion]
yes_votes = []
no_votes = []
other_votes = []
# point at array to add names to
cur_array = None
precursors = (
('Yeas--', yes_votes),
('Nays--', no_votes),
('Absent or those not voting--', other_votes),
('Absent and those not voting--', other_votes),
('Not Voting--', other_votes),
('Voting Present--', other_votes),
('Present--', other_votes),
('DISCLAIMER', None),
)
# split lines on newline, recombine lines that don't end in punctuation
lines = _combine_lines(text.split('\n'))
for line in lines:
# check if the line starts with a precursor, switch to that array
for pc, arr in precursors:
if pc in line:
cur_array = arr
line = line.replace(pc, '')
# split names
for name in line.split(','):
name = name.strip()
# move on if that's all there was
if not name:
continue
# None or a Total indicate the end of a section
if 'None.' in name:
cur_array = None
match = re.match(r'(.+?)\. Total--.*', name)
if match:
cur_array.append(match.groups()[0])
cur_array = None
# append name if it looks ok
junk_in_name = False
for junk in ('on final passage', 'Necessary', 'who would have',
'being a tie', 'therefore', 'Vacancies', 'a pair',
'Total-', 'ATTORNEY', 'on final passage',
'SPEAKER', 'BOARD', 'TREASURER', 'GOVERNOR',
'ARCHIVES', 'SECRETARY'):
if junk in name:
junk_in_name = True
break
if cur_array is not None and not junk_in_name:
# strip trailing .
if name[-1] == '.':
name = name[:-1]
cur_array.append(name)
# return vote object
yes_count = len(yes_votes)
no_count = len(no_votes)
other_count = len(other_votes)
vote = Vote(chamber, date, motion, passed, yes_count, no_count,
other_count)
vote['yes_votes'] = yes_votes
vote['no_votes'] = no_votes
vote['other_votes'] = other_votes
return vote
```
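For clarity, a hypothetical illustration of `_combine_lines` above: a new entry is started only when the previous line ended in '.', ',' or ':' and the next line does not open a parenthetical; otherwise the line is glued onto the previous one, which repairs vote rosters wrapped across lines in the converted PDF.
```python
# Illustrative only -- the input lines are invented.
lines = ['Yeas--Smith, Jones, Brown', 'and Davis. Total--4.', 'Nays--None.']
expected = ['Yeas--Smith, Jones, Brown and Davis. Total--4.', 'Nays--None.']
assert _combine_lines(lines) == expected
```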
#### File: openstates/ms/utils.py
```python
import re
def clean_committee_name(comm_name):
comm_name = comm_name.strip()
comm_name = re.sub(' ?[-,] (Co|Vice)?[- ]?Chair$', '', comm_name)
comm_name = re.sub('Appropriations - S/C:', 'Appropriations-S/C on',
comm_name)
if comm_name == 'Appropriations-S/C Stimulus':
comm_name = 'Appropriations-S/C on Stimulus'
return comm_name
def parse_ftp_listing(text):
lines = text.strip().split('\r\n')
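# drop the first three whitespace-separated fields (typically date, time and size)
# and keep the rest of the line, i.e. the file name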
return (' '.join(line.split()[3:]) for line in lines)
def chamber_name(chamber):
if chamber == 'upper':
return 'senate'
else:
return 'house'
```
#### File: openstates/mt/__init__.py
```python
from billy.utils.fulltext import pdfdata_to_text, text_after_line_numbers
from .bills import MTBillScraper
from .legislators import MTLegislatorScraper
from .committees import MTCommitteeScraper
metadata = {
'name': 'Montana',
'abbreviation': 'mt',
'legislature_name': 'Montana Legislature',
'legislature_url': 'http://leg.mt.gov/',
'capitol_timezone': 'America/Denver',
'chambers': {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'House', 'title': 'Representative'},
},
'terms': [
{'name': '2011-2012',
'sessions': ['2011'],
'session_number': '62nd',
'start_year': 2011, 'end_year': 2012},
{'name': '2013-2014',
'sessions': ['2013'],
'session_number': '63rd',
'start_year': 2013, 'end_year': 2014},
{'name': '2015-2016',
'sessions': ['2015'],
'session_number': '64th',
'start_year': 2015, 'end_year': 2016},
],
'session_details': {
'2011': {'display_name': '2011 Regular Session',
'years': [2011, 2012],
'_scraped_name': '2011 Regular Session',
},
'2013': {'display_name': '2013 Regular Session',
'years': [2013, 2014],
'_scraped_name': '2013 Regular Session',
},
'2015': {'display_name': '2015 Regular Session',
'years': [2015],
'_scraped_name': '2015 Regular Session',
},
},
'feature_flags': ['influenceexplorer'],
'_ignored_scraped_sessions': [
'2009 Regular Session',
'2007 Special Session',
'2007 Regular Session',
'2005 Special Session',
'2005 Regular Session',
'2003 Regular Session',
'2002 Special Session',
'2001 Regular Session',
'2000 Special Session',
'1999 Regular Session',
'1999 Special Session']
}
def session_list():
from billy.scrape.utils import url_xpath
return url_xpath('http://leg.mt.gov/css/bills/Default.asp',
"//td[@id='cont']/ul/li/a/text()")
def extract_text(doc, data):
return text_after_line_numbers(pdfdata_to_text(data))
```
#### File: openstates/nc/legislators.py
```python
from billy.scrape.legislators import LegislatorScraper, Legislator
import lxml.html
party_map = {'Dem': 'Democratic',
'Rep': 'Republican',
'Una': 'Unaffiliated',
'D': 'Democratic',
'R': 'Republican',
'U': 'Unaffiliated'}
def get_table_item(doc, name):
# get span w/ item
span = doc.xpath('//span[text()="{0}"]'.format(name))[0]
# get neighboring td's span
dataspan = span.getparent().getnext().getchildren()[0]
if dataspan.text:
return (dataspan.text + '\n' +
'\n'.join([x.tail for x in dataspan.getchildren()])).strip()
else:
return None
class NCLegislatorScraper(LegislatorScraper):
jurisdiction = 'nc'
def scrape(self, term, chambers):
for chamber in chambers:
self.scrape_chamber(chamber, term)
def scrape_chamber(self, chamber, term):
url = "http://www.ncga.state.nc.us/gascripts/members/"\
"memberListNoPic.pl?sChamber="
if chamber == 'lower':
url += 'House'
else:
url += 'Senate'
data = self.get(url).text
doc = lxml.html.fromstring(data)
doc.make_links_absolute('http://www.ncga.state.nc.us')
rows = doc.xpath('//div[@id="mainBody"]/table/tr')
for row in rows[1:]:
party, district, full_name, counties = row.getchildren()
party = party.text_content().strip("()")
party = party_map[party]
district = district.text_content().replace("District","").strip()
notice = full_name.xpath('span')
if notice:
notice = notice[0].text_content()
# skip resigned legislators
if 'Resigned' in notice or 'Deceased' in notice:
continue
else:
notice = None
link = full_name.xpath('a/@href')[0]
full_name = full_name.xpath('a')[0].text_content()
full_name = full_name.replace(u'\u00a0', ' ')
# scrape legislator page details
lhtml = self.get(link).text
ldoc = lxml.html.fromstring(lhtml)
ldoc.make_links_absolute('http://www.ncga.state.nc.us')
photo_url = ldoc.xpath('//a[contains(@href, "pictures")]/@href')[0]
phone = get_table_item(ldoc, 'Phone:') or None
address = get_table_item(ldoc, 'Address:') or None
email, = ldoc.xpath('//a[starts-with(@href, "mailto:")]')
capitol_email = email.text
capitol_phone = email.xpath('ancestor::tr[1]/preceding-sibling::tr[1]/td/span')[0].text
capitol_address = email.xpath('ancestor::tr[1]/preceding-sibling::tr[2]/td/text()')
capitol_address = [x.strip() for x in capitol_address]
capitol_address = '\n'.join(capitol_address)
capitol_phone = capitol_phone.strip()
# save legislator
legislator = Legislator(term, chamber, district, full_name,
photo_url=photo_url, party=party,
url=link, notice=notice)
legislator.add_source(link)
legislator.add_office('district', 'District Office',
address=address, phone=phone)
legislator.add_office('capitol', 'Capitol Office',
address=capitol_address, phone=capitol_phone, email=capitol_email)
self.save_legislator(legislator)
```
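A small, self-contained sketch of how `get_table_item` walks from a label span to its value cell. The HTML fragment is synthetic and only assumes the span-inside-`td` layout the function expects.

```python
import lxml.html
from openstates.nc.legislators import get_table_item  # defined in the file above

# Synthetic fragment mimicking the label/value rows on the member pages.
html = ('<table><tr>'
        '<td><span>Phone:</span></td>'
        '<td><span>(919) 555-0100<br/>(919) 555-0101</span></td>'
        '</tr></table>')
doc = lxml.html.fromstring(html)

# <br/> breaks inside the value span become newlines.
assert get_table_item(doc, 'Phone:') == '(919) 555-0100\n(919) 555-0101'
```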
#### File: openstates/nd/bills.py
```python
from collections import defaultdict
from urlparse import urljoin
from datetime import datetime
import lxml.html
from billy.scrape import NoDataForPeriod, ScrapeError
from billy.scrape.bills import Bill, BillScraper
from billy.scrape.votes import Vote
from .actions import NDCategorizer
import re
base_url = "http://www.legis.nd.gov/assembly/%s-%s/subject-index/major-topic.html"
class NDBillScraper(BillScraper):
"""
Scrapes available legislative information from the website of the North
Dakota legislature and stores it in the openstates backend.
"""
jurisdiction = 'nd'
categorizer = NDCategorizer()
def scrape_actions(self, session, subject, href, bid):
page = self.get(href).text
page = lxml.html.fromstring(page)
page.make_links_absolute(href)
table = page.xpath("//table[contains(@summary, 'Number Breakdown')]")
# some pages say "Measure Number Breakdown", others "Bill..."
if len(table) > 1: # Pre-2013 pages.
ttable, table = table[0], table[1]
ttrows = ttable.xpath(".//tr")
descr = ttrows[-1]
else:
table = table[0]
ttrows = page.xpath("//div[@id='application']/p")
descr = ttrows[-2]
title = re.sub("\s+", " ", descr.text_content()).strip()
ttrows = ttrows[:-1]
chamber = {
"H": "lower",
"S": "upper"
}[bid[0]]
type_ = bid[1:3]
bill_type = "bill"
if type_.startswith("B"):
bill_type = "bill"
if type_.startswith("R"):
bill_type = "resolution"
if type_ == "CR":
bill_type = "concurrent resolution"
bill = Bill(session,
chamber,
bid,
title,
subject=subject,
type=bill_type)
bill.add_source(href)
for row in ttrows:
if isinstance(row, lxml.html.HtmlComment):
continue # ignore HTML comments, no text_content()
sponsors = row.text_content().strip()
sinf = re.match(
"(?i)introduced by( (rep\.|sen\.))? (?P<sponsors>.*)",
sponsors
)
if sinf:
sponsors = sinf.groupdict()
for sponsor in [
x.strip() for x in sponsors['sponsors'].split(",")
]:
bill.add_sponsor('primary',
sponsor)
dt = None
oldchamber = 'other'
for row in table.xpath(".//tr"):
if row.text_content().strip() == '':
continue
if "Meeting Description" in [
x.strip() for x in row.xpath(".//th/text()")
]:
continue
row = row.xpath("./*")
row = [x.text_content().strip() for x in row]
if len(row) > 3:
row = row[:3]
date, chamber, action = row
try:
chamber = {
"House": "lower",
"Senate": "upper"
}[chamber]
oldchamber = chamber
except KeyError:
chamber = oldchamber
if date != '':
dt = datetime.strptime("%s %s" % (date, self.year), "%m/%d %Y")
kwargs = self.categorizer.categorize(action)
bill.add_action(chamber, action, dt, **kwargs)
version_url = page.xpath("//a[contains(text(), 'Versions')]")
if len(version_url) == 1:
href = version_url[0].attrib['href']
bill = self.scrape_versions(bill, href)
self.save_bill(bill)
def scrape_versions(self, bill, href):
page = self.get(href).text
page = lxml.html.fromstring(page)
page.make_links_absolute(href)
versions = page.xpath("//a[contains(@href, '/documents/')]")
for version in versions:
name, href = version.text, version.attrib['href']
bill.add_version(name, href, mimetype='application/pdf')
return bill
def scrape_subject(self, session, href, subject):
page = self.get(href).text
page = lxml.html.fromstring(page)
page.make_links_absolute(href)
bills = page.xpath("//a[contains(@href, 'bill-actions')]")
for bill in bills:
bt = bill.text_content().strip().split()
typ, idd = bt[0], bt[1]
bid = "%s %s" % (typ, idd)
self.scrape_actions(session, subject, bill.attrib['href'], bid)
def scrape(self, term, chambers):
# figuring out starting year from metadata
for t in self.metadata['terms']:
if t['name'] == term:
start_year = t['start_year']
self.year = start_year
break
url = base_url % (term, start_year)
page = self.get(url).text
page = lxml.html.fromstring(page)
page.make_links_absolute(url)
subjects = page.xpath(
"//div[@id='application']"
"//a[not(contains(@href, 'major-topic'))]"
)
for subject in subjects:
subject_name = subject.xpath("text()")
if subject_name == [] \
or subject_name[0].strip() == '' \
or 'href' not in subject.attrib:
continue
href = subject.attrib['href']
self.scrape_subject(term, href, subject.text.strip())
```
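The chamber and bill-type classification inside `scrape_actions` depends only on the bill id prefix; below is a standalone restatement of that logic with a few illustrative ids.

```python
def classify_nd_bill_id(bid):
    # Same prefix logic as scrape_actions() above.
    chamber = {"H": "lower", "S": "upper"}[bid[0]]
    type_ = bid[1:3]
    bill_type = "bill"
    if type_.startswith("R"):
        bill_type = "resolution"
    if type_ == "CR":
        bill_type = "concurrent resolution"
    return chamber, bill_type

assert classify_nd_bill_id("HB 1001") == ("lower", "bill")
assert classify_nd_bill_id("SCR 4002") == ("upper", "concurrent resolution")
assert classify_nd_bill_id("HR 3003") == ("lower", "resolution")
```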
#### File: openstates/nh/__init__.py
```python
import lxml.html
from .bills import NHBillScraper
from .legislators import NHLegislatorScraper
metadata = {
'abbreviation': 'nh',
'name': 'New Hampshire',
'capitol_timezone': 'America/New_York',
'legislature_name': 'New Hampshire General Court',
'legislature_url': 'http://www.gencourt.state.nh.us/',
'chambers': {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'House', 'title': 'Representative'},
},
'terms': [
{'name': '2011-2012', 'sessions': ['2011', '2012'],
'start_year': 2011, 'end_year': 2012},
{'name': '2013-2014', 'sessions': ['2013', '2014'],
'start_year': 2013, 'end_year': 2014},
{'name': '2015-2016', 'sessions': ['2015','2016'],
'start_year': 2015, 'end_year': 2016}
],
'session_details': {
'2011': {'display_name': '2011 Regular Session',
'zip_url': 'http://gencourt.state.nh.us/downloads/2011%20Session%20Bill%20Status%20Tables.zip',
'_scraped_name': '2011 Session',
},
'2012': {'display_name': '2012 Regular Session',
'zip_url': 'http://gencourt.state.nh.us/downloads/2012%20Session%20Bill%20Status%20Tables.zip',
'_scraped_name': '2012 Session',
},
'2013': {'display_name': '2013 Regular Session',
'zip_url': 'http://gencourt.state.nh.us/downloads/2013%20Session%20Bill%20Status%20Tables.zip',
# Their dump filename changed, probably just a hiccup.
'_scraped_name': '2013',
# '_scraped_name': '2013 Session',
},
'2014': {'display_name': '2014 Regular Session',
'zip_url': 'http://gencourt.state.nh.us/downloads/2014%20Session%20Bill%20Status%20Tables.zip',
'_scraped_name': '2014 Session',
},
'2015': {'display_name': '2015 Regular Session',
'zip_url': 'http://gencourt.state.nh.us/downloads/2015%20Session%20Bill%20Status%20Tables.zip',
'_scraped_name': '2015 Session',
},
'2016': {'display_name': '2016 Regular Session',
'zip_url': 'http://gencourt.state.nh.us/downloads/2016%20Session%20Bill%20Status%20Tables.zip',
'_scraped_name': '2016 Session',
},
},
'feature_flags': ['influenceexplorer'],
'_ignored_scraped_sessions': ['2013 Session'],
}
def session_list():
from billy.scrape.utils import url_xpath
zips = url_xpath('http://gencourt.state.nh.us/downloads/',
'//a[contains(@href, "Bill%20Status%20Tables")]/text()')
return [zip.replace(' Bill Status Tables.zip', '') for zip in zips]
def extract_text(doc, data):
doc = lxml.html.fromstring(data)
return doc.xpath('//html')[0].text_content()
```
#### File: openstates/nj/__init__.py
```python
import datetime
import lxml.html
from billy.scrape.utils import url_xpath
from billy.utils.fulltext import text_after_line_numbers
from .bills import NJBillScraper
from .legislators import NJLegislatorScraper
from .committees import NJCommitteeScraper
from .events import NJEventScraper
# don't retry; if a file isn't on the FTP site, just let it go
settings = dict(SCRAPELIB_RETRY_ATTEMPTS=0)
metadata = {
'name': 'New Jersey',
'abbreviation': 'nj',
'capitol_timezone': 'America/New_York',
'legislature_name': 'New Jersey Legislature',
'legislature_url': 'http://www.njleg.state.nj.us/',
'chambers': {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'Assembly', 'title': 'Assembly Member'},
},
'terms': [
#{
# 'name': '2000-2001',
# 'start_year': 2000,
# 'end_year': 2001,
# 'sessions': ['209'],
#},
#{
# 'name': '2002-2003',
# 'start_year': 2002,
# 'end_year': 2003,
# 'sessions': ['210'],
#},
#{
# 'name': '2004-2005',
# 'start_year': 2004,
# 'end_year': 2005,
# 'sessions': ['211'],
#},
#{
# 'name': '2006-2007',
# 'start_year': 2006,
# 'end_year': 2007,
# 'sessions': ['212'],
#},
{
'name': '2008-2009',
'start_year': 2008,
'end_year': 2009,
'sessions': ['213'],
},
{
'name': '2010-2011',
'start_year': 2010,
'end_year': 2011,
'sessions': ['214'],
},
{
'name': '2012-2013',
'start_year': 2012,
'end_year': 2013,
'sessions': ['215'],
},
{
'name': '2014-2015',
'start_year': 2014,
'end_year': 2015,
'sessions': ['216'],
},
{
'name': '2016-2017',
'start_year': 2016,
'end_year': 2017,
'sessions': ['217'],
},
],
'session_details': {
'213': {
'start_date': datetime.date(2008, 1, 12),
'display_name': '2008-2009 Regular Session',
'_scraped_name': '2008-2009',
},
'214': {
'start_date': datetime.date(2010, 1, 12),
'display_name': '2010-2011 Regular Session',
'_scraped_name': '2010-2011',
},
'215': {
'start_date': datetime.date(2012, 1, 10),
'display_name': '2012-2013 Regular Session',
'_scraped_name': '2012-2013',
},
'216': {
'start_date': datetime.date(2014, 1, 15),
'display_name': '2014-2015 Regular Session',
'_scraped_name': '2014-2015',
},
'217': {
'start_date': datetime.date(2016, 1, 12),
'display_name': '2016-2017 Regular Session',
'_scraped_name': '2016-2017',
},
},
'feature_flags': ['subjects', 'events', 'influenceexplorer'],
'_ignored_scraped_sessions': [
'2006-2007',
'2004-2005',
'2002-2003',
'2000-2001',
'1998-1999',
'1996-1997',
],
}
def session_list():
return url_xpath('http://www.njleg.state.nj.us/',
'//select[@name="DBNAME"]/option/text()')
def extract_text(doc, data):
doc = lxml.html.fromstring(data)
text = doc.xpath('//div[@class="Section3"]')[0].text_content()
return text
```
#### File: openstates/nm/legislators.py
```python
import re
import lxml.html
from billy.scrape.legislators import LegislatorScraper, Legislator
from openstates.utils import LXMLMixin
class NMLegislatorScraper(LegislatorScraper, LXMLMixin):
jurisdiction = 'nm'
def validate_phone_number(self, phone_number):
is_valid = False
# Phone format validation regex.
phone_pattern = re.compile(r'\(?\d{3}\)?\s?-?\d{3}-?\d{4}')
phone_match = phone_pattern.match(phone_number)
if phone_match is not None:
is_valid = True
return is_valid
def scrape(self, chamber, term):
self.validate_term(term, latest_only=True)
self.logger.info('Scraping {} {} chamber.'.format(
self.jurisdiction.upper(),
chamber))
# Xpath query string format for legislative chamber.
base_xpath = '//table[@id="ctl00_mainCopy_gridView{}Districts"]'\
'//a[contains(@href, "SPONCODE")]/@href'
if chamber == 'lower':
chamber_xpath = base_xpath.format('House')
elif chamber == 'upper':
chamber_xpath = base_xpath.format('Senate')
url = 'http://www.nmlegis.gov/lcs/districts.aspx'
page = self.lxmlize(url)
legislator_urls = self.get_nodes(
page,
chamber_xpath)
for legislator_url in legislator_urls:
# Indicators used for empty seats.
vacancy_strings = ('SNULL', 'SPONCODE=HVACA')
if any(x in legislator_url for x in vacancy_strings):
self.logger.info('Skipping vacant seat.')
continue
self.scrape_legislator(chamber, term, legislator_url)
def scrape_legislator(self, chamber, term, url):
# Initialize default values for legislator attributes.
full_name = None
party = None
photo_url = None
email = None
capitol_address = None
capitol_phone = None
district_address = None
district_phone = None
# Xpath query string format for legislator information nodes.
xpath = './/span[@id="ctl00_mainCopy_formViewLegislator_{}"]'
page = self.lxmlize(url)
info_node = self.get_node(
page,
'//table[@id="ctl00_mainCopy_formViewLegislator"]')
if info_node is None:
raise ValueError('Could not locate legislator data.')
district_node = self.get_node(
info_node,
'.//a[@id="ctl00_mainCopy_formViewLegislator_linkDistrict"]')
if district_node is not None:
district = district_node.text.strip()
header_node = self.get_node(
info_node,
xpath.format('lblHeader'))
if header_node is not None:
full_name, party = header_node.text.strip().rsplit('-', 1)
full_name = full_name.replace('Representative', '').replace(
'Senator', '').strip()
if '(D)' in party:
party = 'Democratic'
elif '(R)' in party:
party = 'Republican'
elif '(DTS)' in party:
# decline to state = independent
party = 'Independent'
else:
raise AssertionError('Unknown party {} for {}'.format(
party,
full_name))
photo_url = self.get_node(
info_node,
'.//img[@id="ctl00_mainCopy_formViewLegislator_imgLegislator"]/@src')
email_node = self.get_node(
info_node,
'.//a[@id="ctl00_mainCopy_formViewLegislator_linkEmail"]')
if email_node.text is not None:
email = email_node.text.strip()
capitol_address_node = self.get_node(
info_node,
xpath.format('lblCapitolRoom'))
if capitol_address_node is not None:
capitol_address_text = capitol_address_node.text
if capitol_address_text is not None:
capitol_address = 'Room {} State Capitol\nSanta Fe, NM 87501'\
.format(capitol_address_text.strip())
capitol_phone_node = self.get_node(
info_node,
xpath.format('lblCapitolPhone'))
if capitol_phone_node is not None:
capitol_phone_text = capitol_phone_node.text
if capitol_phone_text is not None:
capitol_phone_text = capitol_phone_text.strip()
if self.validate_phone_number(capitol_phone_text):
capitol_phone = capitol_phone_text
district_address_node = self.get_node(
info_node,
xpath.format('lblAddress')).xpath('text()')
if district_address_node:
district_address = '\n'.join(district_address_node)
district_phone_node = self.get_node(
info_node,
xpath.format('lblHomePhone')) \
or self.get_node(info_node, xpath.format('lblOfficePhone'))
if district_phone_node is not None:
district_phone_text = district_phone_node.text
if district_phone_text is not None:
district_phone_text = district_phone_text.strip()
if self.validate_phone_number(district_phone_text):
district_phone = district_phone_text
legislator = Legislator(
term=term,
chamber=chamber,
district=district,
full_name=full_name,
party=party,
photo_url=photo_url)
if email:
legislator['email'] = email
legislator.add_source(url)
legislator.add_office(
'district',
'District Office',
address=district_address,
phone=district_phone)
legislator.add_office(
'capitol',
'Capitol Office',
address=capitol_address,
phone=capitol_phone,
email=email)
committees_nodes = self.get_nodes(
info_node,
'//table[@id="ctl00_mainCopy_gridViewCommittees"]/tr')
# First row node should contain header - skip.
for committee_node in committees_nodes[1:]:
role, committee, note = [x.text_content() for x in committee_node\
.xpath('./td')]
committee = committee.title()
if 'Interim' in note:
role = 'interim ' + role.lower()
else:
role = role.lower()
if ' Committee' in committee:
committee = committee.replace(" Committee", '')
if ' Subcommittee' in committee:
committee = committee.replace(' Subcommittee', '')
legislator.add_role(
'committee member',
term,
committee=committee,
position=role,
chamber=chamber)
self.save_legislator(legislator)
```
#### File: openstates/nv/bills.py
```python
import re
from datetime import datetime
from collections import defaultdict
import lxml.html
import scrapelib
from .utils import chamber_name, parse_ftp_listing
from openstates.utils import LXMLMixin
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import VoteScraper, Vote
from billy.scrape import NoDataForPeriod
class NVBillScraper(BillScraper, LXMLMixin):
jurisdiction = 'nv'
_classifiers = (
('Approved by the Governor', 'governor:signed'),
('Bill read. Veto not sustained', 'bill:veto_override:passed'),
('Bill read. Veto sustained', 'bill:veto_override:failed'),
('Enrolled and delivered to Governor', 'governor:received'),
('From committee: .+? adopted', 'committee:passed'),
('From committee: .+? pass', 'committee:passed'),
('Prefiled. Referred', ['bill:introduced', 'committee:referred']),
('Read first time. Referred', ['bill:reading:1', 'committee:referred']),
('Read first time.', 'bill:reading:1'),
('Read second time.', 'bill:reading:2'),
('Read third time. Lost', ['bill:failed', 'bill:reading:3']),
('Read third time. Passed', ['bill:passed', 'bill:reading:3']),
('Read third time.', 'bill:reading:3'),
('Rereferred', 'committee:referred'),
('Resolution read and adopted', 'bill:passed'),
('Vetoed by the Governor', 'governor:vetoed')
)
def scrape(self, chamber, session):
if 'Special' in session:
year = session[0:4]
elif int(session) >= 71:
year = ((int(session) - 71) * 2) + 2001
else:
raise NoDataForPeriod(session)
sessionsuffix = 'th'
if str(session)[-1] == '1':
sessionsuffix = 'st'
elif str(session)[-1] == '2':
sessionsuffix = 'nd'
elif str(session)[-1] == '3':
sessionsuffix = 'rd'
self.subject_mapping = defaultdict(list)
if 'Special' in session:
insert = session[-2:] + sessionsuffix + str(year) + "Special"
else:
insert = str(session) + sessionsuffix + str(year)
self.scrape_subjects(insert, session, year)
if chamber == 'upper':
self.scrape_senate_bills(chamber, insert, session, year)
elif chamber == 'lower':
self.scrape_assem_bills(chamber, insert, session, year)
def scrape_subjects(self, insert, session, year):
url = 'http://www.leg.state.nv.us/Session/%s/Reports/TablesAndIndex/%s_%s-index.html' % (insert, year, session)
html = self.get(url).text
doc = lxml.html.fromstring(html)
# first, a bit about this page:
# Level0 are the bolded titles
# Level1,2,3,4 are detailed titles, contain links to bills
# all links under a Level0 we can consider categorized by it
# there are random newlines *everywhere* that should get replaced
subject = None
for p in doc.xpath('//p'):
if p.get('class') == 'Level0':
subject = p.text_content().replace('\r\n', ' ')
else:
if subject:
for a in p.xpath('.//a'):
bill_id = (a.text.replace('\r\n', '') if a.text
else None)
self.subject_mapping[bill_id].append(subject)
def scrape_senate_bills(self, chamber, insert, session, year):
doc_type = {2: 'bill', 4: 'resolution', 7: 'concurrent resolution',
8: 'joint resolution'}
for docnum, bill_type in doc_type.iteritems():
parentpage_url = 'http://www.leg.state.nv.us/Session/%s/Reports/HistListBills.cfm?DoctypeID=%s' % (insert, docnum)
links = self.scrape_links(parentpage_url)
count = 0
for link in links:
count = count + 1
page_path = 'http://www.leg.state.nv.us/Session/%s/Reports/%s' % (insert, link)
page = self.get(page_path).text
page = page.replace(u"\xa0", " ")
root = lxml.html.fromstring(page)
bill_id = root.xpath('string(/html/body/div[@id="content"]/table[1]/tr[1]/td[1]/font)')
title = self.get_node(
root,
'//div[@id="content"]/table/tr[preceding-sibling::tr/td/'
'b[contains(text(), "By:")]]/td/em/text()')
bill = Bill(session, chamber, bill_id, title,
type=bill_type)
bill['subjects'] = list(set(self.subject_mapping[bill_id]))
for table in root.xpath('//div[@id="content"]/table'):
if 'Bill Text' in table.text_content():
bill_text = table.xpath("string(tr/td[2]/a/@href)")
text_url = "http://www.leg.state.nv.us" + bill_text
bill.add_version("Bill Text", text_url,
mimetype='application/pdf')
primary, secondary = self.scrape_sponsors(page)
for leg in primary:
bill.add_sponsor('primary', leg)
for leg in secondary:
bill.add_sponsor('cosponsor', leg)
minutes_count = 2
for mr in root.xpath('//table[4]/tr/td[3]/a'):
minutes = mr.xpath("string(@href)")
minutes_url = "http://www.leg.state.nv.us" + minutes
minutes_date_path = "string(//table[4]/tr[%s]/td[2])" % minutes_count
minutes_date = mr.xpath(minutes_date_path).split()
minutes_date = minutes_date[0] + minutes_date[1] + minutes_date[2] + " Agenda"
bill.add_document(minutes_date, minutes_url)
minutes_count = minutes_count + 1
self.scrape_actions(root, bill, "upper")
self.scrape_votes(page, page_path, bill, insert, year)
bill.add_source(page_path)
self.save_bill(bill)
def scrape_assem_bills(self, chamber, insert, session, year):
doc_type = {1: 'bill', 3: 'resolution', 5: 'concurrent resolution',
6: 'joint resolution',9:'petition'}
for docnum, bill_type in doc_type.iteritems():
parentpage_url = 'http://www.leg.state.nv.us/Session/%s/Reports/HistListBills.cfm?DoctypeID=%s' % (insert, docnum)
links = self.scrape_links(parentpage_url)
count = 0
for link in links:
count = count + 1
page_path = 'http://www.leg.state.nv.us/Session/%s/Reports/%s' % (insert, link)
page = self.get(page_path).text
page = page.replace(u"\xa0", " ")
root = lxml.html.fromstring(page)
root.make_links_absolute("http://www.leg.state.nv.us/")
bill_id = root.xpath('string(/html/body/div[@id="content"]/table[1]/tr[1]/td[1]/font)')
title = self.get_node(
root,
'//div[@id="content"]/table/tr[preceding-sibling::tr/td/'
'b[contains(text(), "By:")]]/td/em/text()')
bill = Bill(session, chamber, bill_id, title,
type=bill_type)
bill['subjects'] = list(set(self.subject_mapping[bill_id]))
billtext = root.xpath("//b[text()='Bill Text']")[0].getparent().getnext()
text_urls = billtext.xpath("./a")
for text_url in text_urls:
version_name = text_url.text.strip()
version_url = text_url.attrib['href']
bill.add_version(version_name, version_url,
mimetype='application/pdf')
primary, secondary = self.scrape_sponsors(page)
for leg in primary:
bill.add_sponsor('primary', leg)
for leg in secondary:
bill.add_sponsor('cosponsor', leg)
minutes_count = 2
for mr in root.xpath('//table[4]/tr/td[3]/a'):
minutes = mr.xpath("string(@href)")
minutes_url = "http://www.leg.state.nv.us" + minutes
minutes_date_path = "string(//table[4]/tr[%s]/td[2])" % minutes_count
minutes_date = mr.xpath(minutes_date_path).split()
minutes_date = minutes_date[0] + minutes_date[1] + minutes_date[2] + " Minutes"
bill.add_document(minutes_date, minutes_url)
minutes_count = minutes_count + 1
self.scrape_actions(root, bill, "lower")
self.scrape_votes(page, page_path, bill, insert, year)
bill.add_source(page_path)
self.save_bill(bill)
def scrape_links(self, url):
links = []
page = self.get(url).text
root = lxml.html.fromstring(page)
path = '/html/body/div[@id="ScrollMe"]/table/tr[1]/td[1]/a'
for mr in root.xpath(path):
if '*' not in mr.text:
web_end = mr.xpath('string(@href)')
links.append(web_end)
return links
def scrape_sponsors(self, page):
primary = []
sponsors = []
doc = lxml.html.fromstring(page)
# These bold tagged elements should contain the primary sponsors.
b_nodes = self.get_nodes(
doc,
'//div[@id="content"]/table/tr/td[contains(./b/text(), "By:")]/b/'
'text()')
for b in b_nodes:
name = b.strip()
# add these as sponsors (excluding junk text)
if name not in ('By:', 'Bolded'):
primary.append(name)
nb_nodes = self.get_nodes(
doc,
'//div[@id="content"]/table/tr/td[contains(./b/text(), "By:")]/text()')
# tail of last b has remaining sponsors
for node in nb_nodes:
if node == ' name indicates primary sponsorship)':
continue
names = re.sub('([\(\r\t\n\s])', '', node).split(',')
for name in names:
if name:
sponsors.append(name.strip())
return primary, sponsors
def scrape_actions(self, root, bill, actor):
path = '/html/body/div[@id="content"]/table/tr/td/p[1]'
for mr in root.xpath(path):
date = mr.text_content().strip()
date = date.split()[0] + " " + date.split()[1] + " " + date.split()[2]
date = datetime.strptime(date, "%b %d, %Y")
for el in mr.xpath('../../following-sibling::tr[1]/td/ul/li'):
action = el.text_content().strip()
# skip blank actions
if not action:
continue
action = " ".join(action.split())
# catch chamber changes
if action.startswith('In Assembly'):
actor = 'lower'
elif action.startswith('In Senate'):
actor = 'upper'
elif 'Governor' in action:
actor = 'executive'
action_type = 'other'
for pattern, atype in self._classifiers:
if re.match(pattern, action):
action_type = atype
break
if "Committee on" in action:
committees = re.findall("Committee on ([a-zA-Z, ]*)\.",action)
if len(committees) > 0:
bill.add_action(actor, action, date, type=action_type,committees=committees)
continue
bill.add_action(actor, action, date, type=action_type)
def scrape_votes(self, bill_page, page_url, bill, insert, year):
root = lxml.html.fromstring(bill_page)
trs = root.xpath('/html/body/div/table[6]//tr')
assert len(trs) >= 1, "Didn't find the Final Passage Votes' table"
for tr in trs[1:]:
links = tr.xpath('td/a[contains(text(), "Passage")]')
if len(links) == 0:
self.warning("Non-passage vote found for {}; ".format(bill['bill_id']) +
"probably a motion for the calendar. It will be skipped.")
else:
assert len(links) == 1, \
"Too many votes found for XPath query, on bill {}".format(bill['bill_id'])
link = links[0]
motion = link.text
if 'Assembly' in motion:
chamber = 'lower'
else:
chamber = 'upper'
votes = {}
tds = tr.xpath('td')
for td in tds:
if td.text:
text = td.text.strip()
date = re.match('... .*?, ....',text)
count = re.match('(?P<category>.*?) (?P<votes>[0-9]+)[,]?',text)
if date:
vote_date = datetime.strptime(text, '%b %d, %Y')
elif count:
votes[count.group('category')] = int(count.group('votes'))
yes = votes['Yea']
no = votes['Nay']
excused = votes['Excused']
not_voting = votes['Not Voting']
absent = votes['Absent']
other = excused + not_voting + absent
passed = yes > no
vote = Vote(chamber, vote_date, motion, passed, yes, no,
other, not_voting=not_voting, absent=absent)
# try to get vote details
try:
vote_url = 'http://www.leg.state.nv.us/Session/%s/Reports/%s' % (
insert, link.get('href'))
page = self.get(vote_url).text
page = page.replace(u"\xa0", " ")
root = lxml.html.fromstring(page)
for el in root.xpath('//table[2]/tr'):
tds = el.xpath('td')
name = tds[1].text_content().strip()
vote_result = tds[2].text_content().strip()
if vote_result == 'Yea':
vote.yes(name)
elif vote_result == 'Nay':
vote.no(name)
else:
vote.other(name)
vote.add_source(page_url)
except scrapelib.HTTPError:
self.warning("failed to fetch vote page, adding vote without details")
bill.add_vote(vote)
```
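A sketch of how the `_classifiers` tuple at the top of this file is applied: the first pattern that matches the start of an action string determines its type(s), and anything unmatched falls back to `'other'`. The classifier subset and sample actions below are illustrative.

```python
import re

def categorize_action(action, classifiers):
    # First matching pattern wins, mirroring the loop in scrape_actions().
    for pattern, atype in classifiers:
        if re.match(pattern, action):
            return atype
    return 'other'

classifiers = (
    ('Read first time. Referred', ['bill:reading:1', 'committee:referred']),
    ('Read first time.', 'bill:reading:1'),
    ('Approved by the Governor', 'governor:signed'),
)
assert categorize_action('Read first time. Referred to Committee on Judiciary.',
                         classifiers) == ['bill:reading:1', 'committee:referred']
assert categorize_action('Approved by the Governor.', classifiers) == 'governor:signed'
assert categorize_action('Taken from the table.', classifiers) == 'other'
```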
#### File: openstates/ny/bills.py
```python
import re
import datetime
import scrapelib
import lxml.html
import lxml.etree
from collections import defaultdict
from billy.utils import term_for_session
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
from apiclient import OpenLegislationAPIClient
from .models import AssemblyBillPage
from .actions import Categorizer
class NYBillScraper(BillScraper):
jurisdiction = 'ny'
categorizer = Categorizer()
def _date_from_timestamp(self, timestamp):
return datetime.datetime.fromtimestamp(int(timestamp) / 1000)
def _parse_bill_number(self, bill_id):
bill_id_regex = r'(^[ABCEJKLRS])(\d{,6})'
bill_id_match = re.search(bill_id_regex, bill_id)
bill_prefix, bill_number = bill_id_match.groups()
return (bill_prefix, bill_number)
def _parse_bill_prefix(self, bill_prefix):
"""
Legacy holdover, but still useful for determining companion bill
chambers.
"""
bill_chamber, bill_type = {
'S': ('upper', 'bill'),
'R': ('upper', 'resolution'),
'J': ('upper', 'legislative resolution'),
'B': ('upper', 'concurrent resolution'),
'C': ('lower', 'concurrent resolution'),
'A': ('lower', 'bill'),
'E': ('lower', 'resolution'),
'K': ('lower', 'legislative resolution'),
'L': ('lower', 'joint resolution')}[bill_prefix]
return (bill_chamber, bill_type)
def _parse_bill_details(self, bill):
bill_id = bill['printNo']
assert bill_id
# Parse the bill ID into its prefix and number.
prefix, number = self._parse_bill_number(bill_id)
bill_type = self._parse_bill_prefix(prefix)[1]
active_version = bill['activeVersion']
title = bill['title'].strip()
if not title:
self.logger.warn('Bill missing title.')
return
# Determine the chamber the bill originated from.
if bill['billType']['chamber'] == 'SENATE':
bill_chamber = 'upper'
elif bill['billType']['chamber'] == 'ASSEMBLY':
bill_chamber = 'lower'
else:
warning = 'Could not identify chamber for {}.'
self.logger.warn(warning.format(bill_id))
senate_url = (
'http://www.nysenate.gov/legislation/bills/{bill_session}/'
'{bill_id}'
).format(
bill_session=bill['session'], bill_id=bill_id)
assembly_url = (
'http://assembly.state.ny.us/leg/?default_fld=&bn={bill_id}'
'&Summary=Y&Actions=Y'
).format(
bill_id=bill_id)
return (senate_url, assembly_url, bill_chamber, bill_type, bill_id,
title, (prefix, number, active_version))
def _generate_bills(self, session):
self.logger.info('Generating bills.')
bills = defaultdict(list)
delimiter = '-'
(start_year, delimiter, end_year) = session.partition(delimiter)
page = 0
# 1000 is the current maximum returned record limit for all Open
# Legislature API calls that use the parameter.
limit = 1000
# Flag whether to retrieve full bill data.
full = True
while True:
# Updating the offset before the page matters here.
offset = limit * page + 1
page += 1
# Response should be a dict of the JSON data returned from
# the Open Legislation API.
response = self.api_client.get('bills', session_year=start_year,
limit=limit, offset=offset, full=full)
if response['responseType'] == 'empty list'\
or response['offsetStart'] > response['offsetEnd']:
break
else:
bills = response['result']['items']
for bill in bills:
yield bill
def _scrape_bill(self, session, bill_data):
details = self._parse_bill_details(bill_data)
(senate_url, assembly_url, bill_chamber, bill_type, bill_id,
title, (prefix, number, active_version)) = details
"""
Note: This needs to scrape both assembly and senate sites.
Neither house has the other's votes, so you have to scrape both
and merge them.
"""
assembly = AssemblyBillPage(self, session, bill_chamber, details)
assembly.build()
bill = assembly.bill
bill.add_source(assembly_url)
# Add companion bill.
same_as = bill_data.get('amendments', {}).get('items', {})\
.get(active_version, {}).get('sameAs', {})
# Check whether "sameAs" property is populated with at least one bill.
if same_as and 'items' in same_as and 'size' in same_as and\
same_as['size'] > 0:
# Get companion bill ID.
companion_bill_id = same_as['items'][0]['basePrintNo']
# Build companion bill session.
start_year = same_as['items'][0]['session']
end_year = start_year + 1
companion_bill_session = '-'.join([str(start_year), str(end_year)])
# Determine companion bill chamber.
companion_bill_prefix = self._parse_bill_number(
same_as['items'][0]['basePrintNo'])[0]
companion_bill_chamber = self._parse_bill_prefix(
companion_bill_prefix)[0]
# Attach companion bill data.
bill.add_companion(
companion_bill_id,
companion_bill_session,
companion_bill_chamber,
)
# Determine whether to count votes.
votes_detected = False
try:
# This counts the vote categories, not the votes themselves
# (i.e. AYE, NAY, EXC). If a category is present, there
# should be votes available to record.
if bill_data['votes']['memberVotes']['size'] > 0:
votes_detected = True
except KeyError:
pass
if votes_detected:
for vote_data in bill_data['votes']['memberVotes']:
vote = Vote(
chamber='upper',
date=self._date_from_timestamp(vote_data['voteDate']),
motion=vote_data['description'] or '[No motion available.]',
passed=False,
yes_votes=[],
no_votes=[],
other_votes=[],
yes_count=0,
no_count=0,
other_count=0)
# Count all yea votes.
if 'items' in vote_data.get('AYE', {}):
for legislator in vote_data['AYE']['items']:
vote.yes(legislator['fullName'])
vote['yes_count'] += 1
if 'items' in vote_data.get('AYEWR', {}):
for legislator in vote_data['AYEWR']['items']:
vote.yes(legislator['fullName'])
vote['yes_count'] += 1
# Count all nay votes.
if 'items' in vote_data.get('NAY', {}):
for legislator in vote_data['NAY']['items']:
vote.no(legislator['fullName'])
vote['no_count'] += 1
# Count all non-yea/nay votes.
other_vote_types = ('EXC', 'ABS', 'ABD')
for vote_type in other_vote_types:
if 'items' in vote_data.get(vote_type, {}):
for legislator in vote_data[vote_type]['items']:
vote.other(legislator['fullName'])
vote['other_count'] += 1
vote['passed'] = vote['yes_count'] > vote['no_count']
bill.add_vote(vote)
if bill_data['title'] is None:
bill['title'] = bill_data['summary']
return bill
def scrape(self, session, chambers):
self.api_client = OpenLegislationAPIClient(self)
term_id = term_for_session('ny', session)
for term in self.metadata['terms']:
if term['name'] == term_id:
break
self.term = term
for bill in self._generate_bills(session):
bill_object = self._scrape_bill(session, bill)
self.save_bill(bill_object)
```
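A quick check of the bill-number regex used by `_parse_bill_number`; the print numbers below are illustrative.

```python
import re

bill_id_regex = r'(^[ABCEJKLRS])(\d{,6})'
assert re.search(bill_id_regex, 'S1234').groups() == ('S', '1234')
assert re.search(bill_id_regex, 'A05678').groups() == ('A', '05678')
```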
#### File: openstates/ny/__init__.py
```python
import re
import lxml.html
from billy.utils.fulltext import text_after_line_numbers
from .bills import NYBillScraper
from .legislators import NYLegislatorScraper
from .committees import NYCommitteeScraper
from .events import NYEventScraper
settings = dict(SCRAPELIB_TIMEOUT=120)
metadata = dict(
name='New York',
abbreviation='ny',
capitol_timezone='America/New_York',
legislature_name='New York Legislature',
# unfortunate - there isn't a decent combined site
legislature_url='http://public.leginfo.state.ny.us/',
chambers = {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'Assembly', 'title': 'Assembly Member'},
},
terms=[
dict(name='2009-2010', start_year=2010, end_year=2011,
sessions=['2009-2010']),
dict(name='2011-2012', start_year=2011, end_year=2012,
sessions=['2011-2012']),
dict(name='2013-2014', start_year=2013, end_year=2014,
sessions=['2013-2014']),
dict(name='2015-2016', start_year=2015, end_year=2016,
sessions=['2015-2016'])
],
session_details={
'2009-2010': {
'display_name': '2009 Regular Session',
'_scraped_name': '2009',
},
'2011-2012': {
'display_name': '2011 Regular Session',
'_scraped_name': '2011',
},
'2013-2014': {
'display_name': '2013 Regular Session',
'_scraped_name': '2013',
},
'2015-2016': {
'display_name': '2015 Regular Session',
'_scraped_name': '2015',
}
},
feature_flags=['subjects', 'events', 'influenceexplorer'],
_ignored_scraped_sessions=['2009'],
requests_per_minute=30,
)
def session_list():
from billy.scrape.utils import url_xpath
url = 'http://nysenate.gov/search/legislation'
sessions = url_xpath(url,
'//select[@name="bill_session_year"]/option[@value!=""]/@value')
return sessions
def extract_text(doc, data):
doc = lxml.html.fromstring(data)
text = doc.xpath('//pre')[0].text_content()
# if there's a header above a _________, ditch it
text = text.rsplit('__________', 1)[-1]
# strip numbers from lines (not all lines have numbers though)
text = re.sub('\n\s*\d+\s*', ' ', text)
return text
```
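A small illustration of the line-number stripping inside `extract_text`; the text sample is made up.

```python
import re

text = "\n 1 AN ACT to amend the tax law\n 2 in relation to an example"
assert re.sub(r'\n\s*\d+\s*', ' ', text) == ' AN ACT to amend the tax law in relation to an example'
```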
#### File: openstates/pr/bills.py
```python
from billy.scrape.votes import Vote
from billy.scrape.bills import BillScraper, Bill
import lxml.html
import datetime
import itertools
import subprocess
import shutil
import os
import re
class NoSuchBill(Exception):
pass
_voteChambers = (
(u'Aprobado por el Senado en Votac','upper'),
(u'Aprobado por C','lower'),
)
_docVersion = (
('Entirillado del Informe'),
('Texto de Aprobaci'),
# ('Ley N'),
('rendido con enmiendas'),
('Radicado'),
)
_classifiers = (
('Radicado','', 'bill:introduced'),
(u'Aprobado por Cámara en Votación Final','lower', 'bill:passed'),
(u'Aprobado por el Senado en Votación','upper', 'bill:passed'),
('Aparece en Primera Lectura del', 'upper','bill:reading:1'),
('Aparece en Primera Lectura de la','lower','bill:reading:1'),
('Enviado al Gobernador', 'governor','governor:received'),
('Veto', 'governor','governor:vetoed'),
('Veto de Bolsillo','governor','governor:vetoed'),
# Commissions issue a report, but sometimes they make no amendments and
# leave the bill as it is.
# We do not check whether they did; it would be easy, though: read the end
# of the action, and if there are no amendments it should say 'sin enmiendas'.
('1er Informe','committee','amendment:amended'),
('2do Informe','committee','amendment:amended'),
('Aprobado con enmiendas','','amendment:passed'),
(u'Remitido a Comisión','', 'committee:referred'),
(u'Referido a Comisión','', 'committee:referred'),
('En el Calendario de Ordenes Especiales de la C','lower','other'),
('Texto de Aprobación Final enviado al Senado','upper','other'),
('Retirada por su Autor','sponsor','bill:withdrawn'),
('Comisión : * no recomienda aprobación de la medida','','committee:passed:unfavorable'),
('<NAME>','governor','governor:signed')
)
class PRBillScraper(BillScraper):
jurisdiction = 'pr'
bill_types = {
'P': 'bill',
'R': 'resolution',
'RK': 'concurrent resolution',
'RC': 'joint resolution',
#'PR': 'plan de reorganizacion',
}
def clean_name(self, name):
for ch in ['Sr,','Sr.','Sra.','Rep.','Sen.']:
if ch in name:
name = name.replace(ch,'')
return name
def scrape(self, chamber, session):
year = session[0:4]
self.base_url = 'http://www.oslpr.org/legislatura/tl%s/tl_medida_print2.asp' % year
chamber_letter = {'lower':'C','upper':'S'}[chamber]
for code, type in self.bill_types.iteritems():
counter = itertools.count(1)
for n in counter:
bill_id = '%s%s%s' % (code, chamber_letter, n)
try:
self.scrape_bill(chamber, session, bill_id, type)
except NoSuchBill:
break
def parse_action(self,chamber,bill,action,action_url,date):
#if action.startswith('Referido'):
#committees = action.split(',',1)
#multiple committees
if action.startswith('Ley N'):
action = action[0:42]
elif action.startswith('Res. Conj.'):
action = action[0:42]
action_actor = ''
atype = 'other'
#check it has a url and is not just text
if action_url:
action_url = action_url[0]
isVersion = False
for text_regex in _docVersion:
if re.match(text_regex, action):
isVersion = True
if isVersion:
# versions are mentioned several times, lets use original name
erroneous_filename = False
action_url = action_url.lower().strip()
if action_url.endswith('.doc'):
mimetype = 'application/msword'
elif action_url.endswith('.rtf'):
mimetype = 'application/rtf'
elif action_url.endswith('.pdf'):
mimetype = 'application/pdf'
elif action_url.endswith('docx'):
mimetype = 'application/octet-stream'
elif action_url.endswith('docm'):
self.warning("Erroneous filename found: {}".format(action_url))
erroneous_filename = True
else:
raise Exception('unknown version type: %s' % action_url)
if not erroneous_filename:
bill.add_version(action, action_url, on_duplicate='use_old',
mimetype=mimetype)
else:
bill.add_document(action, action_url)
for pattern, action_actor,atype in _classifiers:
if re.match(pattern, action):
break
else:
action_actor = ''
atype = 'other'
if action_actor == '':
if action.find('SENADO') != -1:
action_actor = 'upper'
elif action.find('CAMARA') != -1:
action_actor = 'lower'
else:
action_actor = chamber
#if action.startswith('Referido'):
#for comme in committees:
#print comme
bill.add_action(action_actor, action.replace('.',''),date,type=atype)
return atype,action
def scrape_bill(self, chamber, session, bill_id, bill_type):
url = '%s?r=%s' % (self.base_url, bill_id)
html = self.get(url).text
if "error '80020009'" in html:
self.warning('asp error on page, skipping %s', bill_id)
return
doc = lxml.html.fromstring(html)
# search for Titulo, accent over i messes up lxml, so use 'tulo'
title = doc.xpath(u'//td/b[contains(text(),"tulo")]/../following-sibling::td/text()')
if not title:
raise NoSuchBill()
bill = Bill(session, chamber, bill_id, title[0], type=bill_type)
author = doc.xpath(u'//td/b[contains(text(),"Autor")]/../text()')[0]
for aname in author.split(','):
aname = self.clean_name(aname).strip()
if aname:
bill.add_sponsor('primary', aname)
co_authors = doc.xpath(u'//td/b[contains(text(),"Co-autor")]/../text()')
if len(co_authors) != 0:
for co_author in co_authors[1].split(','):
bill.add_sponsor('cosponsor', self.clean_name(co_author).strip())
action_table = doc.xpath('//table')[-1]
for row in action_table[1:]:
tds = row.xpath('td')
# ignore row missing date
if len(tds) != 2:
continue
if tds[0].text_content():
date = datetime.datetime.strptime(tds[0].text_content(), "%m/%d/%Y")
action = tds[1].text_content().strip()
# parse the text to see if it's a new version or an unrelated document;
# if it has a hyphen, assume it's a vote document
# get the url of the action
action_url = tds[1].xpath('a/@href')
atype,action = self.parse_action(chamber,bill,action,action_url,date)
# Some lower-house roll calls could be parsed, but they are finicky.
# Most roll lists are just images embedded within a document,
# and offer no alt text to scrape
# Instead, just scrape the vote counts
vote_info = re.search(r'(?u)^(.*),\s([\s\d]{2})-([\s\d]{2})-([\s\d]{2})-([\s\d]{0,2})$', action)
if vote_info and re.search(r'\d{1,2}', action):
vote_name = vote_info.group(1)
if u"Votación Final" in vote_name:
(vote_chamber, vote_name) = re.search(
r'(?u)^\w+ por (.*?) en (.*)$', vote_name).groups()
if "Senado" in vote_chamber:
vote_chamber = 'upper'
else:
vote_chamber = 'lower'
elif "Cuerpo de Origen" in vote_name:
vote_name = re.search(
r'(?u)^Cuerpo de Origen (.*)$', vote_name).group(1)
vote_chamber = chamber
elif u"informe de Comisión de Conferencia" in vote_name:
(vote_chamber, vote_name) = re.search(
r'(?u)^(\w+) (\w+ informe de Comisi\wn de Conferencia)$',
vote_name).groups()
if vote_chamber == "Senado":
vote_chamber = 'upper'
else:
vote_chamber = 'lower'
elif u"Se reconsideró" in vote_name:
if bill['votes']:
vote_chamber = bill['votes'][-1]['chamber']
else:
vote_chamber = chamber
else:
raise AssertionError(
u"Unknown vote text found: {}".format(vote_name))
vote_name = vote_name.title()
yes = int(vote_info.group(2))
no = int(vote_info.group(3))
other = 0
if vote_info.group(4).strip():
other += int(vote_info.group(4))
if vote_info.group(5).strip():
other += int(vote_info.group(5))
vote = Vote(
chamber=vote_chamber,
date=date,
motion=vote_name,
passed=(yes > no),
yes_count=yes,
no_count=no,
other_count=other
)
vote.add_source(url)
bill.add_vote(vote)
bill.add_source(url)
self.save_bill(bill)
```
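A sketch of the vote-count regex used near the end of `scrape_bill`. The action string below is illustrative but follows the same "description, counts" tail the scraper looks for: the first two numbers are the yes and no counts, and the last two fields are folded into the "other" count.

```python
import re

vote_re = r'(?u)^(.*),\s([\s\d]{2})-([\s\d]{2})-([\s\d]{2})-([\s\d]{0,2})$'
action = u'Aprobado por el Senado en Votación Final, 25-02-00-00'
m = re.search(vote_re, action)
assert m.group(1) == u'Aprobado por el Senado en Votación Final'
# yes, no, and the two trailing fields that feed the "other" count
assert [int(g) for g in m.groups()[1:]] == [25, 2, 0, 0]
```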
#### File: openstates/pr/utils.py
```python
import itertools
# From the itertools docs's recipe section
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
def clean_newline(text):
new_str = ' '.join(text.split('\n'))
return new_str
def between_keywords(key1, key2, text):
right_part = text.split(key1)[0]
return right_part.split(key2)[1]
def doc_link_url(doc_link_part):
return 'http://www.camaraderepresentantes.org' + doc_link_part
def year_from_session(session):
return int(session.split()[0])
```
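A usage sketch for the helpers above (Python 2 is assumed, since `grouper` relies on `itertools.izip_longest`); the sample strings are made up.

```python
from openstates.pr.utils import (grouper, clean_newline, between_keywords,
                                 doc_link_url, year_from_session)

assert [''.join(g) for g in grouper(3, 'ABCDEFG', 'x')] == ['ABC', 'DEF', 'Gxx']
assert clean_newline('linea uno\nlinea dos') == 'linea uno linea dos'
# Returns the text sitting between key2 and key1 in the input string.
assert between_keywords('END', 'START', 'preSTARTmiddleENDpost') == 'middle'
assert doc_link_url('/docs/bill.pdf') == 'http://www.camaraderepresentantes.org/docs/bill.pdf'
assert year_from_session('2011 Regular Session') == 2011
```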
#### File: openstates/ri/votes.py
```python
from billy.scrape.votes import VoteScraper, Vote
from billy.scrape.utils import url_xpath
import datetime as dt
import urllib
import lxml
import re
RI_URL_BASE = "http://webserver.rilin.state.ri.us"
class RIVoteScraper(VoteScraper):
jurisdiction = 'ri'
def get_dates(self, page):
dates = url_xpath( page, "//select[@name='votedate']" )[0].\
xpath("./*")
ret = [ a.text for a in dates ]
return ret
def get_votes(self, url, session):
ret = {}
html = self.get(url).text
p = lxml.html.fromstring(html)
tables = \
p.xpath("//td[@background='/images/capBG.jpg']/div/table")
metainf = tables[0]
table = tables[1]
inf = metainf.xpath("./tr/td/pre")[0]
headers = [ br.tail for br in inf.xpath("./*") ]
dateinf = metainf.xpath("./tr/td")[3]
date = dateinf.text
time = dateinf.xpath("./*")[0].tail
vote_digest = metainf.xpath("./tr/td[@colspan='3']")
digest = vote_digest[2].text_content()
dig = []
for d in digest.split("\n"):
lis = d.strip().split("-")
for l in lis:
if l != None and l != "":
dig.append(l.strip())
digest = dig
il = iter( digest )
d = dict(zip(il, il))
vote_count = d
vote_count['passage'] = int(vote_count['YEAS']) > \
int(vote_count['NAYS'])
# XXX: This has a greater than normal chance of failing.
# However, it's an upstream issue.
time_string = "%s %s" % ( time, date )
fmt_string = "%I:%M:%S %p %A, %B %d, %Y"
# 4:31:14 PM TUESDAY, JANUARY 17, 2012
date_time = dt.datetime.strptime( time_string, fmt_string )
bill_s_n_no = r"(?P<year>[0-9]{2,4})(-?)(?P<chamber>[SH])\s*(?P<bill>[0-9]+)"
# This is technically wrong, but it's close enough to be fine.
# something like "123S 3023" is technically valid, even though it's
# silly
bill_metainf = None
remaining = None
for hid in range(0,len(headers)):
h = headers[hid]
inf = re.search( bill_s_n_no, h )
if inf != None:
bill_metainf = inf.groupdict()
if bill_metainf['year'][-2:] != session[-2:]:
self.log(
"Skipping vote - it's in the %s session, we're in the %s session." % (
bill_metainf['year'][-2:],
session[-2:]
)
)
return ret
remaining = headers[hid+1:]
if bill_metainf == None:
self.warning("No metainf for this bill. Aborting snag")
return ret
try:
motion = remaining[-2]
except IndexError:
self.warning("Mission motion on this vote")
motion = "Unknown" # XXX: Because the motion is not on some
# pages.
bill_metainf['extra'] = {
"motion" : motion
}
votes = []
for t in table.xpath("./tr/td"):
nodes = t.xpath("./*")
for node in nodes:
if node.tag == "span":
vote = node.text.strip().upper()
name = node.tail.strip()
votes.append({
"name" : name,
"vote" : vote
})
if len(votes) > 0:
bid = bill_metainf['bill']
ret[bid] = {
"votes" : votes,
"meta" : bill_metainf,
"time" : date_time,
"count" : vote_count,
"source": url
}
return ret
def parse_vote_page(self, page, context_url, session):
ret = []
p = lxml.html.fromstring(page)
votes = p.xpath( "//center/div[@class='vote']" )
for vote in votes:
votes = self.get_votes( context_url + "/" +
vote.xpath("./a")[0].attrib["href"], session )
ret.append(votes)
return ret
def post_to(self, url, vote):
headers = {
"votedate" : vote
}
return self.post(url, data=headers).text
def scrape(self, chamber, session):
url = {
"upper" : "%s/%s" % ( RI_URL_BASE, "SVotes" ),
"lower" : "%s/%s" % ( RI_URL_BASE, "HVotes" )
}
url = url[chamber]
action = "%s/%s" % ( url, "votes.asp" )
dates = self.get_dates( url )
for date in dates:
votes = self.parse_vote_page( self.post_to( action, date ), url,
session )
for vote_dict in votes:
for vote in vote_dict:
vote = vote_dict[vote]
count = vote['count']
chamber = {
"H" : "lower",
"S" : "upper"
}[vote['meta']['chamber']]
v = Vote( chamber, vote['time'] ,
vote['meta']['extra']['motion'],
count['passage'], int(count['YEAS']),
int(count['NAYS']),
int(count['NOT VOTING']),
session=session,
bill_id=vote['meta']['bill'],
bill_chamber=chamber,
bill_session=vote['meta']['year'],
)
v.add_source( vote['source'] )
for vt in vote['votes']:
if vt['vote'] == "Y":
v.yes( vt['name'] )
elif vt['vote'] == "N":
v.no( vt['name'] )
else:
v.other( vt['name'] )
self.save_vote(v)
```
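A quick check of the header regex `bill_s_n_no` used in `get_votes`; the sample header is illustrative.

```python
import re

bill_s_n_no = r"(?P<year>[0-9]{2,4})(-?)(?P<chamber>[SH])\s*(?P<bill>[0-9]+)"
inf = re.search(bill_s_n_no, "2012-S 2734 AS AMENDED")
assert inf.groupdict() == {'year': '2012', 'chamber': 'S', 'bill': '2734'}
```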
#### File: openstates/sc/bills.py
```python
import scrapelib
import datetime
import os
import re
from collections import defaultdict
from billy.scrape import ScrapeError
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
from billy.scrape.utils import convert_pdf
import lxml.html
def action_type(action):
# http://www.scstatehouse.gov/actionsearch.php is very useful for this
classifiers = (('Adopted', 'bill:passed'),
('Amended and adopted',
['bill:passed', 'amendment:passed']),
('Amended', 'amendment:passed'),
('Certain items vetoed', 'governor:vetoed:line-item'),
('Committed to', 'committee:referred'),
('Committee Amendment Adopted', 'amendment:passed'),
('Committee Amendment Amended and Adopted',
['amendment:passed', 'amendment:amended']),
('Committee Amendment Amended', 'amendment:amended'),
('Committee Amendment Tabled', 'amendment:tabled'),
('Committee report: Favorable',
'committee:passed:favorable'),
('Committee report: Majority favorable',
'committee:passed'),
('House amendment amended', 'amendment:amended'),
('Introduced and adopted',
['bill:introduced', 'bill:passed']),
('Introduced, adopted',
['bill:introduced', 'bill:passed']),
('Introduced and read first time', ['bill:introduced', 'bill:reading:1']),
('Introduced, read first time', ['bill:introduced', 'bill:reading:1']),
('Introduced', 'bill:introduced'),
('Prefiled', 'bill:filed'),
('Read second time', 'bill:reading:2'),
('Read third time', ['bill:passed', 'bill:reading:3']),
('Recommitted to Committee', 'committee:referred'),
('Referred to Committee', 'committee:referred'),
('Rejected', 'bill:failed'),
('Senate amendment amended', 'amendment:amended'),
('Signed by governor', 'governor:signed'),
('Signed by Governor', 'governor:signed'),
('Tabled', 'bill:failed'),
('Veto overridden', 'bill:veto_override:passed'),
('Veto sustained', 'bill:veto_override:failed'),
('Vetoed by Governor', 'governor:vetoed'),
)
for prefix, atype in classifiers:
if action.lower().startswith(prefix.lower()):
return atype
# otherwise
return 'other'
class SCBillScraper(BillScraper):
jurisdiction = 'sc'
urls = {
'lower' : {
'daily-bill-index': "http://www.scstatehouse.gov/hintro/hintros.php",
},
'upper' : {
'daily-bill-index': "http://www.scstatehouse.gov/sintro/sintros.php",
}
}
_subjects = defaultdict(set)
def scrape_subjects(self, session_code):
# only need to do it once
if self._subjects:
return
subject_search_url = 'http://www.scstatehouse.gov/subjectsearch.php'
data = self.post(subject_search_url,
data=dict((('GETINDEX','Y'), ('SESSION', session_code),
('INDEXCODE','0'), ('INDEXTEXT', ''),
('AORB', 'B'), ('PAGETYPE', '0')))).text
doc = lxml.html.fromstring(data)
# skip first two subjects, filler options
for option in doc.xpath('//option')[2:]:
subject = option.text
code = option.get('value')
url = '%s?AORB=B&session=%s&indexcode=%s' % (subject_search_url,
session_code, code)
data = self.get(url).text
doc = lxml.html.fromstring(data)
for bill in doc.xpath('//span[@style="font-weight:bold;"]'):
match = re.match('(?:H|S) \d{4}', bill.text)
if match:
# remove * and leading zeroes
bill_id = match.group().replace('*', ' ')
bill_id = re.sub(' 0*', ' ', bill_id)
self._subjects[bill_id].add(subject)
def scrape_vote_history(self, bill, vurl):
html = self.get(vurl).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(vurl)
# skip first two rows
for row in doc.xpath('//table/tr')[2:]:
tds = row.getchildren()
if len(tds) != 11:
self.warning('irregular vote row: %s' % vurl)
continue
timestamp, motion, vote, yeas, nays, nv, exc, pres, abst, total, result = tds
timestamp = timestamp.text.replace(u'\xa0', ' ')
timestamp = datetime.datetime.strptime(timestamp,
'%m/%d/%Y %H:%M %p')
yeas = int(yeas.text)
nays = int(nays.text)
others = int(nv.text) + int(exc.text) + int(abst.text) + int(pres.text)
assert yeas + nays + others == int(total.text)
passed = (result.text == 'Passed')
vote_link = vote.xpath('a')[0]
if '[H]' in vote_link.text:
chamber = 'lower'
else:
chamber = 'upper'
vote = Vote(chamber, timestamp, motion.text, passed, yeas, nays,
others)
vote.add_source(vurl)
rollcall_pdf = vote_link.get('href')
self.scrape_rollcall(vote, rollcall_pdf)
vote.add_source(rollcall_pdf)
bill.add_vote(vote)
def scrape_rollcall(self, vote, vurl):
(path, resp) = self.urlretrieve(vurl)
pdflines = convert_pdf(path, 'text')
os.remove(path)
current_vfunc = None
for line in pdflines.split('\n'):
line = line.strip()
# change what is being recorded
if line.startswith('YEAS') or line.startswith('AYES'):
current_vfunc = vote.yes
elif line.startswith('NAYS'):
current_vfunc = vote.no
elif (line.startswith('EXCUSED') or
line.startswith('NOT VOTING') or
line.startswith('ABSTAIN')):
current_vfunc = vote.other
# skip these
elif not line or line.startswith('Page '):
continue
# if a vfunc is active
elif current_vfunc:
# split names apart by 3 or more spaces
names = re.split('\s{3,}', line)
for name in names:
if name:
current_vfunc(name.strip())
def scrape_details(self, bill_detail_url, session, chamber, bill_id):
page = self.get(bill_detail_url).text
if 'INVALID BILL NUMBER' in page:
self.warning('INVALID BILL %s' % bill_detail_url)
return
doc = lxml.html.fromstring(page)
doc.make_links_absolute(bill_detail_url)
bill_div = doc.xpath('//div[@style="margin:0 0 40px 0;"]')[0]
bill_type = bill_div.xpath('span/text()')[0]
if 'General Bill' in bill_type:
bill_type = 'bill'
elif 'Concurrent Resolution' in bill_type:
bill_type = 'concurrent resolution'
elif 'Joint Resolution' in bill_type:
bill_type = 'joint resolution'
elif 'Resolution' in bill_type:
bill_type = 'resolution'
else:
raise ValueError('unknown bill type: %s' % bill_type)
# this is fragile, but less fragile than it was
b = bill_div.xpath('./b[text()="Summary:"]')[0]
bill_summary = b.getnext().tail.strip()
bill = Bill(session, chamber, bill_id, bill_summary, type=bill_type)
bill['subjects'] = list(self._subjects[bill_id])
# sponsors
for sponsor in doc.xpath('//a[contains(@href, "member.php")]/text()'):
bill.add_sponsor('primary', sponsor)
for sponsor in doc.xpath('//a[contains(@href, "committee.php")]/text()'):
sponsor = sponsor.replace(u'\xa0', ' ').strip()
bill.add_sponsor('primary', sponsor)
# find versions
version_url = doc.xpath('//a[text()="View full text"]/@href')[0]
version_html = self.get(version_url).text
version_doc = lxml.html.fromstring(version_html)
version_doc.make_links_absolute(version_url)
for version in version_doc.xpath('//a[contains(@href, "/prever/")]'):
# duplicate versions with same date, use first appearance
bill.add_version(version.text, version.get('href'),
on_duplicate='use_old',
mimetype='text/html')
# actions
for row in bill_div.xpath('table/tr'):
date_td, chamber_td, action_td = row.xpath('td')
date = datetime.datetime.strptime(date_td.text, "%m/%d/%y")
action_chamber = {'Senate':'upper',
'House':'lower',
None: 'other'}[chamber_td.text]
action = action_td.text_content()
action = action.split('(House Journal')[0]
action = action.split('(Senate Journal')[0].strip()
atype = action_type(action)
bill.add_action(action_chamber, action, date, atype)
# votes
vurl = doc.xpath('//a[text()="View Vote History"]/@href')
if vurl:
vurl = vurl[0]
self.scrape_vote_history(bill, vurl)
bill.add_source(bill_detail_url)
self.save_bill(bill)
def scrape(self, chamber, session):
# start with subjects
session_code = self.metadata['session_details'][session]['_code']
self.scrape_subjects(session_code)
# get bill index
index_url = self.urls[chamber]['daily-bill-index']
chamber_letter = 'S' if chamber == 'upper' else 'H'
page = self.get(index_url).text
doc = lxml.html.fromstring(page)
doc.make_links_absolute(index_url)
# visit each day and extract bill ids
days = doc.xpath('//div/b/a/@href')
for day_url in days:
try:
data = self.get(day_url).text
except scrapelib.HTTPError:
continue
doc = lxml.html.fromstring(data)
doc.make_links_absolute(day_url)
for bill_a in doc.xpath('//p/a[1]'):
bill_id = bill_a.text.replace('.', '')
if bill_id.startswith(chamber_letter):
self.scrape_details(bill_a.get('href'), session, chamber,
bill_id)
```
#### File: openstates/tx/events.py
```python
from openstates.utils import LXMLMixin
import re
import datetime as dt
from collections import OrderedDict
from billy.scrape import NoDataForPeriod
from billy.scrape.events import EventScraper, Event
import pytz
class TXEventScraper(EventScraper, LXMLMixin):
jurisdiction = 'tx'
_tz = pytz.timezone('US/Central')
def scrape(self, chamber, session):
if not session.startswith(session): # XXX: Fixme
raise NoDataForPeriod(session)
self.scrape_committee_upcoming(session, chamber)
def scrape_event_page(self, session, chamber, url, datetime):
page = self.lxmlize(url)
info = page.xpath("//p")
metainf = {}
plaintext = ""
for p in info:
content = re.sub("\s+", " ", p.text_content())
plaintext += content + "\n"
if ":" in content:
key, val = content.split(":", 1)
metainf[key.strip()] = val.strip()
ctty = metainf['COMMITTEE']
where = metainf['PLACE']
if "CHAIR" in where:
where, chair = where.split("CHAIR:")
metainf['PLACE'] = where.strip()
metainf['CHAIR'] = chair.strip()
chair = None
if "CHAIR" in metainf:
chair = metainf['CHAIR']
plaintext = re.sub("\s+", " ", plaintext).strip()
regexp = r"(S|J|H)(B|M|R) (\d+)"
bills = re.findall(regexp, plaintext)
event = Event(session,
datetime,
'committee:meeting',
ctty,
chamber=chamber,
location=where,
agenda=plaintext)
event.add_source(url)
event.add_participant('host', ctty, 'committee', chamber=chamber)
if chair is not None:
event.add_participant('chair', chair, 'legislator', chamber=chamber)
for bill in bills:
chamber, type, number = bill
bill_id = "%s%s %s" % ( chamber, type, number )
event.add_related_bill(bill_id,
type='consideration',
description='Bill up for discussion')
self.save_event(event)
def scrape_page(self, session, chamber, url):
page = self.lxmlize(url)
events = page.xpath("//a[contains(@href, 'schedules/html')]")
for event in events:
peers = event.getparent().getparent().xpath("./*")
date = peers[0].text_content()
time = peers[1].text_content()
tad = "%s %s" % ( date, time )
tad = re.sub(r"(PM|AM).*", r"\1", tad)
tad_fmt = "%m/%d/%Y %I:%M %p"
if "AM" not in tad and "PM" not in tad:
tad_fmt = "%m/%d/%Y"
tad = date
# Time expressed as 9:00 AM, Thursday, May 17, 2012
datetime = dt.datetime.strptime(tad, tad_fmt)
self.scrape_event_page(session, chamber, event.attrib['href'], datetime)
def scrape_upcoming_page(self, session, chamber, url):
page = self.lxmlize(url)
date = None
time = None
for row in page.xpath(".//tr"):
title = row.xpath(".//div[@class='sectionTitle']")
if len(title) > 0:
date = title[0].text_content()
time_elem = row.xpath(".//td/strong")
if time_elem:
time = time_elem[0].text_content()
events = row.xpath(".//a[contains(@href, 'schedules/html')]")
for event in events:
# Ignore text after the datetime proper (ie, after "AM" or "PM")
datetime = "{} {}".format(date, time)
datetime = re.search(r'(?i)(.+?[ap]m).+', datetime).group(1)
datetime = dt.datetime.strptime(datetime, "%A, %B %d, %Y %I:%M %p")
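# e.g. (hypothetical) a date cell "Thursday, May 17, 2012" plus a time cell
# "9:00 AM (Canceled)" joins to "Thursday, May 17, 2012 9:00 AM (Canceled)";
# the regex above keeps everything up to "AM"/"PM" and strptime parses the rest.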
self.scrape_event_page(session, chamber, event.attrib['href'], datetime)
def scrape_committee_upcoming(self, session, chamber):
chid = {'upper': 'S',
'lower': 'H',
'other': 'J'}[chamber]
url = "http://www.capitol.state.tx.us/Committees/Committees.aspx" + \
"?Chamber=" + chid
page = self.lxmlize(url)
refs = page.xpath("//div[@id='content']//a")
for ref in refs:
self.scrape_page(session, chamber, ref.attrib['href'])
url = "http://www.capitol.state.tx.us/Committees/MeetingsUpcoming.aspx" + \
"?Chamber=" + chid
self.scrape_upcoming_page(session, chamber, url)
```
#### File: openstates/ut/committees.py
```python
import re
from billy.scrape import NoDataForPeriod
from billy.scrape.committees import CommitteeScraper, Committee
from openstates.utils import LXMLMixin
import lxml.html
class UTCommitteeScraper(CommitteeScraper, LXMLMixin):
jurisdiction = 'ut'
def scrape(self, term, chambers):
self.validate_term(term, latest_only=True)
url = "http://le.utah.gov/asp/interim/Main.asp?ComType=All&Year=2015&List=2#Results"
page = self.lxmlize(url)
for comm_link in page.xpath("//a[contains(@href, 'Com=')]"):
comm_name = comm_link.text.strip()
if "House" in comm_name:
chamber = "lower"
elif "Senate" in comm_name:
chamber = "upper"
else:
chamber = "joint"
# Drop leading "House" or "Senate" from name
comm_name = re.sub(r"^(House|Senate) ", "", comm_name)
comm = Committee(chamber, comm_name)
committee_page = self.lxmlize(comm_link.attrib['href'])
for mbr_link in committee_page.xpath(
"//table[@class='memberstable']//a"):
name = mbr_link.text.strip()
name = re.sub(r' \([A-Z]\)$', "", name)
name = re.sub(r'^Sen. ', "", name)
name = re.sub(r'^Rep. ', "", name)
role = mbr_link.tail.strip().strip(",").strip()
type = "member"
if role:
type = role
comm.add_member(name, type)
comm.add_source(url)
comm.add_source(comm_link.get('href'))
self.save_committee(comm)
```
#### File: openstates/utils/__init__.py
```python
from .lxmlize import LXMLMixin
import re
def validate_phone_number(phone_number):
is_valid = False
# Phone format validation regex.
phone_pattern = re.compile(r'\(?\d{3}\)?\s?-?\d{3}-?\d{4}')
phone_match = phone_pattern.match(phone_number)
if phone_match is not None:
is_valid = True
return is_valid
def validate_email_address(email_address):
is_valid = False
email_pattern = re.compile(r'\b[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.'
r'[a-zA-Z]{2,}\b')
email_match = email_pattern.match(email_address)
if email_match is not None:
is_valid = True
return is_valid
```
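A quick, hypothetical usage sketch for the two validators above (the phone numbers and addresses are invented for illustration):
```python
from openstates.utils import validate_phone_number, validate_email_address

assert validate_phone_number("(555) 867-5309")   # parenthesized area code
assert validate_phone_number("555-867-5309")     # dashed form
assert not validate_phone_number("867-5309")     # missing area code is rejected
assert validate_email_address("jane.doe@example.gov")
assert not validate_email_address("not-an-email")
```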
#### File: openstates/ut/__init__.py
```python
import datetime
import re
from billy.utils.fulltext import pdfdata_to_text, text_after_line_numbers
from billy.scrape.utils import url_xpath
from .bills import UTBillScraper
from .legislators import UTLegislatorScraper
from .committees import UTCommitteeScraper
from .events import UTEventScraper
metadata = {
'name': 'Utah',
'abbreviation': 'ut',
'legislature_name': 'Utah State Legislature',
'legislature_url': 'http://le.utah.gov/',
'capitol_timezone': 'America/Denver',
'chambers': {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'House', 'title': 'Representative'},
},
'terms': [
{
'name': '2011-2012',
'start_year': 2011,
'end_year': 2012,
'sessions': ['2011', '2011S1', '2011S2', '2011S3', '2012',
'2012S4'],
},
{
'name': '2013-2014',
'start_year': 2013,
'end_year': 2014,
'sessions': ['2013', '2013h1', '2013s1', '2013s2', '2014'],
},
{
'name': '2015-2016',
'start_year': 2015,
'end_year': 2016,
'sessions': ['2015', '2015s1', '2016'],
},
],
'session_details': {
'2011': {
'type': 'primary',
'start_date': datetime.date(2011, 1, 24),
'display_name': '2011 Regular Session',
'_scraped_name': '2011 General Session',
},
'2011S1': {
'type': 'special',
'display_name': '2011, 1st Special Session',
'_scraped_name': '2011 1st Special Session',
},
'2011S2': {
'type': 'special',
'display_name': '2011, 2nd Special Session',
'_scraped_name': '2011 2nd Special Session',
},
'2011S3': {
'type': 'special',
'display_name': '2011, 3rd Special Session',
'_scraped_name': '2011 3rd Special Session',
},
'2012': {
'type': 'primary',
'display_name': '2012 General Session',
'_scraped_name': '2012 General Session',
},
'2012S4': {
'type': 'special',
'display_name': '2012, 4th Special Session',
'_scraped_name': '2012 4th Special Session',
},
'2013': {
'type': 'primary',
'display_name': '2013 General Session',
'_scraped_name': '2013 General Session',
},
'2013h1': {
'type': 'special',
'display_name': '2013 House Session',
'_scraped_name': '2013 House Session',
},
'2013s1': {
'type': 'special',
'display_name': '2013 1st Special Session',
'_scraped_name': '2013 1st Special Session',
},
'2013s2': {
'type': 'special',
'display_name': '2013 2nd Special Session',
'_scraped_name': '2013 2nd Special Session',
},
'2014': {
'type': 'primary',
'display_name': '2014 General Session',
'_scraped_name': '2014 General Session',
},
'2015': {
'type': 'primary',
'display_name': '2015 General Session',
'_scraped_name': '2015 General Session',
},
'2015s1': {
'type': 'special',
'display_name': '2015 1st Special Session',
'_scraped_name': '2015 1st Special Session',
},
'2016': {
'type': 'primary',
'start_date': datetime.date(2016, 1, 25),
'display_name': '2016 General Session',
'_scraped_name': '2016 General Session',
}
},
'feature_flags': ['events', 'subjects', 'influenceexplorer'],
'_ignored_scraped_sessions': [
'2011 Veto Override Session',
'2010 2nd Special Session',
'2010 General Session',
'2009 1st Special Session',
'2009 General Session',
'2008 2nd Special Session',
'2008 General Session',
'2007 1st Special Session',
'2007 General Session',
'2006 5th Special Session',
'2006 4th Special Session',
'2006 3rd Special Session',
'2006 General Session',
'2005 2nd Special Session',
'2005 1st Special Session',
'2005 General Session',
'2004 4th Special Session',
'2004 3rd Special Session',
'2004 General Session',
'2003 2nd Special Session',
'2003 1st Special Session',
'2003 General Session',
'2002 Veto Override Session',
'2002 6th Special Session',
'2002 5th Special Session',
'2002 4th Special Session',
'2002 3rd Special Session',
'2002 General Session',
'2001 2nd Special Session',
'2001 1st Special Session',
'2001 General Session',
'2000 General Session',
'1999 General Session',
'1998 General Session',
'1997 2nd Special Session',
'1997 1st Special Session',
'1997 General Session',
'1990-1996',
],
}
def session_list():
sessions = url_xpath(
'http://le.utah.gov/Documents/bills.htm',
'//p/a[contains(@href, "session")]/text()'
)
return [ re.sub(r'\s+', ' ', session.strip()) for session in sessions ]
def extract_text(doc, data):
if doc['mimetype'] == 'application/pdf':
return text_after_line_numbers(pdfdata_to_text(data))
```
#### File: openstates/va/bills.py
```python
import re
import datetime
from collections import defaultdict
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
import lxml.html
BASE_URL = 'http://lis.virginia.gov'
class VABillScraper(BillScraper):
jurisdiction = 'va'
vote_strip_re = re.compile(r'(.+)\((\d{1,2})-Y (\d{1,2})-N\)')
actor_map = {'House': 'lower', 'Senate': 'upper', 'Governor': 'governor',
'Conference': 'conference'}
_action_classifiers = (
('Approved by Governor', 'governor:signed'),
('\s*Amendment(s)? .+ agreed', 'amendment:passed'),
('\s*Amendment(s)? .+ withdrawn', 'amendment:withdrawn'),
('\s*Amendment(s)? .+ rejected', 'amendment:failed'),
('Subject matter referred', 'committee:referred'),
('Rereferred to', 'committee:referred'),
('Referred to', 'committee:referred'),
('Assigned ', 'committee:referred'),
('Reported from', 'committee:passed'),
('Read third time and passed', ['bill:passed', 'bill:reading:3']),
('Read third time and agreed', ['bill:passed', 'bill:reading:3']),
('Passed (Senate|House)', 'bill:passed'),
('Read third time and defeated', 'bill:failed'),
('Presented', 'bill:introduced'),
('Prefiled and ordered printed', 'bill:introduced'),
('Read first time', 'bill:reading:1'),
('Read second time', 'bill:reading:2'),
('Read third time', 'bill:reading:3'),
('Senators: ', None),
('Delegates: ', None),
('Committee substitute printed', None),
('Bill text as passed', None),
('Acts of Assembly', None),
)
link_xpath = '//ul[@class="linkSect"]/li/a'
def accept_response(self, response):
# check for rate limit pages
normal = super(VABillScraper, self).accept_response(response)
return (normal and
'Sorry, your query could not be processed' not in response.text
and 'the source database is temporarily unavailable' not in response.text)
def get_page_bills(self, issue_name, href):
issue_html = self.get('http://lis.virginia.gov' + href,
retry_on_404=True).text
idoc = lxml.html.fromstring(issue_html)
for ilink in idoc.xpath(self.link_xpath):
self.subject_map[ilink.text].append(issue_name)
more_links = idoc.xpath('//a/b[text()="More..."]/../@href')
if more_links:
self.get_page_bills(issue_name, more_links[0])
def build_subject_map(self):
url = 'http://lis.virginia.gov/cgi-bin/legp604.exe?%s+sbj+SBJ' % self.site_id
self.subject_map = defaultdict(list)
# loop over list of all issue pages
html = self.get(url).text
doc = lxml.html.fromstring(html)
for link in doc.xpath(self.link_xpath):
# get bills from page
self.get_page_bills(link.text, link.get('href'))
def scrape(self, chamber, session):
self.user_agent = 'openstates +mozilla'
# internal id for the session, store on self so all methods have access
self.site_id = self.metadata['session_details'][session]['site_id']
self.build_subject_map()
# used for skipping bills from opposite chamber
start_letter = 'H' if chamber == 'lower' else 'S'
url = 'http://lis.virginia.gov/cgi-bin/legp604.exe?%s+lst+ALL' % self.site_id
while url:
html = self.get(url, retry_on_404=True).text
doc = lxml.html.fromstring(html)
url = None # no more unless we encounter 'More...'
bills = doc.xpath('//ul[@class="linkSect"]/li')
for bill in bills:
link = bill.getchildren()[0]
bill_id = str(link.text_content())
# check if this is the 'More...' link
if bill_id.startswith('More'):
url = BASE_URL + link.get('href')
# skip bills from the other chamber
elif not bill_id.startswith(start_letter):
continue
else:
# create a bill
desc = bill.xpath('text()')[0].strip()
bill_type = {'B': 'bill',
'J': 'joint resolution',
'R': 'resolution'}[bill_id[1]]
bill = Bill(session, chamber, bill_id, desc,
type=bill_type)
bill_url = BASE_URL + link.get('href')
self.fetch_sponsors(bill)
self.scrape_bill_details(bill_url, bill)
bill['subjects'] = self.subject_map[bill_id]
bill.add_source(bill_url)
self.save_bill(bill)
def scrape_bill_details(self, url, bill):
html = self.get(url, retry_on_404=True).text
doc = lxml.html.fromstring(html)
# summary sections
summary = doc.xpath('//h4[starts-with(text(), "SUMMARY")]/following-sibling::p/text()')
if summary and summary[0].strip():
bill['summary'] = summary[0].strip()
# versions
for va in doc.xpath('//h4[text()="FULL TEXT"]/following-sibling::ul[1]/li/a[1]'):
# 11/16/09 \xa0House: Prefiled and ordered printed; offered 01/13/10 10100110D
date, desc = va.text.split(u' \xa0')
desc = desc.rsplit(' ', 1)[0] # chop off last part
link = va.get('href')
date = datetime.datetime.strptime(date, '%m/%d/%y')
# budget bills in VA are searchable but no full text available
if '+men+' in link:
self.warning('not adding budget version, bill text not available')
else:
# VA duplicates reprinted bills, let's keep the original name
bill.add_version(desc, BASE_URL+link, date=date,
mimetype='text/html',
on_duplicate='use_old')
# actions
for ali in doc.xpath('//h4[text()="HISTORY"]/following-sibling::ul[1]/li'):
date, action = ali.text_content().split(u' \xa0')
actor, action = action.split(': ', 1)
actor = self.actor_map[actor]
date = datetime.datetime.strptime(date.strip(), '%m/%d/%y')
# if action ends in (##-Y ##-N) remove that part
vrematch = self.vote_strip_re.match(action)
if vrematch:
action, y, n = vrematch.groups()
vote = Vote(actor, date, action, int(y) > int(n),
int(y), int(n), 0)
vote_url = ali.xpath('a/@href')
if vote_url:
self.parse_vote(vote, vote_url[0])
vote.add_source(BASE_URL + vote_url[0])
# set other count, it isn't provided
vote['other_count'] = len(vote['other_votes'])
#vote.validate()
bill.add_vote(vote)
# categorize actions
for pattern, atype in self._action_classifiers:
if re.match(pattern, action):
break
else:
atype = 'other'
# if matched a 'None' atype, don't add the action
if atype:
bill.add_action(actor, action, date, type=atype)
def fetch_sponsors(self, bill):
url = "http://lis.virginia.gov/cgi-bin/legp604.exe?%s+mbr+%s" % (
self.site_id, bill['bill_id'].replace(' ', ''))
# order of chamber uls
#if bill['chamber'] == 'lower':
# order = ['lower', 'upper']
#else:
# order = ['upper', 'lower']
html = self.get(url, retry_on_404=True).text
doc = lxml.html.fromstring(html)
for slist in doc.xpath('//ul[@class="linkSect"]'):
# note that first ul is origin chamber
for sponsor in slist.xpath('li'):
name = sponsor.text_content().strip()
if name.endswith(u' (chief\xa0patron)'):
name = name[:-15]
type = 'primary'
elif name.endswith(u' (chief\xa0co-patron)'):
name = name[:-18]
type = 'cosponsor'
else:
type = 'cosponsor'
bill.add_sponsor(type, name)
def split_vote(self, block):
if block:
block = block[0].text.replace('\r\n', ' ')
pieces = block.split('--')
# if there are only two pieces, there are no abstentions
if len(pieces) <= 2:
return []
else:
# lookahead and don't split if comma precedes initials
# Also, Bell appears as Bell, <NAME>. and <NAME>.
# and so needs the lookbehind assertion.
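# e.g. (hypothetical names) "YEAS--Adams, Bell, Richard P., Carr, J.M., Davis--4"
# splits to ['Adams', 'Bell, Richard P.', 'Carr, J.M.', 'Davis'].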
return [x.strip() for x in re.split('(?<!Bell), (?!\w\.\w?\.?)', pieces[1]) if x.strip()]
else:
return []
def parse_vote(self, vote, url):
url = BASE_URL + url
html = self.get(url, retry_on_404=True).text
doc = lxml.html.fromstring(html)
yeas = doc.xpath('//p[contains(text(), "YEAS--")]')
nays = doc.xpath('//p[contains(text(), "NAYS--")]')
absts = doc.xpath('//p[contains(text(), "ABSTENTIONS")]')
#no_votes = doc.xpath('//p[contains(text(), "NOT VOTING")]')[0].text
map(vote.yes, self.split_vote(yeas))
map(vote.no, self.split_vote(nays))
map(vote.other, self.split_vote(absts))
# don't count not voting as anything?
#map(vote.other, self.split_vote(no_votes))
```
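For illustration only, a small sketch of how the `vote_strip_re` pattern defined above separates the yea/nay tally from an action string; the sample action text is invented:
```python
import re

# Same pattern as VABillScraper.vote_strip_re above.
vote_strip_re = re.compile(r'(.+)\((\d{1,2})-Y (\d{1,2})-N\)')

# Hypothetical action line in the style the scraper sees.
match = vote_strip_re.match('Passed Senate (38-Y 2-N)')
action, yes, no = match.groups()
print(action.strip(), int(yes), int(no))  # -> Passed Senate 38 2
```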
#### File: openstates/vt/committees.py
```python
import json
import re
from billy.scrape.committees import Committee, CommitteeScraper
class VTCommitteeScraper(CommitteeScraper):
jurisdiction = 'vt'
latest_only = True
def scrape(self, session, chambers):
year_slug = session[5: ]
# Load all committees via the private API
committee_dump_url = \
'http://legislature.vermont.gov/committee/loadList/{}/'.\
format(year_slug)
json_data = self.get(committee_dump_url).text
committees = json.loads(json_data)['data']
# Parse the information from each committee
for info in committees:
# Strip whitespace from strings
info = { k:v.strip() for k, v in info.iteritems() }
# Determine the chamber
if info['CommitteeType'] == 'House Standing':
chamber = 'lower'
elif info['CommitteeType'] == 'Senate Standing':
chamber = 'upper'
elif info['CommitteeType'] == 'Joint Committee':
chamber = 'joint'
elif info['CommitteeType'] in ('Study Committee', 'Commissions'):
if info['CommitteeName'].startswith("House"):
chamber = 'lower'
elif info['CommitteeName'].startswith("Senate"):
chamber = 'upper'
else:
chamber = 'joint'
else:
raise AssertionError(
"Unknown committee type found: '{}'".
format(info['CommitteeType'])
)
comm = Committee(
chamber=chamber,
committee=info['CommitteeName']
)
# Determine membership and member roles
# First, parse the member list and make sure it isn't a placeholder
REMOVE_TAGS_RE = r'<.*?>'
members = [
re.sub(REMOVE_TAGS_RE, '', x)
for x
in info['Members'].split('</br>')
]
members = [x.strip() for x in members if x.strip()]
for member in members:
# Strip out titles, and exclude committee assistants
if member.startswith("Rep. "):
member = member[len("Rep. "): ]
elif member.startswith("Sen. "):
member = member[len("Sen. "): ]
else:
self.info("Non-legislator member found: {}".format(member))
continue
# Determine the member's role in the committee
if ',' in member:
(member, role) = [x.strip() for x in member.split(',')]
if 'jr' in role.lower() or 'sr' in role.lower():
raise AssertionError(
"Name suffix confused for a committee role")
else:
role = 'member'
comm.add_member(
legislator=member,
role=role
)
comm.add_source(committee_dump_url)
self.save_committee(comm)
```
#### File: openstates/vt/events.py
```python
import datetime
import json
from billy.scrape.events import Event, EventScraper
class VTEventScraper(EventScraper):
jurisdiction = 'vt'
def scrape(self, session, chambers):
year_slug = session[5: ]
url = 'http://legislature.vermont.gov/committee/loadAllMeetings/{}'.\
format(year_slug)
json_data = self.get(url).text
events = json.loads(json_data)['data']
for info in events:
# Determine when the committee meets
if info['TimeSlot'] == '1':
when = datetime.datetime.strptime(info['MeetingDate'], '%A, %B %d, %Y')
all_day = True
else:
try:
when = datetime.datetime.strptime(
info['MeetingDate'] + ', ' + info['TimeSlot'],
'%A, %B %d, %Y, %I:%M %p'
)
except ValueError:
when = datetime.datetime.strptime(
info['MeetingDate'] + ', ' + info['StartTime'],
'%A, %B %d, %Y, %I:%M %p'
)
all_day = False
event = Event(
session=session,
when=when,
all_day=all_day,
type='committee:meeting',
description="Meeting of the {}".format(info['LongName']),
location="{0}, Room {1}".format(info['BuildingName'], info['RoomNbr'])
)
event.add_source(url)
event.add_participant(
type='host',
participant=info['LongName'],
participant_type='committee'
)
self.save_event(event)
```
#### File: openstates/vt/__init__.py
```python
from billy.utils.fulltext import pdfdata_to_text, text_after_line_numbers
from .bills import VTBillScraper
from .legislators import VTLegislatorScraper
from .committees import VTCommitteeScraper
from .events import VTEventScraper
metadata = dict(
name='Vermont',
abbreviation='vt',
capitol_timezone='America/New_York',
legislature_name='Vermont General Assembly',
legislature_url='http://legislature.vermont.gov/',
chambers = {
'upper': {'name': 'Senate', 'title': 'Senator', 'term': 2},
'lower': {'name': 'House', 'title': 'Representative', 'term': 2},
},
terms=[{'name': '2009-2010',
'start_year': 2009,
'end_year': 2010,
'sessions': ['2009-2010']},
{'name': '2011-2012',
'start_year': 2011,
'end_year': 2012,
'sessions': ['2011-2012']},
{'name': '2013-2014',
'start_year': 2013,
'end_year': 2014,
'sessions': ['2013-2014']},
{'name': '2015-2016',
'start_year': 2015,
'end_year': 2016,
'sessions': ['2015-2016']},
],
session_details={'2009-2010': {'type': 'primary',
'display_name': '2009-2010 Regular Session',
'_scraped_name': '2009-2010 Session',
},
'2011-2012': {'type': 'primary',
'display_name': '2011-2012 Regular Session',
'_scraped_name': '2011-2012 Session',
},
'2013-2014': {'type': 'primary',
'display_name': '2013-2014 Regular Session',
'_scraped_name': '2013-2014 Session',
},
'2015-2016': {'type': 'primary',
'display_name': '2015-2016 Regular Session',
'_scraped_name': '2015-2016 Session',
},
},
feature_flags=['influenceexplorer'],
_ignored_scraped_sessions= ['2009 Special Session']
)
def session_list():
from billy.scrape.utils import url_xpath
return url_xpath(
'http://legislature.vermont.gov/bill/search/2016',
'//fieldset/div[@id="selected_session"]/div/select/option/text()')
def extract_text(doc, data):
return text_after_line_numbers(pdfdata_to_text(data))
```
#### File: openstates/vt/legislators.py
```python
import json
from billy.scrape.legislators import Legislator, LegislatorScraper
from openstates.utils import LXMLMixin
class VTLegislatorScraper(LegislatorScraper, LXMLMixin):
jurisdiction = 'vt'
latest_only = True
CHAMBERS = {'Senator': 'upper', 'Representative': 'lower'}
def scrape(self, term, chambers):
year_slug = term[5:]
# Load all members via the private API
legislator_dump_url = (
'http://legislature.vermont.gov/people/loadAll/{}'.
format(year_slug))
json_data = self.get(legislator_dump_url).text
legislators = json.loads(json_data)['data']
# Parse the information from each legislator
for info in legislators:
# Strip whitespace from strings
info = {k: v.strip() for k, v in info.iteritems()}
# Gather photo URL from the member's page
member_url = ('http://legislature.vermont.gov/people/single/{}/{}'.
format(year_slug, info['PersonID']))
page = self.lxmlize(member_url)
(photo_url, ) = page.xpath('//img[@class="profile-photo"]/@src')
# Also grab their state email address
state_email = page.xpath(
'//dl[@class="summary-table profile-summary"]/'
'dt[text()="Email"]/following-sibling::dd[1]/a/text()')
if state_email:
(state_email, ) = state_email
else:
state_email = None
leg = Legislator(
term=term,
chamber=self.CHAMBERS[info['Title']],
district=info['District'].replace(" District", ""),
party=info['Party'].replace("Democrat", "Democratic"),
full_name="{0} {1}".format(info['FirstName'], info['LastName']),
photo_url=photo_url
)
leg.add_office(
type='capitol',
name='Capitol Office',
address='Vermont State House\n115 State Street\nMontpelier, VT 05633',
email=state_email
)
leg.add_office(
type='district',
name='District Office',
address="{0}{1}\n{2}, {3} {4}".format(
info['MailingAddress1'],
("\n" + info['MailingAddress2']
if info['MailingAddress2'].strip()
else ""),
info['MailingCity'],
info['MailingState'],
info['MailingZIP']
),
phone=(info['HomePhone'].strip() or None),
email=(info['Email'].strip() or
info['HomeEmail'].strip() or
info['WorkEmail'].strip() or
None)
)
leg.add_source(legislator_dump_url)
leg.add_source(member_url)
self.save_legislator(leg)
```
#### File: openstates/wa/actions.py
```python
import re
from billy.scrape.actions import Rule, BaseCategorizer
# http://www.leg.wa.gov/legislature/pages/committeelisting.aspx#
committees_abbrs = {
u'AGNR': u'Agriculture & Natural Resources',
# u'APPE': '',
# u'APPG': '',
# u'APPH':
# u'ARED': '',
u'AWRD': u'Agriculture, Water & Rural Economic Development',
u'BFS': u'Business & Financial Services', # u'Early Learning & K-12 Education',
u'CB': u'Capital Budget',
u'CDH': u'Community & Economic Development & Housing',
u'ED': u'Education', # u'Education Appropriations & Oversight',
u'EDTI': u'Economic Development, Trade & Innovation',
u'EDU': u'Education',
u'ELHS': u'Early Learning & Human Services', # u'General Government Appropriations & Oversight',
u'ENRM': u'Energy, Natural Resources & Marine Waters',
u'ENV': u'Environment',
u'ENVI': u'Environment',
u'EWE': u'Health & Human Services Appropriations & Oversight',
u'FIHI': u'Financial Institutions, Housing & Insurance', # u'Health & Long-Term Care',
u'GO': u'Government Operations, Tribal Relations & Elections',
u'HCW': u'Health Care & Wellness',
u'HE': u'Higher Education',
u'HEA': 'Homeowners\' Association Act',
u'HEWD': u'Higher Education & Workforce Development',
u'HSC': u'Human Services & Corrections',
u'JUD': u'Judiciary',
u'JUDI': u'Judiciary',
u'LCCP': u'Labor, Commerce & Consumer Protection',
u'LG': u'Local Government',
u'LWD': u'Labor & Workforce Development',
# u'NRMW': '',
u'PSEP': u'Public Safety & Emergency Preparedness',
u'SGTA': u'State Government & Tribal Affairs',
u'TEC': u'Technology, Energy & Communications',
u'TR': u'Transportation',
u'TRAN': u'Transportation',
u'WAYS': u'Ways & Means'
}
committee_names = committees_abbrs.values()
committees_rgx = '(%s)' % '|'.join(
sorted(committee_names, key=len, reverse=True))
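# For example (hypothetical action text), the pattern matches full committee
# names embedded in an action string, preferring the longest name:
#   re.findall(committees_rgx, 'First reading, referred to Ways & Means.', re.I)
#   -> ['Ways & Means']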
# These are regex patterns that map to action categories.
_categorizer_rules = (
Rule(r'yeas, (?P<yes_votes>\d+); nays, (?P<no_votes>\d+); '
r'absent, (?P<absent_voters>\d+); excused, (?P<excused_voters>\d+)'),
Rule(r'Committee on (?P<committees>.+?) at \d'),
Rule(r'(?P<committees>.+?) relieved of further'),
Rule(r'Passed to (?P<committees>.+?) for \S+ reading'),
Rule(r'by (?P<committees>.+?) Committee'),
Rule(r'^Adopted', 'bill:passed'),
Rule(r'^Introduced', 'bill:introduced'),
Rule(r'Third reading, adopted', ['bill:reading:3', 'bill:passed']),
Rule(r'amendment adopted', 'amendment:passed'),
Rule(r'amendment not adopted', 'amendment:failed'),
Rule(r"(?i)third reading, (?P<pass_fail>(passed|failed))", 'bill:reading:3'),
Rule(r'Read first time', 'bill:reading:1'),
Rule(r"(?i)first reading, referred to (?P<committees>.*)\.", 'bill:reading:1'),
Rule(r"(?i)And refer to (?P<committees>.*)", 'committee:referred'),
Rule(r"(?i).* substitute bill substituted.*", 'bill:substituted'),
Rule(r"(?i)chapter (((\d+),?)+) \d+ laws.( .+)?", "other"), # XXX: Thom: Code stuff?
Rule(r"(?i)effective date \d{1,2}/\d{1,2}/\d{4}.*", "other"),
Rule(r"(?i)(?P<committees>\w+) - majority; do pass with amendment\(s\) (but without amendments\(s\))?.*\.", "committee:passed:favorable", "committee:passed"),
Rule(r"(?i)Executive action taken in the (House|Senate) committee on (?P<committees>.*) (at)? .*\.", "other"),
Rule(r"(?i)(?P<committees>\w+) \- Majority; do pass .* \(Majority Report\)", 'bill:passed'),
Rule(r"(?i)Conference committee appointed.", "other"),
Rule(r"(?i)Conference committee report;", 'other'),
Rule(r"(?i).+ - Majority; \d+.+ substitute bill be substituted, do pass", 'bill:passed'),
Rule(r"(?i)Signed by (?P<signed_chamber>(Representatives|Senators)) (?P<legislators>.*)", "bill:passed"),
Rule(r"(?i)Referred to (?P<committees>.*)(\.)?"),
Rule(r"(?i)(?P<from_committee>.*) relieved of further consideration. On motion, referred to (?P<committees>.*)", 'committee:referred'),
Rule(r"(?i)Governor partially vetoed", 'governor:vetoed:line-item'),
Rule(r"(?i)Governor vetoed", 'governor:vetoed'),
Rule(r"(?i)Governor signed", 'governor:signed'),
Rule(r"(?i)Passed final passage;", 'bill:passed'),
Rule(r"(?i)Failed final passage;", 'bill:failed'),
# Rule(r"(?i)"),
# Rule(r"(?i)"),
)
class Categorizer(BaseCategorizer):
rules = _categorizer_rules
def categorize(self, text):
'''Wrap categorize and add boilerplate committees.
'''
attrs = BaseCategorizer.categorize(self, text)
if 'committees' in attrs:
committees = attrs['committees']
for committee in re.findall(committees_rgx, text, re.I):
if committee not in committees:
committees.append(committee)
return attrs
```
#### File: openstates/wa/committees.py
```python
from .utils import xpath
from billy.scrape.committees import CommitteeScraper, Committee
import lxml.etree
class WACommitteeScraper(CommitteeScraper):
jurisdiction = 'wa'
_base_url = 'http://wslwebservices.leg.wa.gov/CommitteeService.asmx'
def scrape(self, chamber, term):
biennium = "%s-%s" % (term[0:4], term[7:9])
url = "%s/GetActiveCommittees?biennium=%s" % (self._base_url, biennium)
page = self.get(url)
page = lxml.etree.fromstring(page.content)
for comm in xpath(page, "//wa:Committee"):
agency = xpath(comm, "string(wa:Agency)")
comm_chamber = {'House': 'lower', 'Senate': 'upper'}[agency]
if comm_chamber != chamber:
continue
name = xpath(comm, "string(wa:Name)")
comm_id = xpath(comm, "string(wa:Id)")
# acronym = xpath(comm, "string(wa:Acronym)")
phone = xpath(comm, "string(wa:Phone)")
comm = Committee(chamber, name, _code=comm_id,
office_phone=phone)
self.scrape_members(comm, agency)
comm.add_source(url)
if comm['members']:
self.save_committee(comm)
def scrape_members(self, comm, agency):
# Can't get them to accept special characters (e.g. &) in URLs,
# no matter how they're encoded, so we use the SOAP API here.
template = """
<?xml version="1.0" encoding="utf-8"?>
<soap12:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap12="http://www.w3.org/2003/05/soap-envelope">
<soap12:Body>
<GetActiveCommitteeMembers xmlns="http://WSLWebServices.leg.wa.gov/">
<agency>%s</agency>
<committeeName>%s</committeeName>
</GetActiveCommitteeMembers>
</soap12:Body>
</soap12:Envelope>
""".strip()
body = template % (agency, comm['committee'].replace('&', '&amp;'))
headers = {'Content-Type': 'application/soap+xml; charset=utf-8'}
resp = self.post(self._base_url, data=body, headers=headers)
doc = lxml.etree.fromstring(resp.content)
if 'subcommittee' in comm['committee'].lower():
roles = ['chair', 'ranking minority member']
else:
roles = ['chair', 'vice chair', 'ranking minority member',
'assistant ranking minority member']
for i, member in enumerate(xpath(doc, "//wa:Member")):
name = xpath(member, "string(wa:Name)")
try:
role = roles[i]
except IndexError:
role = 'member'
comm.add_member(name, role)
```
#### File: openstates/wa/__init__.py
```python
import lxml.html
from billy.utils.fulltext import text_after_line_numbers
from .bills import WABillScraper
from .legislators import WALegislatorScraper
from .committees import WACommitteeScraper
from .events import WAEventScraper
settings = dict(SCRAPELIB_TIMEOUT=300)
metadata = dict(
name='Washington',
abbreviation='wa',
capitol_timezone='America/Los_Angeles',
legislature_name='Washington State Legislature',
legislature_url='http://www.leg.wa.gov/',
chambers = {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'House', 'title': 'Representative'},
},
terms=[
{'name': '2009-2010', 'start_year': 2009, 'end_year': 2010,
'sessions': ['2009-2010']},
{'name': '2011-2012', 'start_year': 2011, 'end_year': 2012,
'sessions': ['2011-2012']},
{'name': '2013-2014', 'start_year': 2013, 'end_year': 2014,
'sessions': ['2013-2014']},
{'name': '2015-2016', 'start_year': 2015, 'end_year': 2016,
'sessions': ['2015-2016']},
],
session_details = {
'2009-2010': {'display_name': '2009-2010 Regular Session',
'_scraped_name': '2009-10',
},
'2011-2012': {'display_name': '2011-2012 Regular Session',
'_scraped_name': '2011-12',
},
'2013-2014': {'display_name': '2013-2014 Regular Session',
'_scraped_name': '2013-14',
},
'2015-2016': {'display_name': '2015-2016 Regular Session',
'_scraped_name': '2015-16',
},
},
feature_flags = ['events', 'subjects', 'capitol_maps', 'influenceexplorer'],
capitol_maps=[
{"name": "Floor 1",
"url": 'http://static.openstates.org/capmaps/wa/f1.gif'
},
{"name": "Floor 2",
"url": 'http://static.openstates.org/capmaps/wa/f2.gif'
},
{"name": "Floor 3",
"url": 'http://static.openstates.org/capmaps/wa/f3.gif'
},
{"name": "Floor 4",
"url": 'http://static.openstates.org/capmaps/wa/f4.gif'
},
],
_ignored_scraped_sessions=['2007-08'],
)
def session_list():
from billy.scrape.utils import url_xpath
return url_xpath('http://apps.leg.wa.gov/billinfo/',
'//td[starts-with(@id, "ctl00_ContentPlaceHolder1_TabControl1")]/text()')
def extract_text(doc, data):
doc = lxml.html.fromstring(data)
text = ' '.join(x.text_content() for x in doc.xpath('//body/p'))
return text
```
#### File: openstates/wi/bills.py
```python
import datetime
import lxml.html
import os
import re
from collections import defaultdict
import scrapelib
from billy.scrape.utils import convert_pdf
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
motion_classifiers = {
'(Assembly|Senate)( substitute)? amendment': 'amendment',
'Report (passage|concurrence)': 'passage',
'Report (adoption|introduction and adoption) of Senate( Substitute)? Amendment': 'amendment',
'Report Assembly( Substitute)? Amendment': 'amendment',
'Read a third time': 'passage',
'Adopted': 'passage'
}
action_classifiers = {
'(Senate|Assembly)( substitute)? amendment .* offered': 'amendment:introduced',
'(Senate|Assembly)( substitute)? amendment .* rejected': 'amendment:failed',
'(Senate|Assembly)( substitute)? amendment .* adopted': 'amendment:passed',
'(Senate|Assembly)( substitute)? amendment .* laid on table': 'amendment:tabled',
'(Senate|Assembly)( substitute)? amendment .* withdrawn': 'amendment:withdrawn',
'Report (passage|concurrence).* recommended': 'committee:passed:favorable',
'Report approved by the Governor': 'governor:signed',
'.+ (withdrawn|added) as a co(author|sponsor)': 'other',
'R(ead (first time )?and r)?eferred to committee': 'committee:referred',
'Read a third time and (passed|concurred)': 'bill:passed',
'Adopted': 'bill:passed',
'Presented to the Governor': 'governor:received',
'Introduced by': 'bill:introduced',
'Read a second time': 'bill:reading:2',
}
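# For example, a history line such as "Read a third time and passed"
# (hypothetical) matches 'Read a third time and (passed|concurred)' above and
# is categorized as 'bill:passed'; lines matching nothing fall back to
# 'other' in scrape_bill_history below.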
class WIBillScraper(BillScraper):
jurisdiction = 'wi'
def scrape_subjects(self, year, site_id):
last_url = None
next_url = 'http://docs.legis.wisconsin.gov/%s/related/subject_index/index/' % year
# if you visit this page in your browser it is infinite-scrolled
# but if you disable javascript you'll see the 'Down' links
# that we use to scrape the data
self.subjects = defaultdict(list)
while last_url != next_url:
html = self.get(next_url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(next_url)
last_url = next_url
# get the 'Down' url
next_url = doc.xpath('//a[text()="Down"]/@href')[0]
# slug is upper case in links for special sessions
if site_id != 'reg':
site_id = site_id.upper()
a_path = '/document/session/%s/%s/' % (year, site_id)
# find all bill links to bills in this session
for bill_a in doc.xpath('//a[contains(@href, "%s")]' % a_path):
bill_id = bill_a.text_content().split()[-1]
# subject is in the immediately preceding span
preceding_subject = bill_a.xpath(
'./preceding::div[contains(@class,"qsSubject")]/text()')
# there wasn't a subject get the one from end of the prior page
if not preceding_subject:
preceding_subject = last_subject
else:
preceding_subject = preceding_subject[-1]
preceding_subject = preceding_subject.replace(u'\xe2\x80\x94',
'')
self.subjects[bill_id].append(preceding_subject)
# last subject on the page, in case we get a bill_id on next page
last_subject_div = doc.xpath(
'//div[contains(@class,"qsSubject")]/text()')
if last_subject_div:
last_subject = last_subject_div[0]
def scrape(self, chamber, session):
# get year
for t in self.metadata['terms']:
if session in t['sessions']:
year = t['name'][0:4]
break
site_id = self.metadata['session_details'][session].get('site_id',
'reg')
chamber_slug = {'upper': 'sen', 'lower': 'asm'}[chamber]
self.scrape_subjects(year, site_id)
types = ('bill', 'joint_resolution', 'resolution')
for type in types:
url = 'http://docs.legis.wisconsin.gov/%s/proposals/%s/%s/%s' % (
year, site_id, chamber_slug, type)
self.scrape_bill_list(chamber, session, url)
def scrape_bill_list(self, chamber, session, url):
if 'joint_resolution' in url:
bill_type = 'joint resolution'
elif 'resolution' in url:
bill_type = 'resolution'
elif 'bill' in url:
bill_type = 'bill'
try:
data = self.get(url).text
except scrapelib.HTTPError:
self.warning('skipping URL %s' % url)
return
doc = lxml.html.fromstring(data)
doc.make_links_absolute(url)
bill_list = doc.xpath('//ul[@class="infoLinks"]/li/div[@class="row-fluid"]')
for b in bill_list:
bill_url = b.xpath('./div[@class="span3"]/a/@href')[0]
bill_id = bill_url.rsplit('/', 1)[-1]
bill_id = bill_id.upper()
title = b.xpath('./div[@class="span6"]/text()')[0].replace(' - Relating to: ', '').strip()
bill = Bill(session, chamber, bill_id, title,
type=bill_type)
bill['subjects'] = list(set(self.subjects[bill_id]))
self.scrape_bill_history(bill, bill_url)
def scrape_bill_history(self, bill, url):
body = self.get(url).text
doc = lxml.html.fromstring(body)
doc.make_links_absolute(url)
bill['status'] = doc.xpath('//div[@class="propStatus"]/h2/text()')[0]
# add versions
for a in doc.xpath('//ul[@class="docLinks"]/li//a'):
# blank ones are PDFs that follow HTML
if not a.text:
continue
elif ('Wisconsin Act' in a.text or
'Memo' in a.text or
'Government Accountability Board' in a.text or
'Redistricting Attachment' in a.text or
'Budget Index Report' in a.text or
'Veto Message' in a.text
):
bill.add_document(a.text, a.get('href'))
elif ('Bill Text' in a.text or
'Resolution Text' in a.text or
'Enrolled Joint Resolution' in a.text or
'Engrossed Resolution' in a.text or
'Text as Enrolled' in a.text
):
bill.add_version(a.text, a.get('href'),
on_duplicate="ingore", mimetype="text/html")
pdf = a.xpath('following-sibling::span/a/@href')[0]
bill.add_version(a.text, pdf,
on_duplicate="ignore",
mimetype="application/pdf")
elif a.text in ('Amendments', 'Fiscal Estimates',
'Record of Committee Proceedings'):
extra_doc_url = a.get('href')
extra_doc = lxml.html.fromstring(self.get(extra_doc_url).text)
extra_doc.make_links_absolute(extra_doc_url)
for extra_a in extra_doc.xpath('//li//a'):
if extra_a.text:
bill.add_document(extra_a.text, extra_a.get('href'))
else:
self.warning('unknown document %s %s' % (bill['bill_id'],
a.text))
# add actions (second history dl is the full list)
hist_table = doc.xpath('//table[@class="history"]')[1]
for row in hist_table.xpath('.//tr[@class="historyRow"]'):
date_house, action_td, journal = row.getchildren()
date, actor = date_house.text_content().split()
date = datetime.datetime.strptime(date, '%m/%d/%Y')
actor = {'Asm.': 'lower', 'Sen.': 'upper'}[actor]
action = action_td.text_content()
if 'Introduced by' in action:
self.parse_sponsors(bill, action)
# classify actions
atype = 'other'
for regex, type in action_classifiers.iteritems():
if re.match(regex, action):
atype = type
break
kwargs = {}
if "committee:referred" in atype:
kwargs['committees'] = re.sub(
'R(ead (first time )?and r)?eferred to committee',
'', action)
bill.add_action(actor, action, date, atype, **kwargs)
# if this is a vote, add a Vote to the bill
if 'Ayes' in action:
vote_url = action_td.xpath('a/@href')
if vote_url:
self.add_vote(bill, actor, date, action, vote_url[0])
bill.add_source(url)
self.save_bill(bill)
def parse_sponsors(self, bill, action):
if ';' in action:
lines = action.split(';')
else:
lines = [action]
for line in lines:
match = re.match(
'(Introduced|Cosponsored) by (?:joint )?(Senator|Representative|committee|Joint Legislative Council|Law Revision Committee)s?(.*)',
line)
if not match:
# So far, the only one that doesn't match is
# http://docs.legis.wisconsin.gov/2011/proposals/ab568
# In the following format:
# Introduced by Representatives Krusick and <NAME>, by ... ;
match = re.match(
'Introduced by (Representatives|Senators) (.*),',
line
)
if not match:
# Nothing to do here :)
continue
type = "Introduced"
title, names = match.groups()
raise Exception("Foo")
else:
type, title, people = match.groups()
if type == 'Introduced':
sponsor_type = 'primary'
elif type == 'Cosponsored':
sponsor_type = 'cosponsor'
if title == 'Senator':
sponsor_chamber = 'upper'
elif title == 'Representative':
sponsor_chamber = 'lower'
elif title == 'committee':
sponsor_chamber = bill['chamber']
people = 'Committee ' + people
elif title in ('Joint Legislative Council',
'Law Revision Committee'):
sponsor_chamber = bill['chamber']
people = title
for r in re.split(r'\sand\s|\,', people):
if r.strip():
bill.add_sponsor(sponsor_type, r.strip(),
chamber=sponsor_chamber)
def add_vote(self, bill, chamber, date, text, url):
votes = re.findall(r'Ayes,?[\s]?(\d+)[,;]\s+N(?:oes|ays),?[\s]?(\d+)', text)
(yes, no) = int(votes[0][0]), int(votes[0][1])
vtype = 'other'
for regex, type in motion_classifiers.iteritems():
if re.match(regex, text):
vtype = type
break
v = Vote(chamber, date, text, yes > no, yes, no, 0, type=vtype)
# fetch the vote itself
if url:
v.add_source(url)
if 'av' in url:
self.add_house_votes(v, url)
elif 'sv' in url:
self.add_senate_votes(v, url)
# other count is brute forced
v['other_count'] = len(v['other_votes'])
v.validate()
bill.add_vote(v)
def add_senate_votes(self, vote, url):
html = self.get(url).text
doc = lxml.html.fromstring(html)
# what to do with the pieces
vfunc = None
# a game of div-div-table
for ddt in doc.xpath('//div/div/table'):
text = ddt.text_content()
if 'Wisconsin Senate' in text or 'SEQUENCE NO' in text:
continue
elif 'AYES -' in text:
for name in text.split('\n\n\n\n\n')[1:]:
if name.strip() and 'AYES' not in name:
vote.yes(name.strip())
elif 'NAYS -' in text:
for name in text.split('\n\n\n\n\n')[1:]:
if name.strip() and 'NAYS' not in name:
vote.no(name.strip())
elif 'NOT VOTING -' in text:
for name in text.split('\n\n\n\n\n')[1:]:
if name.strip() and "NOT VOTING" not in name:
vote.other(name.strip())
elif text.strip():
raise ValueError('unexpected block in vote')
def add_house_votes(self, vote, url):
html = self.get(url).text
doc = lxml.html.fromstring(html)
header_td = doc.xpath('//td[@align="center"]')[0].text_content()
ayes_nays = re.findall('AYES - (\d+) .*? NAYS - (\d+)', header_td)
vote['yes_count'] = int(ayes_nays[0][0])
vote['no_count'] = int(ayes_nays[0][1])
for td in doc.xpath('//td[@width="120"]'):
name = td.text_content()
if name == 'NAME':
continue
for vote_td in td.xpath('./preceding-sibling::td'):
if vote_td.text_content() == 'Y':
vote.yes(name)
elif vote_td.text_content() == 'N':
vote.no(name)
elif vote_td.text_content() == 'NV':
vote.other(name)
```
#### File: openstates/wi/committees.py
```python
from billy.scrape.committees import CommitteeScraper, Committee
import lxml.html
class WICommitteeScraper(CommitteeScraper):
jurisdiction = 'wi'
def scrape_committee(self, name, url, chamber):
com = Committee(chamber, name)
com.add_source(url)
data = self.get(url).text
doc = lxml.html.fromstring(data)
for leg in doc.xpath('//div[@id="members"]/div[@id="members"]/p/a/text()'):
leg = leg.replace('Representative ', '')
leg = leg.replace('Senator ', '')
leg = leg.strip()
if ' (' in leg:
leg, role = leg.split(' (')
if 'Vice-Chair' in role:
role = 'vice-chair'
elif 'Co-Chair' in role:
role = 'co-chair'
elif 'Chair' in role:
role = 'chair'
else:
raise Exception('unknown role: %s' % role)
else:
role = 'member'
com.add_member(leg, role)
self.save_committee(com)
def scrape(self, term, chambers):
for chamber in chambers+["joint"]:
url = 'http://docs.legis.wisconsin.gov/2015/committees/'
if chamber == 'joint':
url += "joint"
elif chamber == 'upper':
url += 'senate'
else:
url += 'assembly'
data = self.get(url).text
doc = lxml.html.fromstring(data)
doc.make_links_absolute(url)
for a in doc.xpath('//ul[@class="docLinks"]/li/p/a'):
if "(Disbanded" not in a.text:
comm_name = a.text
comm_name = comm_name.replace("Committee on", "")
comm_name = comm_name.replace("Assembly", "")
comm_name = comm_name.replace("Joint Survey", "")
comm_name = comm_name.replace("Joint Review", "")
comm_name = comm_name.replace("Joint", "")
comm_name = comm_name.replace("Senate", "")
comm_name = comm_name.replace("Committee for", "")
comm_name = comm_name.replace("Committee", "")
comm_name = comm_name.strip()
self.scrape_committee(comm_name, a.get('href'), chamber)
```
#### File: openstates/wi/events.py
```python
import datetime as dt
from billy.scrape.events import Event, EventScraper
from openstates.utils import LXMLMixin
import scrapelib
import pytz
calurl = "http://committeeschedule.legis.wisconsin.gov/?filter=Upcoming&committeeID=-1"
class WIEventScraper(EventScraper, LXMLMixin):
jurisdiction = 'wi'
_tz = pytz.timezone('US/Eastern')
def scrape_participants(self, session, href):
try:
page = self.lxmlize(href)
except scrapelib.HTTPError:
self.warning("Committee page not found for this event")
return []
legs = page.xpath("//a[contains(@href, '/Pages/leg-info.aspx')]/text()")
role_map = {"participant": "participant",
"Chair": "chair",
"Co-Chair": "chair",
"Vice-Chair": "participant"}
ret = []
for leg in legs:
name = leg
title = 'participant'
if "(" and ")" in leg:
name, title = leg.split("(", 1)
title = title.replace(")", " ").strip()
name = name.strip()
title = role_map[title]
ret.append({
"name": name,
"title": title
})
return ret
def scrape(self, session, chambers):
page = self.lxmlize(calurl)
events = page.xpath("//table[@class='agenda-body']//tr")[1:]
for event in events:
comit_url = event.xpath(
".//a[contains(@href, '/Pages/comm-info.aspx?c=')]")
if len(comit_url) != 1:
raise Exception
comit_url = comit_url[0]
who = self.scrape_participants(session, comit_url.attrib['href'])
tds = event.xpath("./*")
date = tds[0].text_content().strip()
cttie = tds[1].text_content().strip()
cttie_chamber, cttie = [x.strip() for x in cttie.split(" - ", 1)]
info = tds[2]
name = info.xpath("./a[contains(@href, 'raw')]")[0]
notice = name.attrib['href']
name = name.text
time, where = info.xpath("./i/text()")
what = tds[3].text_content()
what = what.replace("Items: ", "")
if "(None)" in what:
continue
what = [x.strip() for x in what.split(";")]
when = ", ".join([date, str(dt.datetime.now().year), time])
when = dt.datetime.strptime(when, "%a %b %d, %Y, %I:%M %p")
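# e.g. (hypothetical) a date cell "Wed Mar 2" and time "1:30 PM" scraped in
# 2016 join to "Wed Mar 2, 2016, 1:30 PM", which the format string above parses.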
event = Event(session, when, 'committee:meeting', name,
location=where, link=notice)
event.add_source(calurl)
event.add_participant('host', cttie, 'committee',
chamber=cttie_chamber)
event.add_document("notice", notice, mimetype='application/pdf')
for thing in who:
event.add_participant(thing['title'], thing['name'],
'legislator', chamber=cttie_chamber)
self.save_event(event)
```
#### File: openstates/wi/__init__.py
```python
import datetime
from billy.utils.fulltext import pdfdata_to_text, text_after_line_numbers
from .bills import WIBillScraper
from .legislators import WILegislatorScraper
from .committees import WICommitteeScraper
from .events import WIEventScraper
metadata = {
'abbreviation': 'wi',
'name': 'Wisconsin',
'capitol_timezone': 'America/Chicago',
'legislature_name': 'Wisconsin State Legislature',
'legislature_url': 'http://legis.wisconsin.gov/',
'chambers': {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'Assembly', 'title': 'Representative'},
},
'terms': [
#{'name': '2001-2002',
# 'sessions': ['2001 Regular Session',
# 'May 2002 Special Session',
# 'Jan 2002 Special Session',
# 'May 2001 Special Session'],
# 'start_year': 2001, 'end_year': 2002},
#{'name': '2003-2004',
# 'sessions': ['2003 Regular Session',
# 'Jan 2003 Special Session'],
# 'start_year': 2003, 'end_year': 2004},
#{'name': '2005-2006',
# 'sessions': ['2005 Regular Session',
# 'Jan 2005 Special Session'],
# 'start_year': 2005, 'end_year': 2006 },
#{'name': '2007-2008',
# 'sessions': ['March 2008 Special Session',
# 'April 2008 Special Session',
# 'Jan 2007 Special Session',
# 'Oct 2007 Special Session',
# 'Dec 2007 Special Session',
# '2007 Regular Session' ],
# 'start_year': 2007, 'end_year': 2008 },
{'name': '2009-2010',
'sessions': ['June 2009 Special Session',
'December 2009 Special Session',
'2009 Regular Session'],
'start_year': 2009, 'end_year': 2010},
{'name': '2011-2012',
'sessions': ['2011 Regular Session', 'January 2011 Special Session',
'September 2011 Special Session'],
'start_year': 2011, 'end_year': 2012},
{'name': '2013-2014',
'sessions': ['2013 Regular Session', 'October 2013 Special Session',
'December 2013 Special Session', 'January 2014 Special Session' ],
'start_year': 2013, 'end_year': 2014},
{'name': '2015-2016',
'sessions': ['2015 Regular Session'],
'start_year': 2015, 'end_year': 2016},
],
'session_details': {
'2009 Regular Session': {'start_date': datetime.date(2009,1,13),
'end_date': datetime.date(2011,1,3),
'type': 'primary',
'display_name': '2009 Regular Session',
'_scraped_name': '2009 Regular Session',
},
'June 2009 Special Session': {
'type': 'special', 'site_id': 'jn9',
'display_name': 'Jun 2009 Special Session',
'_scraped_name': 'June 2009 Special Session',
},
'December 2009 Special Session': {
'type': 'special', 'site_id': 'de9',
'display_name': 'Dec 2009 Special Session',
'_scraped_name': 'December 2009 Special Session',
},
'2011 Regular Session': {'start_date': datetime.date(2011,1,11),
'end_date': datetime.date(2013,1,7),
'type': 'primary',
'display_name': '2011 Regular Session',
'_scraped_name': '2011 Regular Session',
},
'January 2011 Special Session': {
'type': 'special', 'site_id': 'jr1',
'display_name': 'Jan 2011 Special Session',
'_scraped_name': 'January 2011 Special Session',
},
'September 2011 Special Session': {
'type': 'special', 'site_id': 'se1',
'display_name': 'Sep 2011 Special Session',
'_scraped_name': 'September 2011 Special Session',
},
'2013 Regular Session': {'start_date': datetime.date(2013,1,7),
'end_date': datetime.date(2014,1,13),
'type': 'primary',
'display_name': '2013 Regular Session',
'_scraped_name': '2013 Regular Session',
},
'October 2013 Special Session': {
'type': 'special',
'display_name': 'Oct 2013 Special Session',
'_scraped_name': 'October 2013 Special Session',
'site_id': 'oc3'
},
'December 2013 Special Session': {
'type': 'special',
'display_name': 'Dec 2013 Special Session',
'_scraped_name': 'December 2013 Special Session',
'site_id': 'de3'
},
'January 2014 Special Session': {
'type': 'special',
'display_name': 'Jan 2014 Special Session',
'_scraped_name': 'January 2014 Special Session',
'site_id': 'jr4'
},
'2015 Regular Session': {'start_date': datetime.date(2015,1,5),
'end_date': datetime.date(2016,1,11),
'type': 'primary',
'display_name': '2015 Regular Session',
'_scraped_name': '2015 Regular Session',
},
},
'feature_flags': ['subjects',
'events', 'influenceexplorer'],
'_ignored_scraped_sessions': [
'February 2015 Extraordinary Session',
'2007 Regular Session', 'April 2008 Special Session',
'March 2008 Special Session', 'December 2007 Special Session',
'October 2007 Special Session', 'January 2007 Special Session',
'February 2006 Special Session',
'2005 Regular Session', 'January 2005 Special Session',
'2003 Regular Session', 'January 2003 Special Session',
'2001 Regular Session', 'May 2002 Special Session',
'January 2002 Special Session', 'May 2001 Special Session',
'1999 Regular Session', 'May 2000 Special Session',
'October 1999 Special Session', '1997 Regular Session',
'April 1998 Special Session', '1995 Regular Session',
'January 1995 Special Session', 'September 1995 Special Session']
}
def session_list():
from billy.scrape.utils import url_xpath
sessions = url_xpath('http://docs.legis.wisconsin.gov/search',
"//select[@name='sessionNumber']/option/text()")
return [session.strip(' -') for session in sessions]
def extract_text(doc, data):
is_pdf = (doc['mimetype'] == 'application/pdf' or
doc['url'].endswith('.pdf'))
if is_pdf:
return text_after_line_numbers(pdfdata_to_text(data))
```
#### File: openstates/wv/committees.py
```python
import re
from billy.scrape.committees import CommitteeScraper, Committee
import lxml.html
class WVCommitteeScraper(CommitteeScraper):
jurisdiction = "wv"
def scrape(self, chamber, term):
getattr(self, 'scrape_' + chamber)()
def scrape_lower(self):
url = 'http://www.legis.state.wv.us/committees/house/main.cfm'
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
xpath = '//a[contains(@href, "HouseCommittee")]'
for link in doc.xpath(xpath):
text = link.text_content().strip()
if text == '-':
continue
committee = self.scrape_lower_committee(link=link, name=text)
committee.add_source(url)
self.save_committee(committee)
url = 'http://www.legis.state.wv.us/committees/interims/interims.cfm'
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
xpath = '//a[contains(@href, "committee.cfm")]'
for link in doc.xpath(xpath):
text = link.text_content().strip()
if text == '-':
continue
committee = self.scrape_interim_committee(link=link, name=text)
committee.add_source(url)
self.save_committee(committee)
def scrape_lower_committee(self, link, name):
url = re.sub(r'\s+', '', link.attrib['href'])
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
comm = Committee('lower', name)
comm.add_source(url)
xpath = '//a[contains(@href, "?member=")]'
for link in doc.xpath(xpath):
name = link.text_content().strip()
name = re.sub(r'^Delegate\s+', '', name)
role = link.getnext().text or 'member'
comm.add_member(name, role.strip())
return comm
def scrape_interim_committee(self, link, name):
url = re.sub(r'\s+', '', link.attrib['href'])
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
comm = Committee('joint', name)
comm.add_source(url)
xpath = '//a[contains(@href, "?member=")]'
for link in doc.xpath(xpath):
name = link.text_content().strip()
name = re.sub(r'^Delegate\s+', '', name)
name = re.sub(r'^Senator\s+', '', name)
role = link.getnext().text or 'member'
comm.add_member(name, role.strip())
return comm
def scrape_upper(self):
url = 'http://www.legis.state.wv.us/committees/senate/main.cfm'
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
xpath = '//a[contains(@href, "SenateCommittee")]'
for link in doc.xpath(xpath):
text = link.text_content().strip()
if text == '-':
continue
committee = self.scrape_upper_committee(link=link, name=text)
committee.add_source(url)
self.save_committee(committee)
def scrape_upper_committee(self, link, name):
url = re.sub(r'\s+', '', link.attrib['href'])
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
comm = Committee('upper', name)
comm.add_source(url)
xpath = '//a[contains(@href, "?member=")]'
for link in doc.xpath(xpath):
name = link.text_content().strip()
name = re.sub(r'^Delegate\s+', '', name)
role = link.getnext().text or 'member'
comm.add_member(name, role.strip())
return comm
```
#### File: openstates/wy/bills.py
```python
from collections import defaultdict
import datetime
import re
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
from billy.scrape.utils import convert_pdf
from openstates.utils import LXMLMixin
import scrapelib
def split_names(voters):
"""Representative(s) <NAME>, <NAME>, Burkhart, Byrd, Campbell, <NAME>, Connolly, <NAME>., <NAME>, Harshman, Illoway, Jaggi, Kasperik, Krone, Lockhart, Loucks, Lubnau, Madden, McOmie, Moniz, <NAME>., <NAME>, Petersen, Petroff, Roscoe, Semlek, Steward, <NAME>, Throne, Vranish, Wallis, Zwonitzer, Dn. and Zwonitzer, Dv."""
voters = voters.split(':', 1)[-1]
voters = re.sub(r'(Senator|Representative)(\(s\))?', "", voters)
voters = re.sub(r'\s+', " ", voters)
# Split on a comma or "and" except when there's a following initial
voters = [
x.strip() for x in
re.split(r'(?:,\s(?![A-Z]\.))|(?:\sand\s)', voters)
]
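# For a hypothetical "Ayes: Representative(s) Brown, Smith, J. and Jones"
# this yields ['Brown', 'Smith, J.', 'Jones']: the lookahead keeps
# "Smith, J." intact instead of splitting on the comma before an initial.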
return voters
def clean_line(line):
return line.\
replace('\n', ' ').\
decode('utf-8').\
strip()
def categorize_action(action):
categorizers = (
('Introduced and Referred', ('bill:introduced', 'committee:referred')),
('Rerefer to', 'committee:referred'),
('Do Pass Failed', 'committee:failed'),
('2nd Reading:Passed', 'bill:reading:2'),
('3rd Reading:Passed', ('bill:reading:3', 'bill:passed')),
('Failed 3rd Reading', ('bill:reading:3', 'bill:failed')),
('Did Not Adopt', 'amendment:failed'),
('Withdrawn by Sponsor', 'bill:withdrawn'),
('Governor Signed', 'governor:signed'),
('Recommend (Amend and )?Do Pass', 'committee:passed:favorable'),
('Recommend (Amend and )?Do Not Pass', 'committee:passed:unfavorable'),
)
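# e.g. an action reading "Governor Signed" (hypothetical) is categorized as
# 'governor:signed'; actions matching no pattern fall through to 'other'.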
for pattern, types in categorizers:
if re.findall(pattern, action):
return types
return 'other'
class WYBillScraper(BillScraper, LXMLMixin):
jurisdiction = 'wy'
def scrape(self, chamber, session):
chamber_abbrev = {'upper': 'SF', 'lower': 'HB'}[chamber]
url = ("http://legisweb.state.wy.us/%s/billreference/"
"BillReference.aspx?type=%s" % (session, chamber_abbrev))
page = self.lxmlize(url)
for tr in page.xpath("//table[contains(@id,'cphContent_gvBills')]//tr")[1:]:
bill_id = tr.xpath("string(td[1])").strip()
title = tr.xpath("string(td[2])").strip()
if bill_id[0:2] in ['SJ', 'HJ']:
bill_type = 'joint resolution'
else:
bill_type = 'bill'
bill = Bill(session, chamber, bill_id, title, type=bill_type)
self.scrape_digest(bill)
# versions
for a in (tr.xpath('td[8]//a') + tr.xpath('td[11]//a') +
tr.xpath('td[12]//a')):
# skip references to other bills
if a.text.startswith('See'):
continue
bill.add_version(a.text, a.get('href'),
mimetype='application/pdf')
# documents
fnote = tr.xpath('td[9]//a')
if fnote:
bill.add_document('Fiscal Note', fnote[0].get('href'))
summary = tr.xpath('td[14]//a')
if summary:
bill.add_document('Summary', summary[0].get('href'))
bill.add_source(url)
self.save_bill(bill)
def scrape_digest(self, bill):
digest_url = 'http://legisweb.state.wy.us/%(session)s/Digest/%(bill_id)s.pdf' % bill
bill.add_source(digest_url)
try:
(filename, response) = self.urlretrieve(digest_url)
all_text = convert_pdf(filename, type='text')
except scrapelib.HTTPError:
self.warning('no digest for %s' % bill['bill_id'])
return
if all_text.strip() == "":
self.warning(
'Non-functional digest for bill {}'.
format(bill['bill_id'])
)
return
# Split the digest's text into sponsors, description, and actions
SPONSOR_RE = r'(?sm)Sponsored By:\s+(.*?)\n\n'
DESCRIPTION_RE = r'(?sm)\n\n((?:AN\s*?ACT|A JOINT RESOLUTION) .*?)\n\n'
ACTIONS_RE = r'(?sm)\n\n(\d{1,2}/\d{1,2}/\d{4}.*)'
ext_title = re.search(DESCRIPTION_RE, all_text).group(1)
bill_desc = ext_title.replace('\n', ' ')
bill_desc = re.sub(" *"," ",bill_desc.decode('utf-8')).encode('utf-8')
bill['description'] = bill_desc
sponsor_span = re.search(SPONSOR_RE, all_text).group(1)
sponsors = ''
sponsors = sponsor_span.replace('\n', ' ')
if sponsors:
if 'Committee' in sponsors:
bill.add_sponsor('primary', sponsors)
else:
if bill['chamber'] == 'lower':
sp_lists = sponsors.split('and Senator(s)')
else:
sp_lists = sponsors.split('and Representative(s)')
for spl in sp_lists:
for sponsor in split_names(spl):
sponsor = sponsor.strip()
if sponsor != "":
bill.add_sponsor('primary', sponsor)
action_re = re.compile('(\d{1,2}/\d{1,2}/\d{4})\s+(H |S )?(.+)')
vote_total_re = re.compile('(Ayes )?(\d*)(\s*)Nays(\s*)(\d+)(\s*)Excused(\s*)(\d+)(\s*)Absent(\s*)(\d+)(\s*)Conflicts(\s*)(\d+)')
# initial actor is bill chamber
actor = bill['chamber']
actions = []
action_lines = re.search(ACTIONS_RE, all_text).group(1).split('\n')
action_lines = iter(action_lines)
for line in action_lines:
line = clean_line(line)
# skip blank lines
if not line:
continue
amatch = action_re.match(line)
if amatch:
date, achamber, action = amatch.groups()
# change actor if one is on this action
if achamber == 'H ':
actor = 'lower'
elif achamber == 'S ':
actor = 'upper'
date = datetime.datetime.strptime(date, '%m/%d/%Y')
bill.add_action(actor, action.strip(), date,
type=categorize_action(action))
elif line == 'ROLL CALL':
voters = defaultdict(str)
# if we hit a roll call, use an inner loop to consume lines
# in a pseudo-state machine manner, 3 types
# Ayes|Nays|Excused|... - indicates next line is voters
# : (Senators|Representatives): ... - voters
# \d+ Nays \d+ Excused ... - totals
voters_type = None
for ainext in action_lines:
nextline = clean_line(ainext)
if not nextline:
continue
breakers = [ "Ayes:", "Nays:", "Nayes:", "Excused:",
"Absent:", "Conflicts:" ]
for breaker in breakers:
if nextline.startswith(breaker):
voters_type = breaker[:-1]
if voters_type == "Nayes":
voters_type = "Nays"
self.log("Fixed a case of 'Naye-itis'")
nextline = nextline[len(breaker)-1:]
if nextline.startswith(': '):
voters[voters_type] = nextline
elif nextline in ('Ayes', 'Nays', 'Excused', 'Absent',
'Conflicts'):
voters_type = nextline
elif vote_total_re.match(nextline):
#_, ayes, _, nays, _, exc, _, abs, _, con, _ = \
tupple = vote_total_re.match(nextline).groups()
ayes = tupple[1]
nays = tupple[4]
exc = tupple[7]
abs = tupple[10]
con = tupple[13]
passed = (('Passed' in action or
'Do Pass' in action or
'Did Concur' in action or
'Referred to' in action) and
'Failed' not in action)
vote = Vote(actor, date, action, passed, int(ayes),
int(nays), int(exc) + int(abs) + int(con))
vote.add_source(digest_url)
for vtype, voters in voters.iteritems():
for voter in split_names(voters):
if voter:
if vtype == 'Ayes':
vote.yes(voter)
elif vtype == 'Nays':
vote.no(voter)
else:
vote.other(voter)
# done collecting this vote
bill.add_vote(vote)
break
else:
# if it is a stray line within the vote, it is a
# continuation of the voter list
# (sometimes has a newline)
voters[voters_type] += ' ' + nextline
```
#### File: openstates/wy/__init__.py
```python
import re
import datetime
from billy.scrape.utils import url_xpath
from billy.utils.fulltext import pdfdata_to_text
from .bills import WYBillScraper
from .legislators import WYLegislatorScraper
from .committees import WYCommitteeScraper
from .events import WYEventScraper
metadata = {
'name': 'Wyoming',
'abbreviation': 'wy',
'legislature_name': 'Wyoming State Legislature',
'legislature_url': 'http://legisweb.state.wy.us/',
'capitol_timezone': 'America/Denver',
'chambers': {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'House', 'title': 'Representative'},
},
'terms': [
{
'name': '2011-2012',
'start_year': 2011,
'end_year': 2012,
'sessions': ['2011','2012'],
},
{
'name': '2013-2014',
'start_year': 2013,
'end_year': 2014,
'sessions': ['2013', '2014'],
},
{
'name': '2015-2016',
'start_year': 2015,
'end_year': 2016,
'sessions': ['2015', '2016'],
},
],
'session_details': {
'2011': {
'type': 'primary',
'display_name': '2011 General Session',
'_scraped_name': '2011 General Session'
},
'2012': {
'type': 'special',
'display_name': '2012 Budget Session',
'_scraped_name': '2012 Budget Session'
},
'2013': {
'type': 'primary',
'display_name': '2013 General Session',
'_scraped_name': '2013 General Session'
},
'2014': {
'type': 'primary',
'display_name': '2014 General Session',
'_scraped_name': '2014 General Session'
},
'2015': {
'type': 'primary',
'display_name': '2015 General Session',
'_scraped_name': '2015 General Session'
},
'2016': {
'type': 'primary',
'display_name': '2016 General Session',
'_scraped_name': '2016 General Session',
},
},
'feature_flags': ['influenceexplorer', 'events'],
# The reason the Budget sessions are ignored is that the budget
# session is just for the budget bill, which is HB 1
# (http://openstates.org/wy/bills/2014/HB1/).
# So we avoid the new session, because we'd dupe all bills.
'_ignored_scraped_sessions': [
'2014 Budget Session',
'2010 Budget Session',
'2009 General Session',
'2008 Budget Session',
'2007 General Session',
'2006 Budget Session',
'2005 General Session',
'2004 Budget Session',
'2003 General Session',
'2002 Budget Session',
'2001 General Session',
],
}
def session_list():
sessions = url_xpath('http://legisweb.state.wy.us/LSOWeb/SessionArchives'
'.aspx', '//div[@id="divLegContent"]/a/p/text()')
return sessions
def extract_text(doc, data):
return ' '.join(line for line in pdfdata_to_text(data).splitlines()
if re.findall('[a-z]', line))
```
#### File: scripts/affected_code/fl-debug.py
```python
import re
import webbrowser
import collections
import lxml.html
import logbook
from core.utils import parse, get_billtext
from core.fl import Lexer, Parser
logger = logbook.Logger('fl-debug')
Section = collections.namedtuple('Section', 'enum content')
def extract_sections(text):
doc = lxml.html.fromstring(text)
text = '\n'.join(n.text_content() for n in doc.xpath('//td[2]'))
text = text.replace(u'\xa0', ' ')
# Note, currently skips last section (usually effective date).
matches = re.finditer(' Section (\d\w*)\.\s+(.+?)(?:\n )', text, re.S)
for m in matches:
enum = m.group(1)
content = re.sub(r'\s+', ' ', m.group(2))
yield Section(enum, content)
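# Hedged example of what the pattern above yields (made-up bill text, shown
# only to illustrate the Section named tuple):
#   " Section 1.  Subsection (1) of section 20.255, Florida Statutes, ...\n "
#   -> Section(enum='1', content='Subsection (1) of section 20.255, ...')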
def main():
for filename, text in get_billtext('fl'):
logger.info('extracting sections: %r' % filename)
# webbrowser.open('file:///%s' % filename)
for section in extract_sections(text):
section_text = section.content
print section_text
if 'repeal' in section_text.lower() or 'add' in section_text.lower():
# import pdb;pdb.set_trace()
tokens = parse(Lexer, Parser, None, section_text)
import pdb;pdb.set_trace()
if __name__ == '__main__':
main()
```
#### File: scripts/affected_code/ny-debug.py
```python
import os
import re
import pprint
from functools import partial
from os.path import join
from utils import parse
from ny import Lexer, Parser, ParserState
DATA = '/home/thom/data/ny_billtext/data'
def extract_sections(text):
# Slice beginning crap.
_, text = text.split('DO ENACT AS FOLLOWS:')
# Kill line numbers.
text = re.sub(r' {3,4}\d+ {2}', '', text)
paragraphs = []
text = iter(text.splitlines())
lines = []
while True:
try:
line = next(text)
except StopIteration:
paragraphs.append(' '.join(lines))
break
lines.append(line)
if len(line) != 72:
paragraphs.append(' '.join(lines))
lines = []
def filterfunc(s):
return (not (s.isupper() or ('shall take effect' in s)) \
and (re.search(r'^ Section +1.', s) or \
re.search(r'^ S {1,2}\d+', s)))
paragraphs = filter(filterfunc, paragraphs)
paragraphs = map(partial(re.sub, r'\s+', ' '), paragraphs)
return paragraphs
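# Rough sketch of the pipeline above (assumed bill-text layout):
#   1. everything up to "DO ENACT AS FOLLOWS:" is discarded,
#   2. printed line numbers such as "    1  " are stripped,
#   3. physical lines are joined into paragraphs (a line shorter than the
#      full 72-character width ends a paragraph), and
#   4. only paragraphs that look like section text ("  Section 1." or
#      "  S 2 ...") are kept and whitespace-normalized.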
def main():
for filename in os.listdir(DATA):
filename = join(DATA, filename)
with open(filename) as f:
text = f.read()
sections = extract_sections(text)
for s in sections:
print s
parsed = parse(Lexer, Parser, ParserState, s)
print s
print filename
pprint.pprint(parsed)
import pdb;pdb.set_trace()
if __name__ == '__main__':
main()
```
#### File: affected_code/scripts/fetch_fulltext.py
```python
import sys
import os
from os.path import join
from billy import db
from billy.conf import settings
import scrapelib
import logbook
def main(abbr):
request_defaults = {
# 'proxies': {"http": "localhost:8888"},
'timeout': 5.0,
'headers': {
'Accept': ('text/html,application/xhtml+xml,application/'
'xml;q=0.9,*/*;q=0.8'),
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5',
'User-Agent': ('Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0.2) '
'Gecko/20100101 Firefox/10.0.2'),
},
'follow_robots': False,
# Note: this script needs to be run in the same dir as billy_settings.py
}
logger = logbook.Logger()
DATA = join(settings.BILLY_DATA_DIR, abbr, 'billtext')
try:
os.makedirs(DATA)
except OSError:
pass
logger.info('writing files to %r' % DATA)
session = scrapelib.Scraper(
cache_obj=scrapelib.FileCache('cache'),
cache_write_only=False,
use_cache_first=True,
requests_per_minute=0,
**request_defaults)
for bill in db.bills.find({'state': abbr}):
if len(bill['versions']):
bill_id = bill['bill_id']
url = bill['versions'][0]['url']
logger.info('trying %r: %r' % (bill_id, url))
text = session.get(url).text
with open(join(DATA, bill['_id']), 'w') as f:
f.write(text.encode('utf-8'))
if __name__ == '__main__':
main(sys.argv[1])
```
#### File: openstates-master/scripts/count_bill_key_lengths.py
```python
from collections import defaultdict, OrderedDict, namedtuple
from decimal import Decimal
from operator import itemgetter
from billy import db
KEYS = 'versions actions documents votes sponsors'.split()
class SaneReprList(list):
def __repr__(self):
return '<SaneReprList: %d elements>' % len(self)
class Summarizer(object):
def __init__(self, spec={}):
self.spec = spec
def build(self, keys=KEYS):
listdict = lambda: defaultdict(SaneReprList)
counts = defaultdict(listdict)
keys = 'versions actions documents votes sponsors'.split()
for bill in db.bills.find(self.spec):
for k in keys:
counts[k][len(bill[k])].append(bill['_id'])
self.counts = dict(counts)
return dict(counts)
def count(self):
return db.bills.find(self.spec).count()
def max_ids(self):
'''Yield the key, maximum value length, and the id of the
bill in which the max was found for each key in KEYS. In
other words, if TAB0000001 has the most actions (345), then
one tuple yielded from this generator would be:
('actions', 345, 'TAB0000001')
'''
for k, v in self.counts.items():
max_ = max(v)
id_ = v[max_]
yield k, max_, id_
def mean(self, key):
counts = self.counts[key]
sum_ = sum(k * len(v) for (k, v) in counts.items())
return sum_ / self.count()
def median(self, key):
counts = self.counts[key]
if 1 < len(counts):
counts = self.counts[key]
div, mod = divmod(len(counts), 2)
return div
else:
return list(counts).pop()
def mode(self, key):
counts = self.counts[key]
if 1 < len(counts):
return (max(counts) + min(counts)) / 2
else:
return list(counts).pop()
def percentages(self, key):
'''Returns an OrderedDict where the keys are the numbers of
actions/votes found and the values are the percentages of how
many bills had that number of actions out of the total number
of bills.
'''
counts = self.counts[key]
sum_ = Decimal(self.count())
items = ((k, (len(v) / sum_) * 100) for (k, v) in counts.items())
sorter = itemgetter(slice(None, None, -1))
items = sorted(items, key=sorter, reverse=True)
return OrderedDict(items)
def report(self):
Stats = namedtuple('Stats', 'mean median mode percentages')
methods = [self.mean, self.median, self.mode, self.percentages]
return dict((key, Stats(*[meth(key) for meth in methods])) for key in KEYS)
def print_report(self):
tab = ' '
for k, v in self.report().items():
print
print repr(k)
for key in ('mean', 'median', 'mode'):
print tab, key, '->', getattr(v, key)
print
print tab, 'Percentage breakdown'
for value, percentage in v.percentages.items():
print tab * 2, value, "{0:.2f}".format(percentage)
if __name__ == '__main__':
# import pprint
# pprint.pprint(get_counts())
x = Summarizer()
x.build()
x.print_report()
```
#### File: scripts/fl/2014_unretire_eisnaugle.py
```python
from billy.core import db
def main():
eisnaugle = db.legislators.find_one('FLL000075')
# Make him active.
eisnaugle['active'] = True
# Hack his current roles.
eisnaugle['roles'].insert(0, {
"term": "2013-2014",
"end_date": None,
"district": "44",
"chamber": "lower",
"state": "fl",
"party": "Republican",
"type": "member",
"start_date": None
})
# Save this hotness
db.legislators.save(eisnaugle)
if __name__ == '__main__':
main()
```
#### File: scripts/one-off/committees.py
```python
from billy.core import db
import us
states = (x.abbr.lower() for x in us.STATES)
HEADER = "committee,member,role,phone,email"
def extract(leg, key):
x = leg.get(key, None)
if x is not None:
yield x
for office in leg['offices']:
x = office.get(key, None)
if x is not None:
yield x
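# Descriptive note (assumed document shape): `extract` yields the top-level
# value for `key` (e.g. 'phone' or 'email') if present, followed by any
# office-level values, so a legislator with one office can yield up to two
# phone numbers.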
def write(fd, keys):
fd.write('"{}"'.format(",".join(keys)))
for state in states:
with open("out/{}.csv".format(state), 'w') as fd:
fd.write(HEADER)
fd.write("\n")
for committee in db.committees.find({"state": state}):
committee_name = committee['committee']
if committee['subcommittee'] is not None:
committee_name = "{subcommittee} ({committee} subcommittee)".format(
**committee)
for member in committee['members']:
fd.write(u'"{}","{}","{}"'.format(
committee_name, member['name'], member['role']
).encode("utf-8"))
lid = member['leg_id']
if lid is not None:
leg = db.legislators.find_one({"_id": lid})
write(fd, extract(leg, 'phone'))
write(fd, extract(leg, 'email'))
fd.write("\n")
```
#### File: openstates-master/scripts/purge_old_committee_ids.py
```python
from billy.core import db, feeds_db
from billy.core import settings
from billy.core import logging
def main():
import sys
abbrs = sys.argv[1:] or [x['abbreviation'] for x in db.metadata.find()]
logger = logging.getLogger('purge_committee_ids')
logger.setLevel(logging.DEBUG)
for abbr in abbrs:
spec = {settings.LEVEL_FIELD: abbr}
committee_ids = [c['_id'] for c in db.committees.find(spec, fields=['_id'])]
# Events with committee participants.
spec = {
settings.LEVEL_FIELD: abbr,
'participants.committee_id': {'$nin': committee_ids}
}
for event in db.events.find(spec):
old_ids = set()
count = 0
found = False
for participant in event['participants']:
for id_key in 'committee_id', 'id':
_id = participant.get(id_key, None)
type_ = participant.get('participant_type')
if id_key == 'id' and type_ != 'committee':
continue
if _id and (_id not in committee_ids):
found = True
msg = 'Removing participant %r from event %r'
logger.info(msg % (participant[id_key], event['_id']))
# Leave the participant in but set their id to none.
# Text will still be displayed without a hyperlink.
participant[id_key] = None
if found:
msg = 'Removed %d old committee %r ids from %r'
logger.info(msg % (count, old_ids, event['_id']))
db.events.save(event, safe=True)
# Related committees in bill actions.
spec = {
settings.LEVEL_FIELD: abbr,
'actions.related_entities.type': 'committee'
}
for bill in db.bills.find(spec):
old_ids = set()
count = 0
found = False
for action in bill['actions']:
for entity in action['related_entities']:
if entity['type'] == 'committee':
if entity['id'] and (entity['id'] not in committee_ids):
found = True
count += 1
old_ids.add(entity['id'])
msg = 'Removing entity %r from action in %r'
logger.debug(msg % (entity['id'], bill['bill_id']))
# Completely remove the related entity. Without an
# id it has no other purpose.
action['related_entities'].remove(entity)
if found:
msg = 'Removed %d old committee %r ids from %r'
logger.info(msg % (count, old_ids, bill['_id']))
db.bills.save(bill, safe=True)
# Legislator old roles.
spec = {
settings.LEVEL_FIELD: abbr,
'old_roles': {'$exists': True}
}
for leg in db.legislators.find(spec):
old_ids = set()
count = 0
found = False
for role in leg['old_roles']:
if 'committee_id' in role:
_id = role['committee_id']
if _id and (_id not in committee_ids):
found = True
count += 1
old_ids.add(_id)
msg = 'Removing id %r from old_role in %r'
logger.info(msg % (role['committee_id'], leg['full_name']))
# Set the id to None.
role['committee_id'] = None
if found:
msg = 'Removed %d old committee %r ids from %r'
logger.info(msg % (count, old_ids, leg['_id']))
db.legislators.save(leg, safe=True)
# Related entities in feeds.
spec = {
settings.LEVEL_FIELD: abbr,
'entity_ids': {'$ne': None}
}
for entry in feeds_db.entries.find(spec):
old_ids = set()
count = 0
found = False
for entity_id in entry['entity_ids']:
if entity_id[2] == 'C':
if entity_id not in committee_ids:
found = True
count += 1
msg = 'Removing id %r from feed %r'
logger.info(msg % (entity_id, entry['_id']))
# Delete the entity from the feed.
old_ids.add(entity_id)
index = entry['entity_ids'].index(entity_id)
del entry['entity_ids'][index]
del entry['entity_strings'][index]
if found:
msg = 'Removed %d old committee ids %r from %r'
logger.info(msg % (count, old_ids, entry['_id']))
feeds_db.entries.save(entry, safe=True)
# Nuke any committee sponsors of bills.
spec = {
settings.LEVEL_FIELD: abbr,
'sponsors.committee_id': {'$nin': committee_ids}
}
for bill in db.bills.find(spec):
count = 0
found = False
old_ids = set()
for sponsor in bill.get('sponsors', []):
if 'committee_id' in sponsor:
_id = sponsor['committee_id']
old_ids.add(_id)
found = True
count += 1
del sponsor['committee_id']
if found:
msg = 'Removed %d old committee ids %r from %r'
logger.info(msg % (count, old_ids, bill['_id']))
db.bills.save(bill)
if __name__ == '__main__':
main()
```
#### File: scripts/tn_109fix/tnfix.py
```python
import sys
import plop
import pymongo
def main(state):
db = pymongo.MongoClient().fiftystates
index = plop.Index()
spec = dict(state=state)
print('adding bills')
for bill in db.bills.find(spec):
index.add_object(bill)
print('adding legislators')
for obj in db.legislators.find(spec):
index.add_object(obj)
print('adding committees')
for obj in db.committees.find(spec):
index.add_object(obj)
print('adding votes')
for obj in db.votes.find(spec):
index.add_object(obj)
import pdb; pdb.set_trace()
if __name__ == "__main__":
main(*sys.argv[1:])
```
|
{
"source": "jgosmann/aoc2020",
"score": 3
}
|
#### File: jgosmann/aoc2020/day25.py
```python
import sys
def transform(value, subject_value):
return (value * subject_value) % 20201227
def find_loop_size(public_key, subject_value=7):
value = 1
loop_size = 0
while value != public_key:
value = transform(value, subject_value)
loop_size += 1
return loop_size
def find_encryption_key(key, loop_size):
value = 1
for _ in range(loop_size):
value = transform(value, key)
return value
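# Worked example from the Advent of Code 2020 day 25 puzzle statement
# (included here only as an illustration):
#   find_loop_size(5764801) == 8        # card public key
#   find_loop_size(17807724) == 11      # door public key
#   find_encryption_key(17807724, 8) == 14897079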
public_keys = [int(line) for line in sys.stdin]
loop_sizes = [find_loop_size(key) for key in public_keys]
print(find_encryption_key(public_keys[0], loop_sizes[1]))
```
#### File: jgosmann/aoc2020/day6.py
```python
import string
import sys
def count_distinct_answers(group):
return len(set(answer for answer in group if answer in string.ascii_lowercase))
def count_unanimous_answers(group):
return len(set.intersection(*(set(individual) for individual in group.split('\n'))))
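# Example group from the Advent of Code 2020 day 6 puzzle statement:
#   group = "abcx\nabcy\nabcz"
#   count_distinct_answers(group)  -> 6  (a, b, c, x, y, z)
#   count_unanimous_answers(group) -> 3  (a, b, c)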
groups = sys.stdin.read().split("\n\n")
print(
"Count for anyone answered yes:",
sum(count_distinct_answers(group) for group in groups)
)
print(
"Count for everyone answered yes:",
sum(count_unanimous_answers(group) for group in groups)
)
```
#### File: jgosmann/aoc2020/test_day15.py
```python
import pytest
from day15 import play_memory
@pytest.mark.parametrize(
"starting_numbers,expected",
[
([1, 3, 2], 1),
([2, 1, 3], 10),
([1, 2, 3], 27),
([2, 3, 1], 78),
([3, 2, 1], 438),
([3, 1, 2], 1836),
],
)
def test_play_memory(starting_numbers, expected):
assert play_memory(starting_numbers, 2020) == expected
```
|
{
"source": "jgosmann/aoc2021",
"score": 3
}
|
#### File: aoc2021/day02-python/part1.py
```python
import sys
class Submarine:
def __init__(self):
self.horizontal_pos = 0
self.depth = 0
def execute(self, command, arg):
getattr(self, command)(arg)
def forward(self, distance):
self.horizontal_pos += distance
def up(self, rise):
self.depth -= rise
def down(self, fall):
self.depth += fall
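# Example from the Advent of Code 2021 day 2 puzzle statement: the commands
# forward 5, down 5, forward 8, up 3, down 8, forward 2 leave the submarine
# at horizontal position 15 and depth 10, giving a product of 150.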
submarine = Submarine()
for line in sys.stdin.readlines():
command, arg = line.split()
submarine.execute(command, int(arg))
print(submarine.horizontal_pos * submarine.depth)
```
#### File: aoc2021/day08-python/part1.py
```python
def count_easy_digits(lines):
segment_counts = [
len(output.strip())
for line in lines
for output in line.split("|")[1].split(" ")
]
return sum(1 for s in segment_counts if s in set((2, 3, 4, 7)))
def test_count_easy_digits():
import os.path
with open(os.path.join(os.path.dirname(__file__), "test.input"), "r") as f:
assert count_easy_digits(f.readlines()) == 26
if __name__ == "__main__":
import sys
print(count_easy_digits(sys.stdin.readlines()))
```
|
{
"source": "jgosmann/bite",
"score": 3
}
|
#### File: bite/bite/parse_functions.py
```python
from asyncio import StreamReader
from typing import AsyncGenerator, TypeVar
from bite.io import BytesBuffer, StreamReaderBuffer
from bite.parsers import ParsedNode, Parser, TrailingBytesError
T = TypeVar("T", covariant=True)
V = TypeVar("V", covariant=True)
async def parse_incremental(
grammar: Parser[T, V], reader: StreamReader
) -> AsyncGenerator[ParsedNode[T, V], None]:
r"""Parse bytes from an asynchronous stream incrementally.
Parameters
----------
grammar:
Parser combinators defining the grammar to parse.
reader:
The stream reader to read bytes with.
Yields
------
:
A parse tree for each complete match of the given *grammar*. Note that
location indices of the parse tree will be relative to the start of that
parsed segment.
Raises
------
bite.parsers.ParseError
If the provided *grammar* fails to parse the incoming bytes.
Examples
--------
.. testcode:: parse_incremental
import asyncio
from bite import CharacterSet, Combine, Literal, parse_incremental, Suppress
integer_token = Combine(CharacterSet(b'0123456789')[1, ...])
line = integer_token + Literal(b'+') + integer_token + Suppress(Literal(b'\r\n'))
async def open_reader():
# For example:
# reader, _ = await asyncio.open_connection(...)
# return reader
...
.. testcode:: parse_incremental
:hide:
from bite.tests.mock_reader import MockReader
async def open_reader():
return MockReader(b"1+2\r\n23+42\r\n1234+4321\r\n")
.. testcode:: parse_incremental
async def main():
reader = await open_reader()
async for parsed_line in parse_incremental(line, reader):
print("Parsed line:", parsed_line.values)
asyncio.run(main())
Assuming the bytes ``b"1+2\r\n23+42\r\n1234+4321\r\n"`` can be read from
the *reader*:
.. testoutput:: parse_incremental
Parsed line: (b'1', b'+', b'2')
Parsed line: (b'23', b'+', b'42')
Parsed line: (b'1234', b'+', b'4321')
"""
buffer = StreamReaderBuffer(reader)
while not buffer.at_eof():
parse_tree = await grammar.parse(buffer, 0)
yield parse_tree
await buffer.drop_prefix(parse_tree.end_loc)
await buffer.get(slice(0, 1)) # Ensure to read EOF state
async def parse_bytes(
grammar: Parser[T, V], data: bytes, *, parse_all: bool = False
) -> ParsedNode[T, V]:
"""Parse an in-memory bytes object.
Parameters
----------
grammar:
Parser combinators defining the grammar to parse.
data:
The bytes object to parse.
parse_all:
If set to ``True``, all bytes must be parsed. Otherwise, trailing,
unparsed bytes are allowed.
Returns
-------
The resulting parse tree.
Raises
------
bite.parsers.TrailingBytesError
If ``parse_all=True`` and not all input was consumed by the parser.
bite.parsers.ParseError
If the provided *grammar* fails to parse the incoming bytes.
Examples
--------
.. testcode:: parse_bytes
import asyncio
from bite import Literal, parse_bytes
print(asyncio.run(parse_bytes(Literal(b'A'), b'AB')).values)
.. testoutput:: parse_bytes
(b'A',)
.. testcode:: parse_bytes
asyncio.run(parse_bytes(Literal(b'A'), b'AB', parse_all=True))
.. testoutput:: parse_bytes
Traceback (most recent call last):
...
bite.parsers.TrailingBytesError: trailing bytes
"""
parse_tree = await grammar.parse(BytesBuffer(data))
if parse_all and parse_tree.end_loc < len(data):
raise TrailingBytesError("trailing bytes")
return parse_tree
```
#### File: bite/bite/parsers.py
```python
import itertools
from dataclasses import dataclass
from typing import (
Any,
Callable,
Generic,
Iterable,
NoReturn,
Optional,
Tuple,
TypeVar,
Union,
)
from typing_extensions import Protocol
from bite.io import ParserBuffer
T = TypeVar("T", covariant=True)
V = TypeVar("V", covariant=True)
class ParsedNode(Protocol[T, V]):
"""A single node in a parse tree."""
@property
def name(self) -> Optional[str]:
"""Name of the node."""
...
@property
def parse_tree(self) -> T:
"""Children of the node."""
...
@property
def values(self) -> Iterable[V]:
"""Value of the node.
By default this will be a single element tuple with a ``bytes`` object
for the parse tree leaves and a flattened list of the children's values
for other nodes. However, it can be overridden to produce any other
type.
"""
...
@property
def start_loc(self) -> int:
"""Start index into the input buffer of the segmend parsed by the
node."""
...
@property
def end_loc(self) -> int:
"""End index (exclusive) into the input buffer of the segmend parsed by
the node."""
...
@dataclass(frozen=True)
class ParsedBaseNode(Generic[T]):
"""Implementation of common fields to all parse tree nodes."""
name: Optional[str]
"""Name of the node."""
parse_tree: T
"""Children of the node."""
@dataclass(frozen=True)
class ParsedLeaf(ParsedBaseNode[T]):
"""A leaf node in a parse tree."""
start_loc: int
"""Start index into the input buffer of the segmend parsed by the node."""
end_loc: int
"""End index (exclusive) into the input buffer of the segmend parsed by the
node."""
@property
def values(self) -> Tuple[T]:
"""Value of the node.
Returns
-------
:
A single element tuple with the :attr:`.parse_tree`.
"""
return (self.parse_tree,)
@dataclass(frozen=True)
class ParsedNil:
"""A leaf node in a parse tree representing a zero-length segment.
Such zero-length segments can be generated by look-aheads which do not
actually consume any input.
"""
name: Optional[str]
"""Name of the node."""
loc: int
"""Index into the input buffer to where the node was generated."""
@property
def parse_tree(self) -> None:
"""Children of the node. Will always return ``None``."""
return None
@property
def values(self) -> Tuple[()]:
"""Value of the node. Will always return the empty tuple ``()``."""
return ()
@property
def start_loc(self) -> int:
"""Start index into the input buffer of the segmend parsed by the node."""
return self.loc
@property
def end_loc(self) -> int:
"""End index (exclusive) into the input buffer of the segmend parsed by
the node. Will always be equal to `start_loc`."""
return self.loc
class Parser(Generic[T, V]):
"""Abstract base class for parsers.
Implementors must at least override the :meth:`.parse` method.
The following operator implementations are provided:
- ``+`` (:class:`And`): Apply parsers in sequence.
- ``|`` (:class:`MatchFirst`): Apply the first parser that succeeds parsing
the input.
- ``~`` (:class:`Not`): Negative look-ahead.
- ``[x, y]`` (:class:`Repeat`): Apply a parser repeatedly.
``x`` must be a non-negative integer.
``y`` must be either a positive integer or the ellipsis ``...`` to
allow for unlimited repetitions.
Parameters
----------
name:
Name to assign to the resulting parse tree node.
"""
def __init__(self, name=None):
self.name = name
def __str__(self) -> str:
return self.name if self.name else super().__str__()
async def parse(self, buf: ParserBuffer, loc: int = 0) -> ParsedNode[T, V]:
"""Try to parse the provided input.
Starts parsing from the given location and does not need to consume
all provided input.
Parameters
----------
buf:
Buffer providing access to the input.
loc:
Index into the buffer from where to start parsing.
Returns
-------
:
If parsing is successful, a parse tree representing the parse result
is returned.
Raises
------
UnmetExpectationError
If parsing was unsuccessful, because the input does not match what
is expected from this parser.
# noqa: DAR202
"""
raise NotImplementedError()
def __add__(self, other: "Parser") -> "And":
return And((self, other), name=f"({self}) + ({other})")
def __or__(self, other: "Parser") -> "MatchFirst":
return MatchFirst((self, other), name=f"({self}) | ({other})")
def __invert__(self) -> "Not":
return Not(self)
def __getitem__(
self, repeats: Union[int, Tuple[int, Union[int, "ellipsis", None]]]
) -> "Repeat":
if isinstance(repeats, int):
min_repeats = repeats
max_repeats: Optional[int] = repeats
else:
min_repeats = repeats[0]
max_repeats = repeats[1] if isinstance(repeats[1], int) else None
return Repeat(
self,
min_repeats,
max_repeats,
name=f"({self})[{min_repeats}, {'...' if max_repeats is None else max_repeats}]",
)
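# Brief operator sketch (illustrative only; these mirror the testcode examples
# used elsewhere in this module):
#   Literal(b'a') + Literal(b'b')   # And: b'a' followed by b'b'
#   Literal(b'a') | Literal(b'b')   # MatchFirst: first matching alternative
#   ~Literal(b'a')                  # Not: negative look-ahead, consumes nothing
#   Literal(b'a')[1, ...]           # Repeat: one or more times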
@dataclass(frozen=True)
class ParsedMatchFirst(ParsedBaseNode[ParsedNode[T, V]]):
choice_index: int
"""Index into :attr:`MatchFirst.parsers` of the parsed variant."""
@property
def values(self) -> Iterable[V]:
"""Values of the parsed child nodes."""
return self.parse_tree.values
@property
def start_loc(self) -> int:
"""Start index into the input buffer of the segmend parsed by the
node."""
return self.parse_tree.start_loc
@property
def end_loc(self) -> int:
"""End index (exclusive) into the input buffer of the segmend parsed by
the node."""
return self.parse_tree.end_loc
class MatchFirst(Parser[ParsedNode[Any, V], V]):
"""Apply the first parser that succeeds parsing the input.
Parameters
----------
choices:
Parsers to try in the given order until one succeeds parsing the input.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: match-first
import asyncio
from bite import Literal, MatchFirst, parse_bytes
print(asyncio.run(parse_bytes(
MatchFirst([Literal(b'a'), Literal(b'b'), Literal(b'bb')]),
b'bb'
)).values)
.. testoutput:: match-first
(b'b',)
"""
def __init__(self, choices: Iterable[Parser], *, name: str = None):
super().__init__(name)
self.choices = choices
def __str__(self):
return " | ".join(f"({choice})" for choice in self.choices)
async def parse(self, buf: ParserBuffer, loc: int = 0) -> ParsedMatchFirst:
for i, choice in enumerate(self.choices):
try:
parsed_node = await choice.parse(buf, loc)
return ParsedMatchFirst(self.name, parsed_node, i)
except UnmetExpectationError:
pass
raise UnmetExpectationError(self, loc)
def __or__(self, other: "Parser") -> "MatchFirst":
return MatchFirst(tuple(self.choices) + (other,), name=f"{self} | ({other})")
@dataclass(frozen=True)
class ParsedList(ParsedBaseNode[Tuple[ParsedNode[T, V], ...]]):
loc: int
"""Index into the input buffer of the location where this parsed expression
starts."""
@property
def values(self) -> Tuple[V, ...]:
"""Values of the children of this parse tree node."""
return tuple(
itertools.chain.from_iterable(node.values for node in self.parse_tree)
)
@property
def start_loc(self) -> int:
"""Start index into the input buffer of the segmend parsed by the node."""
if len(self.parse_tree) > 0:
return self.parse_tree[0].start_loc
else:
return self.loc
@property
def end_loc(self) -> int:
"""End index (exclusive) into the input buffer of the segmend parsed by
the node. Will be equal to `start_loc` if the parsed list is empty."""
if len(self.parse_tree) > 0:
return self.parse_tree[-1].end_loc
else:
return self.loc
ParsedAnd = ParsedList[Any, Any]
class And(Parser[Tuple[ParsedNode, ...], Any]):
"""Apply multiple parsers in sequence.
Each parser must be able to parse the input when applied in sequence.
Parameters
----------
parsers:
Parser to apply in sequence.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: and
import asyncio
from bite import And, Literal, parse_bytes
print(asyncio.run(parse_bytes(And([Literal(b'a'), Literal(b'b')]), b'ab')).values)
.. testoutput:: and
(b'a', b'b')
"""
def __init__(self, parsers: Iterable[Parser], *, name: str = None):
super().__init__(name)
self.parsers = parsers
def __str__(self):
return " + ".join(f"({parser})" for parser in self.parsers)
async def parse(self, buf: ParserBuffer, loc: int = 0) -> ParsedAnd:
current_loc = loc
parsed_nodes = []
for parser in self.parsers:
parsed_nodes.append(await parser.parse(buf, current_loc))
current_loc = parsed_nodes[-1].end_loc
return ParsedAnd(self.name, tuple(parsed_nodes), loc)
def __add__(self, other: "Parser") -> "And":
return And(tuple(self.parsers) + (other,), name=f"{self} + ({other})")
ParsedRepeat = ParsedList
class Repeat(Parser[Tuple[ParsedNode[T, V], ...], V]):
"""Apply a parser repeatedly.
Parameters
----------
parser:
Parser to apply repeatedly.
min_repeats:
Minimum number of applications of the parser.
max_repeats:
Maximum number of applications of the parser. If ``None``, infinitely
many applications are allowed.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: repeat
import asyncio
from bite import Literal, parse_bytes, Repeat
repeat = Repeat(Literal(b'a'), min_repeats=1, max_repeats=2)
print(asyncio.run(parse_bytes(repeat, b'')).values)
.. testoutput:: repeat
Traceback (most recent call last):
...
bite.parsers.UnmetExpectationError: expected b'a' at position 0
.. testcode:: repeat
print(asyncio.run(parse_bytes(repeat, b'a')).values)
.. testoutput:: repeat
(b'a',)
.. testcode:: repeat
print(asyncio.run(parse_bytes(repeat, b'aa')).values)
.. testoutput:: repeat
(b'a', b'a')
.. testcode:: repeat
print(asyncio.run(parse_bytes(repeat, b'aaa')).values)
.. testoutput:: repeat
(b'a', b'a')
"""
def __init__(
self,
parser: Parser[T, V],
min_repeats: int = 0,
max_repeats: int = None,
*,
name: str = None,
):
super().__init__(name)
self.parser = parser
self.min_repeats = min_repeats
self.max_repeats = max_repeats
def __str__(self):
return f"({self.parser})[{self.min_repeats}, {self.max_repeats}]"
async def parse(self, buf: ParserBuffer, loc: int = 0) -> ParsedRepeat:
current_loc = loc
parsed = []
for _ in range(self.min_repeats):
parsed.append(await self.parser.parse(buf, current_loc))
current_loc = parsed[-1].end_loc
for i in itertools.count(self.min_repeats):
if self.max_repeats is not None and i >= self.max_repeats:
break
try:
parsed.append(await self.parser.parse(buf, current_loc))
current_loc = parsed[-1].end_loc
except UnmetExpectationError:
break
return ParsedRepeat(self.name, tuple(parsed), loc)
class Not(Parser[None, NoReturn]):
"""Negative look-ahead.
This parser does not consume any input bytes, but will only succeed parsing
if the following input bytes are not parsed by the given parser.
Parameters
----------
parser:
Parser that is supposed to not match the input.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: not
import asyncio
from bite import FixedByteCount, Literal, Not, parse_bytes
expr = Not(Literal(b'a')) + FixedByteCount(1)
print(asyncio.run(parse_bytes(expr, b'b')).values)
.. testoutput:: not
(b'b',)
.. testcode:: not
asyncio.run(parse_bytes(expr, b'a'))
.. testoutput:: not
Traceback (most recent call last):
...
bite.parsers.UnmetExpectationError: expected Not(b'a') at position 0
"""
def __init__(self, parser: Parser[Any, Any], *, name: str = None):
super().__init__(name if name else f"Not({parser})")
self.parser = parser
async def parse(self, buf: ParserBuffer, loc: int = 0) -> ParsedNil:
try:
await self.parser.parse(buf, loc)
except UnmetExpectationError:
return ParsedNil(self.name, loc)
else:
raise UnmetExpectationError(self, loc)
class Forward(Parser[T, V]):
"""Forward declaration allowing the definition of recursive rules.
Use the :meth:`assign` method to set the actual parser definition.
.. warning::
Rules must not be left-recursive. Otherwise, the parser will
recursively call itself causing a stack overflow.
Parameters
----------
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: forward
import asyncio
from bite import Forward, Literal, Opt, parse_bytes
expr = Forward()
expr.assign(Literal(b'[') + Opt(expr) + Literal(b']'))
print(asyncio.run(parse_bytes(expr, b'[[]]')).values)
.. testoutput:: forward
(b'[', b'[', b']', b']')
"""
parser: Optional[Parser[T, V]]
def __init__(self, *, name: str = None):
super().__init__(name if name else "forward")
self.parser = None
def assign(self, parser: Parser[T, V]):
"""Assign a concrete parser to the forward declaration."""
self.parser = parser
async def parse(self, buf: ParserBuffer, loc: int = 0) -> ParsedNode[T, V]:
if self.parser is None:
raise ValueError("unassigned forward parser")
return await self.parser.parse(buf, loc)
ParsedLiteral = ParsedLeaf[bytes]
class Literal(Parser[bytes, bytes]):
"""Parses an exact sequence of bytes.
Parameters
----------
literal:
The bytes to match.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: literal
import asyncio
from bite import Literal, parse_bytes
print(asyncio.run(parse_bytes(Literal(b'abc'), b'abc')).values)
.. testoutput:: literal
(b'abc',)
"""
def __init__(self, literal: bytes, *, name: str = None):
super().__init__(name if name else str(literal))
self.literal = literal
async def parse(self, buf: ParserBuffer, loc: int = 0) -> ParsedLiteral:
end_loc = loc + len(self.literal)
peek = await buf.get(slice(loc, end_loc))
if peek == self.literal:
return ParsedLiteral(self.name, self.literal, loc, end_loc)
else:
raise UnmetExpectationError(self, loc)
class CaselessLiteral(Parser[bytes, bytes]):
"""Parses a case-insensitive sequence of bytes.
The *literal* passed to the :class:`CaselessLiteral` constructor will be
treated as the canonical form, i.e. the value returned from the parse tree
node.
Parameters
----------
literal:
The canonical form of the bytes to match.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: caseless-literal
import asyncio
from bite import CaselessLiteral, parse_bytes
print(asyncio.run(parse_bytes(CaselessLiteral(b'abc'), b'AbC')).values)
.. testoutput:: caseless-literal
(b'abc',)
"""
def __init__(self, literal: bytes, *, name: str = None):
super().__init__(name if name else str(literal))
self.literal = literal
self._lowercased_literal = self.literal.lower()
async def parse(self, buf: ParserBuffer, loc: int = 0) -> ParsedLiteral:
end_loc = loc + len(self.literal)
peek = await buf.get(slice(loc, end_loc))
if peek.lower() == self._lowercased_literal:
return ParsedLiteral(self.name, self.literal, loc, end_loc)
else:
raise UnmetExpectationError(self, loc)
ParsedCharacterSet = ParsedLeaf[bytes]
class CharacterSet(Parser[bytes, bytes]):
"""Parses a single byte from a given set.
.. note::
Besides listing each byte in the set explicitly (e.g. ``b'abc\x1F'``),
you can define a range using something like
``bytes(range(0x7F, 0x9F + 1))``. It is also possible to combine both
forms: ``b'abc\x1F' + bytes(range(0x7F, 0x9F + 1))``.
Parameters
----------
charset:
The set of bytes parsed by this parser.
invert:
Set to ``true`` to match all bytes *not* given by the *charset*.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: character-set
import asyncio
from bite import CharacterSet, parse_bytes
print(asyncio.run(parse_bytes(CharacterSet(b'abc'), b'b')).values)
.. testoutput:: character-set
(b'b',)
"""
def __init__(
self, charset: Iterable[int], *, invert: bool = False, name: str = None
):
super().__init__(name if name else f"CharacterSet({charset})")
self.charset = frozenset(charset)
self.invert = invert
async def parse(self, buf: ParserBuffer, loc: int = 0) -> ParsedCharacterSet:
char = await buf.get(loc)
if len(char) == 1 and (char[0] in self.charset) != self.invert:
return ParsedCharacterSet(self.name, char, loc, loc + 1)
else:
raise UnmetExpectationError(self, loc)
ParsedFixedByteCount = ParsedLeaf[bytes]
class FixedByteCount(Parser[bytes, bytes]):
"""Parses a fixed number of bytes.
Parameters
----------
count:
How many bytes to read.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: fixed-byte-count
import asyncio
from bite import FixedByteCount, parse_bytes
print(asyncio.run(parse_bytes(FixedByteCount(3), b'01234567890')).values)
.. testoutput:: fixed-byte-count
(b'012',)
"""
def __init__(self, count: int, *, name: str = None):
super().__init__(name if name else f"FixedByteCount({count})")
self.count = count
async def parse(self, buf: ParserBuffer, loc: int = 0) -> ParsedFixedByteCount:
read_bytes = await buf.get(slice(loc, loc + self.count))
if len(read_bytes) == self.count:
return ParsedFixedByteCount(
self.name, read_bytes, loc, loc + len(read_bytes)
)
else:
raise UnmetExpectationError(self, loc)
ParsedZeroOrMore = ParsedRepeat
class ZeroOrMore(Repeat[T, V]):
"""Require a parser to apply zero or more times.
This parser is equivalent to the :class:`Repeat` parser with
``min_repeats=0``.
Parameters
----------
parser:
Parser for a single application.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: zero-or-more
import asyncio
from bite import Literal, parse_bytes, ZeroOrMore
print(asyncio.run(parse_bytes(ZeroOrMore(Literal(b'a')), b'')).values)
print(asyncio.run(parse_bytes(ZeroOrMore(Literal(b'a')), b'aaa')).values)
.. testoutput:: zero-or-more
()
(b'a', b'a', b'a')
"""
def __init__(self, parser: Parser[T, V], *, name: str = None):
super().__init__(parser, min_repeats=0, name=name)
ParsedOneOrMore = ParsedRepeat
class OneOrMore(Repeat[T, V]):
"""Require a parser to apply one or more times.
This parser is equivalent to the :class:`Repeat` parser with
``min_repeats=1``.
Parameters
----------
parser:
Parser for a single application.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: one-or-more
import asyncio
from bite import Literal, OneOrMore, parse_bytes
asyncio.run(parse_bytes(OneOrMore(Literal(b'a')), b''))
.. testoutput:: one-or-more
Traceback (most recent call last):
...
bite.parsers.UnmetExpectationError: expected b'a' at position 0
.. testcode:: one-or-more
print(asyncio.run(parse_bytes(OneOrMore(Literal(b'a')), b'a')).values)
print(asyncio.run(parse_bytes(OneOrMore(Literal(b'a')), b'aaa')).values)
.. testoutput:: one-or-more
(b'a',)
(b'a', b'a', b'a')
"""
def __init__(self, parser: Parser[T, V], *, name: str = None):
super().__init__(parser, min_repeats=1, name=name)
ParsedOpt = ParsedRepeat
class Opt(Repeat[T, V]):
"""Make a parser optional.
This parser is equivalent to the :class:`Repeat` parser with
``min_repeats=0`` and ``max_repeats=1``.
Parameters
----------
parser:
Parser to apply optionally.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: opt
import asyncio
from bite import Literal, Opt, parse_bytes
print(asyncio.run(parse_bytes(Opt(Literal(b'a')), b'')).values)
print(asyncio.run(parse_bytes(Opt(Literal(b'a')), b'a')).values)
.. testoutput:: opt
()
(b'a',)
"""
def __init__(self, parser: Parser[T, V], *, name: str = None):
super().__init__(parser, min_repeats=0, max_repeats=1, name=name)
@dataclass(frozen=True)
class CountedParseTree:
"""Parse tree children created by the `Counted` parser."""
count_expr: ParsedNode[Any, int]
"""Parse tree of the *count* expression."""
counted_expr: ParsedNode
"""Parse tree of the expressions counted by the *count* expression."""
@property
def start_loc(self) -> int:
"""Start index into the input buffer of the segmend parsed by the
node."""
return self.count_expr.start_loc
@property
def end_loc(self) -> int:
"""End index (exclusive) into the input buffer of the segmend parsed by
the node."""
return self.counted_expr.end_loc
@dataclass(frozen=True)
class ParsedCounted(ParsedBaseNode[CountedParseTree], Generic[V]):
parse_tree: CountedParseTree
@property
def values(self) -> Iterable[V]:
"""Values of the :attr:`CountedParseTree.counted_expr` of the
:attr:`.parse_tree`."""
return self.parse_tree.counted_expr.values
@property
def start_loc(self) -> int:
"""Start index into the input buffer of the segmend parsed by the
node."""
return self.parse_tree.start_loc
@property
def end_loc(self) -> int:
"""End index (exclusive) into the input buffer of the segmend parsed by
the node."""
return self.parse_tree.end_loc
class Counted(Parser[CountedParseTree, V]):
"""Read a count and create a parser from it.
Parameters
----------
count_parser:
Parser to read the count. The resulting parse tree must return a single
value that can be converted to an ``int``.
counted_parser_factory:
Callable that gets passed the count and returns a parser that is used
to parse the subsequent bytes.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: counted
import asyncio
from bite import CharacterSet, Counted, FixedByteCount, parse_bytes
print(asyncio.run(parse_bytes(
Counted(
CharacterSet(b'012345689'),
lambda count: FixedByteCount(count)
),
b'3abcde'
)).values)
.. testoutput:: counted
(b'abc',)
"""
def __init__(
self,
count_parser: Parser[Any, int],
counted_parser_factory: Callable[[int], Parser[Any, V]],
*,
name: str = None,
):
super().__init__(
name if name else f"Counted({count_parser.name}, {counted_parser_factory})"
)
self.count_parser = count_parser
self.counted_parser_factory = counted_parser_factory
async def parse(self, buf: ParserBuffer, loc: int = 0) -> ParsedCounted[V]:
count_parse_tree = await self.count_parser.parse(buf, loc)
values_iter = iter(count_parse_tree.values)
try:
count = int(next(values_iter))
except StopIteration:
raise ValueError("count expression did not return a value") from None
try:
next(values_iter)
except StopIteration:
counted = await self.counted_parser_factory(count).parse(
buf, count_parse_tree.end_loc
)
return ParsedCounted(self.name, CountedParseTree(count_parse_tree, counted))
else:
raise ValueError("count expression returned more than one value")
ParsedCombine = ParsedLeaf[bytes]
class Combine(Parser[bytes, bytes]):
"""Combine parse tree leaves into a single node.
This parser is helpful for obtaining a single byte string when multiple
parsers produce the individual segments of that byte string.
Parameters
----------
parser:
Parser to obtain the individual segments to combine.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: combine
import asyncio
from bite import CharacterSet, Combine, parse_bytes
digits = CharacterSet(b'0123456789')[1, ...]
integer = Combine(digits)
print(asyncio.run(parse_bytes(digits, b'12345')).values)
print(asyncio.run(parse_bytes(integer, b'12345')).values)
.. testoutput:: combine
(b'1', b'2', b'3', b'4', b'5')
(b'12345',)
"""
def __init__(self, parser: Parser[Any, bytes], *, name: str = None):
super().__init__(name if name else f"Combine({parser})")
self.parser = parser
async def parse(self, buf: ParserBuffer, loc: int = 0) -> ParsedCombine:
parse_tree = await self.parser.parse(buf, loc)
return ParsedCombine(
self.name,
b"".join(parse_tree.values),
parse_tree.start_loc,
parse_tree.end_loc,
)
class ParseError(Exception):
"""Base class for errors resulting from input that fails to parse."""
class UnmetExpectationError(ParseError):
"""Error raised when the input does not match the syntax expected by a
parser."""
def __init__(self, expected: Parser, at_loc: int):
super().__init__(f"expected {expected} at position {at_loc}")
self.expected = expected
self.at_loc = at_loc
class TrailingBytesError(ParseError):
"""Error raised when the whole input is expected to be consumed by a parser,
but trailing bytes were found."""
__all__ = [
"And",
"CaselessLiteral",
"CharacterSet",
"Combine",
"Counted",
"FixedByteCount",
"Forward",
"Literal",
"MatchFirst",
"Not",
"OneOrMore",
"Opt",
"ParseError",
"Parser",
"Repeat",
"Repeat",
"TrailingBytesError",
"UnmetExpectationError",
"ZeroOrMore",
]
```
#### File: bite/tests/test_parse_functions.py
```python
import pytest
from bite.parse_functions import parse_bytes, parse_incremental
from bite.parsers import Literal, ParsedLiteral, TrailingBytesError
from bite.tests.mock_reader import MockReader
@pytest.mark.asyncio
async def test_parse_incremental():
grammar = Literal(b"A", name="A")
reader = MockReader(b"AAA")
count = 0
async for parse_tree in parse_incremental(grammar, reader):
assert parse_tree == ParsedLiteral("A", b"A", 0, 1)
count += 1
assert count == 3
@pytest.mark.asyncio
async def test_parse_bytes():
grammar = Literal(b"A", name="A")
assert await parse_bytes(grammar, b"AAA") == ParsedLiteral("A", b"A", 0, 1)
@pytest.mark.asyncio
async def test_parse_bytes_parse_all():
grammar = Literal(b"A", name="A")
assert await parse_bytes(grammar, b"A", parse_all=True) == ParsedLiteral(
"A", b"A", 0, 1
)
@pytest.mark.asyncio
async def test_parse_bytes_parse_all_failure():
grammar = Literal(b"A", name="A")
with pytest.raises(TrailingBytesError):
assert await parse_bytes(grammar, b"AA", parse_all=True)
```
#### File: bite/bite/transformers.py
```python
from dataclasses import dataclass
from typing import Callable, Generic, Iterable, Tuple, TypeVar
from bite.io import ParserBuffer
from bite.parsers import ParsedBaseNode, ParsedNode, Parser
T = TypeVar("T", covariant=True)
VIn = TypeVar("VIn", covariant=True)
VOut = TypeVar("VOut", covariant=True)
@dataclass(frozen=True)
class ParsedTransform(ParsedBaseNode[ParsedNode[T, VIn]], Generic[T, VIn, VOut]):
transform: Callable[[ParsedNode[T, VIn]], Iterable[VOut]]
Function to transform the child nodes."""
@property
def values(self) -> Iterable[VOut]:
"""Transformed values of the child nodes."""
# for some reason mypy thinks transform is a bare object
return self.transform(self.parse_tree) # type: ignore
@property
def start_loc(self) -> int:
"""Start index into the input buffer of the segmend parsed by the
node."""
return self.parse_tree.start_loc
@property
def end_loc(self) -> int:
"""End index (exclusive) into the input buffer of the segmend parsed by
the node."""
return self.parse_tree.end_loc
class Transform(Parser[ParsedNode[T, VIn], VOut]):
"""Transform a resulting parse tree node to produce different values.
Parameters
----------
parser:
Parser of which the resulting parse tree will be transformed.
transform:
Function that takes the parse tree produced by the *parser* and produces
the transformed values.
name:
Name to assign to the resulting parse tree node.
See Also
--------
.TransformValues: Passes only the parse tree node values instead of the
complete node to the *transform*.
Examples
--------
.. testcode:: transform
import asyncio
from bite import CharacterSet, Combine, parse_bytes, Transform
integer_token = Combine(CharacterSet(b'0123456789')[1, ...])
print(asyncio.run(parse_bytes(integer_token, b'42')).values)
print(asyncio.run(parse_bytes(
Transform(integer_token, lambda node: (int(node.parse_tree),)),
b'42'
)).values)
.. testoutput:: transform
(b'42',)
(42,)
"""
def __init__(
self,
parser: Parser[T, VIn],
transform: Callable[[ParsedNode[T, VIn]], Iterable[VOut]],
*,
name: str = None,
):
super().__init__(name if name else f"Transform({parser.name})")
self.parser = parser
self.transform = transform
async def parse(
self, buf: ParserBuffer, loc: int = 0
) -> ParsedTransform[T, VIn, VOut]:
return ParsedTransform(
self.name, await self.parser.parse(buf, loc), self.transform
)
class Suppress(Transform[T, VIn, None]):
"""Suppresses a parse tree from the values.
Parameters
----------
parser:
Parser of which the resulting parse tree will be suppressed.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: suppress
import asyncio
from bite import CharacterSet, Combine, Literal, parse_bytes, Suppress
integer_token = Combine(CharacterSet(b'0123456789')[1, ...])
print(asyncio.run(parse_bytes(
Suppress(Literal(b'[')) + integer_token + Suppress(Literal(b']')),
b'[42]'
)).values)
.. testoutput:: suppress
(b'42',)
"""
def __init__(self, parser: Parser[T, VIn], *, name: str = None):
super().__init__(
parser, lambda _: [], name=name if name else f"Suppress({parser.name})"
)
class TransformValues(Transform[T, VIn, VOut]):
"""Transform parsed values.
Parameters
----------
parser:
Parser of which the resulting parse tree values will be transformed.
transform:
Function that takes the values produced by the *parser* and produces
the transformed values.
name:
Name to assign to the resulting parse tree node.
See Also
--------
.Transform: Passes the complete parse tree node instead of just the values
to the *transform*.
Examples
--------
.. testcode:: transform-values
import asyncio
from bite import CharacterSet, Combine, Literal, parse_bytes, TransformValues
def sum_values(values):
return (sum(int(v) for v in values if v != b'+'),)
integer_token = Combine(CharacterSet(b'0123456789')[1, ...])
print(asyncio.run(parse_bytes(
TransformValues(integer_token + Literal(b'+') + integer_token, sum_values),
b'42+23'
)).values)
.. testoutput:: transform-values
(65,)
"""
def __init__(
self,
parser: Parser[T, VIn],
transform: Callable[[Iterable[VIn]], Iterable[VOut]],
*,
name: str = None,
):
super().__init__(
parser,
lambda parse_tree: transform(parse_tree.values),
name=name if name else f"TransformValues({parser.name})",
)
class Group(TransformValues[T, VIn, Tuple[VIn, ...]]):
"""Group the values of a resulting parse tree node into a tuple.
This makes it possible to introduce structure into the otherwise flat
:attr:`ParsedNode.values` tuple.
Parameters
----------
parser:
Parser of which the resulting parse tree values will be grouped.
name:
Name to assign to the resulting parse tree node.
Examples
--------
.. testcode:: group
import asyncio
from bite import CharacterSet, Combine, Group, Literal, parse_bytes, Suppress
item = Combine(CharacterSet(b'[],', invert=True))
delimited_list = Group(
Suppress(Literal(b'['))
+ item
+ (Suppress(Literal(b',')) + item)[0, ...]
+ Suppress(Literal(b']'))
)
print(asyncio.run(parse_bytes(
delimited_list[0, ...],
b'[A,B][1,2,3]'
)).values)
.. testoutput:: group
((b'A', b'B'), (b'1', b'2', b'3'))
"""
def __init__(self, parser: Parser[T, VIn], *, name: str = None):
super().__init__(
parser,
lambda values: (tuple(values),),
name=name if name else f"Group({parser.name})",
)
__all__ = [
"Group",
"Suppress",
"Transform",
"TransformValues",
]
```
|
{
"source": "jgosmann/boox-tools",
"score": 3
}
|
#### File: jgosmann/boox-tools/mv-annotated-to-main.py
```python
from __future__ import print_function
import argparse
import os
import os.path
import re
import recolor
REGEX = re.compile(r'^(.*)_\d{4}-\d{1,2}-\d{1,2}_\d{1,2}-\d{1,2}-\d{1,2}.pdf$')
def process(filename):
match = REGEX.match(filename)
if match is not None:
new_filename = match.group(1) + '.pdf'
os.rename(filename, new_filename)
print('{} -> {}'.format(filename, new_filename))
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(
description='Removes the date suffix from the PDF files with '
'merged annotations from the Onyx Boox M92 ebook reader.')
PARSER.add_argument(
'-r', help='Descend recursively into directories.',
action='store_true', default=False)
PARSER.add_argument(
'-c', '--highlight-color',
help='Change the color of highlight annotations to the given color. '
'Specify `nochange` to preserve color. The color string is NOT '
'validated and has to be a valid PDF color.',
nargs=1, default=['1. 1. .5'])
PARSER.add_argument('filenames', nargs='*', help='Files to process.')
ARGS = PARSER.parse_args()
for name in ARGS.filenames:
if os.path.isdir(name):
for dirpath, dirnames, sub_filenames in os.walk(name):
for sub_filename in sub_filenames:
process(os.path.join(dirpath, sub_filename))
else:
if ARGS.highlight_color[0] != 'nochange':
recolor.process_file(name, ARGS.highlight_color[0])
process(name)
```
|
{
"source": "jgosmann/merge-conflicts",
"score": 3
}
|
#### File: merge-conflicts/goppy/core.py
```python
import numpy as np
from numpy.linalg import cholesky, inv
from goppy.growable import GrowableArray
__all__ = ['OnlineGP']
class OnlineGP(object):
"""Online Gaussian Process.
Provides a Gaussian process to which further data can be added efficiently
after the initial training.
Parameters
----------
kernel : :class:`.Kernel`
Covariance function of the Gaussian process.
noise_var : float, optional
The assumed variance of the noise on the training targets.
expected_size : int, optional
The overall expected number of training samples to be added to the
Gaussian process. Setting this parameter can be more efficient as it
may avoid memory reallocations.
buffer_factory : function, optional
Function to call to create buffer arrays for data storage.
Attributes
----------
kernel : :class:`.Kernel`
Covariance function of the Gaussian process.
noise_var : float
The assumed variance of the noise on the training targets.
x_train : (`N`, `D`) ndarray
The `N` training data inputs of dimension `D`. This will be ``None`` as
long as the Gaussian process has not been trained.
y_train : (`N`, `D`) ndarray
The `N` training data targets of dimension `D`. This will be ``None``
as long as the Gaussian process has not been trained.
inv_chol : (`N`, `N`) ndarray
Inverted lower Cholesky factor of the covariance matrix
(upper triangular matrix). This will be ``None`` as long as the
Gaussian process has not been trained.
trained : bool
Indicates whether the Gaussian process has been fitted to some training
data.
Examples
--------
>>> from goppy import OnlineGP, SquaredExponentialKernel
>>> gp = OnlineGP(SquaredExponentialKernel([1.0]), noise_var=0.1)
>>> gp.fit(np.array([[2, 4]]).T, np.array([[3, 1]]).T)
>>> gp.add(np.array([[0]]), np.array([[3]]))
>>> gp.predict(np.array([[1, 3]]).T)
{'mean': array([[ 2.91154709],
[ 1.82863199]])}
"""
def __init__(
self, kernel, noise_var=0.0, expected_size=None,
buffer_factory=GrowableArray):
self.kernel = kernel
self.noise_var = noise_var
self._expected_size = expected_size
self._buffer_factory = buffer_factory
self.x_train = None
self.y_train = None
self.inv_chol = None
self.__inv_cov_matrix = None
self.trained = False
@property
def inv_cov_matrix(self):
""" Inverted covariance matrix.
Cannot be accessed before the Gaussian process has been trained.
"""
if self.__inv_cov_matrix is None:
self.__inv_cov_matrix = np.dot(self.inv_chol.T, self.inv_chol)
return self.__inv_cov_matrix
@inv_cov_matrix.deleter
def inv_cov_matrix(self):
self.__inv_cov_matrix = None
def fit(self, x, y):
"""Fits the Gaussian process to training data.
Parameters
----------
x : (`N`, `D`) array-like
The `N` input data points of dimension `D` to train on.
y : (`N`, `D`) array-like
The `N` training targets with `D` independent dimensions.
"""
x = np.asarray(x)
y = np.asarray(y)
if self._expected_size is not None:
buffer_shape = (self._expected_size,)
buffer_shape2 = (self._expected_size, self._expected_size)
else:
buffer_shape = buffer_shape2 = None
self.x_train = self._buffer_factory(x.shape, buffer_shape=buffer_shape)
self.y_train = self._buffer_factory(y.shape, buffer_shape=buffer_shape)
self.x_train[:, :] = x
self.y_train[:, :] = y
self.inv_chol = self._buffer_factory(
(x.shape[0], x.shape[0]), buffer_shape=buffer_shape2)
self.inv_chol[:, :] = inv(cholesky(
self.kernel(x, x) + np.eye(len(x)) * self.noise_var))
del self.inv_cov_matrix
self.trained = True
def add(self, x, y):
"""Adds additional training data to the Gaussian process and adjusts
the fit.
Parameters
----------
x : (`N`, `D`) array-like
The `N` input data points of dimension `D` to train on.
y : (`N`, `D`) array-like
The `N` training targets with `D` independent dimensions.
"""
if len(x) <= 0:
return
x = np.asarray(x)
y = np.asarray(y)
if not self.trained:
self.fit(x, y)
return
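        # Block update of the inverse Cholesky factor: the Schur complement of
        # the covariance block for the new inputs (``covmat`` below) is
        # factorized on its own and combined with the existing factor, so the
        # full covariance matrix never has to be refactorized from scratch.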
input_vs_train_dist = self.kernel(x, self.x_train)
proj = np.dot(input_vs_train_dist, self.inv_chol.T)
covmat = self.kernel(x, x) + np.eye(len(x)) * self.noise_var - \
np.dot(proj, proj.T)
diag_indices = np.diag_indices_from(covmat)
covmat[diag_indices] = np.maximum(self.noise_var, covmat[diag_indices])
self.x_train.grow_by((len(x), 0))
self.y_train.grow_by((len(y), 0))
self.x_train[-len(x):, :] = x
self.y_train[-len(y):, :] = y
new_inv_chol = inv(cholesky(covmat))
l = len(self.inv_chol)
self.inv_chol.grow_by((len(x), len(x)))
self.inv_chol[:l, l:] = 0.0
self.inv_chol[l:, :l] = -np.dot(
np.dot(new_inv_chol, proj), self.inv_chol[:l, :l])
self.inv_chol[l:, l:] = new_inv_chol
del self.inv_cov_matrix
def predict(self, x, what=('mean',)):
r"""Predict with the Gaussian process.
Depending on the values included in the `what` parameter, different
predictions will be made and returned in a dictionary ``res``:
* ``'mean'``: Mean prediction of the Gaussian process of shape (`N`,
`D`).
* ``'mse'``: Predictive variance of the Gaussian process of shape
(`N`,).
* ``'derivative'``: Predicted derivative of the mean.
``res['derivative'][i, :, j]`` will correspond to
:math:`\left(\frac{\partial \mu}{\partial x_j}\right)
\left(x_i\right)` with the `i`-th input data point :math:`x_i`,
and mean function :math:`\mu(x)`.
* ``'mse_derivative'``: Predicted derivative of the variance.
``res['mse_derivative'][i, :]`` will correspond to
:math:`\left(\frac{d \sigma^2}{d x}\right)
\left(x_i\right)` with the `i`-th input data point :math:`x_i`,
and variance function :math:`\sigma^2(x)`.
Parameters
----------
x : (`N`, `D`) array-like
The `N` data points of dimension `D` to predict data for.
what : set-like, optional
Types of predictions to be made (see above).
Returns
-------
dict
Dictionary with the elements of `what` as keys and the
corresponding predictions as values.
"""
pred = {}
if 'derivative' in what or 'mse_derivative' in what:
kernel_what = ('y', 'derivative')
else:
kernel_what = ('y',)
lazy_vars = _LazyVarCollection(
input_vs_train_dist=lambda v: self.kernel.full(
x, self.x_train, kernel_what),
svs=lambda v: np.dot(self.inv_cov_matrix, self.y_train),
mean=lambda v: np.dot(v.input_vs_train_dist['y'], v.svs),
mse_svs=lambda v: np.dot(
self.inv_cov_matrix, v.input_vs_train_dist['y'].T),
mse=lambda v: np.maximum(
self.noise_var,
self.noise_var + self.kernel.diag(x, x) - np.einsum(
'ij,ji->i', v.input_vs_train_dist['y'], v.mse_svs)),
derivative=lambda v: np.einsum(
'ijk,jl->ilk', v.input_vs_train_dist['derivative'], v.svs),
mse_derivative=lambda v: -2 * np.einsum(
'ijk,ji->ik', v.input_vs_train_dist['derivative'], v.mse_svs))
if 'mean' in what:
pred['mean'] = lazy_vars.mean
if 'mse' in what:
pred['mse'] = lazy_vars.mse
if 'derivative' in what:
pred['derivative'] = lazy_vars.derivative
if 'mse_derivative' in what:
pred['mse_derivative'] = lazy_vars.mse_derivative
return pred
def calc_log_likelihood(self, what=('value',)):
r"""Calculate the log likelihood or its derivative of the Gaussian
process.
Depending on the values included in the `what` parameter, different
values will be calculated:
* ``'value'``: The log likelihood of the Gaussian process as scalar.
* ``'derivative'``: Partial derivatives of the log likelihood for each
kernel parameter as array. See the ``params`` property of the used
kernel for the order.
Parameters
----------
what : set-like, optional
Values to calculate (see above).
Returns
-------
dict
Dictionary with the elements of `what` as keys and the
corresponding calculated values.
"""
res = {}
svs = np.dot(self.inv_chol, self.y_train)
if 'value' in what:
res['value'] = np.squeeze(
-0.5 * np.dot(svs.T, svs) +
np.sum(np.log(np.diag(self.inv_chol))) -
0.5 * len(self.y_train) * np.log(2 * np.pi))
if 'derivative' in what:
alpha = np.dot(self.inv_chol.T, svs)
grad_weighting = np.dot(alpha, alpha.T) - self.inv_cov_matrix
res['derivative'] = np.array([
0.5 * np.sum(np.einsum(
'ij,ji->i', grad_weighting, param_deriv))
for param_deriv in self.kernel.full(
self.x_train, self.x_train, what='param_derivatives')[
'param_derivatives']])
return res
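# _LazyVarCollection memoizes the intermediate quantities used by predict():
# each attribute is computed on first access by the corresponding lambda and
# then cached on the instance via setattr, so subsequent accesses bypass
# __getattr__ and expensive terms are only evaluated for the predictions that
# were actually requested.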
class _LazyVarCollection(object):
def __init__(self, **kwargs):
self._eval_fns = kwargs
def __getattr__(self, name):
value = self._eval_fns[name](self)
setattr(self, name, value)
return value
```
|
{
"source": "jgosmann/plato",
"score": 3
}
|
#### File: plato/examples/shop.py
```python
from dataclasses import InitVar, asdict, dataclass
from decimal import Decimal
from pprint import pprint
from faker import Faker
from plato import Provider, Shared, formclass, sample
from plato.formclasses import derivedfield
from plato.providers.faker import FromFaker
fake = FromFaker(Faker(["en-CA", "de-DE"]))
# TODO generate either address randomly => abstract flag, might produce derived class flag
@formclass
class Address:
street: str
city: str
zip_code: str
country: str
name: str = fake.name()
@formclass
class GermanPostalCodeWithCity:
zip_code: str = fake["de-DE"].postcode()
city: str = fake["de-DE"].city()
@formclass
class GermanAddress(Address):
street: str = fake["de-DE"].street_address()
zip_code_and_city = Shared(GermanPostalCodeWithCity())
city: str = zip_code_and_city.city
zip_code: str = zip_code_and_city.zip_code
country: str = "Germany"
@dataclass
class CanadianPostalCodeWithCity:
zip_code: str
city: str
class CanadianPostalCodeWithCityProvider(Provider):
def sample(self, context):
return context.rng.choice(
(
CanadianPostalCodeWithCity("N2L 3G1", "Waterloo"),
CanadianPostalCodeWithCity("N2J 1A3", "Waterloo"),
CanadianPostalCodeWithCity("V5K 0A4", "Vancouver"),
)
)
@formclass
class CanadianAddress(Address):
street: str = fake["en-CA"].street_address()
zip_code_and_city = Shared(CanadianPostalCodeWithCityProvider())
city: str = zip_code_and_city.city
zip_code: str = zip_code_and_city.zip_code
country: str = "Canada"
@formclass
class Product:
name: str = fake.random_element(["Apple", "Banana", "Orange", "Pear"])
product_number: str = fake.bothify("?????-###")
description: str = fake.paragraph()
class SelectProvider(Provider):
def __init__(self, provider, dispatch_table):
self.dispatch_table = dispatch_table
self.provider = provider
def sample(self, context):
value = self.provider
if isinstance(value, Provider):
value = value.sample(context)
return self.dispatch_table[value].sample(context)
@formclass
class Price:
locale: InitVar[str]
base_price: Decimal = fake.pydecimal(1, 2)
@derivedfield
def vat_percent(self, locale) -> Decimal:
return SelectProvider(
locale,
{
"en-CA": fake.random_element([Decimal(5), Decimal(13)]),
"de-DE": fake.random_element([Decimal(7), Decimal(19)]),
},
)
@formclass
class OrderLine:
locale: str
quantity: int = fake.pyint(1, 10)
product: Product = Product()
@derivedfield
def price(self, locale) -> Price:
return Price(locale)
class OrderNumber(Provider):
numbers_issued = 0
def sample(self, context):
order_no = f"ABC-{self.numbers_issued:05d}"
self.numbers_issued += 1
return order_no
class ListProvider(Provider):
def __init__(self, min_elements: int, max_elements: int, provider: Provider):
self.min_elements = min_elements
self.max_elements = max_elements
self.provider = provider
def sample(self, context):
num_elements = context.rng.randint(self.min_elements, self.max_elements + 1)
return [sample(self.provider, context) for _ in range(num_elements)]
# want different addresses by default, but allow for matching
@formclass
class Order:
locale: str = fake.random_element(["en-CA", "de-DE"])
order_number: str = OrderNumber() # globally unique
@derivedfield
def billing_address(self, locale) -> Address:
return {
"de-DE": GermanAddress(),
"en-CA": CanadianAddress(),
}[locale]
@derivedfield
def shipping_address(self, billing_address) -> Address:
return billing_address
@derivedfield
def order_lines(self, locale) -> str:
return ListProvider(1, 5, OrderLine(locale))
if __name__ == "__main__":
pprint(asdict(sample(Order())), width=180)
pprint(asdict(sample(Order())), width=180)
pprint(asdict(sample(Order())), width=180)
pprint(asdict(sample(Order())), width=180)
```
#### File: plato/plato/context.py
```python
import random
from collections import defaultdict
from hashlib import blake2b
from typing import Any, Dict, Optional, Type
from typing_extensions import Protocol
class Hasher(Protocol):
"""Protocol of classes to perform incremental hashing."""
def copy(self) -> "Hasher":
"""Create a copy of the current hasher state."""
...
def update(self, data: bytes) -> None:
"""Update the hash with *data*."""
...
def digest(self) -> bytes:
"""Return the current hash."""
...
class Context:
"""Context used in sampling from `.Provider` instances.
Arguments
---------
hasher
Hasher used to derive the random number seed and to derive hashers for
subcontexts.
parent
The parent context if any.
meta
A dictionary that can be used by `.Provider` instances to store
additional information in the context. Be aware that the passed instance
might be modified.
"""
parent: Optional["Context"]
"""The parent context or `None` if this is a root context."""
meta: dict
""" Dictionary that can be used by providers to store additional information
across invocations of `.Provider.sample()`. Use the `.Provider` instance
or concrete class as key to avoid key collisions with other providers.
"""
seed: bytes
"""Seed to use for the generation of random numbers."""
rng: random.Random
""" A seeded random number generator that may be used for the generation of
random numbers.
"""
def __init__(
self, hasher: Hasher, parent: "Context" = None, meta: Dict[Any, Any] = None
):
self._hasher = hasher
self.parent = parent
if meta is None:
meta = {}
self.meta = meta
self.seed = self._hasher.digest()
self.rng = random.Random(self.seed)
def subcontext(self, name: str) -> "Context":
"""Derive a subcontext.
A subcontext is derived by updating a copy of the *hasher* with the
*name*, setting the *parent* accordingly, and (flat) copying the
*meta* dictionary.
Arguments
---------
name: str
A name to identify the subcontext. Reusing the same name will give
a subcontext with the same random number seed.
Returns
-------
Context
The derived subcontext.
"""
subhasher = self._hasher.copy()
subhasher.update(name.encode())
return Context(subhasher, self, dict(self.meta))
_TYPE_COUNTS: Dict[Type, int] = defaultdict(lambda: 0)
def seed(value: int) -> None:
"""Set the global Plato base seed."""
# pylint: disable=global-statement
global _TYPE_COUNTS
_TYPE_COUNTS = defaultdict(lambda: value)
def _int2bytes(value: int) -> bytes:
return value.to_bytes(value.bit_length() // 8 + 1, "big")
def _create_hasher(hasher_seed: int) -> blake2b:
hasher = blake2b()
hasher.update(_int2bytes(hasher_seed))
return hasher
def get_root_context(type_: Type) -> Context:
"""Get a root context for a given type."""
root_seed = _TYPE_COUNTS[type_]
_TYPE_COUNTS[type_] += 1
return Context(_create_hasher(root_seed))
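
# Illustrative sketch (not part of the original module): subcontexts derived
# with the same name share the same seed, which is what makes sampling
# deterministic per field name. The _Demo class is purely hypothetical.
if __name__ == "__main__":
    class _Demo:
        pass

    root = get_root_context(_Demo)
    assert root.subcontext("field").seed == root.subcontext("field").seed
    assert root.subcontext("field").seed != root.subcontext("other").seed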
```
#### File: plato/plato/formclasses.py
```python
import inspect
import sys
from dataclasses import Field, InitVar, fields, is_dataclass, make_dataclass
from typing import (
Any,
Callable,
ClassVar,
Dict,
FrozenSet,
Generic,
List,
MutableMapping,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from weakref import WeakKeyDictionary
from .context import Context, get_root_context
from .internal.graph import toposort
from .internal.weak_id_dict import WeakIdDict
from .providers.base import Provider, ProviderProtocol
_init_var_registry: WeakIdDict[Dict[str, Any]] = WeakIdDict()
_post_init_registry: MutableMapping[
object, Dict[str, Callable[..., Any]]
] = WeakKeyDictionary()
T = TypeVar("T")
def formclass(cls: type) -> type:
"""Class decorator to process a class definition as formclass.
The *formclass* decorator is one of the main parts of the Plato API. A class
annotated with it will be processed to enable Plato's features. In
particular, it will become a :func:`~dataclasses.dataclass` and support
for the `.derivedfield` decorator will be added.
Similar to a :func:`~dataclasses.dataclass`, you can define fields in
a *formclass* using type annotations. In addition to normal default values
and :func:`~dataclasses.field` assignments, you can assign a `.Provider`
that is used to generate values when using `.sample`.
Like `~dataclasses`, a *formclass* supports the `InitVar` type. A field
with such a type will not be available on the instance, but will be passed
as argument to the `__post_init__` method (in order of declaration) and
`.derivedfield` methods (as keyword argument by name).
Example
-------
.. testcode:: formclass
fake = plato.providers.faker.FromFaker()
@formclass
class MyFormclass:
field: str = "value"
generated_field: str = fake.first_name()
data = sample(MyFormclass())
print(data.field)
print(data.generated_field)
.. testoutput:: formclass
value
Alicia
..
# noqa: DAR101 cls
# noqa: DAR201 return
"""
post_init_fns: Dict[str, Callable[..., Any]] = {}
annotations = getattr(cls, "__annotations__", {})
instance_fields = [
(name, type_)
for name, type_ in annotations.items()
if not _type_origin_matches(type_, ClassVar[Any])
]
namespace: Dict[str, Any] = {}
for name, value in cls.__dict__.items():
if name in {"__annotations__", "__dict__"}:
continue
if isinstance(value, _DerivedField):
instance_fields.append((name, value.type))
post_init_fns[name] = value.fn
value = None
namespace[name] = value
orig_post_init = namespace.get("__post_init__", None)
init_var_names = [
name for name, type_ in annotations.items() if _is_init_var(type_)
]
def __post_init__(self: Any, *args: Any) -> None:
_init_var_registry[self] = dict(zip(init_var_names, args))
if orig_post_init:
orig_post_init(self, *args)
namespace["__post_init__"] = __post_init__
instance_fields_with_field_def: List[
Union[Tuple[str, type], Tuple[str, type, Field]]
] = [
(name, type_, cast(Field, namespace.pop(name)))
if isinstance(namespace.get(name, None), Field)
else (name, type_)
for name, type_ in instance_fields
]
dc = make_dataclass(
cls.__name__,
instance_fields_with_field_def,
bases=cls.__mro__[1:],
namespace=namespace,
)
_post_init_registry[dc] = post_init_fns
return dc
def _type_origin_matches(annotation: Type, type_: object) -> bool:
return hasattr(annotation, "__origin__") and getattr(
annotation, "__origin__"
) is getattr(type_, "__origin__", None)
def _is_init_var(type_: type) -> bool:
is_py37_init_var = (
sys.version_info[:2] <= (3, 7) and type_.__class__ is InitVar.__class__
)
return is_py37_init_var or isinstance(type_, InitVar)
DerivedFieldT = TypeVar("DerivedFieldT", bound=Callable)
class _DerivedField(Generic[DerivedFieldT]):
"""Method decorator to derive a `.formclass` field from other fields.
When instantiating a `.formclass`, the decorated method will be run after
initializing all normal fields. The returned value will be used to add
a field with the method's name to the `.formclass` instance. You get
access to your fields (including `InitVar` fields) by declaring additional
arguments for the `.derivedfield` using the same name as the field.
Attributes
----------
fn
Decorated method.
"""
def __init__(self, fn: DerivedFieldT):
self.fn = fn
@property
def type(self) -> Type:
"""Type annotation of the derived field."""
annotation = getattr(self.fn, "__annotations__", {}).get("return", Any)
if _type_origin_matches(annotation, ProviderProtocol[Any]):
annotation = annotation.__args__[0]
return annotation
# pylint: disable=invalid-name
derivedfield = _DerivedField
def sample(form: T, context: Context = None) -> T:
"""Generates a dataclass with concrete values from a `.formclass` instance.
Recursively processes a `.formclass` instance and returns an analogous
:func:`~dataclasses.dataclass` where all `.Provider` have been replaced
with values generated from these providers. The returned
`~dataclasses.dataclass` will also have fields added for `.derivedfield`
annotated methods.
This function uses a context to provide deterministic random number seeds
based on the field names and allow information to be shared between
`.Provider` instances. Usually it will not be necessary to provide this
context as it will be automatically initialized for each top-level
invocation.
Arguments
---------
form
Usually a `.formclass` instance to be processed. But can also be a
`.Provider` instance which will forward the call to the provider's
`.Provider.sample` method. Any other type of object will be returned
unchanged.
context
Context of the sample operation, for example, the random number seed to
use. Usually this argument has not to be set manually and will be
initialized automatically.
Returns
-------
T
For a `.formclass` a `~dataclasses.dataclass` instance is returned
with `.Provider` instances replaced by sampled values and
`.derivedfield` methods added as fields. For a `.Provider` the sampled
value will be returned and for all other objects, the object itself
is returned.
Examples
--------
With `.formclass`:
.. testcode:: sample
fake = plato.providers.faker.FromFaker()
@formclass
class MyFormclass:
field: str = "value"
generated_field: str = fake.first_name()
data = sample(MyFormclass())
print(data.field)
print(data.generated_field)
.. testoutput:: sample
value
Alicia
With `.Provider`:
.. testcode:: sample
fake = plato.providers.faker.FromFaker()
print(sample(fake.first_name()))
.. testoutput:: sample
Thomas
Any other object:
.. testcode:: sample
print(sample("foo"))
.. testoutput:: sample
foo
"""
if context is None:
context = get_root_context(form.__class__)
if isinstance(form, Provider):
return form.sample(context)
if not is_dataclass(form):
return form
init_args = dict(_init_var_registry[form])
init_args.update(
{
field_def.name: sample(
getattr(form, field_def.name), context.subcontext(field_def.name)
)
for field_def in fields(form)
}
)
instance = form.__class__(**init_args) # type: ignore[call-arg]
if form.__class__ in _post_init_registry:
def get_post_init_arg(name: str) -> Any:
if name in _init_var_registry[form]:
return _init_var_registry[form][name]
return getattr(instance, name)
dependency_graph: Dict[str, FrozenSet[str]] = {
name: frozenset() for name in init_args
}
for name, fn in _post_init_registry[form.__class__].items():
parameter_iter = iter(inspect.signature(fn).parameters)
next(parameter_iter) # skip self
dependency_graph[name] = frozenset(parameter_iter)
eval_order = toposort(dependency_graph)
for name in eval_order:
if name in _init_var_registry[form]:
continue
value = getattr(instance, name, None)
if value is not None:
continue
if name not in _post_init_registry[form.__class__]:
continue
fn = _post_init_registry[form.__class__][name]
init_var_args = {
name: get_post_init_arg(name) for name in dependency_graph[name]
}
value = fn(instance, **init_var_args)
setattr(instance, name, sample(value, context.subcontext(name)))
return instance
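
# Illustrative sketch (not part of the original module): a derived field is
# computed from the other fields by name. The Rectangle class used here is
# purely hypothetical.
if __name__ == "__main__":
    @formclass
    class Rectangle:
        width: int = 3
        height: int = 4

        @derivedfield
        def area(self, width, height) -> int:
            return width * height

    print(sample(Rectangle()).area)  # prints 12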
```
#### File: plato/internal/graph.py
```python
from typing import Collection, Dict, Mapping, Set, TypeVar
T = TypeVar("T")
def toposort(graph: Mapping[T, Collection[T]]) -> Collection[T]:
"""Returns the topological sort of ``graph``.
Arguments
---------
graph
A mapping to the dependencies of a vertex that need to
be sorted prior to a vertex.
Returns
-------
The topological sort of ``graph``, i.e. the keys in ``graph`` in the order
they need to be processed to always have all necessary dependencies
available when processing a particular key.
Raises
------
ValueError
If the ``graph`` contains a cycle and thus no topological sorting
exists.
"""
remaining_graph = {vertex: set(edges) for vertex, edges in graph.items()}
toposorted = []
dependencies_fulfilled = set(
vertex for vertex, edges in graph.items() if len(edges) == 0
)
dependents: Dict[T, Set[T]] = {vertex: set() for vertex in graph}
for vertex, edges in graph.items():
for edge in edges:
dependents[edge].add(vertex)
while dependencies_fulfilled:
vertex = dependencies_fulfilled.pop()
toposorted.append(vertex)
for dependent in dependents[vertex]:
remaining_graph[dependent].remove(vertex)
if len(remaining_graph[dependent]) == 0:
dependencies_fulfilled.add(dependent)
if any(len(edges) > 0 for edges in remaining_graph.values()):
raise ValueError("The graph must not contain cycles.")
return toposorted
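
# Illustrative sketch (not part of the original module): each vertex maps to
# the set of vertices it depends on, and the returned order lists
# dependencies before their dependents.
if __name__ == "__main__":
    order = list(toposort({"a": {"b", "c"}, "b": {"c"}, "c": set()}))
    assert order.index("c") < order.index("b") < order.index("a")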
```
#### File: tests/internal/test_weak_id_dict.py
```python
import weakref
import pytest
from plato.internal.weak_id_dict import WeakIdDict
class Dummy:
pass
def test_missing_key():
weak_id_dict = WeakIdDict()
key = Dummy()
assert key not in weak_id_dict
with pytest.raises(KeyError):
weak_id_dict[key] # pylint: disable=pointless-statement
with pytest.raises(KeyError):
del weak_id_dict[key]
def test_setting_key():
weak_id_dict = WeakIdDict()
key = Dummy()
weak_id_dict[key] = "value"
assert key in weak_id_dict
assert weak_id_dict[key] == "value"
weak_id_dict[key] = "different-value"
assert key in weak_id_dict
assert weak_id_dict[key] == "different-value"
different_key = Dummy()
assert different_key not in weak_id_dict
weak_id_dict[different_key] = "another-value"
assert weak_id_dict[key] == "different-value"
def test_deleting_key():
weak_id_dict = WeakIdDict()
key = Dummy()
weak_id_dict[key] = "value"
del weak_id_dict[key]
assert key not in weak_id_dict
with pytest.raises(KeyError):
weak_id_dict[key] # pylint: disable=pointless-statement
def test_iterating_keys():
weak_id_dict = WeakIdDict()
keys = [Dummy() for i in range(4)]
for k in keys:
weak_id_dict[k] = "value"
assert list(weak_id_dict) == keys
assert len(weak_id_dict) == len(keys)
def test_keys_are_weak():
weak_id_dict = WeakIdDict()
key = Dummy()
ref = weakref.ref(key)
weak_id_dict[key] = "value"
del key
assert ref() is None
```
|
{
"source": "jgosmann/psyrun",
"score": 3
}
|
#### File: psyrun/psyrun/mapper.py
```python
from multiprocessing import Pool
import os.path
import warnings
from psyrun.exceptions import IneffectiveExcludeWarning
from psyrun.pspace import dict_concat, missing, Param
def get_result(fn, params, exclude=None):
"""Evaluates a function with given parameters.
Evaluates *fn* with the parameters *param* and returns a dictionary with
the input parameters and returned output values.
Parameters
----------
fn : function
Function to evaluate. Has to return a dictionary.
params : dict
Parameters passed to *fn* as keyword arguments.
exclude : sequence, optional
Keys of elements to exclude from the results dictionary.
Returns
-------
dict
Returns *params* updated with the return value of *fn*.
Examples
--------
>>> def fn(x, is_result):
... return {'y': x * x, 'is_result': 1}
>>>
>>> from pprint import pprint
>>> pprint(get_result(fn, {'x': 4, 'is_result': 0}))
{'is_result': 1, 'x': 4, 'y': 16}
"""
result = dict(params)
result.update(fn(**params))
if exclude is not None:
for k in exclude:
if k in result:
del result[k]
else:
warnings.warn(IneffectiveExcludeWarning(k))
return result
def _get_result_single_arg(args):
return get_result(*args)
def map_pspace(fn, pspace, exclude=None):
"""Maps a function to parameter space values.
Parameters
----------
fn : function
Function to evaluate on parameter space. Has to return a dictionary.
pspace : `ParameterSpace`
Parameter space providing parameter values to evaluate function on.
exclude : sequence, optional
Keys of elements to exclude from the results dictionary.
Returns
-------
dict
Dictionary with the input parameter values and the function return
values.
Examples
--------
>>> def fn(x):
... return {'y': x * x}
>>>
>>> from pprint import pprint
>>> from psyrun import Param
>>> pprint(map_pspace(fn, Param(x=[1, 2])))
{'x': [1, 2], 'y': [1, 4]}
"""
return dict_concat(list(get_result(
fn, p, exclude) for p in pspace.iterate()))
def map_pspace_hdd_backed(
fn, pspace, filename, store, return_data=True, pool_size=1,
exclude=None):
"""Maps a function to parameter space values while storing produced data.
Data is stored progressively. Thus, if the program crashes, not all data
will be lost.
Parameters
----------
fn : function
Function to evaluate on parameter space. Has to return a dictionary.
pspace : `ParameterSpace`
Parameter space providing parameter values to evaluate function on.
filename : str
Filename of file to store data to.
store : `Store`
Store to save data with.
return_data : bool, optional
Whether to return the resulting data after mapping the function. This
will read all produced data from the disk.
pool_size : int, optional
Number of parallel processes used to evaluate the parameter sets.
exclude : sequence, optional
Keys of elements to exclude from the results dictionary.
Returns
-------
None or dict
Dictionary with the input parameter values and the function return
values if requested.
"""
if os.path.exists(filename):
pspace = missing(pspace, Param(**store.load(filename)))
chunksize = max(1, len(pspace) // pool_size)
for r in Pool(pool_size).imap_unordered(
_get_result_single_arg,
((fn, p, exclude) for p in pspace.iterate()), chunksize):
store.append(filename, dict_concat((r,)))
if not os.path.exists(filename):
store.save(filename, {})
if return_data:
return store.load(filename)
def map_pspace_parallel(
fn, pspace, n_jobs=-1, backend='multiprocessing', exclude=None):
"""Maps a function to parameter space values in parallel.
Requires `joblib <https://pythonhosted.org/joblib/>`_.
Parameters
----------
fn : function
Function to evaluate on parameter space. Has to return a dictionary.
pspace : ParameterSpace
Parameter space providing parameter values to evaluate function on.
n_jobs : int, optional
Number of parallel jobs. Set to -1 to automatically determine.
backend : str, optional
Backend to use. See `joblib documentation
<https://pythonhosted.org/joblib/parallel.html#using-the-threading-backend>`_
for details.
exclude : sequence, optional
Keys of elements to exclude from the results dictionary.
Returns
-------
dict
Dictionary with the input parameter values and the function return
values.
Examples
--------
>>> from pprint import pprint
>>> from psyrun import Param
>>> from psyrun.utils.example import square
>>>
>>> pprint(map_pspace_parallel(square, Param(x=[1, 2])))
{'x': [1, 2], 'y': [1, 4]}
"""
import joblib
parallel = joblib.Parallel(n_jobs=n_jobs, backend=backend)
return dict_concat(parallel(
joblib.delayed(get_result)(fn, p, exclude) for p in pspace.iterate()))
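
# Illustrative sketch (not part of the original module): hdd-backed mapping
# writes each result to disk as it is produced, so an interrupted run can be
# resumed by calling the function again with the same filename. The file name
# 'partial.pkl' is arbitrary.
if __name__ == "__main__":
    from psyrun.store.pickle import PickleStore
    from psyrun.utils.example import square

    data = map_pspace_hdd_backed(
        square, Param(x=[1, 2]), filename='partial.pkl', store=PickleStore())
    print(sorted(data['y']))  # [1, 4]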
```
#### File: psyrun/store/base.py
```python
import os.path
from pkg_resources import iter_entry_points
class Store(object):
"""Defines the interface of stores.
Register implemented stores as
`entry points <https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
in the ``psyrun.stores`` group. For example, add the following to the
``setup`` call in your store's ``setup.py`` for a store providing the
``.ext`` format::
entry_points={
'psyrun.stores': ['.ext = pkg.name:ClassName'],
}
Attributes
----------
ext : str
Filename extension used by the store.
"""
ext = ''
def append(self, filename, data):
"""Append data to file.
When trying to append data to a non-existing file, a new file will be
created. The backend may require that a file was created with this
function to be able to append to it.
Parameters
----------
filename : str
Filename of file to append the data to.
data : dict
Dictionary with data to append.
"""
raise NotImplementedError()
def save(self, filename, data):
"""Save data to a file.
Parameters
----------
filename : str
Filename of file to save data to.
data : dict
Dictionary with data to store.
"""
raise NotImplementedError()
def load(self, filename, row=None):
"""Load data from a file.
Parameters
----------
filename : str
Filename of file to load data from.
row : int, optional
If given, only the row with this index will be loaded.
Returns
-------
dict
Loaded data.
"""
raise NotImplementedError()
def _safe_ep_load():
for ep in iter_entry_points('psyrun.stores'):
try:
yield ep.name, ep.load()
except ImportError:
pass
class AutodetectStore(Store):
"""Automatically selects the store based on the file extension."""
registry = dict(_safe_ep_load())
@classmethod
def get_concrete_store(cls, filename):
_, ext = os.path.splitext(filename)
return cls.registry[ext.lower()]()
def append(self, filename, data):
return self.get_concrete_store(filename).append(filename, data)
def save(self, filename, data):
return self.get_concrete_store(filename).save(filename, data)
def load(self, filename, row=None):
return self.get_concrete_store(filename).load(filename, row=row)
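
# Illustrative sketch (not part of the original module): a minimal pickle-based
# store implementing the interface above. The class and its '.demo' extension
# are hypothetical; a real store would additionally be registered as a
# 'psyrun.stores' entry point as described in the Store docstring.
class _DemoPickleStore(Store):
    ext = '.demo'

    def save(self, filename, data):
        import pickle
        with open(filename, 'wb') as f:
            pickle.dump(dict(data), f)

    def append(self, filename, data):
        merged = self.load(filename) if os.path.exists(filename) else {}
        for k, v in data.items():
            merged.setdefault(k, []).extend(v)
        self.save(filename, merged)

    def load(self, filename, row=None):
        import pickle
        with open(filename, 'rb') as f:
            data = pickle.load(f)
        if row is None:
            return data
        return {k: v[row] for k, v in data.items()}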
```
#### File: psyrun/psyrun/tasks.py
```python
from __future__ import print_function
import os
import os.path
import re
import sys
import traceback
import warnings
from psyrun.backend import DefaultBackend
from psyrun.pspace import Param
from psyrun.store import DefaultStore
from psyrun.scheduler import ImmediateRun
class TaskDef(object):
"""Task defined by a Python file.
Parameters
----------
path : str
Python file to load as task.
conf : `Config`
Default values for task parameters.
Attributes
----------
TASK_PATTERN : re.RegexObject
Regular expression to match task filenames.
"""
TASK_PATTERN = re.compile(r'^task_(.*)$')
def __init__(self, path, conf=None):
if conf is None:
taskdir = os.path.dirname(path)
conffile = os.path.join(taskdir, 'psy-conf.py')
if os.path.exists(conffile):
conf = Config.load_from_file(conffile)
else:
conf = Config()
_set_public_attrs_from_dict(
self, _load_pyfile(path), only_existing=False)
self.path = path
if not hasattr(self, 'name'):
prefixed_name, _ = os.path.splitext(os.path.basename(path))
m = self.TASK_PATTERN.match(prefixed_name)
if m:
self.name = m.group(1)
else:
self.name = prefixed_name
conf.apply_as_default(self)
def _load_pyfile(filename):
source = ''
with open(filename, 'r') as f:
source += f.read()
code = compile(source, filename, 'exec')
loaded = {'__file__': filename}
exec(code, loaded) # pylint: disable=exec-used
return loaded
def _set_public_attrs_from_dict(obj, d, only_existing=True):
for k, v in d.items():
if not k.startswith('_') and (not only_existing or hasattr(obj, k)):
setattr(obj, k, v)
class Config(object): # pylint: disable=too-many-instance-attributes
"""Task configuration.
Attributes
----------
backend : `Backend`, default: `DistributeBackend`
The processing backend which determines how work is distributed across
jobs.
exclude_from_result : sequence of str, default: ``[]``
Keys of items to exclude from result. This can be useful if parameters
or parts of the result cannot be saved to disk.
file_dep : sequence of str, default: ``[]``
Additional files the task depends on.
max_jobs : int, default: 100
Maximum number of jobs to start. With less jobs each job has to process
more parameter assignments. It depends on the scheduler and backend
used to which degree these will run in parallel.
min_items : int, default: 1
Minimum number of parameter assignments to evaluate per job. If a single
assignment is fast to evaluate, increasing this number can improve
performance because Psyrun will not start a new job for each parameter
assignment which can save some overhead.
overwrite_dirty : bool, default: True
Whether to overwrite dirty workdirs without a warning.
pool_size : int, default: 1
Number of parallel threads or processes each job will run. This allows
for parallelization without a proper scheduler (e.g. when using
`psyrun.scheduler.ImmediateRun`).
pspace : `ParameterSpace`, required
Parameter space to evaluate.
python : str, default: ``sys.executable``
Path to Python interpreter to use.
resultfile : str or None, default: None
Path to save the results of the finished task at. If None, this
defaults to ``'result.<ext>'`` in the *workdir*.
scheduler : `Scheduler`, default: `ImmediateRun`
Scheduler to use to submit individual jobs.
scheduler_args : dict, default: ``{}``
Additional scheduler arguments. See the documentation of the
scheduler for details.
setup : function, default: None
Function to call after starting a worker process before any parameter
sets are processed. The function gets the ID of the worker process
(usually starting at 0 and incremented by one for each process) as sole
argument. It may return a dictionary of additional arguments to pass
to the processing function. The setup function can be used to
initialize process wide resources.
store : `Store`, default: `PickleStore`
Input/output backend.
workdir : str, default: ``'psy-work'``
Working directory to store results and supporting data to process the
task.
"""
__slots__ = [
'backend', 'exclude_from_result', 'file_dep', 'max_jobs',
'min_items', 'pool_size', 'pspace', 'overwrite_dirty', 'python',
'resultfile', 'scheduler', 'scheduler_args', 'setup', 'store',
'workdir']
def __init__(self):
self.backend = DefaultBackend
self.exclude_from_result = []
self.file_dep = []
self.max_jobs = 100
self.min_items = 1
self.overwrite_dirty = True
self.pool_size = 1
self.pspace = Param()
self.python = sys.executable
self.resultfile = None
self.scheduler = ImmediateRun()
self.scheduler_args = dict()
self.setup = None
self.store = DefaultStore()
self.workdir = os.path.abspath('psy-work')
@classmethod
def load_from_file(cls, filename):
"""Load the configuration values from a Python file.
Parameters
----------
filename : str
Python file to load.
"""
conf = cls()
loaded_conf = _load_pyfile(filename)
_set_public_attrs_from_dict(conf, loaded_conf)
return conf
def apply_as_default(self, task):
"""Copies the attributes to a different object given they are not set
in that object.
Parameters
----------
task : obj
Object to copy the attributes to.
"""
for attr in self.__slots__:
if not hasattr(task, attr):
setattr(task, attr, getattr(self, attr))
class PackageLoader(object):
"""Loads tasks from Python files.
Filenames have to match the regular expression defined in
`TaskDef.TASK_PATTERN`. See `Config` for supported module
level variables in the task definition.
It is possible to set these variables for all tasks by setting them in
the file ``psy-conf.py`` in the *taskdir*.
Parameters
----------
taskdir : str
Directory to load task files from.
Attributes
----------
taskdir : str
Directory to load task files from.
conf : `Config`
Default values for module level task variables.
"""
def __init__(self, taskdir):
super(PackageLoader, self).__init__()
self.taskdir = taskdir
conffile = os.path.join(self.taskdir, 'psy-conf.py')
if os.path.exists(conffile):
self.conf = Config.load_from_file(conffile)
else:
self.conf = Config()
def load_task_defs(self):
"""Load task definitions.
Returns
-------
list of `TaskDef`
Task definitions.
"""
task_defs = []
for filename in os.listdir(self.taskdir):
root, ext = os.path.splitext(filename)
if TaskDef.TASK_PATTERN.match(root) and ext == '.py':
path = os.path.join(self.taskdir, filename)
try:
task_defs.append(TaskDef(path, self.conf))
except Exception: # pylint: disable=broad-except
traceback.print_exc()
warnings.warn("Task {path!r} could not be loaded.".format(
path=path))
return task_defs
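
# Illustrative sketch (not part of the original module): a minimal task file
# that PackageLoader would pick up when saved as ``task_square.py`` in the
# task directory. Only a parameter space and an ``execute`` function are
# required; all other settings fall back to the ``Config`` defaults:
#
#     from psyrun import Param
#
#     pspace = Param(x=range(4))
#
#     def execute(x):
#         return {'y': x ** 2}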
```
#### File: tests/tasks/task_square_load_balanced.py
```python
from psyrun import Param
from psyrun.backend import LoadBalancingBackend
pspace = Param(x=range(4))
max_jobs = 2
backend = LoadBalancingBackend
exclude_from_result = ['z']
def setup(proc_id):
assert proc_id in (0, 1)
return {'p': 2}
def execute(x, p):
return {'y': x ** p, 'z': -1}
```
#### File: tests/tasks/task_square.py
```python
from psyrun import Param
pspace = Param(x=range(4))
overwrite_dirty = False
exclude_from_result = ['z']
def setup(proc_id):
assert 0 <= proc_id < 4
return {'p': 2}
def execute(x, p):
return {'y': x ** p, 'z': -1}
```
#### File: psyrun/tests/test_main.py
```python
from __future__ import unicode_literals
import os
import os.path
import re
import shutil
import time
import numpy as np
import pytest
from psyrun.main import psy_main
from psyrun.exceptions import JobsRunningWarning, TaskWorkdirDirtyWarning
from psyrun.store.h5 import H5Store
from psyrun.store.npz import NpzStore
from psyrun.store.pickle import PickleStore
from psyrun.tasks import TaskDef, Config
from psyrun.utils.testing import MockScheduler, TASKDIR, taskenv
@pytest.fixture
def scheduler(taskenv, request):
jobfile = os.path.join(taskenv.rootdir, 'jobfile')
mock = MockScheduler(jobfile)
def fin():
try:
os.remove(jobfile)
except OSError:
pass
request.addfinalizer(fin)
return mock
def get_task_path(name):
return os.path.join(TASKDIR, 'task_' + name + '.py')
def test_load_task_defaults():
task = TaskDef(get_task_path('square'))
assert task.path == get_task_path('square')
assert task.name == 'square'
assert hasattr(task, 'scheduler')
assert hasattr(task, 'scheduler_args')
assert hasattr(task, 'python')
def test_load_task_uses_config_as_default():
conf = Config()
conf.python = 'env python'
task1 = TaskDef(get_task_path('square'), conf)
assert task1.python == 'env python'
task2 = TaskDef(get_task_path('noop'), conf)
assert task2.python == 'true'
def test_load_config_from_file(tmpdir):
conffile = os.path.join(str(tmpdir), 'conf.py')
with open(conffile, 'w') as f:
f.write('python = "env python"')
conf = Config.load_from_file(conffile)
assert conf.python == 'env python'
@pytest.mark.parametrize('task', ['square', 'square_load_balanced'])
def test_psyrun(taskenv, task):
psy_main(['run', '--taskdir', taskenv.taskdir, task])
result = PickleStore().load(
os.path.join(taskenv.workdir, task, 'result.pkl'))
assert sorted(result['y']) == [0, 1, 4, 9]
def test_psyrun_h5_backend(taskenv):
psy_main(['run', '--taskdir', taskenv.taskdir, 'square_h5'])
result = H5Store().load(
os.path.join(taskenv.workdir, 'square_h5', 'result.h5'))
assert sorted(result['y']) == [0, 1, 4, 9]
def test_psyrun_npz_backend(taskenv):
psy_main(['run', '--taskdir', taskenv.taskdir, 'square_npz'])
result = NpzStore().load(
os.path.join(taskenv.workdir, 'square_npz', 'result.npz'))
assert sorted(result['y']) == [0, 1, 4, 9]
@pytest.mark.parametrize('task', ['square', 'square_load_balanced'])
def test_exclude_from_result(taskenv, task):
psy_main(['run', '--taskdir', taskenv.taskdir, task])
result = PickleStore().load(
os.path.join(taskenv.workdir, task, 'result.pkl'))
assert 'z' not in result
def test_fails_for_existing_old_results_by_default(taskenv):
psy_main(['run', '--taskdir', taskenv.taskdir, 'square'])
# Still up to date, not warning
with pytest.warns(None) as record:
psy_main(['run', '--taskdir', taskenv.taskdir, 'square'])
for w in record:
assert not issubclass(w.category, TaskWorkdirDirtyWarning)
time.sleep(1)
os.utime(os.path.join(taskenv.taskdir, 'task_square.py'), None)
with pytest.warns(TaskWorkdirDirtyWarning):
psy_main(['run', '--taskdir', taskenv.taskdir, 'square'])
@pytest.mark.parametrize('task', ['square', 'square_load_balanced'])
def test_psyrun_can_continue_interrupted_job(taskenv, task):
psy_main(['run', '--taskdir', taskenv.taskdir, task])
result = PickleStore().load(
os.path.join(taskenv.workdir, task, 'result.pkl'))
assert sorted(result['y']) == [0, 1, 4, 9]
time.sleep(1)
with open(os.path.join(
taskenv.taskdir, 'task_{}.py'.format(task)), 'a') as f:
f.write('\npspace += Param(x=[4, 5])\n')
psy_main(['run', '--taskdir', taskenv.taskdir, '-c', task])
result = PickleStore().load(
os.path.join(taskenv.workdir, task, 'result.pkl'))
assert sorted(result['y']) == [0, 1, 4, 9, 16, 25]
@pytest.mark.parametrize('task', ['square', 'square_load_balanced'])
def test_psyrun_can_continue_job_with_outdated_taskfile(taskenv, task):
result_file = os.path.join(taskenv.workdir, task, 'result.pkl')
psy_main(['run', '--taskdir', taskenv.taskdir, task])
result = PickleStore().load(result_file)
assert sorted(result['y']) == [0, 1, 4, 9]
with open(os.path.join(
taskenv.taskdir, 'task_{}.py'.format(task)), 'a') as f:
f.write('\npspace += Param(x=[4, 5])\n')
psy_main(['status', '--taskdir', taskenv.taskdir, task])
os.utime(result_file, None)
psy_main(['run', '--taskdir', taskenv.taskdir, '-c', task])
result = PickleStore().load(result_file)
assert sorted(result['y']) == [0, 1, 4, 9, 16, 25]
def test_psyrun_can_continue_interrupted_job_no_result_file(taskenv):
psy_main(['run', '--taskdir', taskenv.taskdir, 'square'])
result = PickleStore().load(
os.path.join(taskenv.workdir, 'square', 'result.pkl'))
assert sorted(result['y']) == [0, 1, 4, 9]
with open(os.path.join(taskenv.taskdir, 'task_square.py'), 'a') as f:
f.write('\npspace += Param(x=[4, 5])\n')
os.remove(os.path.join(taskenv.workdir, 'square', 'result.pkl'))
psy_main(['run', '--taskdir', taskenv.taskdir, '-c', 'square'])
result = PickleStore().load(
os.path.join(taskenv.workdir, 'square', 'result.pkl'))
assert sorted(result['y']) == [0, 1, 4, 9, 16, 25]
def test_allows_to_clean_results(taskenv):
psy_main(['run', '--taskdir', taskenv.taskdir, 'square'])
time.sleep(1)
os.utime(os.path.join(taskenv.taskdir, 'task_square.py'), None)
psy_main(['clean', '--taskdir', taskenv.taskdir, 'square'])
with pytest.warns(None) as record:
psy_main(['run', '--taskdir', taskenv.taskdir, 'square'])
for w in record:
assert not issubclass(w.category, TaskWorkdirDirtyWarning)
def test_psyrun_workdir_contents(taskenv):
workdir = os.path.join('psy-work', 'square')
os.remove(os.path.join(taskenv.taskdir, 'psy-conf.py'))
psy_main(['run', '--taskdir', taskenv.taskdir, 'square'])
assert os.path.exists(os.path.join(workdir, 'in', '0.pkl'))
assert os.path.exists(os.path.join(workdir, 'out', '0.pkl'))
assert os.path.exists(os.path.join(workdir, 'result.pkl'))
assert os.path.exists(os.path.join(workdir, 'square:split.py'))
assert os.path.exists(os.path.join(workdir, 'square:process.py'))
assert os.path.exists(os.path.join(workdir, 'square:merge.py'))
assert os.path.exists(os.path.join(workdir, 'square:split.log'))
assert os.path.exists(os.path.join(workdir, 'square:process:0.log'))
assert os.path.exists(os.path.join(workdir, 'square:merge.log'))
def test_psyrun_workdir_contents_load_balanced(taskenv):
workdir = os.path.join('psy-work', 'square_load_balanced')
os.remove(os.path.join(taskenv.taskdir, 'psy-conf.py'))
psy_main(['run', '--taskdir', taskenv.taskdir, 'square_load_balanced'])
assert os.path.exists(os.path.join(workdir, 'in.pkl'))
assert os.path.exists(os.path.join(workdir, 'result.pkl'))
assert os.path.exists(os.path.join(
workdir, 'square_load_balanced:pspace.py'))
assert os.path.exists(os.path.join(
workdir, 'square_load_balanced:process.py'))
assert os.path.exists(os.path.join(
workdir, 'square_load_balanced:process:0.log'))
assert os.path.exists(os.path.join(
workdir, 'square_load_balanced:process:1.log'))
def test_psyrun_file_dep(taskenv):
with open(os.path.join(taskenv.taskdir, 'in.txt'), 'w') as f:
f.write('2')
psy_main(['run', '--taskdir', taskenv.taskdir, 'file_dep'])
result = PickleStore().load(os.path.join(
taskenv.workdir, 'file_dep', 'result.pkl'))
assert sorted(result['y']) == [4]
# Ensure that modification time changes as some file systems only support
# 1s resolution.
time.sleep(1)
with open(os.path.join(taskenv.taskdir, 'in.txt'), 'w') as f:
f.write('3')
psy_main(['run', '--taskdir', taskenv.taskdir, 'file_dep'])
result = PickleStore().load(os.path.join(
taskenv.workdir, 'file_dep', 'result.pkl'))
assert sorted(result['y']) == [8]
@pytest.mark.filterwarnings(JobsRunningWarning)
def test_psyrun_does_not_resubmit_queued_jobs(taskenv, scheduler):
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
expected = scheduler.joblist
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
assert len(expected) == len(scheduler.joblist)
assert all(x['id'] == y['id'] for x, y in zip(expected, scheduler.joblist))
def test_psyrun_remerges_if_result_is_missing(taskenv, scheduler):
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
scheduler.consume()
os.remove(os.path.join(taskenv.workdir, 'mocked_scheduler', 'result.pkl'))
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
assert len(scheduler.joblist) == 1
assert 'merge' in scheduler.joblist[0]['name']
def test_psyrun_no_resubmits_if_result_is_uptodate(taskenv, scheduler):
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
scheduler.consume()
for dirpath, _, filenames in os.walk(taskenv.workdir):
for filename in filenames:
if filename == 'result.pkl':
continue
os.remove(os.path.join(dirpath, filename))
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
assert len(scheduler.joblist) == 0
def test_psyrun_resubmits_for_missing_job_output(taskenv, scheduler):
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
scheduler.consume()
os.remove(os.path.join(taskenv.workdir, 'mocked_scheduler', 'result.pkl'))
os.remove(os.path.join(
taskenv.workdir, 'mocked_scheduler', 'out', '0.pkl'))
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
assert len(scheduler.joblist) == 2
assert 'process:0' in scheduler.joblist[0]['name']
assert 'merge' in scheduler.joblist[1]['name']
def test_psyrun_does_not_resubmit_split_if_infiles_uptodate(
taskenv, scheduler):
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
scheduler.consume()
for dirpath, _, filenames in os.walk(taskenv.workdir):
for filename in filenames:
if os.path.basename(dirpath) == 'out':
continue
os.remove(os.path.join(dirpath, filename))
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
for job in scheduler.joblist:
assert 'split' not in job['name']
def test_psyrun_resubmits_jobs_if_necessary(taskenv, scheduler):
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
scheduler.consume()
shutil.rmtree(taskenv.workdir)
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
assert len(scheduler.joblist) == 6
assert 'split' in scheduler.joblist[0]['name']
for i in range(4):
assert 'process:{0}'.format(i) in scheduler.joblist[i + 1]['name']
assert 'merge' in scheduler.joblist[5]['name']
def test_psyrun_shows_error_if_resubmit_of_queued_job_necessary(
taskenv, scheduler):
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
scheduler.consume_job(scheduler.joblist[0])
scheduler.consume_job(scheduler.joblist[1])
expected = scheduler.joblist
time.sleep(1)
t = time.time()
os.utime(
os.path.join(taskenv.taskdir, 'task_mocked_scheduler.py'),
(t, t))
with pytest.warns(JobsRunningWarning):
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
assert len(expected) == len(scheduler.joblist)
assert all(x['id'] == y['id'] for x, y in zip(expected, scheduler.joblist))
def test_psyrun_resubmits_merge_if_result_is_outdated(taskenv, scheduler):
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
scheduler.consume()
time.sleep(1)
t = time.time()
os.utime(
os.path.join(taskenv.taskdir, 'task_mocked_scheduler.py'),
(t, t))
for i in range(4):
os.utime(os.path.join(
taskenv.workdir, 'mocked_scheduler', 'in',
str(i) + '.pkl'), (t, t))
os.utime(os.path.join(
taskenv.workdir, 'mocked_scheduler', 'out',
str(i) + '.pkl'), (t, t))
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
assert len(scheduler.joblist) == 1
assert 'merge' in scheduler.joblist[0]['name']
def test_psyrun_resubmits_process_and_merge_if_outfile_is_outdated(
taskenv, scheduler):
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
scheduler.consume()
time.sleep(1)
t = time.time()
os.utime(
os.path.join(taskenv.taskdir, 'task_mocked_scheduler.py'),
(t, t))
for i in range(4):
os.utime(os.path.join(
taskenv.workdir, 'mocked_scheduler', 'in',
str(i) + '.pkl'), (t, t))
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
assert len(scheduler.joblist) == 5
for i in range(4):
assert 'process:{0}'.format(i) in scheduler.joblist[i]['name']
assert 'merge' in scheduler.joblist[4]['name']
def test_psyrun_resubmits_all_if_infile_is_outdated(
taskenv, scheduler):
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
scheduler.consume()
time.sleep(1)
t = time.time()
os.utime(
os.path.join(taskenv.taskdir, 'task_mocked_scheduler.py'),
(t, t))
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
assert len(scheduler.joblist) == 6
assert 'split' in scheduler.joblist[0]['name']
for i in range(4):
assert 'process:{0}'.format(i) in scheduler.joblist[i + 1]['name']
assert 'merge' in scheduler.joblist[5]['name']
def test_multiple_splits(taskenv):
psy_main(['run', '--taskdir', taskenv.taskdir, 'square2'])
result = PickleStore().load(os.path.join(
taskenv.workdir, 'square2', 'result.pkl'))
assert sorted(result['y']) == [0, 1, 4, 9]
def test_psy_run_runs_all_tasks(taskenv):
psy_main(['run', '--taskdir', taskenv.taskdir, 'square', 'square2'])
result = PickleStore().load(
os.path.join(taskenv.workdir, 'square', 'result.pkl'))
assert sorted(result['y']) == [0, 1, 4, 9]
result = PickleStore().load(os.path.join(
taskenv.workdir, 'square2', 'result.pkl'))
assert sorted(result['y']) == [0, 1, 4, 9]
def test_psy_list(taskenv, capsys):
expected = set()
for entry in os.listdir(taskenv.taskdir):
m = re.match(r'^task_(.*)\.py$', entry)
if m:
expected.add(m.group(1))
psy_main(['list', '--taskdir', taskenv.taskdir])
out, _ = capsys.readouterr()
listed = {x.strip() for x in out.split('\n') if x != ''}
assert listed == expected
def test_psy_status(taskenv, capsys):
psy_main(['status', '--taskdir', taskenv.taskdir, 'square'])
out, _ = capsys.readouterr()
assert out == """square:
0 out of 4 rows completed.
"""
psy_main(['status', '--taskdir', taskenv.taskdir, '-v', 'square'])
out, _ = capsys.readouterr()
assert out == """square:
0 out of 4 rows completed.
Missing parameter sets:
{'x': 0}
{'x': 1}
{'x': 2}
{'x': 3}
Queued parameter sets:
No failed jobs.
"""
psy_main(['run', '--taskdir', taskenv.taskdir, 'square'])
out, _ = capsys.readouterr()
psy_main(['status', '--taskdir', taskenv.taskdir, '-v', 'square'])
out, _ = capsys.readouterr()
assert out == """square:
4 out of 4 rows completed.
Missing parameter sets:
Queued parameter sets:
No failed jobs.
"""
os.remove(os.path.join(taskenv.workdir, 'square', 'out', '2.pkl'))
psy_main(['status', '--taskdir', taskenv.taskdir, '-v', 'square'])
out, _ = capsys.readouterr()
assert out == """square:
4 out of 4 rows completed.
Missing parameter sets:
Queued parameter sets:
Failed jobs:
square:process:2
"""
os.remove(os.path.join(taskenv.workdir, 'square', 'result.pkl'))
psy_main(['status', '--taskdir', taskenv.taskdir, '-v', 'square'])
out, _ = capsys.readouterr()
assert out == """square:
3 out of 4 rows completed.
Missing parameter sets:
{'x': 2}
Queued parameter sets:
Failed jobs:
square:process:2
"""
def test_psy_status_load_balanced(taskenv, capsys):
psy_main(['status', '--taskdir', taskenv.taskdir, 'square_load_balanced'])
out, _ = capsys.readouterr()
assert out == """square_load_balanced:
0 out of 4 rows completed.
"""
psy_main(['status', '--taskdir', taskenv.taskdir, '-v',
'square_load_balanced'])
out, _ = capsys.readouterr()
assert out == """square_load_balanced:
0 out of 4 rows completed.
Missing parameter sets:
{'x': 0}
{'x': 1}
{'x': 2}
{'x': 3}
"""
psy_main(['run', '--taskdir', taskenv.taskdir, 'square_load_balanced'])
out, _ = capsys.readouterr()
psy_main(['status', '--taskdir', taskenv.taskdir, '-v',
'square_load_balanced'])
out, _ = capsys.readouterr()
assert out == """square_load_balanced:
4 out of 4 rows completed.
Missing parameter sets:
"""
def test_psy_kill(taskenv, scheduler):
psy_main(['run', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
assert len(scheduler.joblist) > 0
psy_main(['kill', '--taskdir', taskenv.taskdir, 'mocked_scheduler'])
assert len(scheduler.joblist) == 0
@pytest.mark.parametrize('store', [PickleStore(), NpzStore(), H5Store()])
def test_psy_merge(tmpdir, store):
resultfile = os.path.join(str(tmpdir), 'result' + store.ext)
outdir = os.path.join(str(tmpdir), 'out')
if not os.path.exists(outdir):
os.mkdir(outdir)
data_segments = ({'x': [1]}, {'x': [2]})
for i, d in enumerate(data_segments):
store.save(os.path.join(outdir, str(i) + store.ext), d)
psy_main(['merge', outdir, resultfile])
merged = store.load(resultfile)
assert list(merged.keys()) == ['x']
assert np.all(np.asarray(sorted(merged['x'])) == np.array([1, 2]))
def test_new_task(taskenv):
psy_main(['new-task', '--taskdir', taskenv.taskdir, 'new-task-test'])
assert os.path.exists(
os.path.join(taskenv.taskdir, 'task_new-task-test.py'))
assert psy_main(
['new-task', '--taskdir', taskenv.taskdir, 'new-task-test']) != 0
@pytest.mark.parametrize('scheduler_str', ['psyrun.scheduler.Sqsub', 'Sqsub'])
def test_new_task_scheduler_arg(taskenv, scheduler_str):
psy_main(['new-task', '--taskdir', taskenv.taskdir, '-s', scheduler_str,
'new-task-test'])
path = os.path.join(taskenv.taskdir, 'task_new-task-test.py')
with open(path, 'r') as f:
data = f.read()
assert 'from psyrun.scheduler import Sqsub' in data
assert 'scheduler = Sqsub' in data
assert 'scheduler_args = {' in data
assert 'scheduler_args = {}' not in data
```
#### File: psyrun/tests/test_mapper.py
```python
import os.path
import pytest
from psyrun.exceptions import IneffectiveExcludeWarning
from psyrun.store.h5 import H5Store
from psyrun.store.npz import NpzStore
from psyrun.store.pickle import PickleStore
from psyrun.pspace import Param
from psyrun.mapper import (
map_pspace, map_pspace_parallel, map_pspace_hdd_backed)
def square(a):
return {'x': a ** 2}
def test_map_pspace():
calls = []
def fn(**kwargs):
calls.append(kwargs)
return {'result': 42}
pspace = Param(a=[1, 2])
result = map_pspace(fn, pspace)
assert calls == [{'a': 1}, {'a': 2}]
assert result == {'a': [1, 2], 'result': [42, 42]}
@pytest.mark.parametrize('store', [PickleStore(), H5Store(), NpzStore()])
def test_hdd_backed_mapper(tmpdir, store):
pspace = Param(a=[1, 2])
filename = os.path.join(str(tmpdir), 'out' + store.ext)
result = map_pspace_hdd_backed(
square, pspace, filename=filename, store=store)
assert list(result['a']) == [1, 2]
assert list(result['x']) == [1, 4]
loaded = store.load(filename)
assert list(loaded['a']) == [1, 2]
assert list(loaded['x']) == [1, 4]
@pytest.mark.parametrize('store', [PickleStore(), H5Store(), NpzStore()])
def test_hdd_backed_mapper_continues(tmpdir, store):
pspace = Param(a=[1, 2])
filename = os.path.join(str(tmpdir), 'out' + store.ext)
store.append(filename, {'a': [1], 'x': [-1]})
result = map_pspace_hdd_backed(
square, pspace, filename=filename, store=store)
assert list(result['a']) == [1, 2]
assert list(result['x']) == [-1, 4]
loaded = store.load(filename)
assert list(loaded['a']) == [1, 2]
assert list(loaded['x']) == [-1, 4]
def test_map_pspace_parallel():
pspace = Param(a=[1, 2])
result = map_pspace_parallel(square, pspace)
assert result == {'a': [1, 2], 'x': [1, 4]}
@pytest.mark.parametrize(
'mapper', [map_pspace, map_pspace_parallel, map_pspace_hdd_backed])
def test_exclude(tmpdir, mapper):
pspace = Param(a=[1, 2])
kwargs = {}
if mapper is map_pspace_hdd_backed:
kwargs['store'] = PickleStore()
kwargs['filename'] = os.path.join(str(tmpdir), 'out.pkl')
result = mapper(square, pspace, exclude=['a'], **kwargs)
assert 'a' not in result
def test_exclude_warning_if_not_in_result():
pspace = Param(a=[1, 2])
with pytest.warns(IneffectiveExcludeWarning):
result = map_pspace(square, pspace, exclude=['b'])
```
#### File: psyrun/tests/test_scheduler.py
```python
import subprocess
import sys
from psyrun.scheduler import ImmediateRun, JobStatus, Slurm
def test_immediate_run_submit(tmpdir):
outfile = str(tmpdir) + '/out'
sched = ImmediateRun()
sched.submit(['echo', 'success'], outfile)
with open(outfile, 'r') as f:
assert f.read().strip() == 'success'
def test_immediate_run_failing_job(tmpdir):
outfile = str(tmpdir) + '/out'
sched = ImmediateRun()
jobid = sched.submit(['false'], outfile)
jobid = sched.submit(['echo', 'not echoed'], outfile, depends_on=[jobid])
with open(outfile, 'r') as f:
assert f.read().strip() == ''
def test_slurm_submit(monkeypatch, tmpdir):
def check_output(cmd, universal_newlines=None):
assert sorted(cmd) == sorted(
['sbatch', '-o', 'outfile', '-J', 'name', '0', '1'])
return b'name 1'
monkeypatch.setattr(subprocess, 'check_output', check_output)
sched = Slurm(str(tmpdir))
assert sched.submit(['0', '1'], 'outfile', name='name') == '1'
def test_slurm_submit_with_additional_args(monkeypatch, tmpdir):
def check_output(cmd, universal_newlines=None):
if cmd[0] == 'squeue':
return '\n0 R name None\n'
assert sorted(cmd) == sorted([
'sbatch', '-t', '1h', '-o', 'outfile', '-J', 'name', '-d',
'afterok:0', '0', '1'])
return b'name 1'
monkeypatch.setattr(subprocess, 'check_output', check_output)
sched = Slurm(str(tmpdir))
assert sched.submit(
['0', '1'], 'outfile', name='name', depends_on=['0'],
scheduler_args={'timelimit': '1h'}) == '1'
def test_slurm_kill(monkeypatch, tmpdir):
def check_call(cmd):
assert cmd == ['scancel', '1']
monkeypatch.setattr(subprocess, 'check_call', check_call)
sched = Slurm(str(tmpdir))
sched.kill('1')
def test_slurm_status(monkeypatch, tmpdir):
def check_output(cmd, universal_newlines=None, stderr=None):
assert cmd[:2] == ['squeue', '-u']
assert cmd[3:5] == ['-h', '-o']
assert cmd[5] == '%i %t %j %r' or cmd[5] == '%A %t %j %r'
return '''id state name reason
1 R running None
2 PD pending Dependency
3 PD priority Priority
4_[0-2] PD array Priority
'''
monkeypatch.setattr(subprocess, 'check_output', check_output)
sched = Slurm(str(tmpdir))
assert sorted(sched.get_jobs()) == ['1', '2', '3', '4_0', '4_1', '4_2']
assert sched.get_status('1') == JobStatus('1', 'R', 'running')
assert sched.get_status('2') == JobStatus('2', '*Q', 'pending')
assert sched.get_status('3') == JobStatus('3', 'Q', 'priority')
for i in range(3):
job_id = '4_' + str(i)
assert sched.get_status(job_id) == JobStatus(
job_id, 'Q', 'array:' + str(i))
def test_slurm_submit_array(monkeypatch, tmpdir):
class Popen(object):
def __init__(self, cmd, stdin=None, stdout=None):
assert sorted(cmd) == sorted(
['sbatch', '--array=0-2', '-o', 'outfile'])
self.returncode = 0
def communicate(self, input):
assert input.decode('utf-8') == '#!' + sys.executable + '''
import os
import subprocess
import sys
task_id = os.environ['SLURM_ARRAY_TASK_ID']
sys.exit(subprocess.call([a.replace('%a', str(task_id)) for a in ['arg0', 'arg1']]))
'''
return b'array 1', b''
monkeypatch.setattr(subprocess, 'Popen', Popen)
sched = Slurm(str(tmpdir))
sched.submit_array(3, ['arg0', 'arg1'], 'outfile')
```
#### File: psyrun/utils/testing.py
```python
import os.path
import pickle
import shutil
import subprocess
import pytest
from psyrun.scheduler import JobStatus, Scheduler
from psyrun.utils.doc import inherit_docs
TASKDIR = os.path.join(os.path.dirname(__file__), '../tests/tasks')
class TaskEnv(object):
def __init__(self, tmpdir):
self.rootdir = str(tmpdir)
self.taskdir = os.path.join(str(tmpdir), 'tasks')
self.workdir = os.path.join(str(tmpdir), 'work')
shutil.copytree(TASKDIR, self.taskdir)
with open(os.path.join(self.taskdir, 'psy-conf.py'), 'w') as f:
f.write('workdir = {0!r}'.format(self.workdir))
@pytest.fixture
def taskenv(tmpdir, request):
env = TaskEnv(tmpdir)
cwd = os.getcwd()
def fin():
os.chdir(cwd)
request.addfinalizer(fin)
os.chdir(str(env.rootdir))
return env
@inherit_docs
class MockScheduler(Scheduler):
"""Mock scheduler implementation.
Parameters
----------
datafile : str
File to store supporting data.
"""
def __init__(self, datafile):
self.datafile = datafile
@property
def next_id(self):
"""ID for next submitted job."""
if os.path.exists(self.datafile):
with open(self.datafile, 'rb') as f:
return pickle.load(f)['next_id']
else:
return 0
@next_id.setter
def next_id(self, value):
self._serialize(next_id=value)
@property
def joblist(self):
"""Tuple of current jobs."""
if os.path.exists(self.datafile):
with open(self.datafile, 'rb') as f:
return tuple(pickle.load(f)['joblist'])
else:
return tuple()
@joblist.setter
def joblist(self, value):
self._serialize(joblist=value)
def _serialize(self, next_id=None, joblist=None):
if next_id is None:
next_id = self.next_id
if joblist is None:
joblist = self.joblist
with open(self.datafile, 'wb') as f:
pickle.dump(
{'next_id': next_id, 'joblist': joblist},
f, pickle.HIGHEST_PROTOCOL)
def submit(
self, args, output_filename, name=None, depends_on=None,
scheduler_args=None):
if depends_on is None:
depends_on = []
jobid = self.next_id
self.next_id += 1
self.joblist = self.joblist + ({
'id': jobid,
'args': args,
'output_filename': output_filename,
'name': name or str(jobid),
'depends_on': depends_on,
'scheduler_args': scheduler_args,
'status': '*Q' if len(depends_on) > 0 else 'Q',
},)
return jobid
def mark_running(self):
"""Mark all jobs as running."""
updated = []
for job in self.joblist:
job.update({'status': 'R'})
updated.append(job)
self.joblist = tuple(updated)
def kill(self, jobid):
self.joblist = [job for job in self.joblist if job['id'] != jobid]
def get_status(self, jobid):
for job in self.joblist:
if job['id'] == jobid:
return JobStatus(job['id'], job['status'], job['name'])
return None
def consume(self):
"""Process all queued jobs."""
for job in self.joblist:
with open(job['output_filename'], 'a') as f:
subprocess.check_call(
job['args'], stdout=f, stderr=subprocess.STDOUT)
self.joblist = []
def consume_job(self, job):
"""Process a single job."""
with open(job['output_filename'], 'a') as f:
subprocess.check_call(
job['args'], stdout=f, stderr=subprocess.STDOUT)
self.joblist = [j for j in self.joblist if j != job]
def get_jobs(self):
return [job['id'] for job in self.joblist]
```
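The MockScheduler above keeps its queue in a pickle file instead of talking to a real cluster, so tests can drive it directly. A minimal, hedged usage sketch (assuming a Unix-like environment where `echo` is on the PATH; the temporary directory and file names are illustrative):
```python
import os
import tempfile

from psyrun.scheduler import JobStatus
from psyrun.utils.testing import MockScheduler

with tempfile.TemporaryDirectory() as tmpdir:
    sched = MockScheduler(os.path.join(tmpdir, 'jobs.pkl'))
    outfile = os.path.join(tmpdir, 'out.txt')

    # Submitting only records the job; nothing runs until consume() is called.
    jobid = sched.submit(['echo', 'hello'], outfile)
    assert sched.get_status(jobid) == JobStatus(jobid, 'Q', str(jobid))

    # Process the queue and check the captured output.
    sched.consume()
    with open(outfile) as f:
        assert f.read().strip() == 'hello'
```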
|
{
"source": "jgostick/OpenPNM",
"score": 2
}
|
#### File: models/geometry/throat_size.py
```python
r"""
.. autofunction:: openpnm.models.geometry.throat_size.weibull
.. autofunction:: openpnm.models.geometry.throat_size.normal
.. autofunction:: openpnm.models.geometry.throat_size.random
.. autofunction:: openpnm.models.geometry.throat_size.generic_distribution
.. autofunction:: openpnm.models.geometry.throat_size.from_neighbor_pores
.. autofunction:: openpnm.models.geometry.throat_size.equivalent_diameter
"""
from openpnm.models import misc as _misc
import numpy as _np
def weibull(target, shape, scale, loc, seeds='throat.seed'):
return _misc.weibull(target=target, shape=shape, scale=scale, loc=loc,
seeds=seeds)
weibull.__doc__ = _misc.weibull.__doc__
def normal(target, scale, loc, seeds='throat.seed'):
return _misc.normal(target=target, scale=scale, loc=loc, seeds=seeds)
normal.__doc__ = _misc.normal.__doc__
def generic_distribution(target, func, seeds='throat.seed'):
return _misc.generic_distribution(target=target, func=func, seeds=seeds)
generic_distribution.__doc__ = _misc.generic_distribution.__doc__
def random(target, seed=None, num_range=[0, 1]):
return _misc.random(target=target, element='throat', seed=seed,
num_range=num_range)
random.__doc__ = _misc.random.__doc__
def from_neighbor_pores(target, prop='pore.diameter', mode='min'):
return _misc.from_neighbor_pores(target=target, prop=prop,
mode=mode)
from_neighbor_pores.__doc__ = _misc.from_neighbor_pores.__doc__
def equivalent_diameter(target, throat_area='throat.area',
throat_shape='circle'):
r"""
Calculates the diameter of a circle or edge-length of a square with the same
area as the throat.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
throat_area : string
The dictionary key to the throat area values
throat_shape : string
The cross-sectional shape of the throat to assume when
back-calculating from the area. Options are 'circle' (default) or
'square'.
Returns
-------
value : NumPy ndarray
Array containing throat equivalent diameter.
"""
area = target[throat_area]
if throat_shape.startswith('circ'):
value = 2*_np.sqrt(area/_np.pi)
elif throat_shape.startswith('square'):
value = _np.sqrt(area)
return value
```
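For reference, the back-calculation above reduces to d = 2*sqrt(A/pi) for a circular cross-section and d = sqrt(A) for a square one. A small self-contained check of that arithmetic with plain NumPy (the area values are illustrative; no OpenPNM objects are involved):
```python
import numpy as np

# Illustrative throat areas in m^2; a circle of diameter 2e-6 m has area pi*(1e-6)**2.
area = np.array([np.pi * 1e-12, 1e-12])

d_circle = 2 * np.sqrt(area / np.pi)  # diameter of a circle with the same area
d_square = np.sqrt(area)              # edge length of a square with the same area

assert np.isclose(d_circle[0], 2e-6)
assert np.isclose(d_square[1], 1e-6)
```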
|
{
"source": "jgotanegra/wallet",
"score": 2
}
|
#### File: eth_wallet/cli/eth_wallet_cli.py
```python
import click
from eth_wallet.cli.new_wallet import (
new_wallet,
)
from eth_wallet.cli.get_wallet import (
get_wallet,
)
from eth_wallet.cli.reveal_seed import (
reveal_seed
)
from eth_wallet.cli.get_balance import (
get_balance,
)
from eth_wallet.cli.send_transaction import (
send_transaction,
)
from eth_wallet.cli.restore_wallet import (
restore_wallet,
)
from eth_wallet.cli.add_token import (
add_token,
)
from eth_wallet.cli.list_tokens import (
list_tokens,
)
from eth_wallet.cli.network import (
network,
)
@click.group()
def eth_wallet_cli():
pass
eth_wallet_cli.add_command(new_wallet)
eth_wallet_cli.add_command(get_wallet)
eth_wallet_cli.add_command(reveal_seed)
eth_wallet_cli.add_command(get_balance)
eth_wallet_cli.add_command(send_transaction)
eth_wallet_cli.add_command(restore_wallet)
eth_wallet_cli.add_command(add_token)
eth_wallet_cli.add_command(list_tokens)
eth_wallet_cli.add_command(network)
if __name__ == "__main__":
eth_wallet_cli()
```
#### File: eth_wallet/cli/get_balance.py
```python
import click
from eth_wallet.cli.utils_cli import (
get_api,
)
from eth_wallet.configuration import (
Configuration,
)
from web3.exceptions import (
InvalidAddress,
)
from eth_wallet.exceptions import (
InfuraErrorException,
ERC20NotExistsException,
)
@click.command()
@click.option('-t', '--token', default=None,
help='Token symbol.')
def get_balance(token):
"""Get address balance."""
configuration = Configuration().load_configuration()
api = get_api()
try:
if token is None:
eth_balance, address = api.get_balance(configuration)
click.echo('Balance on address %s is: %sETH' % (address, eth_balance))
else:
token_balance, address = api.get_balance(configuration, token)
click.echo('Balance on address %s is: %s%s' % (address, token_balance, token))
except InvalidAddress:
click.echo('Invalid address or wallet does not exist!')
except InfuraErrorException:
click.echo('Wallet is not connected to Ethereum network!')
except ERC20NotExistsException:
click.echo('This token is not added to the wallet!')
```
#### File: eth_wallet/cli/get_wallet.py
```python
import click
from eth_wallet.cli.utils_cli import (
get_api,
)
from eth_wallet.configuration import (
Configuration,
)
@click.command()
def get_wallet():
"""Get wallet account from encrypted keystore."""
configuration = Configuration().load_configuration()
api = get_api()
address, pub_key = api.get_wallet(configuration)
click.echo('Account address: %s' % str(address))
click.echo('Account pub key: %s' % str(pub_key))
```
#### File: eth_wallet/cli/send_transaction.py
```python
import click
import getpass
from eth_wallet.cli.utils_cli import (
get_api,
)
from eth_wallet.configuration import (
Configuration,
)
from eth_wallet.exceptions import (
InsufficientFundsException,
InvalidValueException,
InvalidPasswordException,
InfuraErrorException,
InsufficientERC20FundsException,
ERC20NotExistsException,
)
from web3.exceptions import (
InvalidAddress,
)
@click.command()
@click.option('-t', '--to', default='', prompt='To address:',
help='Ethereum address where to send amount.')
@click.option('-v', '--value', default='', prompt='Value to send:',
help='Ether value to send.')
@click.option('--token', default=None,
help='Token symbol.')
def send_transaction(to, value, token):
"""Sends transaction."""
    password = getpass.getpass('Password from keystore: ')  # Prompt the user for the keystore password
configuration = Configuration().load_configuration()
api = get_api()
try:
if token is None:
# send ETH transaction
tx_hash, tx_cost_eth = api.send_transaction(configuration,
password,
to,
value)
else:
# send erc20 transaction
tx_hash, tx_cost_eth = api.send_transaction(configuration,
password,
to,
value,
token)
click.echo('Hash of the transaction: %s' % str(tx_hash.hex()))
click.echo('Transaction cost was: %sETH' % str(tx_cost_eth))
except InsufficientFundsException:
click.echo('Insufficient ETH funds! Check balance on your address.')
except InsufficientERC20FundsException:
click.echo('Insufficient ERC20 token funds! Check balance on your address.')
except InvalidAddress:
click.echo('Invalid recipient(to) address!')
except InvalidValueException:
click.echo('Invalid value to send!')
except InvalidPasswordException:
click.echo('Incorrect password!')
except InfuraErrorException:
click.echo('Wallet is not connected to Ethereum network!')
except ERC20NotExistsException:
click.echo('This token is not added to the wallet!')
```
#### File: wallet/eth_wallet/transaction.py
```python
from eth_account import (
Account,
)
from eth_wallet.utils import (
public_key_to_keccak256,
)
from eth_utils import (
to_normalized_address,
to_hex,
remove_0x_prefix,
)
class Transaction:
"""Abstraction over Ethereum transaction."""
def __init__(self, account, w3):
self.account = account
self.w3 = w3
@staticmethod
def build_transaction(to_address,
value,
gas,
gas_price,
nonce,
chain_id,
data=None
):
"""Collects all necessary data to build transaction dict."""
if data is None: # tx dict for sending ETH
transaction = {
# Note that the address must be in checksum format:
'to': to_address,
'value': value,
'gas': gas,
'gasPrice': gas_price,
'nonce': nonce,
'chainId': chain_id
}
else: # tx dict for sending ERC20 tokens
transaction = {
# Note that the address must be in checksum format:
'to': to_address,
'value': value,
'gas': gas,
'gasPrice': gas_price,
'nonce': nonce,
'chainId': chain_id,
'data': data
}
return transaction
def send_transaction(self, transaction):
"""
Signs and sends the transaction
:param transaction: transaction dict
:return: transaction hash
"""
print('transaction: ' + str(transaction))
signed_tx = Account.signTransaction(transaction, self.account.privateKey)
tx_hash = self.w3.eth.sendRawTransaction(signed_tx.rawTransaction)
return tx_hash
@staticmethod
def get_tx_erc20_data_field(receiver, value):
"""
When creating a transaction on an ERC20 contract, we need to specify the data field
within the transaction dictionary. This field must be in hex format and
call the solidity 'transfer(address,uint256)' function, which defines the
token receiver and the amount of tokens to send. All these values must be
concatenated as a hex string.
Example of data field value is:
0xa9059cbb --> first 4 bytes of the keccak256 hash of the solidity transfer(address,uint256) function signature
000000000000000000000000aad533eb7fe7f2657960ac7703f87e10c73ae73b --> token receiver
0000000000000000000000000000000000000000000000000de0b6b3a7640000 --> 1 * 10**ERC20-decimals value to transfer
concatenated together.
Description is also available within Ethereumbook:
https://github.com/ethereumbook/ethereumbook/blob/develop/06transactions.asciidoc#transmitting-a-data-payload-to-an-eoa-or-contract
:param receiver: address to which the smart contract sends the tokens
:param value: number of tokens to send in the smallest unit; 1 token is often 10**18 units but this depends on the ERC20 decimals
:type value: integer
:return: hex string
"""
# 1. create hex of called function in solidity and take first 4 bytes
# ERC20 transfer function will always produce a9059cbb.....
transfer_hex = public_key_to_keccak256(b'transfer(address,uint256)').hex()[:8]
# 2. create 32 byte number (length 64)
# consisting of zeros and normalized hex address of receiver without 0x prefix
# example: 000000000000000000000000aad533eb7fe7f2657960ac7703f87e10c73ae73b
receiver = remove_0x_prefix(to_normalized_address(receiver))
receiver = '000000000000000000000000' + receiver # 32 bytes together
# 3. convert sending amount to hex and remove 0x prefix
# this number must be an integer, therefore the smallest unit of the token is used
# usually 1 token is 10**18 of these units, but this depends on the ERC20 decimals
# example: de0b6b3a7640000 (hex of 1000000000000000000)
value = remove_0x_prefix(to_hex(value))
# 4. add zeros in front of sending amount of hex value. Together it must be 32 bytes (length 64)
# example: 0000000000000000000000000000000000000000000000000de0b6b3a7640000
zero_end_point = 64 - len(value)
final_hex_amount = [value[x - zero_end_point] if x >= zero_end_point else 0 for x in range(0, 64)]
final_hex_amount = ''.join(str(x) for x in final_hex_amount) # convert list to string
# 5. concatenate final data field
data = '0x'+transfer_hex + receiver + final_hex_amount
return data
```
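The docstring above describes the raw ABI layout of an ERC20 `transfer` call, which is what `get_tx_erc20_data_field` assembles. A hedged, self-contained sketch of the same encoding using eth_utils directly (the recipient and amount are the illustrative values from the docstring, not real accounts):
```python
from eth_utils import keccak, remove_0x_prefix, to_hex, to_normalized_address

receiver = '0xaad533eb7fe7f2657960ac7703f87e10c73ae73b'  # illustrative recipient
value = 10 ** 18                                         # 1 token with 18 decimals

selector = keccak(b'transfer(address,uint256)').hex()[:8]             # 'a9059cbb'
addr_word = remove_0x_prefix(to_normalized_address(receiver)).rjust(64, '0')
value_word = remove_0x_prefix(to_hex(value)).rjust(64, '0')

data = '0x' + selector + addr_word + value_word
assert data.startswith('0xa9059cbb')
assert len(data) == 2 + 8 + 64 + 64  # '0x' + 4-byte selector + two 32-byte words
```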
#### File: eth_wallet/ui/gui.py
```python
import threading
from tkinter import (
Tk,
Frame,
Label,
Button,
Entry,
Message,
CENTER,
Menu,
Menubutton,
StringVar,
RAISED,
messagebox,
)
from eth_wallet.configuration import (
Configuration,
)
from eth_wallet.api import (
WalletAPI,
)
from eth_wallet.ui.page import (
Page
)
class NewWalletPage(Page):
def __init__(self, *args, **kwargs):
Page.__init__(self, *args, **kwargs)
self.configuration = None
self.api = WalletAPI()
self.wallet = None
lbl_pswd = Label(self,
text='Passphrase:',
width=60,
font=(None, 20))
lbl_pswd.pack()
entry_password = Entry(self,
show="*",
font=(None, 20),
justify=CENTER)
entry_password.pack()
btn_create_wallet = Button(self,
text="Generate",
width=60,
font=(None, 16),
command=lambda: self.create_wallet(btn_create_wallet,
entry_password.get()))
btn_create_wallet.pack()
def create_wallet(self, btn_create_wallet, password):
"""
Create new wallet
:param btn_create_wallet: generate button which changes text and functionality
:param password: passphrase from the user
:return:
"""
self.configuration = Configuration().load_configuration()
self.wallet = self.api.new_wallet(self.configuration, password)
lbl_remember_words = Label(self,
text='Restore sentence:',
width=60)
lbl_remember_words.pack()
lbl_mnemonic = Message(self,
text=self.wallet.get_mnemonic(),
justify=CENTER,
borderwidth=10,
background='light blue')
lbl_mnemonic.pack()
btn_create_wallet.configure(text="Continue",
command=self.navigate_home_page)
def navigate_home_page(self):
"""
Navigate to home page
:return:
"""
info_page = HomePage(self)
info_page.place(in_=self, x=0, y=0, relwidth=1, relheight=1)
info_page.show()
class TransactionPage(Page):
def __init__(self, *args, **kwargs):
Page.__init__(self, *args, **kwargs)
self.configuration = Configuration().load_configuration()
self.api = WalletAPI()
self.tokens = self.api.list_tokens(self.configuration)
self.eth_balance, _ = self.api.get_balance(self.configuration)
def change_token(token):
if token == 'ETH':
self.eth_balance, _ = self.api.get_balance(self.configuration)
else:
self.eth_balance, _ = self.api.get_balance(self.configuration, token)
balance.set(str(self.eth_balance) + ' ' + token)
token_symbol = StringVar()
token_symbol.set('ETH')
balance = StringVar()
balance.set(str(self.eth_balance) + ' ' + token_symbol.get())
mb = Menubutton(self,
width=60,
textvariable=token_symbol,
relief=RAISED)
mb.grid()
mb.menu = Menu(mb, tearoff=0)
mb["menu"] = mb.menu
mb.menu.add_radiobutton(label="ETH",
variable=token_symbol,
value='ETH',
command=lambda: change_token(token_symbol.get()))
for token in self.tokens:
mb.menu.add_radiobutton(label=token,
variable=token_symbol,
value=token,
command=lambda: change_token(token_symbol.get()))
mb.pack()
label = Label(self,
textvariable=balance,
width=60,
font=(None, 30))
label.pack()
lbl_address = Label(self,
text="To address:",
width=60,
font=(None, 20))
lbl_address.pack()
entry_address = Entry(self,
font=(None, 20),
width=60,
justify=CENTER)
entry_address.pack()
lbl_amount = Label(self,
text="Amount:",
width=60,
font=(None, 20))
lbl_amount.pack()
entry_amount = Entry(self,
font=(None, 20),
width=60,
justify=CENTER)
entry_amount.pack()
lbl_passphrase = Label(self,
text="Passphrase:",
width=60,
font=(None, 20))
lbl_passphrase.pack()
entry_passphrase = Entry(self,
font=(None, 20),
width=60,
justify=CENTER)
entry_passphrase.pack()
btn_send = Button(self,
text="Send",
width=60,
font=(None, 16),
command=lambda: self.send_transaction(entry_address.get(),
entry_amount.get(),
entry_passphrase.get(),
token_symbol.get()))
btn_send.pack()
btn_back = Button(self,
text="Back",
width=60,
font=(None, 16),
command=self.navigate_home_page)
btn_back.pack()
def navigate_home_page(self):
"""
Navigate to home page
:return:
"""
info_page = HomePage(self)
info_page.place(in_=self, x=0, y=0, relwidth=1, relheight=1)
info_page.show()
def send_transaction(self, to, value, password, token):
"""
Send transaction
:return:
"""
if token == 'ETH':
tx_thread = TransactionThread(configuration=self.configuration,
password=password,
to=to,
value=value,
token=None)
else:
tx_thread = TransactionThread(configuration=self.configuration,
password=password,
to=to,
value=value,
token=token)
tx_thread.start()
class TransactionThread(threading.Thread):
def __init__(self, configuration, password, to, value, token=None):
threading.Thread.__init__(self)
self.api = WalletAPI()
self.configuration = configuration
self.password = password
self.to = to
self.value = value
self.token = token
def run(self):
if self.token is None:
# send ETH transaction
tx_hash, tx_cost_eth = self.api.send_transaction(self.configuration,
self.password,
self.to,
self.value)
else:
# send erc20 transaction
tx_hash, tx_cost_eth = self.api.send_transaction(self.configuration,
self.password,
self.to,
self.value,
self.token)
messagebox.showinfo("Transaction mined!",
"Transaction was mined for " + str(tx_cost_eth) + "ETH fee.")
class AddTokenPage(Page):
def __init__(self, *args, **kwargs):
Page.__init__(self, *args, **kwargs)
self.configuration = Configuration().load_configuration()
self.api = WalletAPI()
lbl_symbol = Label(self,
text="Contract's symbol:",
width=60,
font=(None, 20))
lbl_symbol.pack()
entry_symbol = Entry(self,
font=(None, 20),
width=60,
justify=CENTER)
entry_symbol.pack()
lbl_address = Label(self,
text="Contract's address:",
width=60,
font=(None, 20))
lbl_address.pack()
entry_address = Entry(self,
font=(None, 20),
width=60,
justify=CENTER)
entry_address.pack()
btn_back = Button(self,
text="Add",
font=(None, 16),
width=60,
command=lambda: self.add_token(entry_symbol.get(), entry_address.get()))
btn_back.pack()
btn_back = Button(self,
text="Back",
font=(None, 16),
width=60,
command=self.navigate_home_page)
btn_back.pack()
def navigate_home_page(self):
"""
Navigate to home page
:return:
"""
info_page = HomePage(self)
info_page.place(in_=self, x=0, y=0, relwidth=1, relheight=1)
info_page.show()
def add_token(self, symbol, contract):
"""
Add new token and navigate to home page
:param symbol: token symbol
:param contract: contracts address
:return:
"""
self.api.add_contract(self.configuration, symbol, contract)
info_page = HomePage(self)
info_page.place(in_=self, x=0, y=0, relwidth=1, relheight=1)
info_page.show()
class HomePage(Page):
def __init__(self, *args, **kwargs):
Page.__init__(self, *args, **kwargs)
self.configuration = Configuration().load_configuration()
self.api = WalletAPI()
self.tokens = self.api.list_tokens(self.configuration)
self.eth_balance, self.address = self.api.get_balance(self.configuration)
def refresh():
change_token(token_symbol.get())
def change_token(token):
if token == 'ETH':
self.eth_balance, self.address = self.api.get_balance(self.configuration)
else:
self.eth_balance, self.address = self.api.get_balance(self.configuration, token)
balance.set(str(self.eth_balance) + ' ' + token)
token_symbol = StringVar()
token_symbol.set('ETH')
balance = StringVar()
balance.set(str(self.eth_balance) + ' ' + token_symbol.get())
mb = Menubutton(self,
width=60,
textvariable=token_symbol,
relief=RAISED)
mb.grid()
mb.menu = Menu(mb, tearoff=0)
mb["menu"] = mb.menu
mb.menu.add_radiobutton(label="ETH",
variable=token_symbol,
value='ETH',
command=lambda: change_token(token_symbol.get()))
for token in self.tokens:
mb.menu.add_radiobutton(label=token,
variable=token_symbol,
value=token,
command=lambda: change_token(token_symbol.get()))
mb.menu.add_radiobutton(label="Add new token ...",
command=self.navigate_add_token_page)
mb.pack()
label_address_lbl = Label(self,
text='Address:',
width=60,
font=(None, 10, "bold"))
label_address_lbl.pack()
label_address = Label(self,
text=self.address,
width=60,
font=(None, 10))
label_address.pack()
label_balance = Label(self,
textvariable=balance,
width=60,
font=(None, 30))
label_balance.pack()
btn_refresh = Button(self,
text="Refresh",
command=refresh,
width=60,
font=(None, 16))
btn_refresh.pack()
btn_copy_address = Button(self,
text="Copy address",
command=self.copy_address,
width=60,
font=(None, 16))
btn_copy_address.pack()
btn_send_transaction = Button(self,
text="Send Transaction",
command=self.navigate_transaction_page,
width=60,
font=(None, 16))
btn_send_transaction.pack()
def navigate_transaction_page(self):
"""
Navigate to transaction page
:return:
"""
transaction_page = TransactionPage(self)
transaction_page.place(in_=self, x=0, y=0, relwidth=1, relheight=1)
transaction_page.show()
def navigate_add_token_page(self):
"""
Navigate to transaction page
:return:
"""
add_token_page = AddTokenPage(self)
add_token_page.place(in_=self, x=0, y=0, relwidth=1, relheight=1)
add_token_page.show()
def copy_address(self):
"""Add address to the clipboard"""
self.clipboard_clear() # clear clipboard contents
        self.clipboard_append(self.address)  # append new value to clipboard
class MainView(Frame):
def __init__(self, *args, **kwargs):
Frame.__init__(self, *args, **kwargs)
self.configuration = Configuration()
self.api = WalletAPI()
self.wallet = None
if self.configuration.is_configuration():
screen = HomePage(self)
else:
screen = NewWalletPage(self)
container = Frame(self)
container.pack(side="top", fill="both", expand=True)
screen.place(in_=container, x=0, y=0, relwidth=1, relheight=1)
screen.show()
if __name__ == "__main__":
root = Tk()
root.title("Ethereum wallet")
main = MainView(root)
main.pack(side="top", fill="both", expand=True)
root.wm_geometry("300x400")
root.mainloop()
```
#### File: wallet/eth_wallet/utils.py
```python
import os
import errno
import json
from eth_utils import (
keccak,
)
def public_key_to_keccak256(public_key_bytes: bytes) -> bytes:
return keccak(public_key_bytes)
def public_key_bytes_to_address(public_key_bytes: bytes) -> bytes:
return keccak(public_key_bytes)[-20:]
def is_file(path):
"""
Checks if path is file.
:param path: path with filename
:return: True if file exists
"""
return os.path.isfile(path)
def is_directory(path):
"""
Checks if path is directory.
:param path: path with directory
:return: True if is directory, False if directory doesn't exist
"""
if os.path.exists(path):
return True
return False
def create_directory(dirname):
"""
Create directory if it doesn't already exist.
:param dirname: path with new directory
:return: path with directory
"""
if not is_directory(dirname):
try:
os.makedirs(dirname)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def get_abi_json():
"""
Returns FITCOIN ERC20 token ABI
:return:
"""
root_dir = os.path.dirname(os.path.abspath(__file__))
abi_path = os.path.join(root_dir, 'erc20', 'abi.json')
with open(abi_path) as f:
fitcoin = json.load(f)
return fitcoin
```
#### File: wallet/tests/cli_tester.py
```python
from eth_wallet.cli.eth_wallet_cli import(
eth_wallet_cli,
)
from click.testing import(
CliRunner,
)
def call_eth_wallet(fnc=None, parameters=None, envs=None):
"""
Creates testing environment for cli application
:param fnc: command to run
:param parameters: program cmd argument
:param envs:
:return: invoked cli runner
"""
fnc = fnc or eth_wallet_cli
runner = CliRunner()
envs = envs or {}
parameters = parameters or []
# catch exceptions enables debugger
return runner.invoke(fnc, args=parameters, env=envs, catch_exceptions=False)
```
#### File: wallet/tests/conftest.py
```python
from eth_wallet.configuration import (
Configuration,
)
def prepare_conf(tmp_path):
"""
Prepare configuration file for tests
:param tmp_path: pytest tmp_path fixture
:return: configuration for tests
"""
test_config = dict(
keystore_location=str(tmp_path),
keystore_filename='/keystore',
eth_address='',
public_key='',
network=3,
)
test_config = Configuration(
config_dir=str(tmp_path),
initial_config=test_config
)
return test_config.load_configuration()
```
#### File: tests/unit/test_api.py
```python
from eth_wallet.api import(
WalletAPI,
)
from tests.conftest import (
prepare_conf,
)
from web3 import (
Web3,
)
from eth_utils import (
decode_hex,
)
def test_account(tmp_path):
test_configuration = prepare_conf(tmp_path)
WalletAPI.new_wallet(test_configuration, 'my-password')
assert len(list(tmp_path.iterdir())) == 2 # one config.yaml and one keystore
address, pub_key = WalletAPI.get_wallet(test_configuration)
public_key_bytes = decode_hex(pub_key)
assert len(public_key_bytes) == 64
assert Web3.isAddress(address)
assert Web3.isChecksumAddress(address)
```
#### File: tests/unit/test_utils.py
```python
from eth_wallet.utils import (
get_abi_json,
create_directory,
)
def test_create_directory(tmp_path):
create_directory(str(tmp_path)+'/test')
assert len(list(tmp_path.iterdir())) == 1
def test_abi_json():
erc20_abi = get_abi_json()
assert isinstance(erc20_abi, list)
assert isinstance(erc20_abi[0], dict)
```
|
{
"source": "jgottula/libratbag",
"score": 2
}
|
#### File: data/devices/data-parse-test.py
```python
import argparse
import os
import configparser
# Set on commandline with --svg-dir
svg_dirs = []
def assertIn(element, l):
if element not in l:
raise AssertionError('{} must be in {}'.format(element, l))
def assertNotIn(element, l):
if element in l:
raise AssertionError('{} must not be in {}'.format(element, l))
def check_svg_str(string):
assert(string.endswith('.svg'))
svg_file_found = False
for svg_dir in svg_dirs:
files = os.listdir(svg_dir)
if string in files:
svg_file_found = True
break
assert(svg_file_found)
def check_match_str(string):
bustypes = ['usb', 'bluetooth']
matches = string.split(';')
for match in matches:
if not match: # empty string if trailing ;
continue
parts = match.split(':')
assert(len(parts) == 3)
assertIn(parts[0], bustypes)
vid = parts[1]
assert(vid == '{:04x}'.format(int(vid, 16)))
pid = parts[2]
assert(pid == '{:04x}'.format(int(pid, 16)))
def check_ledtypes_str(string):
permitted_types = ['logo', 'side', 'battery', 'dpi']
types = string.split(';')
for t in types:
        if not t:  # empty string if trailing ;
continue
assertIn(t, permitted_types)
def check_section_device(section):
required_keys = ['Name', 'Driver', 'DeviceMatch']
permitted_keys = required_keys + ['Svg', 'LedTypes']
for key in section.keys():
assertIn(key, permitted_keys)
for r in required_keys:
assertIn(r, section)
try:
check_svg_str(section['Svg'])
except KeyError:
pass
try:
check_ledtypes_str(section['LedTypes'])
except KeyError:
pass
check_match_str(section['DeviceMatch'])
def check_dpi_range_str(string):
import re
    m = re.search(r'^([0-9]+):([0-9]+)@([0-9.]+)$', string)
assert(m is not None)
min = int(m.group(1))
max = int(m.group(2))
steps = float(m.group(3))
assert(min >= 0 and min <= 400)
assert(max >= 2000 and max <= 12000)
assert(steps > 0 and steps <= 100)
if int(steps) == steps:
steps = int(steps)
assert(string == '{}:{}@{}'.format(min, max, steps))
def check_dpi_list_str(string):
entries = string.split(';')
# Remove possible empty last entry if trailing with a ;
if not entries[len(entries) - 1]:
entries = entries[:-1]
for idx, entry in enumerate(entries):
dpi = int(entry)
assert(dpi >= 0 and dpi <= 12000)
if idx > 0:
prev = entries[idx - 1]
prev_dpi = int(prev)
assert(dpi > prev_dpi)
def check_profile_type_str(string):
types = ['G9', 'G500', 'G700']
assertIn(string, types)
def check_section_hidpp10(section):
permitted = ['Profiles', 'ProfileType', 'DpiRange', 'DpiList', 'DeviceIndex', 'Leds']
for key in section.keys():
assertIn(key, permitted)
try:
nprofiles = int(section['Profiles'])
# 10 is arbitrarily chosen
assert(nprofiles > 0 and nprofiles < 10)
except KeyError:
pass
try:
index = int(section['DeviceIndex'])
# 10 is arbitrarily chosen
assert(index > 0 and index < 10)
except KeyError:
pass
try:
check_dpi_range_str(section['DpiRange'])
assertNotIn('DpiList', section.keys())
except KeyError:
pass
try:
check_dpi_list_str(section['DpiList'])
assertNotIn('DpiRange', section.keys())
except KeyError:
pass
try:
check_profile_type_str(section['ProfileType'])
except KeyError:
pass
try:
leds = int(section['Leds'])
# 10 is arbitrarily chosen
assert(leds > 0 and leds < 10)
except KeyError:
pass
def check_section_hidpp20(section):
permitted = ['DeviceIndex']
for key in section.keys():
assertIn(key, permitted)
try:
index = int(section['DeviceIndex'])
# 10 is arbitrarily chosen
assert(index > 0 and index < 10)
except KeyError:
pass
def check_section_driver(driver, section):
if driver == 'hidpp10':
check_section_hidpp10(section)
elif driver == 'hidpp20':
check_section_hidpp20(section)
else:
assert('Unsupported driver section {}'.format(driver))
def parse_data_file(path):
print('Parsing file {}'.format(path))
data = configparser.ConfigParser(strict=True)
# Don't convert to lowercase
data.optionxform = lambda option: option
data.read(path)
assertIn('Device', data.sections())
check_section_device(data['Device'])
driver = data['Device']['Driver']
driver_section = 'Driver/{}'.format(driver)
permitted_sections = ['Device', driver_section]
for s in data.sections():
assertIn(s, permitted_sections)
if data.has_section(driver_section):
check_section_driver(driver, data[driver_section])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Device data-file checker")
parser.add_argument('file', nargs='+')
parser.add_argument('--svg-dir', metavar='dir', action='append',
type=str,
help='Directory to check for SVG files (may be given multiple times)')
args = parser.parse_args()
svg_dirs = args.svg_dir
for path in args.file:
parse_data_file(path)
```
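The checks above effectively document the expected device-file layout: an INI file with a [Device] section (Name, Driver, DeviceMatch as bus:vid:pid, optional Svg and LedTypes) plus an optional Driver/<name> section. A hedged sketch of a hypothetical entry that would pass those checks, parsed with configparser the same way the script does (the device name and IDs are illustrative only):
```python
import configparser

# Hypothetical device entry; key names and value formats follow the checks above
# (DeviceMatch as bus:vid:pid, DpiRange as min:max@steps with min <= 400 and 2000 <= max <= 12000).
EXAMPLE = """\
[Device]
Name=Example Gaming Mouse
Driver=hidpp10
DeviceMatch=usb:046d:c068
LedTypes=logo;side;

[Driver/hidpp10]
Profiles=5
ProfileType=G500
DpiRange=200:8000@50
DeviceIndex=1
Leds=2
"""

data = configparser.ConfigParser(strict=True)
data.optionxform = lambda option: option  # keep key case, as the checker does
data.read_string(EXAMPLE)
assert set(data.sections()) == {'Device', 'Driver/hidpp10'}
assert data['Device']['Driver'] == 'hidpp10'
```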
|