IP range, the provided "
f"ending IP address of {storage_controller_vm_ip_range_end_address} is "
"the same as the provided starting IP address of "
f"{storage_controller_vm_ip_range_start_address}. Please provide a "
"different ending IP address and restart the HX Auto Deploy Tool.\n")
sys.exit(0)
# Verify the HXDP storage controller VM management starting IP address and subnet mask can be configured on an IP interface
try:
storage_controller_vm_ip_range_start_address_ip_interface = ipaddress.ip_interface(f"{storage_controller_vm_ip_range_start_address}/{storage_controller_vm_ip_range_subnet_mask}")
except Exception as exception_message:
print("There was an issue with testing the IP interface configuration for "
"the provided HXDP storage controller VM management starting IP "
f"address of {storage_controller_vm_ip_range_start_address} "
"and the associated subnet mask of "
f"{storage_controller_vm_ip_range_subnet_mask}.\n"
"Please review the error message below, repair the provided IP address "
"settings, then re-run the HX Auto Deploy Tool.\n")
print(exception_message)
sys.exit(0)
# Determine HXDP storage controller VM management IP network from provided starting IP address and subnet mask
storage_controller_vm_ip_range_start_address_network = storage_controller_vm_ip_range_start_address_ip_interface.network
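# Note: ipaddress.ip_interface() accepts the mask either as a prefix length or as a dotted
# netmask string, e.g. ipaddress.ip_interface("192.168.1.10/255.255.255.0") (addresses here
# are illustrative); its .network attribute is what the containment checks below rely on.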
# Verify the HXDP storage controller VM management ending IP address is in the same network as the starting IP address
if ipaddress.ip_address(storage_controller_vm_ip_range_end_address) not in storage_controller_vm_ip_range_start_address_network:
print("For the HXDP storage controller VM management IP range, the provided "
f"ending IP address of {storage_controller_vm_ip_range_end_address} is "
"not in the same subnet of the provided starting IP address of "
f"{storage_controller_vm_ip_range_start_address}. Please provide a "
"different ending IP address and restart the HX Auto Deploy Tool.\n")
sys.exit(0)
# Verify the HXDP storage controller VM management gateway IP address is in the same network as the starting IP address
if ipaddress.ip_address(storage_controller_vm_ip_range_gateway) not in storage_controller_vm_ip_range_start_address_network:
print("For the HXDP storage controller VM management IP range, the provided "
f"gateway IP address of {storage_controller_vm_ip_range_gateway} is "
"not in the same subnet of the provided starting IP address of "
f"{storage_controller_vm_ip_range_start_address}. Please provide a "
"different gateway IP address and restart the HX Auto Deploy Tool.\n")
sys.exit(0)
# Create the HXDP storage controller VM IP address range list
storage_controller_vm_ip_range_list = create_ip_list(storage_controller_vm_ip_range_start_address, storage_controller_vm_ip_range_end_address)
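# For reference, create_ip_list() is defined elsewhere in this tool and expands the start/end
# pair into individual addresses. A minimal sketch of the idea using the standard ipaddress
# module (the real helper's exact behavior, e.g. at the range boundaries, may differ):
def _create_ip_list_sketch(start_address, end_address):
    """Return every IPv4 address from start_address through end_address, inclusive."""
    start = int(ipaddress.ip_address(start_address))
    end = int(ipaddress.ip_address(end_address))
    return [str(ipaddress.ip_address(value)) for value in range(start, end + 1)]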
# Verify the created HXDP storage controller VM IP address range list can support the provided HyperFlex cluster size
if len(storage_controller_vm_ip_range_list) < hx_cluster_size:
print("The HXDP storage controller VM IP address range has a total of "
f"{len(storage_controller_vm_ip_range_list)} usable addresses. This is "
f"less than the provided HyperFlex cluster size of {hx_cluster_size}. "
"Please increase the size of the HXDP storage controller VM IP address "
"range then restart the HX Auto Deploy Tool. The provided value for "
"the starting or ending IP and subnet mask may need to be adjusted.\n"
f"Current starting IP address: {storage_controller_vm_ip_range_start_address}\n"
f"Current ending IP address: {storage_controller_vm_ip_range_end_address}\n"
f"Current subnet mask: {storage_controller_vm_ip_range_subnet_mask}\n")
sys.exit(0)
# Verify the created HXDP storage controller VM IP address range list does not conflict with any entries in the HX node attribute list
for storage_controller_vm_ip in storage_controller_vm_ip_range_list:
if storage_controller_vm_ip in hx_node_attribute_list:
print(f"The HXDP storage controller VM IP address of {storage_controller_vm_ip} "
"created from the IP address range of "
f"{storage_controller_vm_ip_range_start_address} - "
f"{storage_controller_vm_ip_range_end_address}, conflicts with an "
"IP address entry in the HX node attribute list variable named "
"hx_node_attribute_list.\n"
"The hx_node_attribute_list variable contains the following entries: \n"
f"{hx_node_attribute_list}\n"
"Please resolve the IP address conflict and restart "
"the HX Auto Deploy Tool.\n")
sys.exit(0)
# Verify the HXDP cluster management (HX Connect) IP address does not conflict with any entries in the VMware ESXi hypervisor management IP address range list
if hx_connect_mgmt_ip_address in esxi_mgmt_ip_range_list:
print("The provided HyperFlex cluster management IP address of "
f"{hx_connect_mgmt_ip_address} is in the same range of IP addresses "
"allocated for the VMware ESXi hypervisor management interfaces. "
"Please provide a different HyperFlex cluster management IP address "
"and restart the HX Auto Deploy Tool.\n")
sys.exit(0)
# Verify the HXDP cluster management (HX Connect) IP address does not conflict with any entries in the HXDP storage controller VM management IP address range list
if hx_connect_mgmt_ip_address in storage_controller_vm_ip_range_list:
print("The provided HyperFlex cluster management IP address of "
f"{hx_connect_mgmt_ip_address} is in the same range of IP addresses "
"allocated for the HXDP storage controller VM management interfaces. "
"Please provide a different HyperFlex cluster management IP address "
"and restart the HX Auto Deploy Tool.\n")
sys.exit(0)
# Verify the HXDP cluster management (HX Connect) IP address is in the same network as the HXDP storage controller VM IP addresses
if ipaddress.ip_address(hx_connect_mgmt_ip_address) not in storage_controller_vm_ip_range_start_address_network:
print("The provided HyperFlex cluster management IP address of "
f"{hx_connect_mgmt_ip_address} is not in the same subnet of "
f"{storage_controller_vm_ip_range_start_address_network} as the "
"provided starting IP address of "
f"{storage_controller_vm_ip_range_start_address} and the provided "
f"ending IP address of {storage_controller_vm_ip_range_end_address} for "
"the HXDP storage controller VM management interfaces. Please provide "
"a different HyperFlex cluster management IP address and restart the "
"HX Auto Deploy Tool.\n")
sys.exit(0)
# Verify the HyperFlex cluster management IP address does not conflict with any entries in the HX node attribute list
if hx_connect_mgmt_ip_address in hx_node_attribute_list:
print("The HyperFlex cluster management IP address of "
f"{hx_connect_mgmt_ip_address} conflicts with an IP address entry in "
"the HX node attribute list variable named hx_node_attribute_list.\n"
"The hx_node_attribute_list variable contains the following entries: \n"
f"{hx_node_attribute_list}\n"
"Please resolve the IP address conflict and restart "
"the HX Auto Deploy Tool.\n")
sys.exit(0)
# Completion of the preliminary check of the provided variable values
print("The preliminary check of the provided variable values is complete.\n")
# Set the Deployment Tool Type
maker_type = "Cisco HyperFlex Edge Automated Deployment Tool"
# Set the HyperFlex Management Deployment Type
hx_mgmt_platform_type = "EDGE"
# Set the HyperFlex MAC prefix starting and ending addresses based on the provided uplink speed
if hx_node_uplink_speed == "1G":
hx_mac_prefix_start_address_post_uplink_speed_checked = ""
hx_mac_prefix_end_address_post_uplink_speed_checked = ""
else:
hx_mac_prefix_start_address_post_uplink_speed_checked = f"00:25:B5:{hx_mac_prefix_start_address}"
hx_mac_prefix_end_address_post_uplink_speed_checked = f"00:25:B5:{hx_mac_prefix_end_address}"
# Set the HyperFlex MAC prefix address based on the provided uplink speed setting
if hx_node_uplink_speed == "1G":
hx_mac_prefix_address_post_uplink_speed_checked = ""
else:
hx_mac_prefix_address_post_uplink_speed_checked = f"00:25:B5:{hx_mac_prefix_address}"
# Define Intersight SDK IntersightApiClient variable
# Last tested on Cisco Intersight API Reference v1.0.9-4246 (SaaS)
api_instance = IntersightApiClient(host=intersight_base_url, private_key=key, api_key_id=key_id)
# Establish function to test for the availability of the Intersight API and Intersight account
def test_intersight_service():
"""This is a function to test the availability of the Intersight API and
Intersight account. The Intersight account tested for is the owner of the
provided Intersight API key and key ID.
"""
try:
# Check that Intersight Account is accessible
print("Testing access to the Intersight API by verifying the Intersight "
"account information...")
check_account = intersight.IamAccountApi(api_instance)
get_account = check_account.iam_accounts_get()
if check_account.api_client.last_response.status != 200:
print("The Intersight API and Account Availability Test did not pass.")
print("The Intersight account information could not be verified.")
print("Exiting due to the Intersight account being unavailable.\n")
print("Please verify that the correct API Key ID has been provided then "
"restart the HX Auto Deploy Tool.\n")
print("If applicable, also verify that all Intersight services are up \n"
"and operational at https://status.intersight.com.\n")
sys.exit(0)
else:
account_name = get_account.results[0].name
print("The Intersight API and Account Availability Test has passed.\n")
print(f"The Intersight account named '{account_name}' has been found.\n")
return account_name
except Exception:
print("Unable to access the Intersight API.")
print("Exiting due to the Intersight API being unavailable.\n")
print("Please verify that the correct API Key ID has been provided then "
"restart the HX Auto Deploy Tool.\n")
print("If applicable, also verify that all Intersight services are up \n"
"and operational at https://status.intersight.com.\n")
sys.exit(0)
# Establish function to retrieve Intersight API objects
def intersight_object_retriever(object_name, object_type, intersight_api_path, org="default"):
"""This is a function to retrieve named Intersight objects
using the Intersight API.
"""
# Retrieving the provided object from Intersight...
full_intersight_api_path = f"/{intersight_api_path}"
try:
api_instance.call_api(full_intersight_api_path,"GET")
response = api_instance.last_response.data
results = json.loads(response)
# The Intersight API resource path has been accessed successfully.
get_intersight_objects = results
except Exception:
print("There was an issue retrieving the "
f"{object_type} from Intersight.")
print(f"Unable to access the provided Intersight API resource path '{intersight_api_path}'.")
print("Please review and resolve any error messages then restart "
f"the {maker_type}.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
if get_intersight_objects.get("Results"):
for intersight_object in get_intersight_objects.get("Results"):
if intersight_object.get("Organization"):
provided_org_moid = intersight_object_retriever(org,
"Organization",
"organization/Organizations")
if intersight_object.get("Organization", {}).get("Moid") == provided_org_moid:
if intersight_object.get("Name") == object_name:
intersight_object_moid = intersight_object.get("Moid")
# The provided object and MOID has been identified and retrieved.
return intersight_object_moid
else:
if intersight_object.get("Name") == object_name:
intersight_object_moid = intersight_object.get("Moid")
# The provided object and MOID has been identified and retrieved.
return intersight_object_moid
# sudokutools/solvers.py
"""High level solving of sudokus.
This module provides classes which represent typical sudoku solving
steps used by humans. Steps can be found and applied to a given
sudoku. But steps can also be printed without applying them, e.g. to inform
a user what steps can be taken to solve the sudoku.
A single solve step may consist of multiple actions, e.g.
* Setting a number at a given field.
* Setting the candidates at a given field.
* Removing some of the candidates at a given field.
Solve steps defined here:
* CalcCandidates
* NakedSingle
* NakedPair
* NakedTriple
* NakedQuad
* NakedQuint
* HiddenSingle
* HiddenPair
* HiddenTriple
* HiddenQuad
* HiddenQuint
* PointingPair
* PointingTriple
* XWing
* Swordfish
* Jellyfish
* Bruteforce
"""
from collections import defaultdict, namedtuple
from functools import total_ordering
from itertools import combinations, product
from sudokutools.solve import init_candidates, calc_candidates, dlx
from sudokutools.sudoku import Sudoku
class Action(namedtuple("ActionTuple", ["func", "row", "col", "value"])):
"""Named tuple, that represents a single change operation on a sudoku.
Create with: Action(func, row, col, value)
Args:
func (callable): One of Sudoku.set_number, Sudoku.set_candidates and
Sudoku.remove_candidates
row (int): The row of the field, which will be changed.
col (int): The column of the field, which will be changed.
value (int or iterable): The number or candidates to set/remove.
"""
@total_ordering
class SolveStep(object):
def __init__(self, clues=(), affected=(), values=()):
"""Create a new solve step.
Args:
clues (iterable of (int, int)): An iterable of (row, col) pairs
which cause this step.
affected (iterable of (int, int)): An iterable of (row, col)
pairs which are affected by
this step.
values (iterable of int) : A list of values to apply to the
affected fields.
"""
self.clues = tuple(sorted(clues))
self.affected = tuple(sorted(affected))
self.values = tuple(sorted(values))
self.actions = []
def __eq__(self, other):
return (self.clues, self.affected, self.values) == (
other.clues, other.affected, other.values)
def __lt__(self, other):
return (self.clues, self.affected, self.values) < (
other.clues, other.affected, other.values)
def __repr__(self):
return "%s(%s, %s, %s)" % (
self.__class__.__name__, self.clues, self.affected, self.values)
def __str__(self):
return "%s at %s: %s" % (
self.__class__.__name__, self.clues, self.values)
@classmethod
def find(cls, sudoku):
"""Iterates through all possible solve steps of this class.
Args:
sudoku (Sudoku): The sudoku to solve.
Yields:
SolveStep: The next solve step.
"""
raise NotImplementedError("%s.find() not implemented." % cls.__name__)
def build_actions(self, sudoku):
raise NotImplementedError(
"%s.build_actions() not implemented." % self.__class__.__name__)
def apply(self, sudoku):
"""Apply this solve step to the sudoku."""
if not self.actions:
self.build_actions(sudoku)
for action in self.actions:
action.func(sudoku, action.row, action.col, action.value)
@classmethod
def apply_all(cls, sudoku):
"""Apply all possible steps of this class to the sudoku."""
for step in cls.find(sudoku):
step.apply(sudoku)
class CalculateCandidates(SolveStep):
"""Calculates the candidates of fields."""
@classmethod
def find(cls, sudoku):
for row, col in sudoku:
# ignore fields with defined candidates
if sudoku.get_candidates(row, col):
continue
values = calc_candidates(sudoku, row, col)
yield cls(((row, col),), ((row, col),), values)
def build_actions(self, sudoku):
row, col = self.clues[0]
self.actions.append(
Action(Sudoku.set_candidates, row, col, self.values))
class _SingleFieldStep(SolveStep):
"""Represents a solve method, which sets a single field."""
def __init__(self, row, col, value):
super(_SingleFieldStep, self).__init__(
((row, col),), ((row, col),), (value, ))
def __repr__(self):
row, col = self.clues[0]
value = self.values[0]
return "%s(%d, %d, %d)" % (self.__class__.__name__, row, col, value)
def __str__(self):
return "%s at %s: %s" % (
self.__class__.__name__, self.clues[0], self.values[0])
@classmethod
def find(cls, sudoku):
raise NotImplementedError("%s.find() not implemented." % cls.__name__)
def build_actions(self, sudoku):
row, col = self.affected[0]
value = self.values[0]
self.actions.append(
Action(Sudoku.set_number, row, col, value))
self.actions.append(
Action(Sudoku.set_candidates, row, col, {value}))
for i, j in sudoku.surrounding_of(row, col, include=False):
if value in sudoku.get_candidates(i, j):
self.actions.append(
Action(Sudoku.remove_candidates, i, j, {value}))
class NakedSingle(_SingleFieldStep):
"""Finds naked singles in a sudoku.
A naked single is a field with only one candidate.
The field can be set to this candidate and this candidate
can be removed from all fields in the same row, column and box.
"""
@classmethod
def find(cls, sudoku):
for row, col in sudoku.empty():
candidates = sudoku.get_candidates(row, col)
if len(candidates) == 1:
for value in candidates:
break
yield cls(row, col, value)
class HiddenSingle(_SingleFieldStep):
"""Finds hidden singles in a sudoku.
A hidden single is a field containing a candidate which
exists in no other fields in the same row, column or box.
The field can be set to this candidate and this candidate
can be removed from all fields in the same row, column and box.
"""
@classmethod
def find(cls, sudoku):
yielded_coords = []
for row, col in sudoku.empty():
for f in sudoku.column_of, sudoku.row_of, sudoku.box_of:
found_hidden_single = False
candidates = set(sudoku.numbers)
for i, j in f(row, col, include=False):
candidates -= sudoku.get_candidates(i, j)
for value in candidates:
if (row, col) not in yielded_coords:
yielded_coords.append((row, col))
yield cls(row, col, value)
found_hidden_single = True
# skip the other functions
if found_hidden_single:
break
class Bruteforce(_SingleFieldStep):
"""Solve the sudoku using brute force.
Bruteforce simply works by trial and error testing each
combination of valid candidates in a field until a
solution has been found.
"""
@classmethod
def find(cls, sudoku):
try:
solution = next(dlx(sudoku))
except StopIteration:
return
for row, col in sudoku.diff(solution):
yield cls(row, col, solution[row, col])
class NakedTuple(SolveStep):
"""Finds naked tuples in a sudoku.
A naked tuple is a set of n fields in a row, column or box,
which (in unison) contain a set of at most n candidates.
These candidates can be removed from all fields in the
same row, column or box.
"""
n = 2
def build_actions(self, sudoku):
for (i, j) in self.affected:
to_remove = set(self.values) & sudoku.get_candidates(i, j)
self.actions.append(
Action(Sudoku.remove_candidates, i, j, to_remove)
)
@classmethod
def find(cls, sudoku):
# keep track of yielded steps
yielded_coords = []
# we work through rows, cols and quads in 3 steps, since the
# empty fields can change in between
for func in sudoku.row_of, sudoku.column_of, sudoku.box_of:
clist = []
for (row, col) in sudoku.empty():
coords = func(row, col)
if coords not in clist:
clist.append(coords)
for coords in clist:
for step in cls.__find_at(sudoku, coords):
if step.clues not in yielded_coords:
yielded_coords.append(step.clues)
yield step
@classmethod
def __find_at(cls, sudoku, coords):
# Create a list of fields with at least 2 and at most n candidates.
# (We ignore naked singles here, because combinations() would
# return a very long list otherwise.)
n_candidates = [(row, col) for (row, col) in coords if 1 < len(
sudoku.get_candidates(row, col)) <= cls.n]
for fields in combinations(n_candidates, cls.n):
all_candidates = set()
for (row, col) in fields:
all_candidates |= sudoku.get_candidates(row, col)
if len(all_candidates) <= cls.n:
# Naked Tuple found - only yield, if actions can be applied.
affected = [(r, c) for r, c in coords if (r, c) not in fields
and set(all_candidates)& sudoku.get_candidates(r, c)]
if affected:
step = cls(
clues=fields, affected=affected, values=all_candidates)
yield step
NakedPair = type("NakedPair", (NakedTuple,), dict(n=2))
NakedTriple = type("NakedTriple", (NakedTuple,), dict(n=3))
NakedQuad = type("NakedQuad", (NakedTuple,), dict(n=4))
NakedQuint = type("NakedQuint", (NakedTuple,), dict(n=5))
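# A minimal human-style solver sketch built from the steps above: try the cheap steps first
# and fall back to Bruteforce only when none of them applies. The particular ordering is an
# assumption for illustration, and calling init_candidates() with just the sudoku is assumed
# here; sudokutools may ship its own solve strategy elsewhere.
def _example_solve(sudoku):
    init_candidates(sudoku)
    cheap_steps = (NakedSingle, HiddenSingle, NakedPair, NakedTriple)
    while True:
        step = next((s for cls in cheap_steps for s in cls.find(sudoku)), None)
        if step is None:
            # nothing cheap left to do; either solved already or Bruteforce finishes the grid
            Bruteforce.apply_all(sudoku)
            return
        step.apply(sudoku)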
class HiddenTuple(SolveStep):
"""Finds hidden tuples in a sudoku.
A hidden tuple is a set of n fields in a row, column or box,
which (in unison) contain a set of at most n candidates, which
are present in no other fields of the same row, column or box.
All other candidates can be removed from these fields.
"""
n = 2
def build_actions(self, sudoku):
for row, col in self.affected:
to_remove = sudoku.get_candidates(row, col) - set(self.values)
self.actions.append(
Action(Sudoku.remove_candidates, row, col, to_remove))
@classmethod
def find(cls, sudoku):
# keep track of yielded steps
yielded_coords = []
# we work through rows, cols and quads in 3 steps, since the
# empty fields can change in between
for func in sudoku.row_of, sudoku.column_of, sudoku.box_of:
clist = []
for (i, j) in sudoku.empty():
coords = func(i, j)
if coords not in clist:
clist.append(coords)
for coords in clist:
for step in cls.__find_at(sudoku, coords):
yield step
@classmethod
def __find_at(cls, sudoku, coords):
cand_coords = defaultdict(lambda: set())
# build coordinate list for each candidate
for cand in sudoku.numbers:
for (row, col) in coords:
if cand in sudoku.get_candidates(row, col):
cand_coords[cand].add((row, col))
# create a list of numbers with at most n occurrences
n_times = [c for c in sudoku.numbers if 1 < len(cand_coords[c]) <= cls.n]
# select n numbers from the n_times list
for numbers in combinations(n_times, cls.n):
# -*- coding: utf-8 -*-
from datetime import *
import pytest
import os
import sys
from v8 import *
from v8.utils import *
if is_py3k:
def toUnicodeString(s):
return s
else:
def toUnicodeString(s, encoding='utf-8'):
return s if isinstance(s, unicode) else unicode(s, encoding)
def testObject():
with JSContext() as ctxt:
o = ctxt.eval("new Object()")
assert hash(o) > 0
o1 = o.clone()
assert hash(o1) == hash(o)
assert o != o1
pytest.raises(UnboundLocalError, o.clone)
def testAutoConverter():
with JSContext() as ctxt:
ctxt.eval("""
var_i = 1;
var_f = 1.0;
var_s = "test";
var_b = true;
var_s_obj = new String("test");
var_b_obj = new Boolean(true);
var_f_obj = new Number(1.5);
""")
vars = ctxt.locals
var_i = vars.var_i
assert var_i
assert 1 == int(var_i)
var_f = vars.var_f
assert var_f
assert 1.0 == float(vars.var_f)
var_s = vars.var_s
assert var_s
assert "test" == str(vars.var_s)
var_b = vars.var_b
assert var_b
assert bool(var_b)
assert "test" == vars.var_s_obj
assert vars.var_b_obj
assert 1.5 == vars.var_f_obj
attrs = dir(ctxt.locals)
assert attrs
assert "var_i" in attrs
assert "var_f" in attrs
assert "var_s" in attrs
assert "var_b" in attrs
assert "var_s_obj" in attrs
assert "var_b_obj" in attrs
assert "var_f_obj" in attrs
def testExactConverter():
class MyInteger(int, JSClass):
pass
class MyString(str, JSClass):
pass
class MyUnicode(unicode, JSClass):
pass
class MyDateTime(time, JSClass):
pass
class Global(JSClass):
var_bool = True
var_int = 1
var_float = 1.0
var_str = 'str'
var_unicode = u'unicode'
var_datetime = datetime.now()
var_date = date.today()
var_time = time()
var_myint = MyInteger()
var_mystr = MyString('mystr')
var_myunicode = MyUnicode('myunicode')
var_mytime = MyDateTime()
with JSContext(Global()) as ctxt:
typename = ctxt.eval("(function (name) { return this[name].constructor.name; })")
typeof = ctxt.eval("(function (name) { return typeof(this[name]); })")
assert 'Boolean' == typename('var_bool')
assert 'Number' == typename('var_int')
assert 'Number' == typename('var_float')
assert 'String' == typename('var_str')
assert 'String' == typename('var_unicode')
assert 'Date' == typename('var_datetime')
assert 'Date' == typename('var_date')
assert 'Date' == typename('var_time')
assert 'MyInteger' == typename('var_myint')
assert 'MyString' == typename('var_mystr')
assert 'MyUnicode' == typename('var_myunicode')
assert 'MyDateTime' == typename('var_mytime')
assert 'object' == typeof('var_myint')
assert 'object' == typeof('var_mystr')
assert 'object' == typeof('var_myunicode')
assert 'object' == typeof('var_mytime')
def testJavascriptWrapper():
with JSContext() as ctxt:
assert type(None) == type(ctxt.eval("null"))
assert type(None) == type(ctxt.eval("undefined"))
assert bool == type(ctxt.eval("true"))
assert str == type(ctxt.eval("'test'"))
assert int == type(ctxt.eval("123"))
assert float == type(ctxt.eval("3.14"))
assert datetime == type(ctxt.eval("new Date()"))
assert JSArray == type(ctxt.eval("[1, 2, 3]"))
assert JSFunction == type(ctxt.eval("(function() {})"))
assert JSObject == type(ctxt.eval("new Object()"))
def test_python_wrapper():
with JSContext() as ctxt:
typeof = ctxt.eval("(function type(value) { return typeof value; })")
protoof = ctxt.eval("(function protoof(value) { return Object.prototype.toString.apply(value); })")
assert '[object Null]' == protoof(None)
assert 'boolean' == typeof(True)
assert 'number' == typeof(123)
assert 'number' == typeof(3.14)
assert 'string' == typeof('test')
assert 'string' == typeof(u'test')
assert '[object Date]' == protoof(datetime.now())
assert '[object Date]' == protoof(date.today())
assert '[object Date]' == protoof(time())
def test():
pass
assert '[object Function]' == protoof(abs)
assert '[object Function]' == protoof(test)
assert '[object Function]' == protoof(test_python_wrapper)
assert '[object Function]' == protoof(int)
def testFunction():
with JSContext() as ctxt:
func = ctxt.eval("""
(function ()
{
function a()
{
return "abc";
}
return a();
})
""")
assert "abc" == str(func())
assert func != None
assert not (func == None)
func = ctxt.eval("(function test() {})")
assert "test" == func.name
assert "" == func.resname
assert 0 == func.linenum
assert 14 == func.colnum
assert 0 == func.lineoff
assert 0 == func.coloff
# TODO: fix me - why doesn't the setter work?
# func.name = "hello"
# it seems __setattr__ was called instead of CJavascriptFunction::SetName
func.setName("hello")
assert "hello" == func.name
def testCall():
class Hello(object):
def __call__(self, name):
return "hello " + name
class Global(JSClass):
hello = Hello()
with JSContext(Global()) as ctxt:
assert "hello flier" == ctxt.eval("hello('flier')")
def testJSFunction():
with JSContext() as ctxt:
hello = ctxt.eval("(function (name) { return 'hello ' + name; })")
assert isinstance(hello, JSFunction)
assert "hello flier" == hello('flier')
assert "hello flier" == hello.invoke(['flier'])
obj = ctxt.eval("({ 'name': 'flier', 'hello': function (name) { return 'hello ' + name + ' from ' + this.name; }})")
hello = obj.hello
assert isinstance(hello, JSFunction)
assert "hello flier from flier" == hello('flier')
tester = ctxt.eval("({ 'name': 'tester' })")
assert "hello flier from tester" == hello.apply(tester, ['flier'])
assert "hello flier from json" == hello.apply({ 'name': 'json' }, ['flier'])
def testConstructor():
with JSContext() as ctx:
ctx.eval("""
var Test = function() {
this.trySomething();
};
Test.prototype.trySomething = function() {
this.name = 'flier';
};
var Test2 = function(first_name, last_name) {
this.name = first_name + ' ' + last_name;
};
""")
assert isinstance(ctx.locals.Test, JSFunction)
test = JSObject.create(ctx.locals.Test)
assert isinstance(test, JSObject)
assert "flier" == test.name;
test2 = JSObject.create(ctx.locals.Test2, ('Flier', 'Lu'))
assert "<NAME>" == test2.name;
test3 = JSObject.create(ctx.locals.Test2, ('Flier', 'Lu'), { 'email': '<EMAIL>' })
assert "<EMAIL>" == test3.email;
def testJSError():
with JSContext() as ctxt:
try:
ctxt.eval('throw "test"')
pytest.fail()
except:
assert JSError == sys.exc_info()[0]
def testErrorInfo():
with JSContext() as ctxt:
with JSEngine() as engine:
try:
engine.compile("""
function hello()
{
throw Error("hello world");
}
hello();""", "test", 10, 10).run()
pytest.fail()
except JSError as e:
assert str(e).startswith('JSError: Error: hello world ( test @ 14 : 26 ) ->')
assert "Error" == e.name
assert "hello world" == e.message
assert "test" == e.scriptName
assert 14 == e.lineNum
assert 78 == e.startPos
assert 79 == e.endPos
assert 26 == e.startCol
assert 27 == e.endCol
assert 'throw Error("hello world");' == e.sourceLine.strip()
assert 'Error: hello world\n' +\
' at Error (native)\n' +\
' at hello (test:14:27)\n' +\
' at test:17:17' == e.stackTrace
def testParseStack():
assert [
('Error', 'unknown source', None, None),
('test', 'native', None, None),
('<anonymous>', 'test0', 3, 5),
('f', 'test1', 2, 19),
('g', 'test2', 1, 15),
(None, 'test3', 1, None),
(None, 'test3', 1, 1),
] == JSError.parse_stack("""Error: err
at Error (unknown source)
at test (native)
at new <anonymous> (test0:3:5)
at f (test1:2:19)
at g (test2:1:15)
at test3:1
at test3:1:1""")
def testStackTrace():
class Global(JSClass):
def GetCurrentStackTrace(self, limit):
return JSStackTrace.GetCurrentStackTrace(4, JSStackTrace.Options.Detailed)
with JSContext(Global()) as ctxt:
st = ctxt.eval("""
function a()
{
return GetCurrentStackTrace(10);
}
function b()
{
return eval("a()");
}
function c()
{
return new b();
}
c();""", "test")
assert 4 == len(st)
assert "\tat a (test:4:24)\n\tat (eval)\n\tat b (test:8:24)\n\tat c (test:12:24)\n" == str(st)
assert "test.a (4:24)\n. (1:1) eval\ntest.b (8:24) constructor\ntest.c (12:24)" ==\
"\n".join(["%s.%s (%d:%d)%s%s" % (
f.scriptName, f.funcName, f.lineNum, f.column,
' eval' if f.isEval else '',
' constructor' if f.isConstructor else '') for f in st])
def testPythonException():
class Global(JSClass):
def raiseException(self):
raise RuntimeError("Hello")
with JSContext(Global()) as ctxt:
r = ctxt.eval("""
msg ="";
try
{
this.raiseException()
}
catch(e)
{
msg += "catch " + e + ";";
}
finally
{
msg += "finally";
}""")
assert "catch Error: Hello;finally" == str(ctxt.locals.msg)
def testExceptionMapping():
class TestException(Exception):
pass
class Global(JSClass):
def raiseIndexError(self):
return [1, 2, 3][5]
def raiseAttributeError(self):
None.hello()
def raiseSyntaxError(self):
eval("???")
def raiseTypeError(self):
int(sys)
def raiseNotImplementedError(self):
raise NotImplementedError("Not support")
def raiseExceptions(self):
raise TestException()
with JSContext(Global()) as ctxt:
ctxt.eval("try { this.raiseIndexError(); } catch (e) { msg = e; }")
assert "RangeError: list index out of range" == str(ctxt.locals.msg)
ctxt.eval("try { this.raiseAttributeError(); } catch (e) { msg = e; }")
assert "ReferenceError: 'NoneType' object has no attribute 'hello'" == str(ctxt.locals.msg)
ctxt.eval("try { this.raiseSyntaxError(); } catch (e) { msg = e; }")
assert "SyntaxError: invalid syntax" == str(ctxt.locals.msg)
ctxt.eval("try { this.raiseTypeError(); } catch (e) { msg = e; }")
assert "TypeError: int() argument must be a string or a number, not 'module'" == str(ctxt.locals.msg)
ctxt.eval("try { this.raiseNotImplementedError(); } catch (e) { msg = e; }")
assert "Error: Not support" == str(ctxt.locals.msg)
pytest.raises(TestException, ctxt.eval, "this.raiseExceptions();")
def testArray():
with JSContext() as ctxt:
array = ctxt.eval("""
var array = new Array();
for (i=0; i<10; i++)
{
array[i] = 10-i;
}
array;
""")
assert isinstance(array, JSArray)
assert 10 == len(array)
assert 5 in array
assert 15 not in array
assert 10 == len(array)
for i in range(10):
assert 10-i == array[i]
array[5] = 0
assert 0 == array[5]
del array[5]
assert None == array[5]
# array [10, 9, 8, 7, 6, None, 4, 3, 2, 1]
# array[4:7] 4^^^^^^^^^7
# array[-3:-1] -3^^^^^^-1
# array[0:0] []
assert [6, None, 4] == array[4:7]
assert [3, 2] == array[-3:-1]
assert [] == array[0:0]
array[1:3] = [9, 9, 9]
assert [10, 9, 9, 9, 7, 6, None, 4, 3, 2, 1] == list(array)
array[5:8] = [8, 8]
assert [10, 9, 9, 9, 7, 8, 8, 3, 2, 1] == list(array)
getattr(fixpart, attr)
stringified[attr] = val.replace(b'\x00', b'').decode('ascii')
fixpart = fixpart._replace(**stringified)
return fixpart
def read_fea_header(self, level=0):
'''Read header of FT_FEA with no fea subtype.'''
# TODO: combine feapart1 and feapart2
hdr = EspsHeader(level)
feapart1 = EspsFeapart1._make(
struct.unpack(
self.feapart1_fmt,
self.fh.read(struct.calcsize(self.feapart1_fmt))
)
)
hdr.feapart1 = feapart1
if feapart1.field_order != 0:
msg = 'Support for field_order YES not implemented.'
raise NotImplementedError(msg)
size = feapart1.field_count
lfmt = '{:}{:d}l'.format(self.byte_order, size)
hfmt = '{:}{:d}h'.format(self.byte_order, size)
# TODO: these should not be stored directly in hdr; need different names
hdr.sizes = struct.unpack(lfmt, self.fh.read(struct.calcsize(lfmt)))
hdr.starts = struct.unpack(lfmt, self.fh.read(struct.calcsize(lfmt)))
hdr.ranks = struct.unpack(hfmt, self.fh.read(struct.calcsize(hfmt)))
hdr.types = struct.unpack(hfmt, self.fh.read(struct.calcsize(hfmt)))
hdr.names = []
hdr.dimens = []
hdr.srcfields = []
feapart2 = EspsFeapart2._make(
struct.unpack(
self.feapart2_fmt,
self.fh.read(struct.calcsize(self.feapart2_fmt))
)
)
hdr.feapart2 = feapart2
hdr.derived = struct.unpack(hfmt, self.fh.read(struct.calcsize(hfmt)))
for rnk, typ in zip(hdr.ranks, hdr.types):
slen = struct.unpack(self.byte_order + 'h', self.fh.read(2))[0]
sfmt = '{:d}s'.format(slen)
hdr.names.append(
struct.unpack(
sfmt,
self.fh.read(struct.calcsize(sfmt)))[0].decode('ascii')
)
if rnk != 0:
lfmt = '{:}{:d}l'.format(self.byte_order, rnk)
hdr.dimens.append(
struct.unpack(lfmt, self.fh.read(struct.calcsize(lfmt)))[0]
)
if typ == 7: # CODED defined in esps.h
# TODO: implement this from lines 1281-1302 in headers.c
# Note that read_coded() is implemented already and may be
# used to read the coded value.
raise NotImplementedError('CODED fea type not implemented.')
slen = struct.unpack(self.byte_order + 'h', self.fh.read(2))[0]
sfmt = '{:d}s'.format(slen)
flds = []
for i in np.arange(slen):
slen2 = struct.unpack(self.byte_order + 'h', self.fh.read(2))[0]
sfmt2 = '{:d}s'.format(slen2)
flds.append(
struct.unpack(
sfmt2,
self.fh.read(struct.calcsize(sfmt2)))[0].decode('ascii')
)
hdr.srcfields.append(flds)
# read variable part
while True:
code = struct.unpack(self.byte_order + 'h', self.fh.read(2))[0]
slen = struct.unpack(self.byte_order + 'h', self.fh.read(2))[0]
if EspsVarheadType[code]['is_string'] is True:
sfmt = '{:d}s'.format(slen * 4)
name = struct.unpack(
sfmt,
self.fh.read(struct.calcsize(sfmt))
)[0].replace(b'\x00', b'').decode('ascii')
if code == 0: # PT_ENDPAR
break
try:
if EspsVarheadType[code]['subtype'] == 'PT_GENHD':
setattr(hdr.genhd, name, self.read_genhd())
elif EspsVarheadType[code]['subtype'] == 'PT_HEADER':
break
# TODO: recursive header reading is untested.
#hdr.variable.srchead.append(self.recursive_rh(hdr.level+1))
elif EspsVarheadType[code]['is_string'] is True: # all other string types
setattr(hdr.variable, EspsVarheadType[code]['fld'], name)
else:
# TODO: I am hoping that the genhd fields, e.g. record_freq,
# start_time, come first, as they are the only ones we are
# interested in. Stop processing after they are found.
break
except KeyError:
try:
msg = 'Reading of ESPS {:} variable header not implemented.'.format(
EspsVarheadType[code]['subtype']
)
raise NotImplementedError(msg)
except KeyError:
msg = 'Did not recognize ESPS variable header code {:d}'.format(
code
)
raise RuntimeError(msg)
return hdr
def read_genhd(self):
'''Read generic header.'''
sz = struct.unpack(self.byte_order + 'i', self.fh.read(4))[0]
typ = struct.unpack(self.byte_order + 'h', self.fh.read(2))[0]
val = None
if EspsDtype[typ] is None:
# TODO: implement additional dtypes?
msg = 'fea type {:d} not implemented in read_genhd.'.format(typ)
raise NotImplementedError(msg)
elif typ == 7: # CODED
val = self.read_coded()
else:
vfmt = '{:}{:d}{:}'.format(self.byte_order, sz, EspsDtype[typ])
val = struct.unpack(
vfmt,
self.fh.read(struct.calcsize(vfmt))
)[0]
if EspsDtype[typ] == 's':
val = val.replace(b'\x00', b'').decode('ascii')
return val
def read_coded(self):
'''Read 'coded' header value.'''
codes = {}
n = 0
lngth = struct.unpack(self.byte_order + 'h', self.fh.read(2))[0]
while lngth != 0:
codefmt = '{:d}s'.format(lngth)
codes[n] = struct.unpack(
codefmt, self.fh.read(struct.calcsize(codefmt))
)[0].replace(b'\x00', b'').decode('ascii')
lngth = struct.unpack('h', self.fh.read(struct.calcsize('h')))[0]
n += 1
key = struct.unpack('h', self.fh.read(struct.calcsize('h')))[0]
return codes[key]
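# Layout note for read_coded() above: the on-disk representation is a sequence of
# (int16 length, <length> ASCII bytes) pairs terminated by a zero length, followed by a
# final int16 index that selects which of the decoded strings is the actual value.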
def read_fea_ana_header(self):
'''Read header of a FEA_ANA file.'''
print('fea_ana type')
# TODO: despite the name 'recursive_rh', recursion does not yet work.
def recursive_rh(self, level=0):
'''Read the file header.'''
if level == 0:
fixpart = self.read_fixpart()
self.ftype = fixpart.type
if self.ftype == 13:
try:
hdr_func = getattr(
self,
EspsFeaType[self.fea_type]['hdr_func']
)
hdr = hdr_func(level)
if level == 0:
hdr.fixpart = fixpart
except KeyError:
try:
msg = 'Reading of ESPS {:} files not implemented'.format(
EspsFeaType[self.fea_type]['subtype']
)
raise RuntimeError(msg)
except KeyError:
msg = 'Did not recognize ESPS FEA file subtype {:d}'.format(
self.fea_type
)
raise RuntimeError(msg)
else:
raise RuntimeError('Do not know how to read type {:}.'.format(
self.ftype
))
return hdr
def check_data_read(self):
'''Perform checks on whether data was read correctly.'''
# After reading, filehandle should be at the end of the file.
assert(self.fh.tell() == os.stat(self.fh.name).st_size)
# TODO: additional checks against nframes or other header values
# to see that records were read correctly.
class EspsSgramReader(EspsFeaReader):
'''A class for reading ESPS .fspec files produced by the sgram command.'''
def __init__(self, infile=None, open_mode='rb', *args, **kwargs):
super(EspsSgramReader, self).__init__(infile=infile,
open_mode=open_mode, *args, **kwargs)
# Read the data records.
self.data = np.fromfile(self.fh, self.fromfile_dtype)
self.check_data_read()
bins = np.arange(self.num_freqs + 1, dtype=float)
self._bins = bins * self.sf / self.fft_length
self._times = (np.arange(len(self.data)) / self.record_freq) + \
self.start_time
self.set_data_view()
@property
def fromfile_dtype(self):
'''The dtypes for unpacking item records using np.fromfile().'''
# TODO: Not sure this is right for all .fspec files.
if self._fromfile_dtype is None or self._fromfile_dtype == []:
self._fromfile_dtype = []
flds = []
try:
assert(self.hdr.feapart2.nfloat == 1)
except AssertionError:
sys.stderr.write('Unexpected number of floats.')
raise
# TODO: check fixpart for tag=1 before adding 'tag' to flds? Note that
# fixpart has 'tag', 'nfloat', and 'nchar' values whereas self.hdr.feapart2
# has 'nfloat' and 'nbyte' values but no 'tag'.
flds.append(('tag', self.byte_order + 'u4'))
flds.append(('tot_power', self.byte_order + 'f4'))
flds.append(
('re_spec_val', '{:d}B'.format(self.hdr.feapart2.nbyte))
)
self._fromfile_dtype = np.dtype(flds)
return self._fromfile_dtype
@property
def sgram(self):
'''Return the spectrogram values of the current data view.'''
return self.data[:]['re_spec_val'][
self.data_view['t1idx']:self.data_view['t2idx'],
self.data_view['hz1idx']:self.data_view['hz2idx']
].T
@property
def power(self):
'''Return the power values of the current data view.'''
return self.data[:]['tot_power'][
self.data_view['t1idx']:self.data_view['t2idx']
]
@property
def tag(self):
'''Return the tag values of the current data view.'''
# TODO: What are the 'tag' values? Should this property have a better name?
# The values seem to be related to the step size and I think indicates the
# sample in the original audio file where the record is centered.
# pplain reports this value as 'tag'.
return self.data[:]['tag'][
self.data_view['t1idx']:self.data_view['t2idx'],
self.data_view['hz1idx']:self.data_view['hz2idx']
]
@property
def bin_hz(self):
'''Return an array of the centers of the fft frequency bins in Hertz
of the current data view.'''
return self._bins[self.data_view['hz1idx']:self.data_view['hz2idx']]
@property
def times(self):
'''Return the timepoints of each spectral slice in sgram of the
current data view.'''
return self._times[self.data_view['t1idx']:self.data_view['t2idx']]
@property
def data_view_extent(self):
'''Return sgram slice's extents suitable for use as imshow()'s
'extent' param for current data view.'''
tdelta = self._times[1] - self._times[0]
fdelta = self._bins[1] - self._bins[0]
return [
self._times[self.data_view['t1idx']] - tdelta/2,
self._times[self.data_view['t2idx']] + tdelta/2,
self._bins[self.data_view['hz1idx']] - fdelta/2,
self._bins[self.data_view['hz2idx']] + fdelta/2
]
@property
def sf(self):
'''Return the sf (sampling frequency) value.'''
return self.hdr.genhd.sf
@property
def window_type(self):
'''Return the window_type value.'''
return self.hdr.genhd.window_type
@property
def frmlen(self):
'''Return the frmlen value.'''
return self.hdr.genhd.frmlen
@property
def frame_meth(self):
'''Return the frame_meth value.'''
return self.hdr.genhd.frame_meth
@property
def freq_format(self):
'''Return the freq_format value.'''
return self.hdr.genhd.freq_format
@property
def record_freq(self):
'''Return the record_freq value.'''
return self.hdr.genhd.record_freq
@property
def start(self):
'''Return the start value.'''
return self.hdr.genhd.start
@property
def sgram_method(self):
'''Return the sgram_method value.'''
return self.hdr.genhd.sgram_method
@property
def fft_order(self):
'''Return the fft_order value.'''
return self.hdr.genhd.fft_order
@property
def fft_length(self):
'''Return the fft_length value.'''
return self.hdr.genhd.fft_length
@property
def spec_type(self):
'''Return the spec_type value.'''
return self.hdr.genhd.spec_type
@property
def step(self):
'''Return the step value.'''
return self.hdr.genhd.step
@property
def num_freqs(self):
'''Return the num_freqs value.'''
return self.hdr.genhd.num_freqs
@property
def pre_emphasis(self):
'''Return the pre_emphasis value.'''
return self.hdr.genhd.pre_emphasis
@property
def contin(self):
'''Return the contin value.'''
return self.hdr.genhd.contin
@property
def start_time(self):
'''Return the start_time value.'''
return self.hdr.genhd.start_time
def set_data_view(self, t1=0.0, t2=np.inf, hz1=0.0, hz2=np.inf):
'''Set the time and frequency ranges that determine the values
returned by data-related properties.'''
t1idx = (np.abs(self._times - t1)).argmin()
if t2 != np.inf:
t2idx = (np.abs(self._times - t2)).argmin()
else:
t2idx = len(self._times) - 1
hz1idx = (np.abs(self._bins - hz1)).argmin()
if hz2 != np.inf:
hz2idx = (np.abs(self._bins - hz2)).argmin()
else:
hz2idx = self.num_freqs
self.data_view = {
't1idx': t1idx, 't2idx': t2idx,
'hz1idx': hz1idx, 'hz2idx': hz2idx
}
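# Usage sketch for EspsSgramReader (the file name is hypothetical, and matplotlib is imported
# locally so this module's own imports stay unchanged):
def _example_plot_sgram(path='utterance.fspec'):
    """Read an ESPS .fspec file and display the 0-5 kHz band of its spectrogram."""
    import matplotlib.pyplot as plt
    rdr = EspsSgramReader(path)
    rdr.set_data_view(hz1=0.0, hz2=5000.0)
    plt.imshow(rdr.sgram, origin='lower', aspect='auto', extent=rdr.data_view_extent)
    plt.xlabel('time (s)')
    plt.ylabel('frequency (Hz)')
    plt.show()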
class EspsFormantReader(EspsFeaReader):
'''A class for reading ESPS .fb files produced by formant and rformant
commands.'''
def __init__(self, infile=None, open_mode='rb', *args, **kwargs):
super(EspsFormantReader, self).__init__(infile=infile,
open_mode=open_mode, *args, **kwargs)
self._times = None
def __str__(self):
return f'''{type(self)}
{self.fh.name}
start time (genhd): {self.start_time}
record freq (genhd): {self.record_freq}
data offset (preamble): {self.preamble.data_offset}
record size (preamble): {self.preamble.record_size}
field count: {self.hdr.feapart1.field_count}
field order: {self.hdr.feapart1.field_order}
names: {self.hdr.names}
sizes: {self.hdr.sizes}
ranks: {self.hdr.ranks}
dimens: {self.hdr.dimens}
types: {self.hdr.types}
fromfile_dtype {self.fromfile_dtype}
fromfile_dtype size (should be the same as record size): {self.fromfile_dtype.itemsize}
'''
@property
def record_freq(self):
return self.hdr.genhd.record_freq
@property
def start_time(self):
return self.hdr.genhd.start_time
@property
def times(self):
if self._times is None:
frame_period = 1 / self.record_freq
self._times = \
(np.arange(self.nrecs) * frame_period) \
+ self.start_time
return self._times
from __future__ import print_function
import numpy as np
from scipy.linalg import eigh, expm, norm
from scipy.sparse import csr_matrix, spmatrix
from math import factorial
import warnings
from functools import reduce
try:
import qutip
except ImportError:
qutip = None
class Setup(object):
sparse = False
def __init__(self, H0, Hcs, c_ops=None, loss_vec=None, sparse=False):
self.sparse = sparse
if c_ops is None:
c_ops = []
self.c_ops = c_ops = self.map_from_qobj(c_ops)
self.H0 = self.from_qobj(H0)
for op in c_ops:
self.H0 += -0.5j*op.conj().T.dot(op)
dim = self.H0.shape[0]
assert self.H0.shape == (dim, dim)
self.Hcs = self.map_from_qobj(Hcs)
n_ctrls = self.Hcs.shape[0]
if not self.sparse:
assert self.Hcs.shape == (n_ctrls, dim, dim), self.Hcs.shape
self.hermitian = True
for H in [self.H0] + list(self.Hcs):
if self.sparse:
H = H.toarray()
if not np.allclose(H, H.conj().T):
print('Non-Hermitian hamiltonian detected!')
self.hermitian = False
break
self.loss_vec = loss_vec
def from_qobj(self, A, sparse=None):
if sparse is None:
sparse = self.sparse
if qutip is not None and isinstance(A, qutip.Qobj):
arr = np.squeeze(A.full())
elif sparse and isinstance(A, spmatrix):
return A.tocsr()
else:
arr = np.asarray(A).copy().astype(complex)
if sparse and arr.ndim == 2 and arr.shape[0] == arr.shape[1]:
return csr_matrix(arr)
return arr
def map_from_qobj(self, A, sparse=None):
return np.array([self.from_qobj(a, sparse=sparse) for a in A])
def get_fids(self, controls, aux_params, dt):
raise NotImplementedError
def set_dtype(self, dtype):
self.H0 = self.H0.astype(dtype)
self.Hcs = [Hc.astype(dtype) for Hc in self.Hcs]
class StateTransferSetup(Setup):
r"""Optimize a problem of the form
.. math::
\max_\epsilon \big|\sum_k \langle \text{final}_k| U(\epsilon) |\text{init}_k\rangle\big|
Since the absolute value is taken after the sum, this results in a coherent evolution of
the initial states into the final states.
"""
def __init__(self, H0, Hcs, inits, finals, c_ops=None, gauge_ops=None, loss_vec=None, coherent=True, sparse=False, use_taylor=False):
self.use_taylor = use_taylor
self.taylor_order = 5
if not use_taylor:
if sparse:
warnings.warn('Exact (non-taylor) method incompatible with sparse matrices, using dense matrices')
sparse = False
super(StateTransferSetup, self).__init__(H0, Hcs, c_ops=c_ops, loss_vec=loss_vec, sparse=sparse)
self.inits = self.map_from_qobj(inits)
self.finals = self.map_from_qobj(finals)
self.gauge_ops = None
self.coherent = coherent
if gauge_ops is not None:
self.gauge_ops = self.map_from_qobj(gauge_ops, sparse=False)
def optimize_taylor_order(self, max_norm, plen, dt, aux_params=None, tol=1e-6):
if aux_params is None:
aux_params = []
orders = []
for _ in range(3):
ctrls = max_norm * np.random.randn(len(self.Hcs), plen)
self.taylor_order = 5
prev_psi = self.get_fids(ctrls, aux_params, dt)[0]
rel_err = 1
while rel_err > tol:
self.taylor_order += 1
psi = self.get_fids(ctrls, aux_params, dt)[0]
rel_err = np.sum(np.abs(psi - prev_psi)**2) / np.sum(np.abs(psi)**2)
print('Taylor order:', self.taylor_order, 'Rel Err:', rel_err)
prev_psi = psi
orders.append(self.taylor_order)
self.taylor_order = max(orders)
print('Using taylor order', self.taylor_order)
def __getitem__(self, item):
return [self.H0, self.Hcs, self.inits, self.finals, self.gauge_ops][item]
def get_fids(self, controls, aux_params, dt):
if self.use_taylor:
return taylor_states_fidelity(
controls, self.H0, self.Hcs,
self.inits, self.finals, dt=dt,
gauge_vals=aux_params, gauge_ops=self.gauge_ops, hermitian=self.hermitian,
coherent=self.coherent, loss_vec=self.loss_vec, order=self.taylor_order
)
else:
return states_fidelity(
controls, self.H0, self.Hcs, self.inits, self.finals, dt=dt,
gauge_vals=aux_params, gauge_ops=self.gauge_ops, hermitian=self.hermitian,
coherent=self.coherent, loss_vec=self.loss_vec
)
def set_dtype(self, dtype):
super(StateTransferSetup, self).set_dtype(dtype)
self.inits = self.inits.astype(dtype)
self.finals = self.finals.astype(dtype)
if self.gauge_ops is not None:
self.gauge_ops = self.gauge_ops.astype(dtype)
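# A minimal usage sketch (the operators, pulse length and amplitudes below are illustrative):
# drive a single qubit from |0> to |1> with X and Y controls and evaluate the fidelity and
# its gradient for one random pulse.
def _example_qubit_state_transfer():
    sx = np.array([[0., 1.], [1., 0.]], dtype=complex)
    sy = np.array([[0., -1j], [1j, 0.]], dtype=complex)
    H0 = np.zeros((2, 2), dtype=complex)              # drift Hamiltonian (resonant frame)
    psi0 = np.array([1., 0.], dtype=complex)
    psi1 = np.array([0., 1.], dtype=complex)
    setup = StateTransferSetup(H0, [sx, sy], [psi0], [psi1])
    controls = 0.1 * np.random.randn(2, 100)          # (n_ctrls, plen) pulse samples
    prop, fid, d_fid, _ = setup.get_fids(controls, aux_params=[], dt=1.0)
    return fid, d_fid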
class UnitarySetup(Setup):
r"""Optimize a problem of the form
.. math::
\max_\epsilon \big|\text{Tr}[U_\text{target} U(\epsilon)^\dagger]\big|
"""
def __init__(self, H0, Hcs, U_target, c_ops=None, gauge_ops=None):
super(UnitarySetup, self).__init__(H0, Hcs, c_ops=c_ops)
self.U_target = self.from_qobj(U_target)
self.gauge_ops = None
if gauge_ops is not None:
self.gauge_ops = self.map_from_qobj(gauge_ops)
def __getitem__(self, item):
return [self.H0, self.Hcs, self.U_target][item]
def get_fids(self, controls, aux_params, dt):
return prop_fidelity(
controls, self.H0, self.Hcs, self.U_target, aux_params, self.gauge_ops, dt,
hermitian=self.hermitian, loss_vec=self.loss_vec
)
def set_dtype(self, dtype):
super(UnitarySetup, self).set_dtype(dtype)
self.U_target = self.U_target.astype(dtype)
if self.gauge_ops is not None:
self.gauge_ops = self.gauge_ops
class ExpectationSetup(Setup):
def __init__(self, H0, Hcs, inits, expect_ops, c_ops=None):
super(ExpectationSetup, self).__init__(H0, Hcs, c_ops=c_ops)
self.inits = self.from_qobj(inits) #map_from_qobj(inits)
self.expect_ops = self.from_qobj(expect_ops) #map_from_qobj(expect_ops)
def __getitem__(self, item):
return [self.H0, self.Hcs, self.inits, self.expect_ops][item]
def get_fids(self, controls, aux_params, dt):
prop, fid, d_fid = get_expectation(controls, self.H0, self.Hcs, self.inits, self.expect_ops, dt)
return prop, fid, d_fid, np.zeros_like(aux_params)
def set_dtype(self, dtype):
super(ExpectationSetup, self).set_dtype(dtype)
self.inits = self.inits.astype(dtype)
self.expect_ops = self.expect_ops.astype(dtype)
class LindbladSetup(StateTransferSetup):
def __init__(self, H0, Hcs, inits, finals, c_ops, loss_vec=None, **kwargs):
L0 = self.make_liouvillian(H0) + sum(map(self.make_dissipator, c_ops))
Lcs = np.array(list(map(self.make_liouvillian, Hcs)))
inits = self.map_from_qobj(inits)
finals = self.map_from_qobj(finals)
if inits[0].shape[0] != L0.shape[0]:
rho_inits = [np.outer(i1, i2.conj()).flatten() for i1 in inits for i2 in inits]
rho_finals = [np.outer(f1, f2.conj()).flatten() for f1 in finals for f2 in finals]
else:
rho_inits = inits
rho_finals = finals
super(LindbladSetup, self).__init__(L0, Lcs, rho_inits, rho_finals, **kwargs)
# self.hermitian = False
def get_fids(self, controls, aux_params, dt):
prop, fid, d_fid, d_fid_aux = super(LindbladSetup, self).get_fids(controls, aux_params, dt)
fid = np.sqrt(fid)
d_fid = d_fid / fid
d_fid_aux = d_fid_aux / fid
return prop, fid, d_fid, d_fid_aux
def make_liouvillian(self, H):
H = self.from_qobj(H)
I = np.eye(H.shape[0])
return (np.kron(I, H) - np.kron(H.T, I))
def make_dissipator(self, c_op):
c_op = self.from_qobj(c_op)
cd = c_op.T.conj()
c = c_op
cdc = cd.dot(c)
I = np.eye(c_op.shape[0])
return 1j * (np.kron(cd.T, c) - 0.5 * (np.kron(I, cdc) + np.kron(cdc.T, I)))
class SubspaceSetup(StateTransferSetup):
def get_fids(self, controls, aux_params, dt):
assert not self.use_taylor
return states_fidelity(
controls, self.H0, self.Hcs, self.inits, self.finals, dt=dt,
gauge_vals=aux_params, gauge_ops=self.gauge_ops, hermitian=self.hermitian,
coherent=False, subspace_contain=True, loss_vec=self.loss_vec
)
def states_fidelity(controls, H_drift, H_controls, inits, finals, gauge_vals=None, gauge_ops=None,
dt=1, hermitian=True, coherent=True, subspace_contain=False, loss_vec=None):
n_ctrls, plen = controls.shape
n_states = len(inits)
use_gauge = gauge_ops is not None
dim = H_drift.shape[0]
H_drift = dt * H_drift
H_controls = dt * np.array(H_controls)
# TODO: Don't re-initialize every time if possible
props = np.empty((plen, dim, dim), H_drift.dtype)
d_props = np.empty((n_ctrls, plen, dim, dim), H_drift.dtype)
for i, time_slice in enumerate(controls.T):
H = H_drift + sum(c*Hc for c,Hc in zip(time_slice, H_controls))
if hermitian:
props[i], d_props[:, i, :, :] = step_propagator(H, H_controls, loss_vec)
else:
props[i], d_props[:, i, :, :] = step_propagator_nonhermitian(H, H_controls)
if use_gauge:
g_sum = sum(g_val*g_op for g_val, g_op in zip(gauge_vals, gauge_ops))
g_prop, d_g_props = step_propagator(g_sum, gauge_ops)
props = np.concatenate((props, [g_prop]))
prop_inits = [inits.T]
for prop in props:
prop_inits.append(prop.dot(prop_inits[-1]))
prop_finals = [finals.conj()]
for prop in reversed(props):
prop_finals.append(prop_finals[-1].dot(prop))
prop_finals.reverse()
if coherent:
ovlp = np.sum(prop_finals[-1].T * prop_inits[-1])
fid = abs(ovlp)
d_ovlps = []
for i, (pi, pf) in enumerate(zip(prop_inits[:plen], prop_finals[1:])):
for d_prop in d_props[:, i]:
d_ovlps.append(np.sum(pf.T * d_prop.dot(pi)))
d_ovlps = np.array(d_ovlps).reshape((plen, n_ctrls)).T
d_fids = (ovlp.real*d_ovlps.real + ovlp.imag*d_ovlps.imag) / (fid)
elif subspace_contain:
ovlps = prop_finals[-1].dot(prop_inits[-1])
a_ovlps = np.abs(ovlps)**2
fid = np.sum(a_ovlps)
d_fids = []
for i, (pi, pf) in enumerate(zip(prop_inits[:plen], prop_finals[1:])):
for d_prop in d_props[:, i]:
d_ovlp = pf.dot(d_prop.dot(pi))
d_a_ovlps = 2 * (ovlps.real*d_ovlp.real + ovlps.imag*d_ovlp.imag)
d_fids.append(np.sum(d_a_ovlps))
d_fids = np.array(d_fids).reshape((plen, n_ctrls)).T
else:
ovlps = np.sum(prop_finals[-1].T * prop_inits[-1], axis=0)
a_ovlps = np.abs(ovlps)**2
fid = np.sum(a_ovlps)
d_fids = []
for i, (pi, pf) in enumerate(zip(prop_inits[:plen], prop_finals[1:])):
for d_prop in d_props[:, i]:
d_ovlp = pf.T * d_prop.dot(pi)
d_a_ovlps = 2 * (ovlps.real*d_ovlp.real + ovlps.imag*d_ovlp.imag)
d_fids.append(np.sum(d_a_ovlps))
d_fids = np.array(d_fids).reshape((plen, n_ctrls)).T
if not use_gauge:
return prop_inits[-1], fid / n_states, d_fids / n_states, []
d_g_ovlps = []
pi = prop_inits[-2]
pf = prop_finals[-1]
for d_prop in d_g_props:
d_g_ovlps.append(np.sum(pf.T * d_prop.dot(pi)))
d_g_ovlps = np.array(d_g_ovlps)
d_g_fids = (ovlp.real*d_g_ovlps.real + ovlp.imag*d_g_ovlps.imag) / (fid)
return prop_inits[-1], fid / n_states, d_fids / n_states, d_g_fids / n_states
def get_expectation(controls, H_drift, H_controls, init, expect_op, dt=1):
H_drift = dt * H_drift
H_controls = dt * np.array(H_controls)
tot_prop, d_tot_props, _ = total_propagator(controls, H_drift, H_controls)
final = tot_prop.dot(init)
d_finals = np.einsum('ijkl,l->ijk', d_tot_props, init)
expect = final.conj().T.dot(expect_op).dot(final).real
d_op_finals = np.einsum('ij,klj->kli', expect_op, d_finals)
d_expects = 2*np.einsum('i,jki->jk', final.conj(), d_op_finals).real
return tot_prop, expect, d_expects
def prop_fidelity(controls, H_drift, H_controls, U_target, gauge_vals, gauge_ops, dt=1,
hermitian=True, loss_vec=None):
"""
Get the total propagator as well as the fidelity to a given target
defined as abs(Tr(U_target . U.conj().T)) and the gradient of the fidelity
with respect to the controls
"""
H_drift = dt * H_drift
H_controls = dt * np.array(H_controls)
tot_prop, d_tot_props, d_g_props = total_propagator(
controls, H_drift, H_controls, gauge_vals, gauge_ops, hermitian=hermitian, loss_vec=loss_vec
)
return prop_fidelity_from_U(tot_prop, d_tot_props, d_g_props, U_target)
def prop_fidelity_from_U(U, dUs, d_g_Us, U_target):
norm = np.sum(abs(U_target)**2)
overlap = np.sum(U_target.conj() * U) / norm
d_overlaps = np.sum(U_target.conj() * dUs, axis=(2,3)) / norm
fid = abs(overlap)
d_fid = (overlap.real*d_overlaps.real + overlap.imag*d_overlaps.imag) / fid
if len(d_g_Us) == 0:
d_g_fid = []
else:
d_g_overlaps = np.sum(U_target.conj() * d_g_Us, axis=(1,2)) / norm
d_g_fid = (overlap.real*d_g_overlaps.real + overlap.imag*d_g_overlaps.imag) / fid
return U, fid, d_fid, d_g_fid
def total_propagator(controls, H_drift, H_controls, gauge_vals=None, gauge_ops=None,
hermitian=True, loss_vec=None):
"""
Compute step propagator for each time point and take product
to find the total propagator. Similarly find the derivative
of the propagator with respect to the controls.
:param controls: (N_CTRLS, PLEN) real array
:param H_drift: (DIM, DIM) complex array
:param H_controls: (N_CTRLS, DIM, DIM) complex array
to 0, the cookie is non-persistent and lasts
only until the end of the browser session (or equivalent). The
maximum allowed value for TTL is one day.
When the load balancing scheme is INTERNAL, this field is not used.
"""
return pulumi.get(self, "affinity_cookie_ttl_sec")
@affinity_cookie_ttl_sec.setter
def affinity_cookie_ttl_sec(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "affinity_cookie_ttl_sec", value)
@property
@pulumi.getter
def backends(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionBackendServiceBackendArgs']]]]:
"""
The set of backends that serve this RegionBackendService.
Structure is documented below.
"""
return pulumi.get(self, "backends")
@backends.setter
def backends(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionBackendServiceBackendArgs']]]]):
pulumi.set(self, "backends", value)
@property
@pulumi.getter(name="cdnPolicy")
def cdn_policy(self) -> Optional[pulumi.Input['RegionBackendServiceCdnPolicyArgs']]:
"""
Cloud CDN configuration for this BackendService.
Structure is documented below.
"""
return pulumi.get(self, "cdn_policy")
@cdn_policy.setter
def cdn_policy(self, value: Optional[pulumi.Input['RegionBackendServiceCdnPolicyArgs']]):
pulumi.set(self, "cdn_policy", value)
@property
@pulumi.getter(name="circuitBreakers")
def circuit_breakers(self) -> Optional[pulumi.Input['RegionBackendServiceCircuitBreakersArgs']]:
"""
Settings controlling the volume of connections to a backend service. This field
is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED
and the `protocol` is set to HTTP, HTTPS, or HTTP2.
Structure is documented below.
"""
return pulumi.get(self, "circuit_breakers")
@circuit_breakers.setter
def circuit_breakers(self, value: Optional[pulumi.Input['RegionBackendServiceCircuitBreakersArgs']]):
pulumi.set(self, "circuit_breakers", value)
@property
@pulumi.getter(name="connectionDrainingTimeoutSec")
def connection_draining_timeout_sec(self) -> Optional[pulumi.Input[int]]:
"""
Time for which an instance will be drained (it will not accept new
connections, but will continue serving connections that were already started).
"""
return pulumi.get(self, "connection_draining_timeout_sec")
@connection_draining_timeout_sec.setter
def connection_draining_timeout_sec(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "connection_draining_timeout_sec", value)
@property
@pulumi.getter(name="consistentHash")
def consistent_hash(self) -> Optional[pulumi.Input['RegionBackendServiceConsistentHashArgs']]:
"""
Consistent Hash-based load balancing can be used to provide soft session
affinity based on HTTP headers, cookies or other properties. This load balancing
policy is applicable only for HTTP connections. The affinity to a particular
destination host will be lost when one or more hosts are added/removed from the
destination service. This field specifies parameters that control consistent
hashing.
This field only applies when all of the following are true -
"""
return pulumi.get(self, "consistent_hash")
@consistent_hash.setter
def consistent_hash(self, value: Optional[pulumi.Input['RegionBackendServiceConsistentHashArgs']]):
pulumi.set(self, "consistent_hash", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional description of this resource.
Provide this property when you create the resource.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="enableCdn")
def enable_cdn(self) -> Optional[pulumi.Input[bool]]:
"""
If true, enable Cloud CDN for this RegionBackendService.
"""
return pulumi.get(self, "enable_cdn")
@enable_cdn.setter
def enable_cdn(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_cdn", value)
@property
@pulumi.getter(name="failoverPolicy")
def failover_policy(self) -> Optional[pulumi.Input['RegionBackendServiceFailoverPolicyArgs']]:
"""
Policy for failovers.
Structure is documented below.
"""
return pulumi.get(self, "failover_policy")
@failover_policy.setter
def failover_policy(self, value: Optional[pulumi.Input['RegionBackendServiceFailoverPolicyArgs']]):
pulumi.set(self, "failover_policy", value)
@property
@pulumi.getter(name="healthChecks")
def health_checks(self) -> Optional[pulumi.Input[str]]:
"""
The set of URLs to HealthCheck resources for health checking
this RegionBackendService. Currently at most one health
check can be specified.
A health check must be specified unless the backend service uses an internet
or serverless NEG as a backend.
"""
return pulumi.get(self, "health_checks")
@health_checks.setter
def health_checks(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "health_checks", value)
@property
@pulumi.getter
def iap(self) -> Optional[pulumi.Input['RegionBackendServiceIapArgs']]:
"""
Settings for enabling Cloud Identity Aware Proxy
Structure is documented below.
"""
return pulumi.get(self, "iap")
@iap.setter
def iap(self, value: Optional[pulumi.Input['RegionBackendServiceIapArgs']]):
pulumi.set(self, "iap", value)
@property
@pulumi.getter(name="loadBalancingScheme")
def load_balancing_scheme(self) -> Optional[pulumi.Input[str]]:
"""
Indicates what kind of load balancing this regional backend service
will be used for. A backend service created for one type of load
balancing cannot be used with the other(s).
Default value is `INTERNAL`.
Possible values are `EXTERNAL`, `INTERNAL`, and `INTERNAL_MANAGED`.
"""
return pulumi.get(self, "load_balancing_scheme")
@load_balancing_scheme.setter
def load_balancing_scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "load_balancing_scheme", value)
@property
@pulumi.getter(name="localityLbPolicy")
def locality_lb_policy(self) -> Optional[pulumi.Input[str]]:
"""
The load balancing algorithm used within the scope of the locality.
The possible values are -
* ROUND_ROBIN - This is a simple policy in which each healthy backend
is selected in round robin order.
* LEAST_REQUEST - An O(1) algorithm which selects two random healthy
hosts and picks the host which has fewer active requests.
* RING_HASH - The ring/modulo hash load balancer implements consistent
hashing to backends. The algorithm has the property that the
addition/removal of a host from a set of N hosts only affects
1/N of the requests.
* RANDOM - The load balancer selects a random healthy host.
* ORIGINAL_DESTINATION - Backend host is selected based on the client
connection metadata, i.e., connections are opened
to the same address as the destination address of
the incoming connection before the connection
was redirected to the load balancer.
* MAGLEV - used as a drop-in replacement for the ring hash load balancer.
Maglev is not as stable as ring hash but has faster table lookup
build times and host selection times. For more information about
Maglev, refer to https://ai.google/research/pubs/pub44824
This field is applicable only when the `load_balancing_scheme` is set to
INTERNAL_MANAGED and the `protocol` is set to HTTP, HTTPS, or HTTP2.
Possible values are `ROUND_ROBIN`, `LEAST_REQUEST`, `RING_HASH`, `RANDOM`, `ORIGINAL_DESTINATION`, and `MAGLEV`.
"""
return pulumi.get(self, "locality_lb_policy")
@locality_lb_policy.setter
def locality_lb_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "locality_lb_policy", value)
@property
@pulumi.getter(name="logConfig")
def log_config(self) -> Optional[pulumi.Input['RegionBackendServiceLogConfigArgs']]:
"""
This field denotes the logging options for the load balancer traffic served by this backend service.
If logging is enabled, logs will be exported to Stackdriver.
Structure is documented below.
"""
return pulumi.get(self, "log_config")
@log_config.setter
def log_config(self, value: Optional[pulumi.Input['RegionBackendServiceLogConfigArgs']]):
pulumi.set(self, "log_config", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the resource. Provided by the client when the resource is created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def network(self) -> Optional[pulumi.Input[str]]:
"""
The URL of the network to which this backend service belongs.
This field can only be specified when the load balancing scheme is set to INTERNAL.
"""
return pulumi.get(self, "network")
@network.setter
def network(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "network", value)
@property
@pulumi.getter(name="outlierDetection")
def outlier_detection(self) -> Optional[pulumi.Input['RegionBackendServiceOutlierDetectionArgs']]:
"""
Settings controlling eviction of unhealthy hosts from the load balancing pool.
This field is applicable only when the `load_balancing_scheme` is set
to INTERNAL_MANAGED and the `protocol` is set to HTTP, HTTPS, or HTTP2.
Structure is documented below.
"""
return pulumi.get(self, "outlier_detection")
@outlier_detection.setter
def outlier_detection(self, value: Optional[pulumi.Input['RegionBackendServiceOutlierDetectionArgs']]):
pulumi.set(self, "outlier_detection", value)
@property
@pulumi.getter(name="portName")
def port_name(self) -> Optional[pulumi.Input[str]]:
"""
A named port on a backend instance group representing the port for
communication to the backend VMs in that group. Required when the
loadBalancingScheme is EXTERNAL, INTERNAL_MANAGED, or INTERNAL_SELF_MANAGED
and the backends are instance groups. The named port must be defined on each
backend instance group. This parameter has no meaning if the backends are NEGs. The API sets a
default of "http" if not given.
Must be omitted when the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load Balancing).
"""
return pulumi.get(self, "port_name")
@port_name.setter
def port_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "port_name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
"""
The protocol this RegionBackendService uses to communicate with backends.
The default is HTTP. **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer
types and may result in errors if used with the GA API.
Possible values are `HTTP`, `HTTPS`, `HTTP2`, `SSL`, `TCP`, `UDP`, `GRPC`, and `UNSPECIFIED`.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The Region in which the created backend service should reside.
If it is not provided, the provider region is used.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter(name="sessionAffinity")
def session_affinity(self) -> Optional[pulumi.Input[str]]:
"""
Type of session affinity to use. The default is NONE. Session affinity is
not applicable if the protocol is UDP.
Possible values are `NONE`, `CLIENT_IP`, `CLIENT_IP_PORT_PROTO`, `CLIENT_IP_PROTO`, `GENERATED_COOKIE`, `HEADER_FIELD`, and `HTTP_COOKIE`.
"""
return pulumi.get(self, "session_affinity")
@session_affinity.setter
def session_affinity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "session_affinity", value)
@property
@pulumi.getter(name="timeoutSec")
def timeout_sec(self) -> Optional[pulumi.Input[int]]:
"""
How many seconds to wait for the backend before considering it a
failed request. Default is 30 seconds. Valid range is [1, 86400].
"""
return pulumi.get(self, "timeout_sec")
@timeout_sec.setter
def timeout_sec(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_sec", value)
@pulumi.input_type
class _RegionBackendServiceState:
def __init__(__self__, *,
affinity_cookie_ttl_sec: Optional[pulumi.Input[int]] = None,
backends: Optional[pulumi.Input[Sequence[pulumi.Input['RegionBackendServiceBackendArgs']]]] = None,
cdn_policy: Optional[pulumi.Input['RegionBackendServiceCdnPolicyArgs']] = None,
self.dtxmax0 = max([abs(min(dtxlist)),abs(max(dtxlist))])
self.dtymax0 = max([abs(min(dtylist)),abs(max(dtylist))])
self.dtzmax0 = max([abs(min(dtzlist)),abs(max(dtzlist))])
self.dmaxset = 1
for i in range(self.num_z):
for j in range(self.num_x):
dtxi,dtzi,dtxj_up,dtxj_down,dtxj_left,dtxj_right,dtzj_up,dtzj_down,dtzj_left,dtzj_right,di,dj_up,dj_down,dj_left,dj_right,n_up,n_down,n_left,n_right,pos1,pos2,bc,geo = state_data(i,j,self)
'''
self.metagrid[i][j] = [[dtxi],[dtzi],[dtxj_up],[dtxj_down],[dtxj_left],[dtxj_right],[dtzj_up],[dtzj_down],[dtzj_left],[dtzj_right],[di],[dj_up],[dj_down],[dj_left],[dj_right],[n_up],[n_down],[n_left],[n_right],[pos1],[pos2],[bc],[geo]]
'''
self.metagrid[i][j] = [
[n_up],[n_down],[n_left],[n_right],
[pos1],[pos2],
[geo]
]
# Function to calculate values used in the reward system / 報酬制度での価値利用を計算する機能
def _game_gen_state_condi(self):
self.gen_model.gen_surface1() # Calculate total surface / 総表面積を計算する
self.strain_e = self.gen_model.surface_1 # Current total surface of this structure / この構造の総表面積
if self.game_step == 1: # Initial total surface area of this structure / この構造の初期の総表面積
self.int_strain_e = self.gen_model.surface_1
else:
pass
# Function to initialize state / 状態を初期化する関数
def _game_get_1_state(self,do):
self._update_metagrid() # update structural data array / 構造データ配列を更新する
# do = [i,j]
# metagrid[z,x]
# Check game type to generate state from structural data array / 構造データ配列から状態を生成するゲームタイプをチェックしてください
x = self.metagrid[do[0]][do[1]]
state = np.array(x) # state / 状態
return state
# Function to generate next state / 次の状態を生成する関数
def _game_get_next_state(self,do,action,i=0):
num = [action[0][0],action[0][1],action[0][2],action[0][3],action[0][4],action[0][5]]
num = num.index(max(num)) # Find maximum index of the action receive from Neural Network / ニューラルネットワークから受け取るアクションの最大インデックスを見つける
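# Illustrative example (hypothetical action values): action = [[0.1, 0.7, 0.05, 0.05, 0.05, 0.05]]
# gives num == 1, which is interpreted below as moving the current node down by y_step.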
# next_state = f(action)
# Interpretation of action index / 行動のインデックスの解釈
if num == 0: # Adjust this node by moving up in the magnitude of y_step / y_stepの大きさを上に移動して、このノードを調整します
if self.gen_model.n_u_name_div[do[0]][do[1]].res[1] !=1:
if self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] + self.y_step <= self.max_y_val:
self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] += self.y_step
else:
pass
else:
pass
elif num == 1: # Adjust this node by moving down in the magnitude of y_step / y_stepの大きさを下に移動して、このノードを調整します
if self.gen_model.n_u_name_div[do[0]][do[1]].res[1] != 1:
if self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] - self.y_step >= 0:
self.gen_model.n_u_name_div[do[0]][do[1]].coord[1] -= self.y_step
else:
pass
else:
pass
elif num == 2: # Agent move to other node to the right(move right x+1) / #エージェントは他のノードに右に移動します(右にx + 1移動)
# do[z,x]
if (do[1]+1 != (len(self.gen_model.n_u_name_div[0]))):
self.doing[0][1] = do[1]+1
else:
pass
elif num == 3: # Agent move to other node to the left(move left x-1) / エージェントは他のノードに左に移動します(左に移動x-1)
# do[z,x]
if (do[1] != 0):
self.doing[0][1] = do[1]-1
else:
pass
elif num == 4: # Agent move to other node to the upper(move up z-1) / エージェントが他のノードに移動します(上に移動z-1)
# do[z,x]
if (do[0] != 0):
self.doing[0][0] = do[0]-1
else:
pass
elif num == 5: # Agent move to other node to the lower(move down z+1) / エージェントは他のノードに移動します(z + 1に移動)
# do[z,x]
if (do[0]+1 != (len(self.gen_model.n_u_name_div))):
self.doing[0][0] = do[0]+1
else:
pass
announce = ['z_up','z_down','move right','move left','move up','move down'] # list of actions / 行動のリスト
if self.tell_action == True:
print(announce[num]) # print out action if tell_action is True / tell_actionがTrueの場合、行動を出力します
self._update_metagrid() # update structural data array / 構造データ配列を更新する
# Check game type to generate state from structural data array / 構造データ配列から状態を生成するゲームタイプをチェックしてください
x = self.metagrid[self.doing[i][0]][self.doing[i][1]]
next_state = np.array(x) # next_state / 次の状態
return next_state
# Function to calculate values used in the reward system / 報酬制度での価値利用を計算する機能
def _gen_gen_reward_condition(self):
self.gen_model.gen_surface1() # Calculate next state's total surface / 次の状態の総表面積を計算する
self.next_strain_e = self.gen_model.surface_1 # Total surface of this structure in the next_state after agents do actions / エージェントが行動を実行した後のnext_state内のこの構造の総表面積
# Function to calculate reward for each agent / 各エージェントの報酬を計算する機能
def _game_get_reward(self,agent):
self.reward[agent] += 1000*(self.strain_e[0]-self.next_strain_e[0])/(self.int_strain_e[0]) # Reward rule / 報酬規定
if self.game_step == self.end_step: # Check if game is end / ゲームが終了したかどうかを確認する
self.done_counter = 1
return self.reward[agent],self.done_counter
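# Illustrative reward example (hypothetical values): if the initial surface area is
# 200.0 and an action reduces the current surface from 150.0 to 148.0, the agent
# receives 1000 * (150.0 - 148.0) / 200.0 = 10.0 for that step.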
# Function to reset all values and prepare for the next game / すべての値をリセットして次のゲームに備える機能
def reset(self):
self.state = [] # Game state / ゲームの状態
self.action = [] # Game action / ゲームの行動
self.reward = [] # Game reward for each agent / 各エージェントのゲーム報酬
for i in range(self.num_agents):
self.reward.append(0)
self.next_state = [] # Game next state / ゲームの次の状態
self.done = [] # Game over counter / ゲームオーバーカウンター
self.doing = [] # List of position(x,z) in the structure of each agent / 各エージェントの構造内のposition(x、z)のリスト
for i in range(self.num_agents): # Initialize starting position of each structure / 各構造の開始位置を初期化
self.doing.append([0,0])
self.game_step = 1 # Game initial step / ゲームの最初のステップ
self.xmax = 0 # Maximum x coordinate value in this structural model (horizontal) / この構造モデルの最大x座標値(水平)
self.xmin = 0 # Minimum x coordinate value in this structural model (horizontal) / この構造モデル(水平)の最小x座標値
self.ymax = 0 # Maximum y coordinate value in this structural model (vertical) / この構造モデルのY座標の最大値(垂直)
self.ymin = 0 # Minimum y coordinate value in this structural model (vertical) / この構造モデルのY座標の最小値(垂直)
self.zmax = 0 # Maximum z coordinate value in this structural model (horizontal) / この構造モデルの最大Z座標値(水平)
self.zmin = 0 # Minimum z coordinate value in this structural model (horizontal) / この構造モデルの最小Z座標値(水平)
self.sval = 0.001 # small noise / 小さなノイズ
self.dymax = 0
self.dymin = 0
self.sdyval = 0
self.dmax0 = 0
self.dmaxset = 0
self.dtxmax = 0
self.dtxmin = 0
self.dtymax = 0
self.dtymin = 0
self.dtzmax = 0
self.dtzmin = 0
self.dtxmax0 = 0
self.dtymax0 = 0
self.dtzmax0 = 0
#**********
self.int_strain_e = 0 # Initial total surface area for this game / このゲームの初期の総表面積
self.strain_e = 0 # Current total surface area for this game / このゲームの現在の総表面積
self.next_strain_e = 0 # Total surface area after agents do actions. Used for calculating reward / エージェントがアクションを実行した後の総表面積。報酬の計算に使用されます
#**********
self.reward_counter = [] # List of reward of each agent / 各エージェントの報酬一覧
for i in range(self.num_agents): # Initialize reward counter for each agent (self.reward was already initialized above) / 各エージェントの報酬カウンターを初期化する
self.reward_counter.append(0)
self.done_counter = 0 # Counter for game end / ゲーム終了のカウンター
# Function change state to next_state / 関数は状態を次の状態に変更します
def step(self):
self.state = self.next_state
self.action = [] # Reset List of Action for each agent / 各エージェントのアクションリストをリセット
for i in range(len(self.reward)): # Reset List of Reward for each agent / 各エージェントの報酬リストをリセット
self.reward[i] = 0
self.next_state = [] # Reset List of next state for each agent / 各エージェントの次の状態のリストをリセット
self.done = [] # Reset List of game over counter / ゲームオーバーカウンターのリストをリセット
self.game_step += 1 # Increase game step counter / ゲームのステップカウンターを増やす
#=============================================================================
# GAME 2 '研究室' (Laboratory)
class Game2:
def __init__(self,end_step,alpha,max_y_val,model,num_agents=1,render=0,tell_action=False):
self.name = 'GAME 2' # Name of the game / ゲームの名前
self.description = 'AGENT HAS 6 ACTIONS: MOVE NODE (UP DOWN), MOVE TO SURROUNDING NODES (LEFT RIGHT UP DOWN)' # Game's description / ゲームの説明
self.objective = 'REDUCE STRAIN ENERGY' # Game's objective / ゲームの目的
self.tell_action =tell_action # Print agent action in console /コンソールでエージェントの行動を印刷する
self.num_agents = num_agents # Amount of agents in the game / ゲーム内のエージェントの数
self.gen_model = model # Gen structural model used in the game / ゲームで使用されるGen構造モデル
self.model = model.model # Structural model used in the game / ゲームで使用される構造モデル
self.num_x = model.num_x # Amount of Structural model's span in x axis (horizontal) / X軸での構造モデルのスパンの量(水平)
self.num_z = model.num_z # Amount of Structural model's span in z axis (horizontal) / Z軸での構造モデルのスパンの量(水平)
self.render = render # Render after each step / 各ステップ後にレンダリング
self.game_step = 1 # Game initial step / ゲームの最初のステップ
self.game_type = 0 # Game's state type / ゲームの状態タイプ
self.end_step = end_step # Game final step / ゲームの最終ステップ
self.alpha = alpha # Magnitude for agents to adjust structure as a factor of Structural model's span / エージェントが構造モデルのスパンの要素として構造を調整するための大きさ
self.y_step = self.alpha*self.gen_model.span # Magnitude for agents to adjust structure(m) / エージェントが構造を調整するための大きさ(m)
self.state = [] # Game state / ゲームの状態
self.action = [] # Game action / ゲームの行動
self.reward = [] # Game reward for each agent / 各エージェントのゲーム報酬
self.next_state = [] # Game next state / ゲームの次の状態
self.done = [] # Game over counter / ゲームオーバーカウンター
self.doing = [] # List of position(x,z) in the structure of each agent / 各エージェントの構造内のposition(x、z)のリスト
for i in range(self.num_agents): # Initialize starting position of each structure / 各構造の開始位置を初期化
self.doing.append([0,0])
self.metagrid = [] # 2-D Array of data in each structural node / 各構造ノードのデータの2次元配列
for i in range(self.num_z): # Initialize structural data array / 構造データ配列を初期化する
self.metagrid.append([])
for j in range(self.num_x):
self.metagrid[-1].append([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]) # Maximum num_state in this suite is 23 / このスイートの最大num_stateは23です
self.xmax = 0 # Maximum x coordinate value in this structural model (horizontal) / この構造モデルの最大x座標値(水平)
self.xmin = 0 # Minimum x coordinate value in this structural model (horizontal) / この構造モデル(水平)の最小x座標値
self.ymax = 0 # Maximum y coordinate value in this structural model (vertical) / この構造モデルのY座標の最大値(垂直)
self.ymin = 0 # Minimum y coordinate value in this structural model (vertical) / この構造モデルのY座標の最小値(垂直)
self.zmax = 0 # Maximum z coordinate value in this structural model (horizontal) / この構造モデルの最大Z座標値(水平)
# Repository: AndresQuichimbo/landlab
import numpy as np
import pytest
from numpy import testing
from landlab import HexModelGrid, RasterModelGrid
from landlab.components import FlowAccumulator, Space
def test_route_to_multiple_error_raised():
mg = RasterModelGrid((10, 10))
z = mg.add_zeros("topographic__elevation", at="node")
z += mg.x_of_node + mg.y_of_node
fa = FlowAccumulator(mg, flow_director="MFD")
fa.run_one_step()
with pytest.raises(NotImplementedError):
Space(
mg,
K_sed=0.1,
K_br=0.1,
F_f=0.5,
phi=0.1,
H_star=1.0,
v_s=0.001,
m_sp=1.0,
n_sp=0.5,
sp_crit_sed=0,
sp_crit_br=0,
)
def test_bad_solver_name():
"""
Test that any solver name besides 'basic' and 'adaptive' raises an error.
"""
# set up a 5x5 grid with one open outlet node and low initial elevations.
nr = 5
nc = 5
mg = RasterModelGrid((nr, nc), xy_spacing=10.0)
z = mg.add_zeros("topographic__elevation", at="node")
br = mg.add_zeros("bedrock__elevation", at="node")
soil = mg.add_zeros("soil__depth", at="node")
mg["node"]["topographic__elevation"] += (
mg.node_y / 10000 + mg.node_x / 10000 + np.random.rand(len(mg.node_y)) / 10000
)
mg.set_closed_boundaries_at_grid_edges(
bottom_is_closed=True,
left_is_closed=True,
right_is_closed=True,
top_is_closed=True,
)
mg.set_watershed_boundary_condition_outlet_id(
0, mg["node"]["topographic__elevation"], -9999.0
)
br[:] = z[:] - soil[:]
# Create a D8 flow handler
FlowAccumulator(mg, flow_director="D8")
# try to instantiate SPACE using a wrong solver name
with pytest.raises(ValueError):
Space(
mg,
K_sed=0.01,
K_br=0.01,
F_f=0.0,
phi=0.0,
v_s=0.001,
m_sp=0.5,
n_sp=1.0,
sp_crit_sed=0,
sp_crit_br=0,
solver="something_else",
)
def test_soil_field_already_on_grid():
"""
Test that an existing soil grid field is not changed by instantiating
SPACE.
"""
# set up a 5x5 grid with one open outlet node and low initial elevations.
nr = 5
nc = 5
mg = RasterModelGrid((nr, nc), xy_spacing=10.0)
z = mg.add_zeros("topographic__elevation", at="node")
br = mg.add_zeros("bedrock__elevation", at="node")
soil = mg.add_zeros("soil__depth", at="node")
soil += 1.0 # add 1 m of soil everywhere
mg["node"]["topographic__elevation"] += (
mg.node_y / 10000 + mg.node_x / 10000 + np.random.rand(len(mg.node_y)) / 10000
)
mg.set_closed_boundaries_at_grid_edges(
bottom_is_closed=True,
left_is_closed=True,
right_is_closed=True,
top_is_closed=True,
)
mg.set_watershed_boundary_condition_outlet_id(
0, mg["node"]["topographic__elevation"], -9999.0
)
br[:] = z[:] - soil[:]
# Create a D8 flow handler
FlowAccumulator(mg, flow_director="D8")
# Instantiate SPACE
sp = Space(
mg,
K_sed=0.01,
K_br=0.01,
F_f=0.0,
phi=0.0,
v_s=0.001,
m_sp=0.5,
n_sp=1.0,
sp_crit_sed=0,
sp_crit_br=0,
solver="basic",
)
# ensure that 'soil__depth' field is everywhere equal to 1.0 m.
testing.assert_array_equal(
np.ones(mg.number_of_nodes),
sp._soil__depth,
err_msg="SPACE soil depth field test failed",
verbose=True,
)
def test_br_field_already_on_grid():
"""
Test that an existing bedrock elevation grid field is not changed by
instantiating SPACE.
"""
# set up a 5x5 grid with one open outlet node and low initial elevations.
nr = 5
nc = 5
mg = RasterModelGrid((nr, nc), xy_spacing=10.0)
z = mg.add_zeros("topographic__elevation", at="node")
br = mg.add_zeros("bedrock__elevation", at="node")
br += 1.0 # set bedrock elevation to 1 m everywhere
soil = mg.add_zeros("soil__depth", at="node")
mg["node"]["topographic__elevation"] += (
mg.node_y / 10000 + mg.node_x / 10000 + np.random.rand(len(mg.node_y)) / 10000
)
mg.set_closed_boundaries_at_grid_edges(
bottom_is_closed=True,
left_is_closed=True,
right_is_closed=True,
top_is_closed=True,
)
mg.set_watershed_boundary_condition_outlet_id(
0, mg["node"]["topographic__elevation"], -9999.0
)
z[:] = br[:] + soil[:]
# Create a D8 flow handler
FlowAccumulator(mg, flow_director="D8")
# Instantiate SPACE
sp = Space(
mg,
K_sed=0.01,
K_br=0.01,
F_f=0.0,
phi=0.0,
v_s=0.001,
m_sp=0.5,
n_sp=1.0,
sp_crit_sed=0,
sp_crit_br=0,
solver="basic",
)
# ensure that 'bedrock__elevation' field is everywhere equal to 1.0 m.
testing.assert_array_equal(
np.ones(mg.number_of_nodes),
sp._bedrock__elevation,
err_msg="SPACE bedrock field test failed",
verbose=True,
)
def test_matches_detachment_solution():
"""
Test that model matches the detachment-limited analytical solution
for slope/area relationship at steady state: S=(U/K_br)^(1/n)*A^(-m/n).
"""
# set up a 5x5 grid with one open outlet node and low initial elevations.
nr = 5
nc = 5
mg = RasterModelGrid((nr, nc), xy_spacing=10.0)
z = mg.add_zeros("topographic__elevation", at="node")
br = mg.add_zeros("bedrock__elevation", at="node")
soil = mg.add_zeros("soil__depth", at="node")
mg["node"]["topographic__elevation"] += (
mg.node_y / 10000 + mg.node_x / 10000 + np.random.rand(len(mg.node_y)) / 10000
)
mg.set_closed_boundaries_at_grid_edges(
bottom_is_closed=True,
left_is_closed=True,
right_is_closed=True,
top_is_closed=True,
)
mg.set_watershed_boundary_condition_outlet_id(
0, mg["node"]["topographic__elevation"], -9999.0
)
br[:] = z[:] - soil[:]
# Create a D8 flow handler
fa = FlowAccumulator(mg, flow_director="D8")
# Parameter values for detachment-limited test
K_br = 0.01
U = 0.0001
dt = 1.0
F_f = 1.0 # all detached rock disappears; detachment-ltd end-member
m_sp = 0.5
n_sp = 1.0
# Instantiate the Space component...
sp = Space(
mg,
K_sed=0.00001,
K_br=K_br,
F_f=F_f,
phi=0.1,
H_star=1.0,
v_s=0.001,
m_sp=m_sp,
n_sp=n_sp,
sp_crit_sed=0,
sp_crit_br=0,
)
# ... and run it to steady state (2000x1-year timesteps).
for i in range(2000):
fa.run_one_step()
sp.run_one_step(dt=dt)
z[mg.core_nodes] += U * dt # m
br[mg.core_nodes] = z[mg.core_nodes] - soil[mg.core_nodes]
# compare numerical and analytical slope solutions
num_slope = mg.at_node["topographic__steepest_slope"][mg.core_nodes]
analytical_slope = np.power(U / K_br, 1.0 / n_sp) * np.power(
mg.at_node["drainage_area"][mg.core_nodes], -m_sp / n_sp
)
# test for match with analytical slope-area relationship
testing.assert_array_almost_equal(
num_slope,
analytical_slope,
decimal=8,
err_msg="SPACE detachment-limited test failed",
verbose=True,
)
@pytest.mark.slow
def test_matches_transport_solution():
"""
Test that model matches the transport-limited analytical solution
for slope/area relationship at steady state:
S=((U * v_s * (1 - phi)) / (K_sed * A^m) + (U * (1 - phi)) / (K_sed * A^m))^(1/n).
Also test that model matches the analytical solution for steady-state
sediment flux: Qs = U * A * (1 - phi).
"""
# set up a 5x5 grid with one open outlet node and low initial elevations.
nr = 5
nc = 5
mg = RasterModelGrid((nr, nc), xy_spacing=10.0)
z = mg.add_zeros("topographic__elevation", at="node")
br = mg.add_zeros("bedrock__elevation", at="node")
soil = mg.add_zeros("soil__depth", at="node")
mg["node"]["topographic__elevation"] += (
mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000
)
mg.set_closed_boundaries_at_grid_edges(
bottom_is_closed=True,
left_is_closed=True,
right_is_closed=True,
top_is_closed=True,
)
mg.set_watershed_boundary_condition_outlet_id(
0, mg["node"]["topographic__elevation"], -9999.0
)
soil[:] += 100.0 # initial soil depth of 100 m
br[:] = z[:]
z[:] += soil[:]
# Create a D8 flow handler
fa = FlowAccumulator(
mg, flow_director="D8", depression_finder="DepressionFinderAndRouter"
)
# Parameter values for transport-limited test
K_sed = 0.01
U = 0.0001
dt = 1.0
F_f = 1.0 # all detached rock disappears; detachment-ltd end-member
m_sp = 0.5
n_sp = 1.0
v_s = 0.5
phi = 0.5
# Instantiate the Space component...
sp = Space(
mg,
K_sed=K_sed,
K_br=0.01,
F_f=F_f,
phi=phi,
H_star=1.0,
v_s=v_s,
m_sp=m_sp,
n_sp=n_sp,
sp_crit_sed=0,
sp_crit_br=0,
erode_flooded_nodes=False,
)
# ... and run it to steady state (5000x1-year timesteps).
for i in range(5000):
fa.run_one_step()
sp.run_one_step(dt=dt)
br[mg.core_nodes] += U * dt # m
soil[0] = 100.0 # enforce constant soil depth at boundary to keep lowering steady
z[:] = br[:] + soil[:]
# compare numerical and analytical slope solutions
num_slope = mg.at_node["topographic__steepest_slope"][mg.core_nodes]
analytical_slope = np.power(
(
(U * v_s * (1 - phi))
/ (K_sed * np.power(mg.at_node["drainage_area"][mg.core_nodes], m_sp))
)
+ (
(U * (1 - phi))
/ (K_sed * np.power(mg.at_node["drainage_area"][mg.core_nodes], m_sp))
),
1.0 / n_sp,
)
# test for match with analytical slope-area relationship
testing.assert_array_almost_equal(
num_slope,
analytical_slope,
decimal=8,
err_msg="SPACE transport-limited slope-area test failed",
verbose=True,
)
# compare numerical and analytical sediment flux solutions
num_sedflux = mg.at_node["sediment__flux"][mg.core_nodes]
analytical_sedflux = U * mg.at_node["drainage_area"][mg.core_nodes] * (1 - phi)
# test for match with analytical sediment flux
testing.assert_array_almost_equal(
num_sedflux,
analytical_sedflux,
decimal=8,
err_msg="SPACE transport-limited sediment flux test failed",
verbose=True,
)
@pytest.mark.slow
def test_matches_bedrock_alluvial_solution():
"""
Test that model matches the bedrock-alluvial analytical solution
for slope/area relationship at steady state:
S=((U * v_s * (1 - F_f)) / (K_sed * A^m) + U / (K_br * A^m))^(1/n).
Also test that the soil depth everywhere matches the bedrock-alluvial
analytical solution at steady state:
H = -H_star * ln(1 - (v_s / (K_sed / (K_br * (1 - F_f)) + v_s))).
"""
# set up a 5x5 grid with one open outlet node and low initial elevations.
nr = 5
nc = 5
mg = RasterModelGrid((nr, nc), xy_spacing=10.0)
z = mg.add_zeros("topographic__elevation", at="node")
br = mg.add_zeros("bedrock__elevation", at="node")
soil = mg.add_zeros("soil__depth", at="node")
mg["node"]["topographic__elevation"] += (
mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000
)
mg.set_closed_boundaries_at_grid_edges(
bottom_is_closed=True,
left_is_closed=True,
right_is_closed=True,
top_is_closed=True,
)
mg.set_watershed_boundary_condition_outlet_id(
0, mg["node"]["topographic__elevation"], -9999.0
)
soil[:] += 0.0 # initial condition of no soil depth.
br[:] = z[:]
z[:] += soil[:]
# Create a D8 flow handler
fa = FlowAccumulator(
mg, flow_director="D8", depression_finder="DepressionFinderAndRouter"
)
# Parameter values for bedrock-alluvial test
K_br = 0.02
K_sed = 0.02
U = 0.0001
dt = 1.0
F_f = 0.2 # 20% of detached rock is converted to fines and lost
m_sp = 0.5
n_sp = 1.0
v_s = 0.25
H_star = 0.1
# Instantiate the Space component...
sp = Space(
mg,
K_sed=K_sed,
K_br=K_br,
F_f=F_f,
phi=0.0,
H_star=H_star,
v_s=v_s,
m_sp=m_sp,
n_sp=n_sp,
sp_crit_sed=0,
sp_crit_br=0,
erode_flooded_nodes=False,
)
# ... and run it to steady state (10000x1-year timesteps).
for i in range(10000):
fa.run_one_step()
sp.run_one_step(dt=dt)
br[mg.core_nodes] += U * dt # m
soil[0] = 0.0 # enforce 0 soil depth at boundary to keep lowering steady
z[:] = br[:] + soil[:]
# Repository: msarmie/horizon
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Default settings for openstack_dashboard"""
import os
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
# openstack_auth.default is imported in horizon.defaults.
from horizon.defaults import * # noqa: F401,F403,H303
def _get_root_path():
return os.path.dirname(os.path.abspath(__file__))
# ---------------------------------------------------
# Override horizon, openstack_auth and Django settings
# ---------------------------------------------------
WEBROOT = '/' # from openstack_auth
# NOTE: The following are calculated based on WEBROOT
# after loading local_settings
# LOGIN_URL = WEBROOT + 'auth/login/'
# LOGOUT_URL = WEBROOT + 'auth/logout/'
# LOGIN_ERROR = WEBROOT + 'auth/error/'
LOGIN_URL = None
LOGOUT_URL = None
LOGIN_ERROR = None # from openstack_auth
# NOTE: The following are calculated based on WEBROOT
# after loading local_settings
# LOGIN_REDIRECT_URL can be used as an alternative for
# HORIZON_CONFIG.user_home, if user_home is not set.
# Do not set it to '/home/', as this will cause a circular redirect loop
# LOGIN_REDIRECT_URL = WEBROOT
LOGIN_REDIRECT_URL = None
# NOTE: The following are calculated based on WEBROOT
# after loading local_settings
MEDIA_ROOT = None
MEDIA_URL = None
STATIC_ROOT = None
STATIC_URL = None
# The Horizon Policy Enforcement engine uses these values to load per service
# policy rule files. The content of these files should match the files the
# OpenStack services are using to determine role based access control in the
# target installation.
# Path to directory containing policy.json files
POLICY_FILES_PATH = os.path.join(_get_root_path(), "conf")
# Map of local copy of service policy files.
# Please ensure that your identity policy file matches the one being used on
# your keystone servers. There is an alternate policy file that may be used
# in the Keystone v3 multi-domain case, policy.v3cloudsample.json.
# This file is not included in the Horizon repository by default but can be
# found at
# http://git.openstack.org/cgit/openstack/keystone/tree/etc/ \
# policy.v3cloudsample.json
# Having matching policy files on the Horizon and Keystone servers is essential
# for normal operation. This holds true for all services and their policy files.
POLICY_FILES = {
'identity': 'keystone_policy.json',
'compute': 'nova_policy.json',
'volume': 'cinder_policy.json',
'image': 'glance_policy.json',
'network': 'neutron_policy.json',
}
# Services for which horizon has extra policies are defined
# in POLICY_DIRS by default.
POLICY_DIRS = {
'compute': ['nova_policy.d'],
'volume': ['cinder_policy.d'],
}
POLICY_CHECK_FUNCTION = 'openstack_auth.policy.check'
SITE_BRANDING = 'OpenStack Dashboard'
NG_TEMPLATE_CACHE_AGE = 2592000
# 'key', 'label', 'path'
AVAILABLE_THEMES = [
(
'default',
pgettext_lazy('Default style theme', 'Default'),
'themes/default'
), (
'material',
pgettext_lazy("Google's Material Design style theme", "Material"),
'themes/material'
),
]
# None means to Use AVAILABLE_THEMES as the default value.
SELECTABLE_THEMES = None
# ----------------------------------------
# openstack_dashboard settings
# ----------------------------------------
# Dict used to restrict user private subnet cidr range.
# An empty list means that user input will not be restricted
# for a corresponding IP version. By default, there is
# no restriction for IPv4 or IPv6. To restrict
# user private subnet cidr range set ALLOWED_PRIVATE_SUBNET_CIDR
# to something like
# ALLOWED_PRIVATE_SUBNET_CIDR = {
# 'ipv4': ['10.0.0.0/8', '192.168.0.0/16'],
# 'ipv6': ['fc00::/7']
# }
ALLOWED_PRIVATE_SUBNET_CIDR = {'ipv4': [], 'ipv6': []}
# The number of objects (Swift containers/objects or images) to display
# on a single page before providing a paging element (a "more" link)
# to paginate results.
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
# For multiple regions uncomment this configuration, and add (endpoint, title).
# AVAILABLE_REGIONS = [
# ('http://cluster1.example.com/identity/v3', 'cluster1'),
# ('http://cluster2.example.com/identity/v3', 'cluster2'),
# ]
AVAILABLE_REGIONS = []
# Modules that provide /auth routes that can be used to handle different types
# of user authentication. Add auth plugins that require extra route handling to
# this list.
AUTHENTICATION_URLS = [
'openstack_auth.urls',
]
# Set Console type:
# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL", "MKS"
# or None. Set to None explicitly if you want to deactivate the console.
CONSOLE_TYPE = "AUTO"
# A dictionary of default settings for create image modal.
CREATE_IMAGE_DEFAULTS = {
'image_visibility': "public",
}
# When launching an instance, the menu of available flavors is
# sorted by RAM usage, ascending. If you would like a different sort order,
# you can provide another flavor attribute as sorting key. Alternatively, you
# can provide a custom callback method to use for sorting. You can also provide
# a flag for reverse sort. For more info, see
# http://docs.python.org/2/library/functions.html#sorted
# CREATE_INSTANCE_FLAVOR_SORT = {
# 'key': 'name',
# # or
# 'key': my_awesome_callback_method,
# 'reverse': False,
# }
CREATE_INSTANCE_FLAVOR_SORT = {}
# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded
# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame
# Scripting (XFS) vulnerability, so this option allows extra security hardening
# where iframes are not used in deployment. Default setting is True.
# For more information see:
# http://tinyurl.com/anticlickjack
DISALLOW_IFRAME_EMBED = True
# Specify a maximum number of items to display in a dropdown.
DROPDOWN_MAX_ITEMS = 30
ENABLE_CLIENT_TOKEN = True
# Set this to True to display an 'Admin Password' field on the Change Password
# form to verify that it is indeed the admin logged-in who wants to change
# the password.
ENFORCE_PASSWORD_CHECK = False
EXTERNAL_MONITORING = []
# To allow operators to require users provide a search criteria first
# before loading any data into the views, set the following dict
# attributes to True in each one of the panels you want to enable this feature.
# Follow the convention <dashboard>.<view>
FILTER_DATA_FIRST = {
'admin.instances': False,
'admin.images': False,
'admin.networks': False,
'admin.routers': False,
'admin.volumes': False,
'identity.application_credentials': False,
'identity.groups': False,
'identity.projects': False,
'identity.roles': False,
'identity.users': False,
}
# Set to 'legacy' or 'direct' to allow users to upload images to glance via
# Horizon server. When enabled, a file form field will appear on the create
# image form. If set to 'off', there will be no file form field on the create
# image form. See documentation for deployment considerations.
HORIZON_IMAGES_UPLOAD_MODE = 'legacy'
# Allow a location to be set when creating or updating Glance images.
# If using Glance V2, this value should be False unless the Glance
# configuration and policies allow setting locations.
IMAGES_ALLOW_LOCATION = False
# A default instance boot source. Allowed values are: "image", "snapshot",
# "volume" and "volume_snapshot"
DEFAULT_BOOT_SOURCE = "image"
# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
# image custom property attributes that appear on image detail pages.
IMAGE_CUSTOM_PROPERTY_TITLES = {
"architecture": _("Architecture"),
"kernel_id": _("Kernel ID"),
"ramdisk_id": _("Ramdisk ID"),
"image_state": _("Euca2ools state"),
"project_id": _("Project ID"),
"image_type": _("Image Type"),
}
IMAGES_LIST_FILTER_TENANTS = []
# The default number of lines displayed for instance console log.
INSTANCE_LOG_LENGTH = 35
# The Launch Instance user experience has been significantly enhanced.
# You can choose whether to enable the new launch instance experience,
# the legacy experience, or both. The legacy experience will be removed
# in a future release, but is available as a temporary backup setting to ensure
# compatibility with existing deployments. Further development will not be
# done on the legacy experience. Please report any problems with the new
# experience via the Launchpad tracking system.
#
# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to
# determine the experience to enable. Set them both to true to enable
# both.
LAUNCH_INSTANCE_LEGACY_ENABLED = False
LAUNCH_INSTANCE_NG_ENABLED = True
# A dictionary of settings which can be used to provide the default values for
# properties found in the Launch Instance modal.
LAUNCH_INSTANCE_DEFAULTS = {
'config_drive': False,
'create_volume': True,
'hide_create_volume': False,
'disable_image': False,
'disable_instance_snapshot': False,
'disable_volume': False,
'disable_volume_snapshot': False,
'enable_scheduler_hints': True,
}
# The absolute path to the directory where message files are collected.
# The message file must have a .json file extension. When the user logs in to
# horizon, the message files collected are processed and displayed to the user.
MESSAGES_PATH = None
OPENRC_CUSTOM_TEMPLATE = 'project/api_access/openrc.sh.template'
OPENSTACK_CLOUDS_YAML_CUSTOM_TEMPLATE = ('project/api_access/'
'clouds.yaml.template')
# The default date range in the Overview panel meters - either <today> minus N
# days (if the value is integer N), or from the beginning of the current month
# until today (if set to None). This setting should be used to limit the amount
# of data fetched by default when rendering the Overview panel.
OVERVIEW_DAYS_RANGE = 1
# Projects and users can have extra attributes as defined by keystone v3.
# Horizon has the ability to display these extra attributes via this setting.
# If you'd like to display extra data in the project or user tables, set the
# corresponding dict key to the attribute name, followed by the display name.
# For more information, see horizon's customization
# (https://docs.openstack.org/horizon/latest/configuration/
#
import json
import warnings
from enum import Enum
from typing import Any, List, Tuple, Union
import numpy as np
import torch
from mmhuman3d.core.cameras.cameras import PerspectiveCameras
from mmhuman3d.core.conventions.cameras.convert_convention import (
convert_camera_matrix,
convert_K_3x3_to_4x4,
convert_K_4x4_to_3x3,
)
from .builder import build_cameras
_CAMERA_PARAMETER_SUPPORTED_KEYS_ = {
'H': {
'type': int,
},
'W': {
'type': int,
},
'in_mat': {
'type': list,
'len': 3,
},
'rotation_mat': {
'type': list,
'len': 3,
},
'translation': {
'type': list,
'len': 3,
},
'k1': {
'type': float,
},
'k2': {
'type': float,
},
'k3': {
'type': float,
},
'k4': {
'type': float,
},
'k5': {
'type': float,
},
'k6': {
'type': float,
},
'p1': {
'type': float,
},
'p2': {
'type': float,
},
}
class _TypeValidation(Enum):
MATCH = 0
ARRAY = 1
FAIL = 2
class CameraParameter:
logger = None
SUPPORTED_KEYS = _CAMERA_PARAMETER_SUPPORTED_KEYS_
def __init__(self,
name: str = 'default',
H: int = 1080,
W: int = 1920) -> None:
"""
Args:
name (str, optional):
Name of this camera. Defaults to "default".
H (int, optional):
Height of a frame, in pixel. Defaults to 1080.
W (int, optional):
Width of a frame, in pixel. Defaults to 1920.
"""
self.name = name
self.parameters_dict = {}
in_mat = __zero_mat_list__(3)
self.parameters_dict['in_mat'] = in_mat
for distort_name in __distort_coefficient_names__:
self.parameters_dict[distort_name] = 0.0
_, H = self.validate_item('H', H)
self.parameters_dict['H'] = H
_, W = self.validate_item('W', W)
self.parameters_dict['W'] = W
r_mat = __zero_mat_list__(3)
self.parameters_dict['rotation_mat'] = r_mat
t_list = [0.0, 0.0, 0.0]
self.parameters_dict['translation'] = t_list
def reset_distort(self) -> None:
"""Reset all distort coefficients to zero."""
for distort_name in __distort_coefficient_names__:
self.parameters_dict[distort_name] = 0.0
def get_opencv_distort_mat(self) -> np.ndarray:
"""Get a numpy array of 8 distort coefficients, which is the distCoeffs
arg of cv2.undistort.
Returns:
ndarray:
(k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6) of 8 elements.
"""
dist_coeffs = [
self.get_value('k1'),
self.get_value('k2'),
self.get_value('p1'),
self.get_value('p2'),
self.get_value('k3'),
self.get_value('k4'),
self.get_value('k5'),
self.get_value('k6'),
]
dist_coeffs = np.array(dist_coeffs)
return dist_coeffs
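# Illustrative use (assumes OpenCV is installed; `cam` and `frame` are
# hypothetical: an instance of this class and a BGR image loaded elsewhere):
#   import cv2
#   undistorted = cv2.undistort(
#       frame, cam.get_mat_np('in_mat'), cam.get_opencv_distort_mat())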
def set_KRT(self,
K_mat: np.ndarray,
R_mat: np.ndarray,
T_vec: np.ndarray,
inverse_extrinsic: bool = False) -> None:
"""Set intrinsic and extrinsic of a camera.
Args:
K_mat (np.ndarray):
In shape [3, 3].
R_mat (np.ndarray):
Rotation from world to view in default.
In shape [3, 3].
T_vec (np.ndarray):
Translation from world to view in default.
In shape [3,].
inverse_extrinsic (bool, optional):
If true, R_mat and T_vec transform a point
from view to world. Defaults to False.
"""
k_shape = K_mat.shape
assert k_shape[0] == k_shape[1] == 3
r_shape = R_mat.shape
assert r_shape[0] == r_shape[1] == 3
assert T_vec.ndim == 1 and T_vec.shape[0] == 3
self.set_mat_np('in_mat', K_mat)
if inverse_extrinsic:
R_mat = np.linalg.inv(R_mat)
T_vec = -np.dot(R_mat, T_vec).reshape((3))
self.set_mat_np('rotation_mat', R_mat)
self.set_value('translation', T_vec.tolist())
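# Illustrative usage (hypothetical values): build a camera from pinhole
# intrinsics and a world-to-view extrinsic, then read the parameters back.
#   cam = CameraParameter(name='cam0', H=1080, W=1920)
#   cam.set_KRT(K_mat=np.eye(3), R_mat=np.eye(3), T_vec=np.zeros(3))
#   K, R, T = cam.get_KRT(k_dim=3)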
def get_KRT(self, k_dim=3) -> List[np.ndarray]:
"""Get intrinsic and extrinsic of a camera.
Args:
k_dim (int, optional):
Dimension of the returned mat K.
Defaults to 3.
Raises:
ValueError: k_dim is neither 3 nor 4.
Returns:
List[np.ndarray]:
K_mat (np.ndarray):
In shape [3, 3].
R_mat (np.ndarray):
Rotation from world to view in default.
In shape [3, 3].
T_vec (np.ndarray):
Translation from world to view in default.
In shape [3,].
"""
K_3x3 = self.get_mat_np('in_mat')
R_mat = self.get_mat_np('rotation_mat')
T_vec = np.asarray(self.get_value('translation'))
if k_dim == 3:
return [K_3x3, R_mat, T_vec]
elif k_dim == 4:
K_3x3 = np.expand_dims(K_3x3, 0) # shape (1, 3, 3)
K_4x4 = convert_K_3x3_to_4x4(
K=K_3x3, is_perspective=True) # shape (1, 4, 4)
K_4x4 = K_4x4[0, :, :]
return [K_4x4, R_mat, T_vec]
else:
raise ValueError(f'K mat cannot be converted to {k_dim}x{k_dim}')
def set_mat_np(self, mat_key: str, mat_numpy: np.ndarray) -> None:
"""Set a matrix-type parameter to mat_numpy.
Args:
mat_key (str):
Key of the target matrix. in_mat or rotation_mat.
mat_numpy (ndarray):
Matrix in numpy format.
Raises:
TypeError:
mat_numpy is not an np.ndarray.
"""
if not isinstance(mat_numpy, np.ndarray):
raise TypeError
self.set_mat_list(mat_key, mat_numpy.tolist())
def set_mat_list(self, mat_key: str, mat_list: List[list]) -> None:
"""Set a matrix-type parameter to mat_list.
Args:
mat_key (str):
Key of the target matrix. in_mat or rotation_mat.
mat_list (List[list]):
Matrix in list format.
"""
_, mat_list = self.validate_item(mat_key, mat_list)
self.parameters_dict[mat_key] = mat_list
def set_value(self, key: str, value: Any) -> None:
"""Set a parameter to value.
Args:
key (str):
Name of the parameter.
value (object):
New value of the parameter.
"""
_, value = self.validate_item(key, value)
self.parameters_dict[key] = value
def get_value(self, key: str) -> Any:
"""Get a parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
object:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
return self.parameters_dict[key]
def get_mat_np(self, key: str) -> np.ndarray:
"""Get a a matrix-type parameter by key.
Args:
key (str):
Name of the parameter.
Raises:
KeyError: key not in self.parameters_dict
Returns:
ndarray:
Value of the parameter.
"""
if key not in self.parameters_dict:
raise KeyError(key)
else:
mat_list = self.parameters_dict[key]
mat_np = np.array(mat_list).reshape((3, 3))
return mat_np
def to_string(self) -> str:
"""Convert self.to_dict() to a string.
Returns:
str:
A dict in json string format.
"""
dump_dict = self.to_dict()
ret_str = json.dumps(dump_dict)
return ret_str
def to_dict(self) -> dict:
"""Dump camera name and parameters to dict.
Returns:
dict:
Put self.name and self.parameters_dict
in one dict.
"""
dump_dict = self.parameters_dict.copy()
dump_dict['name'] = self.name
return dump_dict
def dump(self, json_path: str) -> None:
"""Dump camera name and parameters to a file.
Returns:
dict:
Put self.name and self.parameters_dict
in one dict, and dump them to a json file.
"""
dump_dict = self.to_dict()
with open(json_path, 'w') as f_write:
json.dump(dump_dict, f_write)
def load(self, json_path: str) -> None:
"""Load camera name and parameters from a file."""
with open(json_path, 'r') as f_read:
dumped_dict = json.load(f_read)
self.load_from_dict(dumped_dict)
def load_from_dict(self, json_dict: dict) -> None:
"""Load name and parameters from a dict.
Args:
json_dict (dict):
A dict comes from self.to_dict().
"""
for key in json_dict.keys():
if key == 'name':
self.name = json_dict[key]
elif key == 'rotation':
self.parameters_dict['rotation_mat'] = np.array(
json_dict[key]).reshape(3, 3).tolist()
elif key == 'translation':
self.parameters_dict[key] = np.array(json_dict[key]).reshape(
(3)).tolist()
else:
self.parameters_dict[key] = json_dict[key]
if '_mat' in key:
self.parameters_dict[key] = np.array(
self.parameters_dict[key]).reshape(3, 3).tolist()
def load_from_chessboard(self,
chessboard_dict: dict,
name: str,
inverse: bool = True) -> None:
"""Load name and parameters from a dict.
Args:
chessboard_dict (dict):
A dict loaded from json.load(chessboard_file).
name (str):
Name of this camera.
inverse (bool, optional):
Whether to invert the rotation and translation mat.
Defaults to True.
"""
camera_param_dict = \
__parse_chessboard_param__(chessboard_dict, name, inverse=inverse)
self.load_from_dict(camera_param_dict)
def load_kinect_from_smc(self, smc_reader, kinect_id: int) -> None:
"""Load name and parameters of a kinect from an SmcReader instance.
Args:
smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader):
An SmcReader instance containing kinect camera parameters.
kinect_id (int):
Id of the target kinect.
"""
name = kinect_id
extrinsics_dict = \
smc_reader.get_kinect_color_extrinsics(
kinect_id, homogeneous=False
)
rot_np = extrinsics_dict['R']
trans_np = extrinsics_dict['T']
intrinsics_np = \
smc_reader.get_kinect_color_intrinsics(
kinect_id
)
resolution = \
smc_reader.get_kinect_color_resolution(
kinect_id
)
rmatrix = np.linalg.inv(rot_np).reshape(3, 3)
tvec = -np.dot(rmatrix, trans_np)
self.name = name
self.set_mat_np('in_mat', intrinsics_np)
self.set_mat_np('rotation_mat', rmatrix)
self.set_value('translation', tvec.tolist())
self.set_value('H', resolution[1])
self.set_value('W', resolution[0])
def load_iphone_from_smc(self,
smc_reader,
iphone_id: int = 0,
frame_id: int = 0) -> None:
"""Load name and parameters of an iPhone from an SmcReader instance.
Args:
smc_reader (mmhuman3d.data.data_structures.smc_reader.SMCReader):
An SmcReader instance containing kinect camera parameters.
iphone_id (int):
Id of the target iphone.
Defaults to 0.
frame_id (int):
Frame ID of one selected frame.
It only influences the intrinsics.
Defaults to 0.
"""
name = f'iPhone_{iphone_id}'
extrinsics_mat = \
smc_reader.get_iphone_extrinsics(
iphone_id, homogeneous=True
)
rot_np = extrinsics_mat[:3, :3]
trans_np = extrinsics_mat[:3, 3]
intrinsics_np = \
smc_reader.get_iphone_intrinsics(
iphone_id, frame_id
)
resolution = \
smc_reader.get_iphone_color_resolution(
iphone_id
)
rmatrix = np.linalg.inv(rot_np).reshape(3, 3)
tvec = -np.dot(rmatrix, trans_np)
self.name = name
self.set_mat_np('in_mat', intrinsics_np)
self.set_mat_np('rotation_mat', rmatrix)
self.set_value('translation', tvec.tolist())
self.set_value('H', resolution[1])
self.set_value('W', resolution[0])
@classmethod
def load_from_perspective_cameras(cls,
cam,
name: str,
resolution: Union[List, Tuple] = None):
"""Load parameters from a PerspectiveCameras and return a
CameraParameter.
Args:
cam (mmhuman3d.core.cameras.cameras.PerspectiveCameras):
An instance.
name (str):
Name of this camera.
"""
assert isinstance(cam, PerspectiveCameras
), 'Wrong input, support PerspectiveCameras only!'
if len(cam) > 1:
warnings.warn('Will only use the first camera in the batch.')
cam = cam[0]
resolution = resolution if resolution is not None else cam.resolution[
0].tolist()
height, width = int(resolution[0]), int(resolution[1])
cam_param = CameraParameter()
cam_param.__init__(H=height, W=width, name=name)
k_4x4 = cam.K # shape (1, 4, 4)
r_3x3 = cam.R # shape (1, 3, 3)
t_3 = cam.T # shape (1, 3)
is_perspective = cam.is_perspective()
in_ndc = cam.in_ndc()
'TTL': 300,
'Type': 'A'
}, # this is an ordinary record; it should not be modified.
# we expect the tree to omit the IP that has weight 0.
] + policy_members_to_list(policy_members, policy_record)
expected = sorted(expected, key=sort_key)
actual = strip_ns_and_soa(
boto_client.list_resource_record_sets(HostedZoneId=zone.route53_id), zone.root)
assert actual == expected
@pytest.mark.django_db
def test_policy_record_with_all_ips_disabled(zone, boto_client):
policy = G(m.Policy, routing='latency')
ip1 = create_ip_with_healthcheck()
ip1.enabled = False
ip1.save()
ip2 = create_ip_with_healthcheck()
ip2.enabled = False
ip2.save()
G(m.PolicyMember, policy=policy, region=regions[0], ip=ip1)
G(m.PolicyMember, policy=policy, region=regions[1], ip=ip2)
G(m.PolicyRecord, zone=zone, policy=policy, name='@')
with pytest.raises(Exception) as exc:
zone.reconcile()
assert "Policy can't be applied" in str(exc)
expected = [
{
'Name': 'test.test-zinc.net.',
'ResourceRecords': [{'Value': '1.1.1.1'}],
'TTL': 300,
'Type': 'A'
}, # this is an ordinary record; it should not be modified.
]
rrsets = boto_client.list_resource_record_sets(HostedZoneId=zone.route53_id)
assert strip_ns_and_soa(rrsets, zone.root) == sorted(expected, key=sort_key)
@pytest.mark.django_db
def test_apply_policy_on_zone(zone, boto_client):
policy = G(m.Policy)
ip = create_ip_with_healthcheck()
policy_members = [
G(m.PolicyMember, policy=policy, region=regions[0], ip=ip),
G(m.PolicyMember, policy=policy, region=regions[1], ip=ip),
]
policy_record = G(m.PolicyRecord, zone=zone, policy=policy, name='@')
policy_record_2 = G(m.PolicyRecord, zone=zone, policy=policy, name='www')
zone.reconcile()
expected = [
{
'Name': 'test.test-zinc.net.',
'ResourceRecords': [{'Value': '1.1.1.1'}],
'TTL': 300,
'Type': 'A'
}, # this is an ordinary record; it should not be modified.
] + (policy_members_to_list(policy_members, policy_record) +
policy_members_to_list([], policy_record_2, just_pr=True))
rrsets = boto_client.list_resource_record_sets(HostedZoneId=zone.route53_id)
assert strip_ns_and_soa(rrsets, zone.root) == sorted(expected, key=sort_key)
@pytest.mark.django_db
def test_apply_policy_is_not_duplicated(zone):
policy = G(m.Policy)
G(m.PolicyMember, policy=policy, region=regions[0])
G(m.PolicyMember, policy=policy, region=regions[1])
G(m.PolicyRecord, zone=zone, policy=policy, name='@')
G(m.PolicyRecord, zone=zone, policy=policy, name='www')
with patch('zinc.route53.Policy.reconcile') as policy_reconcile:
zone.reconcile()
policy_reconcile.assert_called_once()
@pytest.mark.django_db
def test_modifying_a_policy_member_in_policy_all_policy_members_get_dirty(zone):
policy = G(m.Policy)
ip = create_ip_with_healthcheck()
G(m.PolicyMember, policy=policy, region=regions[0], ip=ip)
policy_member = G(m.PolicyMember, policy=policy, region=regions[1], ip=ip)
policy_records = [
G(m.PolicyRecord, zone=zone, policy=policy, name='@'),
G(m.PolicyRecord, zone=zone, policy=policy, name='www')
]
zone.reconcile()
policy_records_from_db = set(m.PolicyRecord.objects.all().values_list('id', 'dirty'))
assert policy_records_from_db == set([(record.id, False) for record in policy_records])
policy_member.weight = 3
policy_member.save()
policy_records_from_db = set(m.PolicyRecord.objects.all().values_list('id', 'dirty'))
assert policy_records_from_db == set([(record.id, True) for record in policy_records])
@pytest.mark.django_db
def test_changing_an_disabled(zone):
policy = G(m.Policy)
ip = G(m.IP, healthcheck_id=None)
ip2 = G(m.IP, healthcheck_id=None)
G(m.PolicyMember, policy=policy, region=regions[0], ip=ip)
G(m.PolicyMember, policy=policy, region=regions[1], ip=ip2)
policy_records = [
G(m.PolicyRecord, zone=zone, policy=policy, name='@', dirty=False),
G(m.PolicyRecord, zone=zone, policy=policy, name='www', dirty=False)
]
zone.reconcile()
policy_records_from_db = set(m.PolicyRecord.objects.all().values_list('id', 'dirty'))
assert policy_records_from_db == set([(record.id, False) for record in policy_records])
@pytest.mark.django_db
def test_ip_mark_policy_records_dirty(zone):
policy1 = G(m.Policy)
policy2 = G(m.Policy)
ip1 = create_ip_with_healthcheck()
ip2 = create_ip_with_healthcheck()
G(m.PolicyMember, policy=policy1, region=regions[0], ip=ip1)
G(m.PolicyMember, policy=policy2, region=regions[1], ip=ip2)
policy_record_1 = G(m.PolicyRecord, zone=zone, policy=policy1, name='pr1', dirty=False)
policy_record_2 = G(m.PolicyRecord, zone=zone, policy=policy1, name='pr2', dirty=False)
other_zone_policy_record = G(
m.PolicyRecord, zone=zone, policy=policy2, name='oz_pr', dirty=False)
ip1.mark_policy_records_dirty()
policy_record_1.refresh_from_db()
policy_record_2.refresh_from_db()
other_zone_policy_record.refresh_from_db()
assert policy_record_1.dirty is True
assert policy_record_2.dirty is True
assert other_zone_policy_record.dirty is False # different policy, should not have changed
@pytest.mark.django_db
def test_weighted_policy(zone, boto_client):
policy = G(m.Policy, routing='weighted')
ip = G(m.IP, healthcheck_id=None)
ip2 = G(m.IP, healthcheck_id=None)
policy_members = [
G(m.PolicyMember, policy=policy, region=regions[0], ip=ip),
G(m.PolicyMember, policy=policy, region=regions[0], ip=ip2)
]
policy_record = G(m.PolicyRecord, zone=zone, policy=policy, name='@')
zone.reconcile()
expected = [
{
'Name': 'test.test-zinc.net.',
'ResourceRecords': [{'Value': '1.1.1.1'}],
'TTL': 300,
'Type': 'A'
        }, # this is an ordinary record; it should not be modified.
] + policy_members_to_list(policy_members, policy_record, no_health=True)
rrsets = boto_client.list_resource_record_sets(HostedZoneId=zone.route53_id)
assert strip_ns_and_soa(rrsets, zone.root) == sorted(expected, key=sort_key)
@pytest.mark.django_db
def test_dangling_records(zone, boto_client):
"""
    Tests that a dangling record in an existing policy tree gets removed.
"""
dangling_record = {
'Name': '_zn_policy1.us-east-1.' + zone.root,
'Type': 'A',
'ResourceRecords': [{'Value': '127.1.1.1'}],
'SetIdentifier': 'test-identifier',
'Weight': 20,
'TTL': 30
}
boto_client.change_resource_record_sets(
HostedZoneId=zone.route53_id,
ChangeBatch={
'Comment': 'string',
'Changes': [
{
'Action': 'CREATE',
'ResourceRecordSet': dangling_record
}
]
}
)
ip = create_ip_with_healthcheck()
policy = G(m.Policy, name='policy1')
G(m.PolicyMember, ip=ip, policy=policy, region='us-east-1', weight=10)
G(m.PolicyRecord, zone=zone, policy=policy, name='record', dirty=True)
route53.Policy(policy=policy, zone=zone.r53_zone).reconcile()
zone.commit()
records = boto_client.list_resource_record_sets(HostedZoneId=zone.route53_id)
assert dangling_record not in records['ResourceRecordSets']
@pytest.mark.django_db
def test_check_policy_trees(zone, boto_client):
ip = create_ip_with_healthcheck()
policy = G(m.Policy, name='policy1', routing='weighted')
member = G(m.PolicyMember, ip=ip, policy=policy, region='us-east-1', weight=10)
G(m.PolicyRecord, zone=zone, policy=policy, name='record', dirty=True)
route53.Policy(policy=policy, zone=zone.r53_zone).reconcile()
zone.commit()
dangling_record = {
'Name': '_zn_policy1.us-east-1.' + zone.root,
'Type': 'A',
'ResourceRecords': [{'Value': '127.1.1.1'}],
'SetIdentifier': 'test-identifier',
'Weight': 20,
'TTL': 30
}
boto_client.change_resource_record_sets(
HostedZoneId=zone.route53_id,
ChangeBatch={
'Comment': 'string',
'Changes': [{'Action': 'CREATE', 'ResourceRecordSet': dangling_record}]
}
)
zone.r53_zone._clear_cache()
# because check_policy_trees relies on the change_batch produced by policy_reconcile
# we first check that explicitly
ip.healthcheck_id = 'spam-id'
ip.save()
route53.Policy(policy=policy, zone=zone.r53_zone).reconcile()
assert zone.r53_zone._change_batch == [
{'Action': 'DELETE',
'ResourceRecordSet': dangling_record},
{'Action': 'UPSERT',
'ResourceRecordSet': {'HealthCheckId': 'spam-id',
'Name': '_zn_policy1.test-zinc.net.',
'ResourceRecords': [{'Value': ip.ip}],
'SetIdentifier': '{}-us-east-1'.format(member.id),
'TTL': 30,
'Type': 'A',
'Weight': 10}}
]
# reset the change_batch and test the check method
zone.r53_zone._change_batch = []
with patch('zinc.route53.zone.logger.error') as error:
zone.r53_zone.check_policy_trees()
assert error.called_with("Glitch in the matrix for %s %s", zone.root, policy.name)
@pytest.mark.django_db
def test_change_policy(zone, boto_client):
"""
    Tests that changing the policy for a policy_record doesn't leave a dangling record behind.
"""
ip1 = create_ip_with_healthcheck()
ip2 = create_ip_with_healthcheck()
policy1 = G(m.Policy, name='policy1')
policy2 = G(m.Policy, name='policy2')
# add each IP to both policies
G(m.PolicyMember, ip=ip1, policy=policy1, region='us-east-1', weight=10)
G(m.PolicyMember, ip=ip1, policy=policy2, region='us-east-1', weight=10)
G(m.PolicyMember, ip=ip2, policy=policy1, region='us-east-2', weight=10)
G(m.PolicyMember, ip=ip2, policy=policy2, region='us-east-2', weight=10)
# build a tree with policy1
policy_record = G(m.PolicyRecord, zone=zone, policy=policy1, name='record', dirty=True)
zone.reconcile()
# switch to policy2 and rebuild
policy_record.policy = policy2
policy_record.dirty = True
policy_record.save()
zone.reconcile()
records = boto_client.list_resource_record_sets(HostedZoneId=zone.route53_id)
policy1_records = [record['Name'] for record in records['ResourceRecordSets']
if record['Name'].startswith('_zn_policy1')]
assert policy1_records == []
@pytest.mark.django_db
def test_untouched_policy_not_deleted(zone, boto_client):
"""
    Tests that a policy record with dirty=False doesn't end up deleted after a tree rebuild.
"""
ip1 = create_ip_with_healthcheck()
policy1 = G(m.Policy, name='policy1', routing='weighted')
G(m.PolicyMember, ip=ip1, policy=policy1, region='us-east-1', weight=10)
ip2 = create_ip_with_healthcheck()
policy2 = G(m.Policy, name='policy2', routing='weighted')
G(m.PolicyMember, ip=ip2, policy=policy2, region='us-east-2', weight=10)
# build a tree with policy1
G(m.PolicyRecord, zone=zone, policy=policy1, name='policy_record1', dirty=True)
zone.reconcile()
# add another policy record
G(m.PolicyRecord, zone=zone, policy=policy2, name='policy_record2', dirty=True)
zone.reconcile()
records = boto_client.list_resource_record_sets(HostedZoneId=zone.route53_id)
policy_records = set([record['Name'] for record in records['ResourceRecordSets']
if record['Name'].startswith('_zn_')])
# check policy1's records are still here
assert policy_records == set(['_zn_policy1.test-zinc.net.', '_zn_policy2.test-zinc.net.'])
@pytest.mark.django_db
def test_delete_policy_record(zone, boto_client):
"""
    Tests a policy record by deleting and creating immediately after. Issue #210
"""
ip1 = create_ip_with_healthcheck()
policy = G(m.Policy, name='policy')
G(m.PolicyMember, ip=ip1, policy=policy, region='us-east-1', weight=10)
policy_record = G(m.PolicyRecord, zone=zone, policy=policy, name='www', dirty=True)
zone.reconcile() # reconcile
policy_record.soft_delete() # delete the record
zone.reconcile() # reconcile
# assert the object is deleted.
assert not m.PolicyRecord.objects.filter(id=policy_record.id).exists()
# assert route53 should be empty
expected = sorted([
{
'Name': 'test.test-zinc.net.',
'ResourceRecords': [{'Value': '1.1.1.1'}],
'TTL': 300,
'Type': 'A'
        }, # this is an ordinary record; it should not be modified.
], key=sort_key)
result = strip_ns_and_soa(
boto_client.list_resource_record_sets(HostedZoneId=zone.route53_id), zone.root
)
assert result == expected
@pytest.mark.django_db
def test_r53_policy_record_aws_records(zone, boto_client):
"""
    Tests that a PolicyRecord loads its records correctly from AWS.
"""
route53.Record(
name='_zn_pol1.us-east-1',
values=['192.168.127.12'],
type='A',
zone=zone.r53_zone,
ttl=30,
set_identifier='foo',
weight=10,
).save()
route53.Record(
name='_zn_pol1',
alias_target={
'DNSName': '_zn_pol1.us-east-1.{}'.format(zone.root),
'HostedZoneId': zone.r53_zone.id,
'EvaluateTargetHealth': False
},
type='A',
zone=zone.r53_zone,
).save()
zone.commit()
policy = G(m.Policy, name='pol1')
policy_record = G(m.PolicyRecord, zone=zone, name='www', policy=policy)
policy = route53.Policy(zone=zone.r53_zone, policy=policy_record.policy)
assert set([r.name for r in policy.aws_records.values()]) == set([
'_zn_pol1', '_zn_pol1.us-east-1'])
@pytest.mark.django_db
def test_r53_policy_expected_aws_records(zone, boto_client):
"""
    Tests that a Policy builds the expected desired_records for the alias tree.
"""
policy = G(m.Policy, name='pol1')
policy_record = G(m.PolicyRecord, zone=zone, name='www', policy=policy)
ip1 = create_ip_with_healthcheck()
G(m.PolicyMember, policy=policy_record.policy, region=regions[0], ip=ip1)
G(m.PolicyMember, policy=policy_record.policy, region=regions[1], ip=ip1)
# pol_factory = route53.CachingFactory(route53.Policy)
r53_policy = route53.Policy(zone=zone.r53_zone, policy=policy)
assert [(r.name, r.values) for r in r53_policy.desired_records.values()] == [
('_zn_pol1_us-east-1', [ip1.ip]),
('_zn_pol1_us-east-2', [ip1.ip]),
('_zn_pol1', ['ALIAS _zn_pol1_us-east-1.test-zinc.net.']),
('_zn_pol1', ['ALIAS _zn_pol1_us-east-2.test-zinc.net.']),
]
@pytest.mark.django_db
def test_r53_policy_reconcile(zone, boto_client):
policy = G(m.Policy, name='pol1')
policy_record = G(m.PolicyRecord, zone=zone, name='www', policy=policy)
ip1 = create_ip_with_healthcheck()
G(m.PolicyMember, policy=policy_record.policy, region=regions[0], ip=ip1)
G(m.PolicyMember, policy=policy_record.policy, region=regions[1], ip=ip1)
r53_policy = route53.Policy(zone=zone.r53_zone, policy=policy)
r53_policy.reconcile()
zone.commit()
raw_aws_records = [
route53.Record.from_aws_record(r, zone=zone)
for r in strip_ns_and_soa(
boto_client.list_resource_record_sets(HostedZoneId=zone.route53_id),
zone.root)]
# only look at the hidden records (the ones part of the policy tree)
records = [(r.name, r.values) for r in raw_aws_records if r.is_hidden]
assert records == [
('_zn_pol1', ['ALIAS _zn_pol1_us-east-1.test-zinc.net.']),
('_zn_pol1', ['ALIAS _zn_pol1_us-east-2.test-zinc.net.']),
('_zn_pol1_us-east-1', [ip1.ip]),
('_zn_pol1_us-east-2', [ip1.ip]),
]
@pytest.mark.django_db
def test_r53_policy_reconcile_cname_clash(zone, boto_client):
"""
    Tests that a single policy record clashing with a CNAME won't block the
    rest of the zone from reconciling.
"""
policy = G(m.Policy, name='pol1')
G(m.PolicyRecord, zone=zone, name='www', policy=policy)
conflict = G(m.PolicyRecord, zone=zone, name='conflict', policy=policy, dirty=True)
route53.Record(
name='conflict',
values=['conflict.example.com'],
type='CNAME',
zone=zone.r53_zone,
ttl=30,
).save()
zone.commit()
ip1 = create_ip_with_healthcheck()
G(m.PolicyMember, policy=policy, region=regions[0], ip=ip1)
G(m.PolicyMember, policy=policy, region=regions[1], ip=ip1)
zone.reconcile()
raw_aws_records = [
route53.Record.from_aws_record(r, zone=zone)
for r in strip_ns_and_soa(
boto_client.list_resource_record_sets(HostedZoneId=zone.route53_id),
zone.root)]
# only look at the hidden records (the ones part of the policy tree)
records = [(r.name, r.values) for r in raw_aws_records]
expected = [
('_zn_pol1', ['ALIAS _zn_pol1_us-east-1.test-zinc.net.']),
('_zn_pol1', ['ALIAS _zn_pol1_us-east-2.test-zinc.net.']),
('_zn_pol1_us-east-1', [ip1.ip]),
('_zn_pol1_us-east-2', [ip1.ip]),
('conflict', ['conflict.example.com']),
('test', ['1.1.1.1']),
('www', ['ALIAS _zn_pol1.test-zinc.net.']),
from __future__ import division, unicode_literals, absolute_import
import numpy as np
import logging
logger = logging.getLogger(__name__)
from .utils import dict_2_list
from collections import namedtuple
Variable = namedtuple("Variable", ("name","func","kwarg"), defaults=(None,None,{}) )
Constant = namedtuple("Constant", ("name","value"), defaults=(None,None) )
def draw_uniform_list(pr, N):
Nparam = len(pr.names)
init_samples = np.array([[np.random.uniform(pr.bounds[i][0],pr.bounds[i][1]) for i in range(Nparam)] for _ in range(N)])
return init_samples
def draw_uniform_samples(pr, Nmax):
from .utils import list_2_dict
Nparam = len(pr.names)
samples = np.transpose([np.random.uniform(pr.bounds[i][0],pr.bounds[i][1], size=Nmax) for i in range(Nparam)])
init_samples = [list_2_dict(samples[i], pr.names) for i in range(Nmax)]
log_prob = [pr.log_prior(init_samples[i]) for i in range(Nmax)]
return init_samples, log_prob
def rejection_sampling(pr, Nmax, maxlogp=None):
# draw initial points and compute (prior) probabilities
init_samples, log_prob = draw_uniform_samples(pr, Nmax)
if maxlogp == None:
log_prob_max = np.max(log_prob)
else:
log_prob_max = np.max([maxlogp,np.max(log_prob)])
acc = ((log_prob - log_prob_max ) > np.log(np.random.uniform(0.,1.,size=len(log_prob))))
Nout = (acc).sum()
samples = np.array(init_samples)[acc]
log_prob = np.array(log_prob)[acc]
return samples , log_prob, Nout
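# Illustrative usage of the helper above (a sketch, not part of the original
# module): given a Prior instance `pr` (the class is defined further below),
#   samples, log_prob, n_acc = rejection_sampling(pr, Nmax=1000)
# draws Nmax points uniformly inside the prior bounds and keeps each one with
# probability exp(log_prior - max(log_prior)), so regions of higher prior
# density are retained more often; `n_acc` is the number of accepted samples.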
class Parameter(object):
"""
Parameter object
"""
def __init__(self, name=None, min=None, max=None, prior='uniform',
periodic=0, func=None, func_kwarg={},
interp_kwarg={'ngrid':2000, 'kind':'linear'},
**kwarg):
"""
Initialize parameter
Arguments:
- name : str, name of parameter
- min : float, lower bound
- max : float, upper bound
- periodic : bool, periodic (True) or reflective (False)
- prior : str, specify implemented prior distribution to be used:
uniform, log-uniform, linear, quadratic, power-law,
triangular, cosinusoidal, sinusoidal, exponential, exp-log,
normal, log-normal (default uniform)
- func : method, if not None this method will define the prior
distribution and the prior string will be ignored
- func_kwarg : dict, optional keyword arguments for input function
- interp_kwarg : dict, optional keyword arguments for interpolator,
the dictionary should contain two arguments:
- ngrid : int, number of point for the grid
- kind : str, kind of interpolation for scipy.interpolate.interp1d
- kwargs : it is possible to pass further arguments depending on the
employed prior:
- deg : if power, deg is the power-law degree
- tau : if exponential, tau is the exponential decay factor
- mu, sigma : if normal, specify mean and stdev
"""
# check name
if name == None:
raise AttributeError("Unable to initialize parameter, name is missing.")
else:
name = str(name)
# check bounds
if min == None or max == None:
raise ValueError("Unable to initialize parameter {}, please define upper and lower bounds.".format(name))
if min >= max :
raise ValueError("Unable to initialize parameter {}, lower bound is greater or equal to the upper bound".format(name))
bound = [float(min),float(max)]
# check periodic
periodic = int(periodic)
# check/get distribution
if func == None:
from . import __known_probs__
if not isinstance(prior, str):
_all = ''
for kpi in __known_probs__ : _all = _all + kpi +', '
raise AttributeError("Unable to initialize parameter {}, prior argument is not a string. Please use one of the followings: {}or a customized function using the func argument".format(name,_all))
if prior not in __known_probs__:
_all = ''
for kpi in __known_probs__ : _all = _all + kpi +', '
raise AttributeError("Unable to initialize parameter {}, unknown prior argument. Please use one of the followings: {}or a customized function using the func argument".format(name,_all))
# if func == None, read prior from prior string
from .utils import get_parameter_distribution_from_string
prob = get_parameter_distribution_from_string(name, prior, min, max, kwarg)
else:
prior = 'custom'
if callable(func):
from .utils import initialize_param_from_func
prob = initialize_param_from_func(name, min, max, func, kwarg=func_kwarg, **interp_kwarg)
else:
raise AttributeError("Unable to initialize parameter {}, requested probability function is not callable.".format(name))
# set properties
self._name = name
self._bound = bound
self._periodic = periodic
self._kind = prior
self._prob = prob
def __eq__(self, other):
# check prior arguments
args = ['_name', '_bound', '_periodic', '_kind']
bools = [self.__dict__[ai] == other.__dict__[ai] for ai in args]
# if customized prior, check agreement between interpolators
if self._kind == 'custom' and bools[-1]:
bools.append(all(np.abs(self._prob.log_density.x - other._prob.log_density.x) < 1e-30))
bools.append(all(np.abs(self._prob.log_density.y - other._prob.log_density.y) < 1e-30))
return all(bools)
def __ne__(self, other):
return not self.__eq__(other)
@property
def name(self):
return self._name
@property
def bound(self):
return self._bound
@property
def lower_bound(self):
return self._bound[0]
@property
def upper_bound(self):
return self._bound[1]
@property
def periodic(self):
return self._periodic
def log_density(self, x):
return self._prob.log_density(x)
def cumulative(self, x):
return self._prob.cumulative(x)
def quantile(self, x):
return self._prob.quantile(x)
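# Example usage (a sketch, not part of the library): a uniform parameter on
# [1, 2] and its basic prior methods.
#   p = Parameter(name='mass', min=1.0, max=2.0, prior='uniform')
#   p.log_density(1.5)   # log of the prior density at x = 1.5
#   p.cumulative(1.5)    # prior CDF at x = 1.5
#   p.quantile(0.5)      # median of the prior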
class Prior(object):
"""
Prior object
"""
def __init__(self, parameters, variables=[], constants=[]):
"""
Initialize Prior object
Arguments:
- parameters : list of bajes.inf.Parameter objects
- variables : list of bajes.inf.Variable objects, default empty
- constants : list of bajes.inf.Constant objects, default empty
"""
self.ndim = len(parameters)
# reading constant properties
self.const = {ci.name : ci.value for ci in constants}
# reading variables properties
self.v_names = []
self.v_funcs = []
self.v_kwargs = []
for vi in variables:
# check that every element in parameters is a Parameter object
if not isinstance(vi, Variable):
logger.error("The Prior received a variable that is not a Variable object.")
raise ValueError("The Prior received a variable that is is not a Variable object.")
# check that name is not in constants
if vi.name in list(self.const.keys()):
logger.error("Repeated name {} between variables and contants. Please use different names.".format(vi.name))
raise ValueError("Repeated name {} in sampling variables and contants. Please use different names.".format(vi.name))
# append information
self.v_names.append(vi.name)
self.v_funcs.append(vi.func)
self.v_kwargs.append(vi.kwarg)
# checking/reading parameters
temp_names = []
for pi in parameters:
# check that every element in parameters is a Parameter object
if not isinstance(pi, Parameter):
logger.error("The Prior received a parameter that is not a Parameter object.")
raise ValueError("The Prior received a parameter that is is not a Parameter object.")
# check bounds lengths
if len(pi.bound) !=2:
logger.error("Wrong prior bounds for {} parameter. Bounds array length is different from 2.".format(pi.name))
raise ValueError("Wrong prior bounds for {} parameter. Bounds array length is different from 2.".format(pi.name))
# check that name is not repeated
if pi.name in temp_names:
logger.error("Repeate name {} for different parameters. Please use different names.".format(pi.name))
raise ValueError("Repeate name {} for different parameters. Please use different names.".format(pi.name))
# check that name is not in constants
if pi.name in list(self.const.keys()) or pi.name in self.v_names:
logger.error("Repeated name {} between parameters/contants/variables. Please use different names.".format(pi.name))
raise ValueError("Repeated name {} in sampling parameters/contants/variables. Please use different names.".format(pi.name))
self.parameters = parameters
@property
def names(self):
return [p.name for p in self.parameters]
@property
def bounds(self):
return [p.bound for p in self.parameters]
@property
def periodics(self):
return [p.periodic for p in self.parameters]
def this_sample(self, p):
"""
Fill parameters dictionary with constants and variables
"""
# collect variables
v = {n: f(**p,**k) for n,f,k in zip(self.v_names, self.v_funcs, self.v_kwargs)}
# merge parameters, variables and constants
return {**p, **v, **self.const}
def log_prior(self, x):
"""
Compute log-prior from parameter
"""
if isinstance(x, (list,np.ndarray)):
return sum([pi.log_density(xi) for pi, xi in zip(self.parameters, x)])
else:
            # If x is not a list, it may be a dictionary. Some structures are not pure dictionaries (e.g. cpnest.LivePoint);
            # the only requirement is that the parameter values can be accessed as in a dictionary.
return sum([pi.log_density(x[pi.name]) for pi in self.parameters])
def prior_transform(self, u):
"""
Transform uniform sample in prior sample
"""
return np.array([pi.quantile(xi) for pi,xi in zip(self.parameters, u)])
def cumulative(self, x, name=None):
        if isinstance(x, (float, int, np.floating)):
x = [x]
if len(x) == self.ndim:
return np.prod(list(map(lambda pi, xi: pi.cumulative(xi), self.parameters, x)))
else:
if name == None:
raise AttributeError("Unable to estimate partial cumulative probability. Please include the names of the requested parameters.")
            indx = [i for i,pi in enumerate(self.parameters) if pi.name in name]
return np.prod(list(map(lambda i, xi: self.parameters[i].cumulative(xi), indx, x)))
@property
def sample(self):
u = np.random.uniform(0,1,size=self.ndim)
return self.prior_transform(u)
def get_prior_samples(self, n, **kwargs):
return np.array([self.sample for _ in range(n)])
def rejection_sampling(self, Nmax, maxlogpr=None):
return rejection_sampling(self,Nmax,maxlogp=maxlogpr)
def sample_uniform(self, N):
return draw_uniform_list(self, N)
def in_bounds(self, x):
if isinstance(x, (list,np.ndarray)):
return all([pi.bound[0]<=xi<=pi.bound[1] for xi,pi in zip(x,self.parameters)])
else:
# if x is not a list/array, it may be a dictionary.
# However some structures might not be pure dictionaries (i.e. cpnest.LivePoint).
            # Then, the only requirement is that we can
if 'Load Preset' in selection:
# Rebuild settings menu using preset
settings_menu = build_settings_menu(silent=False)
else:
break
# Detect drives
if 'Detect drives' in selection[0]:
std.clear_screen()
std.print_warning(DETECT_DRIVES_NOTICE)
    if std.ask('Are you sure you want to proceed?'):
std.print_standard('Forcing controllers to rescan for devices...')
cmd = 'echo "- - -" | sudo tee /sys/class/scsi_host/host*/scan'
exe.run_program(cmd, check=False, shell=True)
if source_or_destination_changed(state):
std.abort()
# Start recovery
if 'Start' in selection:
std.clear_screen()
run_recovery(state, main_menu, settings_menu, dry_run=args['--dry-run'])
# Quit
if 'Quit' in selection:
total_percent = state.get_percent_recovered()
if total_percent == 100:
break
    # Recovery < 100%
std.print_warning('Recovery is less than 100%')
if std.ask('Are you sure you want to quit?'):
break
# Save results to log
LOG.info('')
for line in state.generate_report():
LOG.info(' %s', std.strip_colors(line))
def mount_raw_image(path):
"""Mount raw image using OS specific methods, returns pathlib.Path."""
loopback_path = None
if PLATFORM == 'Darwin':
loopback_path = mount_raw_image_macos(path)
elif PLATFORM == 'Linux':
loopback_path = mount_raw_image_linux(path)
# Check
if not loopback_path:
std.print_error(f'Failed to mount image: {path}')
# Register unmount atexit
atexit.register(unmount_loopback_device, loopback_path)
# Done
return loopback_path
def mount_raw_image_linux(path):
"""Mount raw image using losetup, returns pathlib.Path."""
loopback_path = None
# Mount using losetup
cmd = [
'sudo',
'losetup',
'--find',
'--partscan',
'--show',
path,
]
proc = exe.run_program(cmd, check=False)
# Check result
if proc.returncode == 0:
loopback_path = proc.stdout.strip()
# Done
return loopback_path
def mount_raw_image_macos(path):
"""Mount raw image using hdiutil, returns pathlib.Path."""
loopback_path = None
plist_data = {}
# Mount using hdiutil
# plistdata['system-entities'][{}...]
cmd = [
'hdiutil', 'attach',
'-imagekey', 'diskimage-class=CRawDiskImage',
'-nomount',
'-plist',
'-readonly',
path,
]
proc = exe.run_program(cmd, check=False, encoding=None, errors=None)
# Check result
try:
plist_data = plistlib.loads(proc.stdout)
except plistlib.InvalidFileException:
return None
for dev in plist_data.get('system-entities', []):
dev_path = dev.get('dev-entry', '')
if re.match(r'^/dev/disk\d+$', dev_path):
loopback_path = dev_path
# Done
return loopback_path
def run_ddrescue(state, block_pair, pass_name, settings, dry_run=True):
# pylint: disable=too-many-statements
"""Run ddrescue using passed settings."""
cmd = build_ddrescue_cmd(block_pair, pass_name, settings)
poweroff_source_after_idle = True
state.update_progress_pane('Active')
std.clear_screen()
warning_message = ''
def _poweroff_source_drive(idle_minutes):
"""Power off source drive after a while."""
source_dev = state.source.path
# Bail early
if PLATFORM == 'Darwin':
return
# Sleep
i = 0
while i < idle_minutes*60:
if not poweroff_source_after_idle:
# Countdown canceled, exit without powering-down drives
return
if i % 600 == 0 and i > 0:
if i == 600:
std.print_standard(' ', flush=True)
std.print_warning(
f'Powering off source in {int((idle_minutes*60-i)/60)} minutes...',
)
std.sleep(5)
i += 5
# Power off drive
cmd = ['sudo', 'hdparm', '-Y', source_dev]
proc = exe.run_program(cmd, check=False)
if proc.returncode:
std.print_error(f'Failed to poweroff source {source_dev}')
else:
std.print_warning(f'Powered off source {source_dev}')
std.print_standard(
'Press Enter to return to main menu...', end='', flush=True,
)
def _update_smart_pane():
"""Update SMART pane every 30 seconds."""
state.source.update_smart_details()
now = datetime.datetime.now(tz=TIMEZONE).strftime('%Y-%m-%d %H:%M %Z')
with open(f'{state.log_dir}/smart.out', 'w', encoding='utf-8') as _f:
_f.write(
std.color_string(
['SMART Attributes', f'Updated: {now}\n'],
['BLUE', 'YELLOW'],
sep='\t\t',
),
)
_f.write('\n'.join(state.source.generate_report(header=False)))
# Dry run
if dry_run:
LOG.info('ddrescue cmd: %s', cmd)
return
# Start ddrescue
proc = exe.popen_program(cmd)
# ddrescue loop
_i = 0
while True:
if _i % 30 == 0:
# Update SMART pane
_update_smart_pane()
# Check destination
warning_message = check_destination_health(state.destination)
if warning_message:
# Error detected on destination, stop recovery
exe.stop_process(proc)
std.print_error(warning_message)
break
if _i % 60 == 0:
# Clear ddrescue pane
tmux.clear_pane()
_i += 1
# Update progress
block_pair.update_progress(pass_name)
state.update_progress_pane('Active')
# Check if complete
try:
proc.wait(timeout=1)
break
except KeyboardInterrupt:
# Wait a bit to let ddrescue exit safely
LOG.warning('ddrescue stopped by user')
warning_message = 'Aborted'
std.sleep(2)
exe.stop_process(proc, graceful=False)
break
except subprocess.TimeoutExpired:
# Continue to next loop to update panes
pass
else:
# Done
std.sleep(1)
break
# Update progress
# NOTE: Using 'Active' here to avoid flickering between block pairs
block_pair.update_progress(pass_name)
state.update_progress_pane('Active')
# Check result
if proc.poll():
# True if return code is non-zero (poll() returns None if still running)
poweroff_thread = exe.start_thread(
_poweroff_source_drive,
[cfg.ddrescue.DRIVE_POWEROFF_TIMEOUT],
)
warning_message = 'Error(s) encountered, see message above'
state.update_top_panes()
if warning_message:
print(' ')
print(' ')
std.print_error('DDRESCUE PROCESS HALTED')
print(' ')
std.print_warning(warning_message)
# Needs attention?
if str(proc.poll()) != '0':
state.update_progress_pane('NEEDS ATTENTION')
std.pause('Press Enter to return to main menu...')
# Stop source poweroff countdown
std.print_standard('Stopping device poweroff countdown...', flush=True)
poweroff_source_after_idle = False
poweroff_thread.join()
# Done
raise std.GenericAbort()
def run_recovery(state, main_menu, settings_menu, dry_run=True):
# pylint: disable=too-many-branches
"""Run recovery passes."""
atexit.register(state.save_debug_reports)
attempted_recovery = False
auto_continue = False
# Bail early
if is_missing_source_or_destination(state):
std.print_standard('')
std.pause('Press Enter to return to main menu...')
return
if source_or_destination_changed(state):
std.print_standard('')
std.abort()
# Get settings
for name, details in main_menu.toggles.items():
if 'Auto continue' in name and details['Selected']:
auto_continue = True
if 'Retry' in name and details['Selected']:
details['Selected'] = False
state.retry_all_passes()
settings = get_ddrescue_settings(settings_menu)
# Start SMART/Journal
state.panes['SMART'] = tmux.split_window(
behind=True, lines=12, vertical=True,
watch_file=f'{state.log_dir}/smart.out',
)
if PLATFORM != 'Darwin':
state.panes['Journal'] = tmux.split_window(
lines=4, vertical=True, cmd='journalctl --dmesg --follow',
)
# Run pass(es)
for pass_name in ('read', 'trim', 'scrape'):
abort = False
# Skip to next pass
if state.pass_complete(pass_name):
# NOTE: This bypasses auto_continue
state.skip_pass(pass_name)
continue
# Run ddrescue
for pair in state.block_pairs:
if not pair.pass_complete(pass_name):
attempted_recovery = True
state.mark_started()
try:
run_ddrescue(state, pair, pass_name, settings, dry_run=dry_run)
except (FileNotFoundError, KeyboardInterrupt, std.GenericAbort):
is_missing_source_or_destination(state)
abort = True
break
# Continue or return to menu
all_complete = state.pass_complete(pass_name)
all_above_threshold = state.pass_above_threshold(pass_name)
if abort or not (all_complete and all_above_threshold and auto_continue):
LOG.warning('Recovery halted')
break
# Stop SMART/Journal
for pane in ('SMART', 'Journal'):
if pane in state.panes:
tmux.kill_pane(state.panes.pop(pane))
# Show warning if nothing was done
if not attempted_recovery:
std.print_warning('No actions performed')
std.print_standard(' ')
std.pause('Press Enter to return to main menu...')
# Done
state.save_debug_reports()
atexit.unregister(state.save_debug_reports)
state.update_progress_pane('Idle')
def select_disk(prompt, skip_disk=None):
"""Select disk from list, returns Disk()."""
std.print_info('Scanning disks...')
disks = hw_obj.get_disks()
menu = std.Menu(
title=std.color_string(f'ddrescue TUI: {prompt} Selection', 'GREEN'),
)
menu.disabled_str = 'Already selected'
menu.separator = ' '
menu.add_action('Quit')
for disk in disks:
disable_option = False
size = disk.details["size"]
# Check if option should be disabled
if skip_disk:
parent = skip_disk.details.get('parent', None)
if (disk.path.samefile(skip_disk.path)
or (parent and disk.path.samefile(parent))):
disable_option = True
# Add to menu
menu.add_option(
name=(
f'{str(disk.path):<12} '
f'{disk.details["bus"]:<5} '
f'{std.bytes_to_string(size, decimals=1, use_binary=False):<8} '
f'{disk.details["model"]} '
f'{disk.details["serial"]}'
),
details={'Disabled': disable_option, 'Object': disk},
)
# Get selection
selection = menu.simple_select()
if 'Quit' in selection:
raise std.GenericAbort()
# Done
return selection[-1]['Object']
def select_disk_parts(prompt, disk):
"""Select disk parts from list, returns list of Disk()."""
title = std.color_string('ddrescue TUI: Partition Selection', 'GREEN')
title += f'\n\nDisk: {disk.path} {disk.description}'
menu = std.Menu(title)
menu.separator = ' '
menu.add_action('All')
menu.add_action('None')
menu.add_action('Proceed', {'Separator': True})
menu.add_action('Quit')
object_list = []
def _select_parts(menu):
"""Loop over selection menu until at least one partition selected."""
while True:
selection = menu.advanced_select(
f'Please select the parts to {prompt.lower()}: ',
)
if 'All' in selection:
for option in menu.options.values():
option['Selected'] = True
elif 'None' in selection:
for option in menu.options.values():
option['Selected'] = False
elif 'Proceed' in selection:
if any(option['Selected'] for option in menu.options.values()):
          # At least one partition/device selected
break
elif 'Quit' in selection:
raise std.GenericAbort()
# Bail early if running under macOS
if PLATFORM == 'Darwin':
return [disk]
# Bail early if child device selected
if disk.details.get('parent', False):
return [disk]
# Add parts
whole_disk_str = f'{str(disk.path):<14} (Whole device)'
for part in disk.details.get('children', []):
size = part["size"]
name = (
f'{str(part["path"]):<14} '
f'({std.bytes_to_string(size, decimals=1, use_binary=False):>6})'
)
menu.add_option(name, details={'Selected': True, 'Path': part['path']})
# Add whole disk if necessary
if not menu.options:
menu.add_option(whole_disk_str, {'Selected': True, 'Path': disk.path})
menu.title += '\n\n'
menu.title += std.color_string(' No partitions detected.', 'YELLOW')
# Get selection
_select_parts(menu)
# Build list of Disk() object_list
for option in menu.options.values():
if option['Selected']:
object_list.append(option['Path'])
# Check if whole disk selected
if len(object_list) == len(disk.details.get('children', [])):
# NOTE: This is not true if the disk has no partitions
msg = f'Preserve partition table and unused space in {prompt.lower()}?'
if std.ask(msg):
# Replace part list with whole disk obj
object_list = [disk.path]
# Convert object_list to hw_obj.Disk() objects
print(' ')
std.print_info('Getting disk/partition details...')
object_list = [hw_obj.Disk(path) for path in object_list]
# Done
return object_list
def select_path(prompt):
"""Select path, returns pathlib.Path."""
invalid = False
menu = std.Menu(
title=std.color_string(f'ddrescue TUI: {prompt} Path Selection', 'GREEN'),
)
menu.separator = ' '
menu.add_action('Quit')
menu.add_option('Current directory')
menu.add_option('Enter manually')
path = None
# Make selection
selection = menu.simple_select()
if 'Current directory' in selection:
path = os.getcwd()
elif 'Enter manually' in selection:
path = | |
in use by the simulation data."""
dm = data.models
if file_type == 'undulatorTable':
if _is_tabulated_undulator_source(dm.simulation):
return dm.tabulatedUndulator.magneticFile == filename
return False
field = None
if file_type == 'mirror':
field = 'MirrorFile'
elif file_type == 'sample':
field = 'ImageFile'
if not field:
return False
for m in dm.beamline:
for k, v in _SCHEMA.model[m.type].items():
t = v[1]
if m[k] and t == field:
if m[k] == filename:
return True
return False
def validate_file(file_type, path):
"""Ensure the data file contains parseable rows data"""
match = re.search(r'\.(\w+)$', str(path))
extension = None
if match:
extension = match.group(1).lower()
else:
return 'invalid file extension'
if extension not in _FILE_TYPE_EXTENSIONS[file_type]:
return 'invalid file type: {}'.format(extension)
if file_type == 'mirror':
# mirror file
try:
count = 0
with open(str(path)) as f:
for line in f.readlines():
parts = line.split("\t")
if len(parts) > 0:
float(parts[0])
if len(parts) > 1:
float(parts[1])
count += 1
if count == 0:
return 'no data rows found in file'
except ValueError as e:
return 'invalid file format: {}'.format(e)
elif file_type == 'undulatorTable':
# undulator magnetic data file
#TODO(pjm): add additional zip file validation
try:
template_common.validate_safe_zip(str(path), '.', validate_magnet_data_file)
except AssertionError as err:
return err.message
elif file_type == 'sample':
filename = os.path.splitext(os.path.basename(str(path)))[0]
# Save the processed file:
srwl_uti_smp.SRWLUtiSmp(file_path=str(path), is_save_images=True, prefix=filename)
return None
def write_parameters(data, run_dir, is_parallel):
"""Write the parameters file
Args:
data (dict): input
run_dir (py.path): where to write
is_parallel (bool): run in background?
"""
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
_generate_parameters_file(data, run_dir=run_dir)
)
def _add_report_filenames(v):
for k in _DATA_FILE_FOR_MODEL:
v['{}Filename'.format(k)] = _DATA_FILE_FOR_MODEL[k]['filename']
def _beamline_element(template, item, fields, propagation, shift='', is_crystal=False):
el = template.format(*map(lambda x: item[x], fields))
pp = _propagation_params(propagation[str(item['id'])][0], shift)
# special case for crystal elements
if is_crystal:
el = ''' opCr = {}
# Set crystal orientation:
opCr.set_orient({}, {}, {}, {}, {})
el.append(opCr)'''.format(
el,
item['nvx'], item['nvy'], item['nvz'], item['tvx'], item['tvy']
)
else:
el = '{} el.append({})'.format(shift, el)
return el, pp
def _calculate_beam_drift(ebeam_position, source_type, undulator_type, undulator_length, undulator_period):
if ebeam_position['driftCalculationMethod'] == 'auto':
"""Calculate drift for ideal undulator."""
if _is_idealized_undulator(source_type, undulator_type):
# initial drift = 1/2 undulator length + 2 periods
return -0.5 * float(undulator_length) - 2 * float(undulator_period)
return 0
return ebeam_position['drift']
def _compute_material_characteristics(model, photon_energy, prefix=''):
fields_with_prefix = pkcollections.Dict({
'material': 'material',
'refractiveIndex': 'refractiveIndex',
'attenuationLength': 'attenuationLength',
})
if prefix:
for k in fields_with_prefix.keys():
fields_with_prefix[k] = '{}{}{}'.format(
prefix,
fields_with_prefix[k][0].upper(),
fields_with_prefix[k][1:],
)
if model[fields_with_prefix['material']] == 'User-defined':
return model
# Index of refraction:
kwargs = pkcollections.Dict({
'energy': photon_energy,
})
if model['method'] == 'server':
kwargs['precise'] = True
kwargs['formula'] = model[fields_with_prefix['material']]
elif model['method'] == 'file':
kwargs['precise'] = True
kwargs['data_file'] = '{}_delta.dat'.format(model[fields_with_prefix['material']])
else:
kwargs['calc_delta'] = True
kwargs['formula'] = model[fields_with_prefix['material']]
delta = bnlcrl.pkcli.simulate.find_delta(**kwargs)
model[fields_with_prefix['refractiveIndex']] = delta['characteristic_value']
# Attenuation length:
kwargs['characteristic'] = 'atten'
if model['method'] == 'file':
kwargs['precise'] = True
kwargs['data_file'] = '{}_atten.dat'.format(model[fields_with_prefix['material']])
if model['method'] == 'calculation':
# The method 'calculation' in bnlcrl library is not supported yet for attenuation length calculation.
pass
else:
atten = bnlcrl.pkcli.simulate.find_delta(**kwargs)
model[fields_with_prefix['attenuationLength']] = atten['characteristic_value']
return model
def _compute_crl_focus(model):
d = bnlcrl.pkcli.simulate.calc_ideal_focus(
radius=float(model['tipRadius']) * 1e-6, # um -> m
n=model['numberOfLenses'],
delta=model['refractiveIndex'],
p0=model['position']
)
model['focalDistance'] = d['ideal_focus']
model['absoluteFocusPosition'] = d['p1_ideal_from_source']
return model
def _compute_crystal_init(model):
parms_list = ['dSpacing', 'psi0r', 'psi0i', 'psiHr', 'psiHi', 'psiHBr', 'psiHBi', 'grazingAngle']
try:
material_raw = model['material'] # name contains either "(SRW)" or "(X0h)"
material = material_raw.split()[0] # short name for SRW (e.g., Si), long name for X0h (e.g., Silicon)
h = int(model['h'])
k = int(model['k'])
l = int(model['l'])
millerIndices = [h, k, l]
energy = model['energy']
grazingAngle = None
if re.search('(X0h)', material_raw):
crystal_parameters = crystal.get_crystal_parameters(material, energy, h, k, l)
dc = crystal_parameters['d']
xr0 = crystal_parameters['xr0']
xi0 = crystal_parameters['xi0']
xrh = crystal_parameters['xrh']
xih = crystal_parameters['xih']
elif re.search('(SRW)', material_raw):
dc = srwl_uti_cryst_pl_sp(millerIndices, material)
xr0, xi0, xrh, xih = srwl_uti_cryst_pol_f(energy, millerIndices, material)
else:
dc = xr0 = xi0 = xrh = xih = None
if dc:
angles_data = crystal.calc_bragg_angle(d=dc, energy_eV=energy, n=1)
grazingAngle = angles_data['bragg_angle']
model['dSpacing'] = dc
model['psi0r'] = xr0
model['psi0i'] = xi0
model['psiHr'] = xrh
model['psiHi'] = xih
model['psiHBr'] = xrh
model['psiHBi'] = xih
model['grazingAngle'] = grazingAngle
except Exception:
pkdlog('{}: error: {}', material_raw, pkdexc())
for key in parms_list:
model[key] = None
return model
def _compute_crystal_orientation(model):
if not model['dSpacing']:
return model
parms_list = ['nvx', 'nvy', 'nvz', 'tvx', 'tvy']
try:
opCr = srwlib.SRWLOptCryst(
_d_sp=model['dSpacing'],
_psi0r=model['psi0r'],
_psi0i=model['psi0i'],
_psi_hr=model['psiHr'],
_psi_hi=model['psiHi'],
_psi_hbr=model['psiHBr'],
_psi_hbi=model['psiHBi'],
_tc=model['crystalThickness'],
_ang_as=model['asymmetryAngle'],
)
orientDataCr = opCr.find_orient(_en=model['energy'], _ang_dif_pl=model['grazingAngle'])[0]
tCr = orientDataCr[0] # Tangential Vector to Crystal surface
nCr = orientDataCr[2] # Normal Vector to Crystal surface
if model['rotationAngle'] != 0:
rot = uti_math.trf_rotation([0, 1, 0], model['rotationAngle'], [0, 0, 0])[0]
nCr = uti_math.matr_prod(rot, nCr)
tCr = uti_math.matr_prod(rot, tCr)
model['nvx'] = nCr[0]
model['nvy'] = nCr[1]
model['nvz'] = nCr[2]
model['tvx'] = tCr[0]
model['tvy'] = tCr[1]
except Exception:
pkdlog('\n{}', traceback.format_exc())
for key in parms_list:
model[key] = None
return model
def _compute_grazing_angle(model):
def preserve_sign(item, field, new_value):
old_value = item[field] if field in item else 0
was_negative = float(old_value) < 0
item[field] = float(new_value)
if (was_negative and item[field] > 0) or item[field] < 0:
item[field] = - item[field]
grazing_angle = float(model['grazingAngle']) / 1000.0
preserve_sign(model, 'normalVectorZ', math.sin(grazing_angle))
if model['autocomputeVectors'] == 'horizontal':
preserve_sign(model, 'normalVectorX', math.cos(grazing_angle))
preserve_sign(model, 'tangentialVectorX', math.sin(grazing_angle))
model['normalVectorY'] = 0
model['tangentialVectorY'] = 0
elif model['autocomputeVectors'] == 'vertical':
preserve_sign(model, 'normalVectorY', math.cos(grazing_angle))
preserve_sign(model, 'tangentialVectorY', math.sin(grazing_angle))
model['normalVectorX'] = 0
model['tangentialVectorX'] = 0
return model
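# Worked example (sketch): grazingAngle arrives in mrad and is divided by 1000
# above, so for model['grazingAngle'] = 2.0 with autocomputeVectors == 'horizontal':
#   grazing_angle     = 0.002 rad
#   normalVectorZ     ~ sin(0.002) ~ 0.002000
#   normalVectorX     ~ cos(0.002) ~ 0.999998
#   tangentialVectorX ~ sin(0.002) ~ 0.002000
# with the Y components set to 0; preserve_sign() re-applies a negative sign to
# any component that was previously negative.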
def _compute_undulator_length(model):
if model['undulatorType'] == 'u_i':
return {}
zip_file = simulation_db.simulation_lib_dir(SIM_TYPE).join(model['magneticFile'])
if zip_file.check():
return {
'length': MagnMeasZip(str(zip_file)).find_closest_gap(model['gap']),
}
return {}
def _convert_ebeam_units(field_name, value, to_si=True):
"""Convert values from the schema to SI units (m, rad) and back.
Args:
field_name: name of the field in _SCHEMA['model']['electronBeam'].
value: value of the field.
to_si: if set to True, convert to SI units, otherwise convert back to the units in the schema.
Returns:
value: converted value.
"""
if field_name in _SCHEMA['model']['electronBeam'].keys():
label, field_type = _SCHEMA['model']['electronBeam'][field_name]
if field_type == 'Float':
if re.search('\[m(m|rad)\]', label):
value *= _invert_value(1e3, to_si)
elif re.search('\[\xb5(m|rad)\]', label): # mu
value *= _invert_value(1e6, to_si)
elif re.search('\[n(m|rad)\]', label):
value *= _invert_value(1e9, to_si)
return value
def _create_user_model(data, model_name):
model = data['models'][model_name]
if model_name == 'tabulatedUndulator':
model = model.copy()
model['undulator'] = data['models']['undulator']
return model
def _delete_user_models(electron_beam, tabulated_undulator):
"""Remove the beam and undulator user model list files"""
for model_name in _USER_MODEL_LIST_FILENAME.keys():
model = electron_beam if model_name == 'electronBeam' else tabulated_undulator
if not model or 'id' not in model:
continue
user_model_list = _load_user_model_list(model_name)
for i,m in enumerate(user_model_list):
if m['id'] == model.id:
del user_model_list[i]
_save_user_model_list(model_name, user_model_list)
break
return pkcollections.Dict({})
def _extract_brilliance_report(model, data):
label = template_common.enum_text(_SCHEMA, 'BrillianceReportType', model['reportType'])
if model['reportType'] in ('3', '4'):
label += ' [rad]'
elif model['reportType'] in ('5', '6'):
label += ' [m]'
x_points = []
points = []
scale_adjustment = 1000.0
if 'brightnessComponent' in model and model['brightnessComponent'] == 'spectral-detuning':
scale_adjustment = 1.0
for f in data:
m = re.search('^f(\d+)', f)
if m:
x_points.append((np.array(data[f]['data']) * scale_adjustment).tolist())
points.append(data['e{}'.format(m.group(1))]['data'])
title = template_common.enum_text(_SCHEMA, 'BrightnessComponent', model['brightnessComponent'])
if model['brightnessComponent'] == 'k-tuning':
if model['initialHarmonic'] == model['finalHarmonic']:
title += ', Harmonic {}'.format(model['initialHarmonic'])
else:
title += ', Harmonic {} - {}'.format(model['initialHarmonic'], model['finalHarmonic'])
else:
title += ', Harmonic {}'.format(model['harmonic'])
return {
'title': title,
'y_label': label,
'x_label': 'Photon Energy [eV]',
'x_range': [np.amin(x_points), np.amax(x_points)],
'y_range': [np.amin(points), np.amax(points)],
'x_points': x_points,
'points': points,
}
def _extract_trajectory_report(model, data):
available_axes = {}
for s in _SCHEMA['enum']['TrajectoryPlotAxis']:
available_axes[s[0]] = s[1]
x_points = data[model['plotAxisX']]['data']
plots = []
y_range = []
for f in ('plotAxisY', 'plotAxisY2'):
if model[f] != 'None':
points = data[model[f]]['data']
if y_range:
y_range = [min(y_range[0], min(points)), max(y_range[1], max(points))]
else:
y_range = [min(points), max(points)]
plots.append({
'points': points,
'label': available_axes[model[f]],
#TODO(pjm): refactor with template_common.compute_plot_color_and_range()
'color': '#ff7f0e' if len(plots) else '#1f77b4',
})
return {
'title': 'Electron Trajectory',
'x_range': [min(x_points), max(x_points)],
'x_points': x_points,
'y_label': '[' + data[model['plotAxisY']]['units'] + ']',
'x_label': available_axes[model['plotAxisX']] + ' [' + data[model['plotAxisX']]['units'] + ']',
'y_range': y_range,
'plots': plots,
}
def _fixup_beamline(data):
for item in data['models']['beamline']:
if item['type'] == 'ellipsoidMirror':
if 'firstFocusLength' not in item:
item['firstFocusLength'] = item['position']
if item['type'] in ['grating', 'ellipsoidMirror', 'sphericalMirror', 'toroidalMirror']:
if 'grazingAngle' not in item:
angle = 0
if item['normalVectorX']:
angle = math.acos(abs(float(item['normalVectorX']))) * 1000
elif item['normalVectorY']:
angle = math.acos(abs(float(item['normalVectorY']))) * 1000
item['grazingAngle'] = angle
if 'grazingAngle' in item and 'normalVectorX' in item and 'autocomputeVectors' not in item:
item['autocomputeVectors'] = '1'
if item['type'] == 'crl':
key_value_pairs = pkcollections.Dict({
'material': 'User-defined',
'method': 'server',
'absoluteFocusPosition': None,
'focalDistance': None,
'tipRadius': float(item['radius']) * 1e6, # m -> um
| |
# coding: utf-8
# ## Case study
#
# In this case study we'll walk through using Python to fetch some data, clean it, and then graph it. This may be a short project, but it combines a number of features of the language we've discussed, and gives you a chance to see a project worked through from beginning to end. At almost every step we will also briefly call out alternatives and enhancements you can make.
#
# Global temperature change is the topic of much discussion. But those discussions are based on a global scale. Suppose I want to know what the temperatures have been doing near where I am? One way of finding out would be to get some historical data for my location, process it, and plot it to see exactly what's been happening.
#
# Fortunately, there are several sources of historical weather data that are freely available. We're going to talk through using data from the Global Historical Climatology Network, which has data from around the world. You may find other sources, which may have different data formats, but the steps and the processes we discuss here should be generally applicable to any data set.
# ### Downloading the data
#
# The first step will be to get the data. There is an archive of daily historical weather data at https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/, which has a wide array of data. Our first task will be to figure out which files we want and exactly where they are, and then to download them. Once we have the data, we can move on to processing and ultimately displaying our results.
#
# In order to download the files, which are accessible via HTTPS, we will need the requests library. We can get requests with `pip install requests` at the command prompt. Once we have requests, our first step will be to fetch the `readme.txt` file, which can guide us as to the formats and locations of the data files we'll want.
# In[11]:
# import requests
import requests
# In[ ]:
# get readme.txt file
r = requests.get('https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/readme.txt')
readme = r.text
# When we look at the readme file, it looks something like this:
# In[4]:
print(readme)
# In particular, we'll be interested in section II which lists the contents.
#
# ```
# II. CONTENTS OF ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/daily
#
# all: Directory with ".dly" files for all of GHCN-Daily
# gsn: Directory with ".dly" files for the GCOS Surface Network
# (GSN)
# hcn: Directory with ".dly" files for U.S. HCN
# by_year: Directory with GHCN Daily files parsed into yearly
# subsets with observation times where available. See the
# /by_year/readme.txt and
# /by_year/ghcn-daily-by_year-format.rtf
# files for further information
# grid: Directory with the GHCN-Daily gridded dataset known
# as HadGHCND
# papers: Directory with pdf versions of journal articles relevant
# to the GHCN-Daily dataset
# figures: Directory containing figures that summarize the inventory
# of GHCN-Daily station records
#
# ghcnd-all.tar.gz: TAR file of the GZIP-compressed files in the "all" directory
# ghcnd-gsn.tar.gz: TAR file of the GZIP-compressed "gsn" directory
# ghcnd-hcn.tar.gz: TAR file of the GZIP-compressed "hcn" directory
#
# ghcnd-countries.txt: List of country codes (FIPS) and names
# ghcnd-inventory.txt: File listing the periods of record for each station and
# element
# ghcnd-stations.txt: List of stations and their metadata (e.g., coordinates)
# ghcnd-states.txt: List of U.S. state and Canadian Province codes
# used in ghcnd-stations.txt
# ghcnd-version.txt: File that specifies the current version of GHCN Daily
#
# readme.txt: This file
# status.txt: Notes on the current status of GHCN-Daily
# ```
#
# As we look at the files available, we can see that `ghcnd-inventory.txt` has a listing of the recording periods for each station, which will help us find a good data set, and `ghcnd-stations.txt` lists the stations, which should help us find the station closest to our location, so we'll grab those two files first.
# In[12]:
# get inventory and stations files
r = requests.get('https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/ghcnd-inventory.txt')
inventory_txt = r.text
r = requests.get('https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/ghcnd-stations.txt')
stations_txt = r.text
# Once we have those files, we can save them to our local disk, so that we won't need to download them again if we need to go back to the original data.
# In[13]:
# save both the inventory and stations files to disk, in case we need them
with open("inventory.txt", "w") as inventory_file:
inventory_file.write(inventory_txt)
with open("stations.txt", "w") as stations_file:
stations_file.write(stations_txt)
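# A small convenience sketch (the `fetch_or_load` helper below is illustrative only, not part of the original workflow): reuse the local copies written above if they already exist, so re-running the notebook doesn't have to download the files again.
# In[ ]:
import os

def fetch_or_load(url, local_name):
    # Return the cached copy if present, otherwise download it and cache it.
    if os.path.exists(local_name):
        with open(local_name) as f:
            return f.read()
    text = requests.get(url).text
    with open(local_name, "w") as f:
        f.write(text)
    return text

# For example:
# inventory_txt = fetch_or_load(
#     'https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/ghcnd-inventory.txt',
#     'inventory.txt')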
# We'll start by looking at the inventory file. Here's what the first 137 characters show us:
# In[16]:
print(inventory_txt[:137])
# If we look at section VII of the `readme.txt` file we can see that the format of the inventory file is:
# ```
# VII. FORMAT OF "ghcnd-inventory.txt"
#
# ------------------------------
# Variable Columns Type
# ------------------------------
# ID 1-11 Character
# LATITUDE 13-20 Real
# LONGITUDE 22-30 Real
# ELEMENT 32-35 Character
# FIRSTYEAR 37-40 Integer
# LASTYEAR 42-45 Integer
# ------------------------------
#
# These variables have the following definitions:
#
# ID is the station identification code. Please see "ghcnd-stations.txt"
# for a complete list of stations and their metadata.
#
# LATITUDE is the latitude of the station (in decimal degrees).
#
# LONGITUDE is the longitude of the station (in decimal degrees).
#
# ELEMENT is the element type. See section III for a definition of elements.
#
# FIRSTYEAR is the first year of unflagged data for the given element.
#
# LASTYEAR is the last year of unflagged data for the given element.
# ```
#
# From this description we can tell that the inventory list has most of the information we need to find the station we'll want to look at. We can use the latitude and longitude to find the stations closest to us, and then we can use the first and last years to pick a station with records covering a long span of time.
#
# The only question this file leaves us with is what the "ELEMENT" field is; for that, it suggests we look at section III. In section III (which we'll look at in more detail later) we find the following description of the main elements:
#
# ```
# ELEMENT is the element type. There are five core elements as well as a number
# of addition elements.
#
# The five core elements are:
#
# PRCP = Precipitation (tenths of mm)
# SNOW = Snowfall (mm)
# SNWD = Snow depth (mm)
# TMAX = Maximum temperature (tenths of degrees C)
# TMIN = Minimum temperature (tenths of degrees C)
# ```
#
# For our purposes we'll be interested in the TMAX and TMIN elements, which are the maximum and minimum temperatures in tenths of degrees Centigrade.
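# To make the units concrete: a stored TMAX value of 267, for example, corresponds to a maximum temperature of 26.7 degrees C.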
# ### Parsing the inventory data
#
# The `readme.txt` file tells us what we've got in the inventory file, so we can parse the data into a more usable format. We could just store the parsed inventory | |
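# A possible parsing sketch (illustrative only; it follows the 1-based column ranges quoted from `readme.txt` above, so the Python slices are shifted down by one):
# In[ ]:
inventory = []
for line in inventory_txt.splitlines():
    if not line.strip():
        continue
    inventory.append((
        line[0:11].strip(),    # ID
        float(line[12:20]),    # LATITUDE
        float(line[21:30]),    # LONGITUDE
        line[31:35].strip(),   # ELEMENT, e.g. TMAX or TMIN
        int(line[36:40]),      # FIRSTYEAR
        int(line[41:45]),      # LASTYEAR
    ))

inventory[:5]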
iftrue
elif claripy.is_false(cond_v):
return iffalse
else:
data = iftrue.merge(iffalse)
return data
#
# Unary operation handlers
#
def _handle_Const(self, expr) -> MultiValues:
return MultiValues(offset_to_values={0: { claripy_value(expr.con.type, expr.con.value) }})
def _handle_Conversion(self, expr):
simop = vex_operations[expr.op]
bits = int(simop.op_attrs['to_size'])
arg_0 = self._expr(expr.args[0])
# if there are multiple values with only one offset, we apply conversion to each one of them
# otherwise, we return a TOP
if len(arg_0.values) == 1:
# extension, extract, or doing nothing
data = set()
for v in next(iter(arg_0.values.values())):
if bits > v.size():
data.add(v.zero_extend(bits - v.size()))
else:
data.add(v[bits - 1:0])
r = MultiValues(offset_to_values={next(iter(arg_0.values.keys())): data})
else:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _handle_Not1(self, expr):
arg0 = expr.args[0]
expr_0 = self._expr(arg0)
e0 = expr_0.one_value()
if e0 is not None and not e0.symbolic:
return MultiValues(offset_to_values={0: {
claripy.BVV(1, 1) if e0._model_concrete.value != 1 else claripy.BVV(0, 1)
}})
return MultiValues(offset_to_values={0: {self.state.top(1)}})
def _handle_Not(self, expr):
arg0 = expr.args[0]
expr_0 = self._expr(arg0)
bits = expr.result_size(self.tyenv)
e0 = expr_0.one_value()
if e0 is not None and not e0.symbolic:
return MultiValues(offset_to_values={0: {~e0}}) # pylint:disable=invalid-unary-operand-type
return MultiValues(offset_to_values={0: {self.state.top(bits)}})
def _handle_Clz64(self, expr):
arg0 = expr.args[0]
_ = self._expr(arg0)
bits = expr.result_size(self.tyenv)
# Need to actually implement this later
return MultiValues(offset_to_values={0: {self.state.top(bits)}})
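    # A concrete implementation could later be plugged in here; a sketch, for a
    # single non-symbolic 64-bit operand v:
    #   n = v._model_concrete.value
    #   clz = 64 - n.bit_length()   # 64 when n == 0
    #   return MultiValues(offset_to_values={0: {claripy.BVV(clz, bits)}})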
def _handle_Ctz64(self, expr):
arg0 = expr.args[0]
_ = self._expr(arg0)
bits = expr.result_size(self.tyenv)
# Need to actually implement this later
return MultiValues(offset_to_values={0: {self.state.top(bits)}})
#
# Binary operation handlers
#
def _handle_ExpCmpNE64(self, expr):
_, _ = self._expr(expr.args[0]), self._expr(expr.args[1])
bits = expr.result_size(self.tyenv)
# Need to actually implement this later
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _handle_16HLto32(self, expr):
_, _ = self._expr(expr.args[0]), self._expr(expr.args[1])
bits = expr.result_size(self.tyenv)
# Need to actually implement this later
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _handle_Add(self, expr):
expr0, expr1 = self._expr(expr.args[0]), self._expr(expr.args[1])
bits = expr.result_size(self.tyenv)
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
# we do not support addition between two real multivalues
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# adding a single value to a multivalue
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v + expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# adding a single value to a multivalue
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {v + expr0_v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
# adding two single values together
r = MultiValues(offset_to_values={0: {expr0_v + expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _handle_Sub(self, expr):
expr0, expr1 = self._expr(expr.args[0]), self._expr(expr.args[1])
bits = expr.result_size(self.tyenv)
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
# we do not support subtraction between two real multivalues
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# subtracting a single value from a multivalue
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v - expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# subtracting a single value from a multivalue
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {expr0_v - v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
# subtracting a single value from another single value
r = MultiValues(offset_to_values={0: {expr0_v - expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _handle_Mul(self, expr):
expr0, expr1 = self._expr(expr.args[0]), self._expr(expr.args[1])
bits = expr.result_size(self.tyenv)
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
# we do not support multiplication between two real multivalues
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# multiplying a multivalue by a single value
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v * expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# multiplying a multivalue by a single value
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {v * expr0_v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
# multiplying two single values together
r = MultiValues(offset_to_values={0: {expr0_v * expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _handle_Mull(self, expr):
_, _ = self._expr(expr.args[0]), self._expr(expr.args[1])
bits = expr.result_size(self.tyenv)
return MultiValues(offset_to_values={0: {self.state.top(bits)}})
def _handle_Div(self, expr):
expr0, expr1 = self._expr(expr.args[0]), self._expr(expr.args[1])
bits = expr.result_size(self.tyenv)
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
# we do not support division between two real multivalues
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v / expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {expr0_v / v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
if expr0_v.concrete and expr1_v.concrete:
# dividing two single values
r = MultiValues(offset_to_values={0: {expr0_v / expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _handle_DivMod(self, expr):
_, _ = self._expr(expr.args[0]), self._expr(expr.args[1])
bits = expr.result_size(self.tyenv)
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _handle_And(self, expr):
expr0, expr1 = self._expr(expr.args[0]), self._expr(expr.args[1])
bits = expr.result_size(self.tyenv)
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
# we do not support bitwise-and between two real multivalues
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# bitwise-and a single value with a multivalue
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v & expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# bitwise-and a single value with a multivalue
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {v & expr0_v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
if expr0_v.concrete and expr1_v.concrete:
# bitwise-and two single values together
r = MultiValues(offset_to_values={0: {expr0_v & expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _handle_Xor(self, expr):
expr0, expr1 = self._expr(expr.args[0]), self._expr(expr.args[1])
bits = expr.result_size(self.tyenv)
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
# we do not support xor between two real multivalues
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# bitwise-xor a single value with a multivalue
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v ^ expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# bitwise-xor a single value with a multivalue
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {v ^ expr0_v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
if expr0_v.concrete and expr1_v.concrete:
# bitwise-xor two single values together
r = MultiValues(offset_to_values={0: {expr0_v ^ expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _handle_Or(self, expr):
expr0, expr1 = self._expr(expr.args[0]), self._expr(expr.args[1])
bits = expr.result_size(self.tyenv)
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
# we do not support or between two real multivalues
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# bitwise-or a single value with a multivalue
if len(expr0.values) == 1 and 0 in expr0.values:
vs = {v | expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# bitwise-or a single value with a multivalue
if len(expr1.values) == 1 and 0 in expr1.values:
vs = {v | expr0_v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
# bitwise-or two single values together
r = MultiValues(offset_to_values={0: {expr0_v | expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
arg_code = 'void'
else:
# TODO: extract the information needed for printing in the case of a function argument which itself has a function argument
arg_code = ', '.join('{}'.format(self._print_FuncAddressDeclare(i))
if isinstance(i, FunctionAddress) else '{0}{1}'.format(self.get_declare_type(i), i)
for i in args)
return '{}(*{})({});\n'.format(ret_type, name, arg_code)
def _print_Declare(self, expr):
if isinstance(expr.variable, InhomogeneousTupleVariable):
return ''.join(self._print_Declare(Declare(v.dtype,v,intent=expr.intent, static=expr.static)) for v in expr.variable)
declaration_type = self.get_declare_type(expr.variable)
variable = self._print(expr.variable.name)
if expr.variable.is_stack_array:
preface, init = self._init_stack_array(expr.variable,)
else:
preface = ''
init = ''
declaration = '{dtype}{var}{init};\n'.format(
dtype = declaration_type,
var = variable,
init = init)
return preface + declaration
def _print_NativeBool(self, expr):
self._additional_imports.add('stdbool')
return 'bool'
def _print_NativeInteger(self, expr):
return 'int'
def _print_NativeReal(self, expr):
return 'real'
def _print_NativeVoid(self, expr):
return 'void'
def _print_NativeComplex(self, expr):
self._additional_imports.add('complex')
return 'complex'
def _print_NativeString(self, expr):
return 'string'
def function_signature(self, expr, print_arg_names = True):
"""Extract from function definition all the information
(name, input, output) needed to create the signature
Parameters
----------
expr : FunctionDef
the function definition
print_arg_names : Bool
default value is True; set to False when the argument names
should not be printed
Return
------
String
Signature of the function
"""
args = list(expr.arguments)
if len(expr.results) == 1:
ret_type = self.get_declare_type(expr.results[0])
elif len(expr.results) > 1:
ret_type = self._print(datatype('int')) + ' '
args += [FunctionDefArgument(a.clone(name = a.name, is_pointer =True)) for a in expr.results]
else:
ret_type = self._print(datatype('void')) + ' '
name = expr.name
if not args:
arg_code = 'void'
else:
arg_code = ', '.join('{}'.format(self.function_signature(i.var, False))
if isinstance(i.var, FunctionAddress)
else '{0}'.format(self.get_declare_type(i.var)) + (i.name if print_arg_names else '')
for i in args)
if isinstance(expr, FunctionAddress):
return '{}(*{})({})'.format(ret_type, name, arg_code)
else:
return '{0}{1}({2})'.format(ret_type, name, arg_code)
def _print_IndexedElement(self, expr):
base = expr.base
inds = list(expr.indices)
base_shape = base.shape
allow_negative_indexes = True if isinstance(base, PythonTuple) else base.allows_negative_indexes
for i, ind in enumerate(inds):
if isinstance(ind, PyccelUnarySub) and isinstance(ind.args[0], LiteralInteger):
inds[i] = PyccelMinus(base_shape[i], ind.args[0], simplify = True)
else:
#indices of indexedElement of len==1 shouldn't be a tuple
if isinstance(ind, tuple) and len(ind) == 1:
inds[i].args = ind[0]
if allow_negative_indexes and \
not isinstance(ind, LiteralInteger) and not isinstance(ind, Slice):
inds[i] = IfTernaryOperator(PyccelLt(ind, LiteralInteger(0)),
PyccelAdd(base_shape[i], ind, simplify = True), ind)
#set dtype to the C struct types
dtype = self._print(expr.dtype)
dtype = self.find_in_ndarray_type_registry(dtype, expr.precision)
base_name = self._print(base)
if base.is_ndarray or isinstance(base, HomogeneousTupleVariable):
if expr.rank > 0:
#managing the Slice input
for i , ind in enumerate(inds):
if isinstance(ind, Slice):
inds[i] = self._new_slice_with_processed_arguments(ind, PyccelArraySize(base, i),
allow_negative_indexes)
else:
inds[i] = Slice(ind, PyccelAdd(ind, LiteralInteger(1), simplify = True), LiteralInteger(1))
inds = [self._print(i) for i in inds]
return "array_slicing(%s, %s, %s)" % (base_name, expr.rank, ", ".join(inds))
inds = [self._cast_to(i, NativeInteger(), 8).format(self._print(i)) for i in inds]
else:
raise NotImplementedError(expr)
return "GET_ELEMENT(%s, %s, %s)" % (base_name, dtype, ", ".join(inds))
def _cast_to(self, expr, dtype, precision):
""" add cast to an expression when needed
parameters
----------
expr : PyccelAstNode
the expression to be cast
dtype : Datatype
base type of the cast
precision : integer
precision of the base type of the cast
Return
------
String
Return format string that contains the desired cast type
"""
if (expr.dtype != dtype or expr.precision != precision):
cast=self.find_in_dtype_registry(self._print(dtype), precision)
return '({}){{}}'.format(cast)
return '{}'
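# For instance, for a 64-bit integer target the returned template looks like '(int64_t){}' (the exact
# type name comes from the dtype registry), which is later filled in as in _print_IndexedElement above:
# self._cast_to(i, NativeInteger(), 8).format(self._print(i)).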
def _print_DottedVariable(self, expr):
"""convert dotted Variable to their C equivalent"""
return '{}.{}'.format(self._print(expr.lhs), self._print(expr.name))
@staticmethod
def _new_slice_with_processed_arguments(_slice, array_size, allow_negative_index):
""" Create new slice with informations collected from old slice and decorators
Parameters
----------
_slice : Slice
slice needed to collect (start, stop, step)
array_size : PyccelArraySize
call to function size()
allow_negative_index : Bool
True when the decorator allow_negative_index is present
Returns
-------
Slice
"""
start = LiteralInteger(0) if _slice.start is None else _slice.start
stop = array_size if _slice.stop is None else _slice.stop
# negative start and end in slice
if isinstance(start, PyccelUnarySub) and isinstance(start.args[0], LiteralInteger):
start = PyccelMinus(array_size, start.args[0], simplify = True)
elif allow_negative_index and not isinstance(start, (LiteralInteger, PyccelArraySize)):
start = IfTernaryOperator(PyccelLt(start, LiteralInteger(0)),
PyccelMinus(array_size, start, simplify = True), start)
if isinstance(stop, PyccelUnarySub) and isinstance(stop.args[0], LiteralInteger):
stop = PyccelMinus(array_size, stop.args[0], simplify = True)
elif allow_negative_index and not isinstance(stop, (LiteralInteger, PyccelArraySize)):
stop = IfTernaryOperator(PyccelLt(stop, LiteralInteger(0)),
PyccelMinus(array_size, stop, simplify = True), stop)
# steps in slices
step = _slice.step
if step is None:
step = LiteralInteger(1)
# negative step in slice
elif isinstance(step, PyccelUnarySub) and isinstance(step.args[0], LiteralInteger):
start = PyccelMinus(array_size, LiteralInteger(1), simplify = True) if _slice.start is None else start
stop = LiteralInteger(0) if _slice.stop is None else stop
# variable step in slice
elif allow_negative_index and step and not isinstance(step, LiteralInteger):
og_start = start
start = IfTernaryOperator(PyccelGt(step, LiteralInteger(0)), start, PyccelMinus(stop, LiteralInteger(1), simplify = True))
stop = IfTernaryOperator(PyccelGt(step, LiteralInteger(0)), stop, og_start)
return Slice(start, stop, step)
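# Concretely, for an array of size n with allow_negative_index enabled, the Python slice a[-3:]
# is rewritten as Slice(n - 3, n, 1), and a[::-1] becomes Slice(n - 1, 0, -1).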
def _print_PyccelArraySize(self, expr):
return '{}.shape[{}]'.format(expr.arg, expr.index)
def _print_Allocate(self, expr):
free_code = ''
#free the array if its already allocated and checking if its not null if the status is unknown
if (expr.status == 'unknown'):
free_code = 'if (%s.shape != NULL)\n' % self._print(expr.variable.name)
free_code += "{{\n{}}}\n".format(self._print(Deallocate(expr.variable)))
elif (expr.status == 'allocated'):
free_code += self._print(Deallocate(expr.variable))
self._additional_imports.add('ndarrays')
shape = ", ".join(self._print(i) for i in expr.shape)
dtype = self._print(expr.variable.dtype)
dtype = self.find_in_ndarray_type_registry(dtype, expr.variable.precision)
shape_dtype = self.find_in_dtype_registry('int', 8)
shape_Assign = "("+ shape_dtype +"[]){" + shape + "}"
alloc_code = "{} = array_create({}, {}, {});\n".format(expr.variable, len(expr.shape), shape_Assign, dtype)
return '{}{}'.format(free_code, alloc_code)
def _print_Deallocate(self, expr):
if isinstance(expr.variable, InhomogeneousTupleVariable):
return ''.join(self._print(Deallocate(v)) for v in expr.variable)
if expr.variable.is_pointer:
return 'free_pointer({});\n'.format(self._print(expr.variable))
return 'free_array({});\n'.format(self._print(expr.variable))
def _print_Slice(self, expr):
start = self._print(expr.start)
stop = self._print(expr.stop)
step = self._print(expr.step)
return 'new_slice({}, {}, {})'.format(start, stop, step)
def _print_NumpyUfuncBase(self, expr):
""" Convert a Python expression with a Numpy function call to C
function call
Parameters
----------
expr : Pyccel ast node
Python expression with a Numpy function call
Returns
-------
string
Equivalent expression in C language
Example
-------
numpy.cos(x) ==> cos(x)
"""
# add necessary include
self._additional_imports.add('math')
type_name = type(expr).__name__
try:
func_name = numpy_ufunc_to_c_real[type_name]
except KeyError:
errors.report(PYCCEL_RESTRICTION_TODO, severity='fatal')
args = []
for arg in expr.args:
if arg.dtype is NativeComplex():
self._additional_imports.add('complex')
try:
func_name = numpy_ufunc_to_c_complex[type_name]
args.append(self._print(arg))
except KeyError:
errors.report(INCOMPATIBLE_TYPEVAR_TO_FUNC.format(type_name) ,severity='fatal')
elif arg.dtype is not NativeReal():
args.append(self._print(NumpyFloat(arg)))
else :
args.append(self._print(arg))
code_args = ', '.join(args)
return '{0}({1})'.format(func_name, code_args)
def _print_MathFunctionBase(self, expr):
""" Convert a Python expression with a math function call to C
function call
Parameters
----------
expr : Pyccel ast node
Python expression with a Math function call
Returns
-------
string
Equivalent expression in C language
Example
-------
math.sin(x) ==> sin(x)
"""
# add necessary include
type_name = type(expr).__name__
try:
func_name = math_function_to_c[type_name]
except KeyError:
errors.report(PYCCEL_RESTRICTION_TODO, severity='fatal')
if func_name.startswith("pyc"):
self._additional_imports.add('pyc_math_c')
else:
if expr.dtype is NativeComplex():
self._additional_imports.add('cmath')
else:
self._additional_imports.add('math')
args = []
for arg in expr.args:
if arg.dtype != NativeReal() and not func_name.startswith("pyc"):
args.append(self._print(NumpyFloat(arg)))
else:
args.append(self._print(arg))
code_args = ', '.join(args)
if expr.dtype == NativeInteger():
cast_type = self.find_in_dtype_registry('int', expr.precision)
return '({0}){1}({2})'.format(cast_type, func_name, code_args)
return '{0}({1})'.format(func_name, code_args)
def _print_MathIsfinite(self, expr):
"""Convert a Python expression with a math isfinite function call to C
function call"""
# add necessary include
self._additional_imports.add('math')
arg = expr.args[0]
if arg.dtype is NativeInteger():
code_arg = self._print(NumpyFloat(arg))
else:
code_arg = self._print(arg)
return "isfinite({})".format(code_arg)
def _print_MathIsinf(self, expr):
"""Convert a Python expression with a math isinf function call to C
function call"""
# add necessary include
self._additional_imports.add('math')
arg = expr.args[0]
if arg.dtype is NativeInteger():
code_arg = self._print(NumpyFloat(arg))
else:
code_arg = self._print(arg)
return "isinf({})".format(code_arg)
def _print_MathIsnan(self, expr):
"""Convert a Python expression with a math isnan function call to C
function call"""
# add necessary include
self._additional_imports.add('math')
arg = expr.args[0]
if arg.dtype is NativeInteger():
code_arg = self._print(NumpyFloat(arg))
else:
code_arg = self._print(arg)
return "isnan({})".format(code_arg)
def _print_MathTrunc(self, expr):
"""Convert a Python expression with a math trunc function call to C
function call"""
# add necessary include
self._additional_imports.add('math')
arg = expr.args[0]
if arg.dtype is NativeInteger():
code_arg = self._print(NumpyFloat(arg))
else:
code_arg = self._print(arg)
return "trunc({})".format(code_arg)
def _print_FunctionAddress(self, expr):
return expr.name
def _print_Rand(self, expr):
raise NotImplementedError("Rand not implemented")
def _print_NumpyRandint(self, expr):
raise NotImplementedError("Randint not implemented")
def _print_NumpyMod(self, expr):
return self._print(PyccelMod(*expr.args))
def _print_Interface(self, expr):
return ""
def _print_FunctionDef(self, expr):
if len(expr.results) > 1:
self._additional_args.append(expr.results)
body = self._print(expr.body)
decs = [Declare(i.dtype, i) if isinstance(i, Variable) else FuncAddressDeclare(i) for i in expr.local_vars]
if len(expr.results) <= 1 :
for i in expr.results:
if isinstance(i, Variable) and not i.is_temp:
decs += [Declare(i.dtype, i)]
# pyqstrat/account.py
from collections import defaultdict
from sortedcontainers import SortedDict
import math
import pandas as pd
import numpy as np
from pyqstrat.pq_types import ContractGroup, Trade, Contract
from types import SimpleNamespace
from typing import Sequence, Any, Tuple, Callable, Union, MutableSet, MutableSequence, MutableMapping, List, Optional
def calc_trade_pnl(open_qtys: np.ndarray,
open_prices: np.ndarray,
new_qtys: np.ndarray,
new_prices: np.ndarray,
multiplier: float) -> Tuple[np.ndarray, np.ndarray, float, float, float]:
'''
>>> print(calc_trade_pnl(
... open_qtys = np.array([], dtype = float), open_prices = np.array([], dtype = float),
... new_qtys = np.array([-8, 9, -4]), new_prices = np.array([10, 11, 6]), multiplier = 100))
(array([-3.]), array([6.]), -3.0, 6.0, -1300.0)
>>> print(calc_trade_pnl(open_qtys = np.array([], dtype = float), open_prices = np.array([], dtype = float), new_qtys = np.array([3, 10, -5]),
... new_prices = np.array([51, 50, 45]), multiplier = 100))
(array([8.]), array([50.]), 8.0, 50.0, -2800.0)
>>> print(calc_trade_pnl(open_qtys = np.array([]), open_prices = np.array([]),
... new_qtys = np.array([-58, -5, -5, 6, -8, 5, 5, -5, 19, 7, 5, -5, 39]),
... new_prices = np.array([2080, 2075.25, 2070.75, 2076, 2066.75, 2069.25, 2074.75, 2069.75, 2087.25, 2097.25, 2106, 2088.25, 2085.25]),
... multiplier = 50))
(array([], dtype=float64), array([], dtype=float64), 0.0, 0, -33762.5) '''
# TODO: Cythonize this
realized = 0.
new_qtys = new_qtys.copy()
new_prices = new_prices.copy()
_open_prices = np.zeros(len(open_prices) + len(new_prices), dtype=float)
_open_prices[:len(open_prices)] = open_prices
_open_qtys = np.zeros(len(open_qtys) + len(new_qtys), dtype=float)
_open_qtys[:len(open_qtys)] = open_qtys
new_qty_indices = np.nonzero(new_qtys)[0]
open_qty_indices = np.zeros(len(_open_qtys), dtype=int)
nonzero_indices = np.nonzero(_open_qtys)[0]
open_qty_indices[:len(nonzero_indices)] = nonzero_indices
i = 0 # index into new_qty_indices to get idx of the new qty we are currently netting
o = len(nonzero_indices) # virtual length of open_qty_indices
j = 0 # index into open_qty_indices to get idx of the open qty we are currently netting
k = len(open_qtys) # virtual length of _open_qtys
# Try to net all new trades against existing non-netted trades.
# Append any remaining non-netted new trades to end of existing trades
while i < len(new_qty_indices):
# Always try to net first non-zero new trade against first non-zero existing trade
# FIFO accounting
new_idx = new_qty_indices[i]
new_qty, new_price = new_qtys[new_idx], new_prices[new_idx]
# print(f'i: {i} j: {j} k: {k} o: {o} oq: {_open_qtys} oqi: {open_qty_indices} op: {_open_prices} nq: {new_qtys} np: {new_prices}')
if j < o: # while we still have open positions to net against
open_idx = open_qty_indices[j]
open_qty, open_price = _open_qtys[open_idx], _open_prices[open_idx]
if math.copysign(1, open_qty) == math.copysign(1, new_qty):
# Nothing to net against so add this trade to the array and wait for the next offsetting trade
_open_qtys[k] = new_qty
_open_prices[k] = new_price
open_qty_indices[o] = k
k += 1
o += 1
new_qtys[new_idx] = 0
i += 1
elif abs(new_qty) > abs(open_qty):
# New trade has more qty than offsetting trade so:
# a. net against offsetting trade
# b. remove the offsetting trade
# c. reduce qty of new trade
open_qty, open_price = _open_qtys[open_idx], _open_prices[open_idx]
realized += open_qty * (new_price - open_price)
# print(f'open_qty: {open_qty} open_price: {open_price} open_idx: {open_idx} i: {i}
# j: {j} k: {k} l: {l} oq: {_open_qtys} oqi: {open_qty_indices} op: {_open_prices} nq: {new_qtys} np: {new_prices}')
_open_qtys[open_idx] = 0
j += 1
new_qtys[new_idx] += open_qty
else:
# New trade has less qty than offsetting trade so:
# a. net against offsetting trade
# b. remove new trade
# c. reduce qty of offsetting trade
realized += new_qty * (open_price - new_price)
new_qtys[new_idx] = 0
i += 1
_open_qtys[open_idx] += new_qty
else:
# Nothing to net against so add this trade to the open trades array and wait for the next offsetting trade
_open_qtys[k] = new_qty
_open_prices[k] = new_price
open_qty_indices[o] = k
k += 1
o += 1
new_qtys[new_idx] = 0
i += 1
mask = _open_qtys != 0
_open_qtys = _open_qtys[mask]
_open_prices = _open_prices[mask]
open_qty = np.sum(_open_qtys)
if math.isclose(open_qty, 0):
weighted_avg_price = 0
else:
weighted_avg_price = np.sum(_open_qtys * _open_prices) / open_qty
return _open_qtys, _open_prices, open_qty, weighted_avg_price, realized * multiplier
def leading_nan_to_zero(df: pd.DataFrame, columns: Sequence[str]) -> pd.DataFrame:
for column in columns:
vals = df[column].values
first_non_nan_index_ = np.ravel(np.nonzero(~np.isnan(vals))) # type: ignore
if len(first_non_nan_index_):
first_non_nan_index = first_non_nan_index_[0]
else:
first_non_nan_index = -1
if first_non_nan_index > 0 and first_non_nan_index < len(vals):
vals[:first_non_nan_index] = np.nan_to_num(vals[:first_non_nan_index])
df[column] = vals
return df
def find_last_non_nan_index(array: np.ndarray) -> int:
i = np.nonzero(np.isfinite(array))[0]
if len(i): return i[-1]
return 0
def find_index_before(sorted_dict: SortedDict, key: Any) -> int:
'''
Find index of the first key in a sorted dict that is less than or equal to the key passed in.
If the key is less than the first key in the dict, return -1
'''
size = len(sorted_dict)
if not size: return -1
i = sorted_dict.bisect_left(key)
if i == size: return size - 1
if sorted_dict.keys()[i] != key:
return i - 1
return i
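# A small illustration of find_index_before (hypothetical keys, not taken from the library's tests):
#   sd = SortedDict({np.datetime64('2020-01-01'): 1, np.datetime64('2020-01-03'): 2})
#   find_index_before(sd, np.datetime64('2020-01-02'))   # -> 0  (index of the 2020-01-01 entry)
#   find_index_before(sd, np.datetime64('2019-12-31'))   # -> -1 (key is before the first entry)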
class ContractPNL:
'''Computes pnl for a single contract over time given trades and market data'''
def __init__(self,
contract: Contract,
account_timestamps: np.ndarray,
price_function: Callable[[Contract, np.ndarray, int, SimpleNamespace], float],
strategy_context: SimpleNamespace) -> None:
self.contract = contract
self._price_function = price_function
self.strategy_context = strategy_context
self._account_timestamps = account_timestamps
self._trade_pnl = SortedDict()
self._net_pnl = SortedDict()
# Store trades that are not offset so when new trades come in we can offset against these to calc pnl
self.open_qtys = np.empty(0, dtype=int)
self.open_prices = np.empty(0, dtype=float)
self.first_trade_timestamp: Optional[np.datetime64] = None
self.final_pnl = np.nan
def _add_trades(self, trades: Sequence[Trade]) -> None:
'''
Args:
trades: Must be sorted by timestamp
'''
if not len(trades): return
timestamps = [trade.timestamp for trade in trades]
if len(self._trade_pnl):
k, v = self._trade_pnl.peekitem(0)
if timestamps[0] <= k:
raise Exception(f'Can only add a trade that is newer than last added current: {timestamps[0]} prev max timestamp: {k}')
if self.first_trade_timestamp is None: self.first_trade_timestamp = timestamps[0]
for i, timestamp in enumerate(timestamps):
t_trades = [trade for trade in trades if trade.timestamp == timestamp]
open_qtys, open_prices, open_qty, weighted_avg_price, realized_chg = calc_trade_pnl(
self.open_qtys, self.open_prices,
np.array([trade.qty for trade in t_trades]),
np.array([trade.price for trade in t_trades]),
self.contract.multiplier)
self.open_qtys = open_qtys
self.open_prices = open_prices
position_chg = sum([trade.qty for trade in t_trades])
commission_chg = sum([trade.commission for trade in t_trades])
fee_chg = sum([trade.fee for trade in t_trades])
index = find_index_before(self._trade_pnl, timestamp)
if index == -1:
self._trade_pnl[timestamp] = (position_chg, realized_chg, fee_chg, commission_chg, open_qty, weighted_avg_price)
else:
prev_timestamp, (prev_position, prev_realized, prev_fee, prev_commission, _, _) = self._trade_pnl.peekitem(index)
self._trade_pnl[timestamp] = (prev_position + position_chg, prev_realized + realized_chg,
prev_fee + fee_chg, prev_commission + commission_chg, open_qty, weighted_avg_price)
self.calc_net_pnl(timestamp)
def calc_net_pnl(self, timestamp: np.datetime64) -> None:
if timestamp in self._net_pnl: return
if self.first_trade_timestamp is None or timestamp < self.first_trade_timestamp: return
# TODO: Option expiry should be a special case. If option expires at 3:00 pm, we put in an expiry order at 3 pm and the
# trade comes in at 3:01 pm. In this case, the final pnl is recorded at 3:01 but should be at 3 pm.
if self.contract.expiry is not None and timestamp > self.contract.expiry and not math.isnan(self.final_pnl): return
i = np.searchsorted(self._account_timestamps, timestamp)
assert(self._account_timestamps[i] == timestamp)
# Find the index before or equal to current timestamp. If not found, set to 0's
trade_pnl_index = find_index_before(self._trade_pnl, timestamp)
if trade_pnl_index == -1:
realized, fee, commission, open_qty, weighted_avg_price = 0, 0, 0, 0, 0
else:
_, (_, realized, fee, commission, open_qty, weighted_avg_price) = self._trade_pnl.peekitem(trade_pnl_index)
price = np.nan
if math.isclose(open_qty, 0):
unrealized = 0.0
else:
price = self._price_function(self.contract, self._account_timestamps, i, self.strategy_context) # type: ignore
assert np.isreal(price), \
f'Unexpected price type: {price} {type(price)} for contract: {self.contract} timestamp: {self._account_timestamps[i]}'
if math.isnan(price):
index = find_index_before(self._net_pnl, timestamp) # Last index we computed net pnl for
if index == -1:
prev_unrealized = 0
else:
_, (_, prev_unrealized, _) = self._net_pnl.peekitem(index)
unrealized = prev_unrealized
else:
unrealized = open_qty * (price - weighted_avg_price) * self.contract.multiplier
net_pnl = realized + unrealized - commission - fee
self._net_pnl[timestamp] = (price, unrealized, net_pnl)
if self.contract.expiry is not None and timestamp > self.contract.expiry:
self.final_pnl = net_pnl
def position(self, timestamp: np.datetime64) -> float:
index = find_index_before(self._trade_pnl, timestamp)
if index == -1: return 0.
_, (position, _, _, _, _, _) = self._trade_pnl.peekitem(index) # Less than or equal to the given timestamp
return position
def Identifier(self, i=None):
if i is None:
return self.getTokens(StlParser.Identifier)
else:
return self.getToken(StlParser.Identifier, i)
def COMMA(self):
return self.getToken(StlParser.COMMA, 0)
def RPAREN(self):
return self.getToken(StlParser.RPAREN, 0)
def accept(self, visitor):
if hasattr(visitor, "visitRosTopic"):
return visitor.visitRosTopic(self)
else:
return visitor.visitChildren(self)
def annotation_type(self):
localctx = StlParser.Annotation_typeContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_annotation_type)
try:
localctx = StlParser.RosTopicContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 80
self.match(StlParser.ROS_Topic)
self.state = 81
self.match(StlParser.LPAREN)
self.state = 82
self.match(StlParser.Identifier)
self.state = 83
self.match(StlParser.COMMA)
self.state = 84
self.match(StlParser.Identifier)
self.state = 85
self.match(StlParser.RPAREN)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class VariableDeclarationContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(StlParser.VariableDeclarationContext, self).__init__(parent, invokingState)
self.parser = parser
def domainType(self):
return self.getTypedRuleContext(StlParser.DomainTypeContext,0)
def identifier(self):
return self.getTypedRuleContext(StlParser.IdentifierContext,0)
def Constant(self):
return self.getToken(StlParser.Constant, 0)
def ioType(self):
return self.getTypedRuleContext(StlParser.IoTypeContext,0)
def assignment(self):
return self.getTypedRuleContext(StlParser.AssignmentContext,0)
def getRuleIndex(self):
return StlParser.RULE_variableDeclaration
def accept(self, visitor):
if hasattr(visitor, "visitVariableDeclaration"):
return visitor.visitVariableDeclaration(self)
else:
return visitor.visitChildren(self)
def variableDeclaration(self):
localctx = StlParser.VariableDeclarationContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_variableDeclaration)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 88
_la = self._input.LA(1)
if _la==StlParser.Constant:
self.state = 87
self.match(StlParser.Constant)
self.state = 91
_la = self._input.LA(1)
if _la==StlParser.Input or _la==StlParser.Output:
self.state = 90
self.ioType()
self.state = 93
self.domainType()
self.state = 94
self.identifier()
self.state = 96
_la = self._input.LA(1)
if _la==StlParser.EQUAL:
self.state = 95
self.assignment()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AssignmentContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(StlParser.AssignmentContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return StlParser.RULE_assignment
def copyFrom(self, ctx):
super(StlParser.AssignmentContext, self).copyFrom(ctx)
class AsgnExprContext(AssignmentContext):
def __init__(self, parser, ctx): # actually a StlParser.AssignmentContext)
super(StlParser.AsgnExprContext, self).__init__(parser)
self.copyFrom(ctx)
def EQUAL(self):
return self.getToken(StlParser.EQUAL, 0)
def expression(self):
return self.getTypedRuleContext(StlParser.ExpressionContext,0)
def accept(self, visitor):
if hasattr(visitor, "visitAsgnExpr"):
return visitor.visitAsgnExpr(self)
else:
return visitor.visitChildren(self)
class AsgnLiteralContext(AssignmentContext):
def __init__(self, parser, ctx): # actually a StlParser.AssignmentContext)
super(StlParser.AsgnLiteralContext, self).__init__(parser)
self.copyFrom(ctx)
def EQUAL(self):
return self.getToken(StlParser.EQUAL, 0)
def literal(self):
return self.getTypedRuleContext(StlParser.LiteralContext,0)
def accept(self, visitor):
if hasattr(visitor, "visitAsgnLiteral"):
return visitor.visitAsgnLiteral(self)
else:
return visitor.visitChildren(self)
def assignment(self):
localctx = StlParser.AssignmentContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_assignment)
try:
self.state = 102
la_ = self._interp.adaptivePredict(self._input,7,self._ctx)
if la_ == 1:
localctx = StlParser.AsgnLiteralContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 98
self.match(StlParser.EQUAL)
self.state = 99
self.literal()
pass
elif la_ == 2:
localctx = StlParser.AsgnExprContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 100
self.match(StlParser.EQUAL)
self.state = 101
self.expression(0)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class DomainTypeContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(StlParser.DomainTypeContext, self).__init__(parent, invokingState)
self.parser = parser
def DomainTypeFloat(self):
return self.getToken(StlParser.DomainTypeFloat, 0)
def DomainTypeInt(self):
return self.getToken(StlParser.DomainTypeInt, 0)
def DomainTypeLong(self):
return self.getToken(StlParser.DomainTypeLong, 0)
def DomainTypeComplex(self):
return self.getToken(StlParser.DomainTypeComplex, 0)
def Identifier(self):
return self.getToken(StlParser.Identifier, 0)
def getRuleIndex(self):
return StlParser.RULE_domainType
def accept(self, visitor):
if hasattr(visitor, "visitDomainType"):
return visitor.visitDomainType(self)
else:
return visitor.visitChildren(self)
def domainType(self):
localctx = StlParser.DomainTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 20, self.RULE_domainType)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 104
_la = self._input.LA(1)
if not(((((_la - 29)) & ~0x3f) == 0 and ((1 << (_la - 29)) & ((1 << (StlParser.DomainTypeFloat - 29)) | (1 << (StlParser.DomainTypeLong - 29)) | (1 << (StlParser.DomainTypeComplex - 29)) | (1 << (StlParser.DomainTypeInt - 29)) | (1 << (StlParser.Identifier - 29)))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IoTypeContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(StlParser.IoTypeContext, self).__init__(parent, invokingState)
self.parser = parser
def Input(self):
return self.getToken(StlParser.Input, 0)
def Output(self):
return self.getToken(StlParser.Output, 0)
def getRuleIndex(self):
return StlParser.RULE_ioType
def accept(self, visitor):
if hasattr(visitor, "visitIoType"):
return visitor.visitIoType(self)
else:
return visitor.visitChildren(self)
def ioType(self):
localctx = StlParser.IoTypeContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_ioType)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 106
_la = self._input.LA(1)
if not(_la==StlParser.Input or _la==StlParser.Output):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IntervalContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(StlParser.IntervalContext, self).__init__(parent, invokingState)
self.parser = parser
def LBRACK(self):
return self.getToken(StlParser.LBRACK, 0)
def intervalTime(self, i=None):
if i is None:
return self.getTypedRuleContexts(StlParser.IntervalTimeContext)
else:
return self.getTypedRuleContext(StlParser.IntervalTimeContext,i)
def COLON(self):
return self.getToken(StlParser.COLON, 0)
def RBRACK(self):
return self.getToken(StlParser.RBRACK, 0)
def getRuleIndex(self):
return StlParser.RULE_interval
def accept(self, visitor):
if hasattr(visitor, "visitInterval"):
return visitor.visitInterval(self)
else:
return visitor.visitChildren(self)
def interval(self):
localctx = StlParser.IntervalContext(self, self._ctx, self.state)
self.enterRule(localctx, 24, self.RULE_interval)
try:
self.enterOuterAlt(localctx, 1)
self.state = 108
self.match(StlParser.LBRACK)
self.state = 109
self.intervalTime()
self.state = 110
self.match(StlParser.COLON)
self.state = 111
self.intervalTime()
self.state = 112
self.match(StlParser.RBRACK)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class IntervalTimeContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(StlParser.IntervalTimeContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return StlParser.RULE_intervalTime
def copyFrom(self, ctx):
super(StlParser.IntervalTimeContext, self).copyFrom(ctx)
class IntervalTimeLiteralContext(IntervalTimeContext):
def __init__(self, parser, ctx): # actually a StlParser.IntervalTimeContext)
super(StlParser.IntervalTimeLiteralContext, self).__init__(parser)
self.copyFrom(ctx)
def literal(self):
return self.getTypedRuleContext(StlParser.LiteralContext,0)
def unit(self):
return self.getTypedRuleContext(StlParser.UnitContext,0)
def accept(self, visitor):
if hasattr(visitor, "visitIntervalTimeLiteral"):
return visitor.visitIntervalTimeLiteral(self)
else:
return visitor.visitChildren(self)
def intervalTime(self):
localctx = StlParser.IntervalTimeContext(self, self._ctx, self.state)
self.enterRule(localctx, 26, self.RULE_intervalTime)
self._la = 0 # Token type
try:
localctx = StlParser.IntervalTimeLiteralContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 114
self.literal()
self.state = 116
_la = self._input.LA(1)
if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << StlParser.SEC) | (1 << StlParser.MSEC) | (1 << StlParser.USEC) | (1 << StlParser.NSEC) | (1 << StlParser.PSEC))) != 0):
self.state = 115
self.unit()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class UnitContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(StlParser.UnitContext, self).__init__(parent, invokingState)
self.parser = parser
def SEC(self):
return self.getToken(StlParser.SEC, 0)
def MSEC(self):
return self.getToken(StlParser.MSEC, 0)
def USEC(self):
return self.getToken(StlParser.USEC, 0)
def NSEC(self):
return self.getToken(StlParser.NSEC, 0)
def PSEC(self):
return self.getToken(StlParser.PSEC, 0)
def getRuleIndex(self):
return StlParser.RULE_unit
def accept(self, visitor):
if hasattr(visitor, "visitUnit"):
return visitor.visitUnit(self)
else:
return visitor.visitChildren(self)
def unit(self):
localctx = StlParser.UnitContext(self, self._ctx, self.state)
self.enterRule(localctx, 28, self.RULE_unit)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 118
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << StlParser.SEC) | (1 << StlParser.MSEC) | (1 << StlParser.USEC) | (1 << StlParser.NSEC) | (1 << StlParser.PSEC))) != 0)):
self._errHandler.recoverInline(self)
else:
self.consume()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class TopExpressionContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(StlParser.TopExpressionContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return StlParser.RULE_topExpression
def copyFrom(self, ctx):
super(StlParser.TopExpressionContext, self).copyFrom(ctx)
class ExprContext(TopExpressionContext):
def __init__(self, parser, ctx): # actually a StlParser.TopExpressionContext)
super(StlParser.ExprContext, self).__init__(parser)
self.copyFrom(ctx)
def expression(self):
return self.getTypedRuleContext(StlParser.ExpressionContext,0)
def accept(self, visitor):
if hasattr(visitor, "visitExpr"):
return visitor.visitExpr(self)
else:
return visitor.visitChildren(self)
class ExprUntimedEvExprContext(TopExpressionContext):
def __init__(self, parser, ctx): # actually a StlParser.TopExpressionContext)
super(StlParser.ExprUntimedEvExprContext, self).__init__(parser)
self.copyFrom(ctx)
def EventuallyOperator(self):
return self.getToken(StlParser.EventuallyOperator, 0)
def expression(self):
return self.getTypedRuleContext(StlParser.ExpressionContext,0)
def accept(self, visitor):
if hasattr(visitor, "visitExprUntimedEvExpr"):
return visitor.visitExprUntimedEvExpr(self)
else:
return visitor.visitChildren(self)
class ExprUntimedAlwaysExprContext(TopExpressionContext):
def __init__(self, parser, ctx): # actually a StlParser.TopExpressionContext)
super(StlParser.ExprUntimedAlwaysExprContext, self).__init__(parser)
self.copyFrom(ctx)
def AlwaysOperator(self):
return self.getToken(StlParser.AlwaysOperator, 0)
def expression(self):
return self.getTypedRuleContext(StlParser.ExpressionContext,0)
def accept(self, visitor):
if hasattr(visitor, "visitExprUntimedAlwaysExpr"):
return visitor.visitExprUntimedAlwaysExpr(self)
else:
return visitor.visitChildren(self)
def topExpression(self):
localctx = StlParser.TopExpressionContext(self, self._ctx, self.state)
self.enterRule(localctx, 30, self.RULE_topExpression)
try:
self.state = 125
la_ = self._interp.adaptivePredict(self._input,9,self._ctx)
if la_ == 1:
localctx = StlParser.ExprUntimedAlwaysExprContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 120
self.match(StlParser.AlwaysOperator)
self.state = 121
self.expression(0)
pass
elif la_ == 2:
localctx = StlParser.ExprUntimedEvExprContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 122
self.match(StlParser.EventuallyOperator)
self.state = 123
self.expression(0)
pass
elif la_ == 3:
localctx = StlParser.ExprContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 124
self.expression(0)
pass
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExpressionContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(StlParser.ExpressionContext, self).__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return StlParser.RULE_expression
def copyFrom(self, ctx):
super(StlParser.ExpressionContext, self).copyFrom(ctx)
class ExprParenContext(ExpressionContext):
def __init__(self, parser, ctx): # actually a StlParser.ExpressionContext)
super(StlParser.ExprParenContext, self).__init__(parser)
self.copyFrom(ctx)
def LPAREN(self):
return self.getToken(StlParser.LPAREN, 0)
def expression(self):
return self.getTypedRuleContext(StlParser.ExpressionContext,0)
def RPAREN(self):
return self.getToken(StlParser.RPAREN, 0)
def accept(self, visitor):
if hasattr(visitor, "visitExprParen"):
return visitor.visitExprParen(self)
else:
return visitor.visitChildren(self)
class ExprAndExprContext(ExpressionContext):
def __init__(self, parser, ctx): # actually a StlParser.ExpressionContext)
super(StlParser.ExprAndExprContext, self).__init__(parser)
self.copyFrom(ctx)
def expression(self, i=None):
if i is None:
return self.getTypedRuleContexts(StlParser.ExpressionContext)
else:
return self.getTypedRuleContext(StlParser.ExpressionContext,i)
def AndOperator(self):
return self.getToken(StlParser.AndOperator, 0)
def accept(self, visitor):
if hasattr(visitor, "visitExprAndExpr"):
return visitor.visitExprAndExpr(self)
else:
return visitor.visitChildren(self)
class ExpreOnceExprContext(ExpressionContext):
def __init__(self, parser, ctx): # actually a StlParser.ExpressionContext)
the model to find out what features are more discriminative for the positive and negative classes
Here the computationally light chi2 test against the class labels is used.
:param training_data: numpy ndarray representing the training dataset features
:param training_data_labels: numpy ndarray representing the training dataset labels
:param testing_data: numpy ndarray representing the testing dataset features
:param feature_names: list of feature names
:returns: list with the names of the number_top_features selected features, plus the 'Label' column
:rtype: list
"""
logger.info("Ranking features using the chi2 test ..")
ch2 = SelectKBest(chi2, k=number_top_features)
training_data_filtered = ch2.fit_transform(training_data, training_data_labels)
# keep selected feature names
feature_names = [feature_names[i] for i in ch2.get_support(indices=True)]
feature_names.append('Label')
return feature_names
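# A minimal standalone sketch of the same selection step (the names below are illustrative, not part of this class):
#   from sklearn.feature_selection import SelectKBest, chi2
#   ch2 = SelectKBest(chi2, k=10)
#   X_top = ch2.fit_transform(X_counts, y)                         # keep the 10 highest-scoring features
#   kept_names = [names[i] for i in ch2.get_support(indices=True)]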
def _load_labels(self, labels_filepath):
"""Loads the labels from the labels file
:param labels_filepath: path to the labels file
:returns: dictionary mapping the document name to its class
:rtype: dictionary
"""
labels_dict={}
labels_file = Path(labels_filepath)
if labels_file.is_file():
logger.info("Loading: " + labels_filepath)
with open(labels_filepath, "r") as ins:
for line in ins:
line=line.strip("\n")
line=line.split(" ")
file_name=line[0]
file_class=line[1]
labels_dict[file_name]=file_class
else:
raise IOError("Labels file: " + labels_filepath + " does not exist.")
return labels_dict
def _extract_token_features_from_text(self, corpus_list, doc_name_to_id_dict):
"""Extracts simple punctuation and capitalisation count-based features from documents
:param corpus_list: list of documents
:param doc_name_to_id_dict: dictionary mapping from document name to position in corpus_list
:returns: dictionary mapping document name to a list of token features
:rtype: dictionary
"""
'''
Go through the documents and extract simple punctuation and lexical
features (capitalisation, count of punctuation)
'''
doc_count=0
token_feature_dict=defaultdict(list)
for doc_name, row_id in doc_name_to_id_dict.iteritems():
logger.debug("Extracting token features from: " + doc_name)
doc=corpus_list[row_id]
sentences=doc.split(".")
upper_count=0
lower_count=0
mixed_count=0
punctuation_count=0
for sentence in sentences:
words=sentence.split(" ")
for word in words:
if word.isupper():
if re.search('[a-zA-Z]',word):
upper_count+=1
if word.islower():
if re.search('[a-zA-Z]',word):
lower_count+=1
if not word.islower() and not word.isupper():
if re.search('[a-zA-Z]',word):
mixed_count+=1
if word in string.punctuation:
if len(word)>0:
punctuation_count+=1
token_feature_dict[doc_name].append([upper_count,lower_count,mixed_count,punctuation_count])
return token_feature_dict
def _extract_sentiment_from_text(self, corpus_list, doc_name_to_id_dict):
"""Extracts several simple sentiment features from a document. I count the number of positive and negative sentiment words in a document,
the number, the count of the longest run of positives/negatives and the overall polarity of the document. These features are attempting
to identify incongruety, and therefore potentially sarcasm. See paper: Joshi et al. (2015), Harnessing Context Incongruity for Sarcasm Detection
:param corpus_list: list of documents from corpus
:param doc_name_to_id_dict: mapping from the document name to its position in corpus_list
:returns: dictionary of sentiment features per document
:rtype: dictionary
"""
vader = SentimentIntensityAnalyzer()
'''
Go through the documents and rate their sentiment
'''
doc_count=0
sentiment_feature_dict=defaultdict(list)
for doc_name, row_id in doc_name_to_id_dict.iteritems():
logger.debug("Extracting sentiment from: " + doc_name)
doc=corpus_list[row_id]
'''
doc is one document from our corpus
'''
sentences=doc.split(".")
pos_count=0
neg_count=0
prev_word_was_positive=False
prev_word_was_negative=False
pos_neg_count=0
count=0
longest_run_of_positives=0
longest_run_of_negatives=0
run_of_positives_count=0
run_of_negatives_count=0
score=vader.polarity_scores(' '.join(sentences))
compound_polarity=score['compound']
'''
Rate the overall polarity of the document (1 positive, 0 negative)
'''
if compound_polarity>0:
compound_polarity=1
else:
compound_polarity=0
'''
Rate each word in the corpus for sentiment and construct the word-based
features
'''
for sentence in sentences:
words=sentence.split(" ")
for word in words:
score=vader.polarity_scores(word)
'''
If the positive sentiment of a word is greater than the negative sentiment
'''
if score['pos']>abs(score['neg']):
pos_count+=1
if prev_word_was_negative:
pos_neg_count+=1
prev_word_was_negative=False
if run_of_negatives_count>longest_run_of_negatives:
longest_run_of_negatives=run_of_negatives_count
run_of_negatives_count=0
else:
run_of_positives_count+=1
prev_word_was_positive=True
'''
If the negative sentiment of a word is greater than the positive sentiment
'''
if score['pos']<abs(score['neg']):
neg_count+=1
if prev_word_was_positive:
prev_word_was_positive=False
pos_neg_count+=1
if run_of_positives_count>longest_run_of_positives:
longest_run_of_positives=run_of_positives_count
run_of_positives_count=0
else:
run_of_negatives_count+=1
prev_word_was_negative=True
count+=1
sentiment_feature_dict[doc_name].append([pos_count,neg_count,pos_neg_count,longest_run_of_negatives,longest_run_of_positives,compound_polarity])
return sentiment_feature_dict
def _load_text(self, data_dirpath, vectorizer_count=None, vectorizer_tfidf=None):
""" Parses and tokenises the input document files
:param data_dirpath: the directory containing the documents
:param count_vectorizer: scikit vectorizer to extract corpus counts (optional)
:param tfidf_vectorizer: scikit vectorizer to extract corpus tfidf (optional)
:returns: count vectorizer, tfidf vectorizer, pandas dataframe of count features, pandas dataframe of tfidf features, list of documents (corpus_list), mapping of document names to positions in the list of documents (corpus_list)
:rtype: vectorizer, vectorizer, pandas dataframe, pandas dataframe, list, dictionary
"""
corpus_list=[]
document_name_to_id_dict={}
count=0
file_list=sorted(os.listdir(data_dirpath)) # read the files in sorted order
for filename in file_list:
data_filepath=data_dirpath+"/"+filename
logger.debug("Loading: " + data_filepath)
'''
load in the document; be mindful of the encoding
'''
text=io.open(data_filepath, mode="r", encoding="ISO-8859-1").read()
tokens=SatireClassifier.tokenize(text)
'''
corpus_list is a list of the documents pre-processed for stopwords etc
'''
corpus_list.append(' '.join(tokens))
'''
dictionary that maps a filename to its position in corpus_list
'''
document_name_to_id_dict[filename]=count
count+=1
'''
Extract count features from the text
'''
if not vectorizer_count:
'''
We have not passed in a vectorizer, so create one. Else transform the dataset using the provided vectorizer e.g. so the training and testing datasets share the same words.
'''
vectorizer_count = CountVectorizer(ngram_range=(1,1),token_pattern=r"(?u)\b\w\w+\b|\*|!|\?|\"|\'", encoding="ISO-8859-1",strip_accents='unicode')
vectorizer_tfidf = TfidfVectorizer(ngram_range=(1,1),token_pattern=r"(?u)\b\w\w+\b|\*|!|\?|\"|\'", encoding="ISO-8859-1",strip_accents='unicode', sublinear_tf=False)
#TfidfVectorizer(sublinear_tf=True, max_df=0.75, stop_words='english')
corpus_counts = vectorizer_count.fit_transform(corpus_list)
corpus_tfidf = vectorizer_tfidf.fit_transform(corpus_list)
else:
corpus_counts = vectorizer_count.transform(corpus_list)
corpus_tfidf = vectorizer_tfidf.transform(corpus_list)
'''
Store the features and column names in a pandas dataframe for ease of manipulation. The words in the corpus are the column headings.
'''
corpus_counts_df = pd.DataFrame(corpus_counts.toarray(), columns=vectorizer_count.get_feature_names())
corpus_tfidf_df = pd.DataFrame(corpus_tfidf.toarray(), columns=vectorizer_tfidf.get_feature_names())
return vectorizer_count, vectorizer_tfidf, corpus_counts_df, corpus_tfidf_df, corpus_list, document_name_to_id_dict
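# The token_pattern above keeps alphanumeric tokens of two or more characters plus a few punctuation
# marks (*, !, ?, quotes) that can signal satire, so a string like "Great *really* great?!" is
# tokenised roughly as ['Great', '*', 'really', '*', 'great', '?', '!'] before lowercasing.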
def _add_labels_to_documents(self,text_df,doc_name_to_id_dict,labels_dict):
"""Adds satire/non-satire labels to the correct documents in the pandas dataframe
:param text_df: pandas dataframe containing the text features for our documents
:param doc_name_to_id_dict: dictionary mapping document name
:param labels_dict:
:returns: pandas dataframe enriched with class labels
:rtype: pandas dataframe
"""
logger.info("Adding labels to documents ...")
for doc_name,row_id in doc_name_to_id_dict.iteritems():
if doc_name in labels_dict:
label=labels_dict[doc_name]
logger.debug("Label is: " + label + " for document: " + doc_name)
if "true" in label:
text_df.ix[row_id,'Label']=0
else:
text_df.ix[row_id,'Label']=1
else:
logger.debug("Could not find " + doc_name + " in the labels_dict, even though it should really be there.")
return text_df
def _add_sentiment_to_documents(self,text_df,doc_name_to_id_dict,sentiment_dict):
"""Adds the sentiment features to the pandas features dataframe
:param text_df: pandas dataframe containing our features
:param doc_name_to_id_dict: dictionary mapping document name to row in the dataframe
:param sentiment_dict: dictionary containing the sentiment features for each document
:returns: enriched dataframe containing the additional featureset
:rtype: pandas dataframe
"""
for doc_name,row_id in doc_name_to_id_dict.iteritems():
if doc_name in sentiment_dict:
sentiment=sentiment_dict[doc_name][0]
logger.debug("Positive sentiment is: " + str(sentiment[0]) + " for document: " + doc_name)
logger.debug("Negative sentiment is: " + str(sentiment[1]) + " for document: " + doc_name)
text_df.ix[row_id,'Sentiment_pos']=sentiment[0]
text_df.ix[row_id,'Sentiment_neg']=sentiment[1]
text_df.ix[row_id,'Sentiment_change']=sentiment[2]
text_df.ix[row_id,'Sentiment_neg_run']=sentiment[3]
text_df.ix[row_id,'Sentiment_pos_run']=sentiment[4]
text_df.ix[row_id,'Sentiment_compound']=sentiment[5]
else:
logger.debug("Could not find " + doc_name + " in the labels_dict, even though it should really be there.")
return text_df
def _add_token_features_to_documents(self,text_df,doc_name_to_id_dict,tokens_dict):
"""Adds the token features to the pandas features dataframe
:param text_df: pandas dataframe containing our features
:param doc_name_to_id_dict: dictionary mapping document name to row in the dataframe
:param tokens_dict: dictionary containing the token features for each document
:returns: enriched dataframe containing the additional featureset
:rtype: pandas dataframe
"""
for doc_name,row_id in doc_name_to_id_dict.iteritems():
if doc_name in tokens_dict:
token_features=tokens_dict[doc_name][0]
text_df.ix[row_id,'Token_upper']=token_features[0]
text_df.ix[row_id,'Token_lower']=token_features[1]
text_df.ix[row_id,'Token_mixed']=token_features[2]
text_df.ix[row_id,'Token_punctuation']=token_features[3]
else:
logger.debug("Could not find " + doc_name + " in the tokens_dict, even though it should really be there.")
return text_df
def _normalise_sparse_features(self,text_features_df,scaler=None):
"""Normalises sparse features so that their maximum is 1.0 while retaining sparsity.
:param text_features_df: pandas dataframe containing the text features
:param scaler: scikit scaler to perform the normalisation (optional)
:returns: normalised text features in pandas dataframe, scikit scaler
:rtype: pandas dataframe, scikit scaler
"""
text_features_without_labels=text_features_df.loc[:,text_features_df.columns != 'Label'].values
if not scaler:
scaler = preprocessing.MaxAbsScaler().fit(text_features_without_labels)
text_features_without_labels=scaler.transform(text_features_without_labels)
text_features_df.loc[:,text_features_df.columns!='Label']=text_features_without_labels
return text_features_df, scaler
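# MaxAbsScaler divides each feature column by its maximum absolute value, so a count column such as
# [0, 2, 4] becomes [0.0, 0.5, 1.0]; zeros stay zero, which is what preserves sparsity.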
def _load_discrete_data(self):
"""Reads all the files in the data directory into a python list. I assume they contain text. This list is passed in
to the scikit-learn CountVectorizer to extract tf-idf count-based features. CountVectorizer also stops, stems the
text. All the features are discrete counts.
:returns: pandas dataframe for training features, training labels dictionary, training sentences list, pandas dataframe for testing features, testing labels dictionary, testing sentences list
:rtype: pandas dataframe, dictionary, list, pandas dataframe, dictionary, list
"""
'''
Read the data
'''
training_labels_dict=self._load_labels(self.training_labels_filepath)
testing_labels_dict=self._load_labels(self.testing_labels_filepath)
count_vectorizer,tfidf_vectorizer,training_text_df,training_tfidf_df,training_sentences,training_doc_name_to_id_dict=self._load_text(self.training_dirpath)
training_sentiment_feature_dict=self._extract_sentiment_from_text(training_sentences, training_doc_name_to_id_dict)
training_token_feature_dict=self._extract_token_features_from_text(training_sentences, training_doc_name_to_id_dict)
training_watch_word_feature_dict=self._extract_watch_word_features_from_text(training_sentences, training_doc_name_to_id_dict)
_,_,testing_text_df,testing_tfidf_df,testing_sentences,testing_doc_name_to_id_dict=self._load_text(self.testing_dirpath, count_vectorizer, tfidf_vectorizer)
testing_sentiment_feature_dict=self._extract_sentiment_from_text(testing_sentences, testing_doc_name_to_id_dict)
testing_token_feature_dict=self._extract_token_features_from_text(testing_sentences, testing_doc_name_to_id_dict)
testing_watch_word_feature_dict=self._extract_watch_word_features_from_text(testing_sentences, testing_doc_name_to_id_dict)
logger.info("Size of training dataset: " + str(training_text_df.shape[0])+"x"+str(training_text_df.shape[1]))
logger.info("Size of testing dataset: " + str(testing_text_df.shape[0]) +"x"+ str(testing_text_df.shape[1]))
'''
| |
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0907826,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.10146,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 2.83407e-06,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202691,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.01201e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.165626,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.267148,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.134847,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.567621,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.189425,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.19355,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 1.91191e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00694708,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0502372,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0513779,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0502391,
'Execution Unit/Register Files/Runtime Dynamic': 0.058325,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.105836,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.272112,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.50613,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00238358,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00238358,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00214464,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000867714,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000738048,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00764984,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0204046,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0493909,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.14169,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.185957,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.167754,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.51268,
'Instruction Fetch Unit/Runtime Dynamic': 0.431156,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0375641,
'L2/Runtime Dynamic': 0.00825058,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.73504,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.732701,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0484613,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0484613,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.96388,
'Load Store Unit/Runtime Dynamic': 1.02016,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.119497,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.238995,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0424099,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.042828,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.195339,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0309176,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.4243,
'Memory Management Unit/Runtime Dynamic': 0.0737456,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.7214,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 5.35156e-06,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00747264,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0840284,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming | |
276.3,
266.1,
276.1,
268.1,
277.0,
273.4,
269.7,
],
]
],
units="K",
dtype="f8",
)
f.set_data(data, axes=("domainaxis0", "domainaxis1", "domainaxis2"))
# domain_ancillary
c = DomainAncillary()
c.set_properties({"units": "m"})
c.nc_set_variable("a")
data = Data([10.0], units="m", dtype="f8")
c.set_data(data)
b = Bounds()
b.nc_set_variable("a_bounds")
data = Data([[5.0, 15.0]], units="m", dtype="f8")
b.set_data(data)
c.set_bounds(b)
f.set_construct(
c, axes=("domainaxis0",), key="domainancillary0", copy=False
)
# domain_ancillary
c = DomainAncillary()
c.nc_set_variable("b")
data = Data([20.0], dtype="f8")
c.set_data(data)
b = Bounds()
b.nc_set_variable("b_bounds")
data = Data([[14.0, 26.0]], dtype="f8")
b.set_data(data)
c.set_bounds(b)
f.set_construct(
c, axes=("domainaxis0",), key="domainancillary1", copy=False
)
# domain_ancillary
c = DomainAncillary()
c.set_properties({"units": "m", "standard_name": "surface_altitude"})
c.nc_set_variable("surface_altitude")
data = Data(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 12.0, 10.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 52.0, 40.0],
[0.0, 0.0, 0.0, 7.0, 12.0, 8.0, 37.0, 73.0, 107.0],
[0.0, 0.0, 28.0, 30.0, 30.0, 30.0, 83.0, 102.0, 164.0],
[34.0, 38.0, 34.0, 32.0, 30.0, 31.0, 105.0, 281.0, 370.0],
[91.0, 89.0, 95.0, 94.0, 132.0, 194.0, 154.0, 318.0, 357.0],
[93.0, 114.0, 116.0, 178.0, 323.0, 365.0, 307.0, 289.0, 270.0],
],
units="m",
dtype="f4",
)
c.set_data(data)
f.set_construct(
c,
axes=("domainaxis1", "domainaxis2"),
key="domainancillary2",
copy=False,
)
# cell_measure
c = CellMeasure()
c.set_properties({"units": "km2"})
c.nc_set_variable("cell_measure")
data = Data(
[
[
2391.9657,
2391.9657,
2391.9657,
2391.9657,
2391.9657,
2391.9657,
2391.9657,
2391.9657,
2391.9657,
2392.6009,
],
[
2392.6009,
2392.6009,
2392.6009,
2392.6009,
2392.6009,
2392.6009,
2392.6009,
2392.6009,
2393.0949,
2393.0949,
],
[
2393.0949,
2393.0949,
2393.0949,
2393.0949,
2393.0949,
2393.0949,
2393.0949,
2393.4478,
2393.4478,
2393.4478,
],
[
2393.4478,
2393.4478,
2393.4478,
2393.4478,
2393.4478,
2393.4478,
2393.6595,
2393.6595,
2393.6595,
2393.6595,
],
[
2393.6595,
2393.6595,
2393.6595,
2393.6595,
2393.6595,
2393.7301,
2393.7301,
2393.7301,
2393.7301,
2393.7301,
],
[
2393.7301,
2393.7301,
2393.7301,
2393.7301,
2393.6595,
2393.6595,
2393.6595,
2393.6595,
2393.6595,
2393.6595,
],
[
2393.6595,
2393.6595,
2393.6595,
2393.4478,
2393.4478,
2393.4478,
2393.4478,
2393.4478,
2393.4478,
2393.4478,
],
[
2393.4478,
2393.4478,
2393.0949,
2393.0949,
2393.0949,
2393.0949,
2393.0949,
2393.0949,
2393.0949,
2393.0949,
],
[
2393.0949,
2392.6009,
2392.6009,
2392.6009,
2392.6009,
2392.6009,
2392.6009,
2392.6009,
2392.6009,
2392.6009,
],
],
units="km2",
dtype="f8",
)
c.set_data(data)
c.set_measure("area")
f.set_construct(
c,
axes=("domainaxis2", "domainaxis1"),
key="cellmeasure0",
copy=False,
)
# auxiliary_coordinate
c = AuxiliaryCoordinate()
c.set_properties({"units": "degrees_N", "standard_name": "latitude"})
c.nc_set_variable("latitude_1")
data = Data(
[
[
53.941,
53.987,
54.029,
54.066,
54.099,
54.127,
54.15,
54.169,
54.184,
],
[
53.504,
53.55,
53.591,
53.627,
53.66,
53.687,
53.711,
53.729,
53.744,
],
[
53.067,
53.112,
53.152,
53.189,
53.221,
53.248,
53.271,
53.29,
53.304,
],
[
52.629,
52.674,
52.714,
52.75,
52.782,
52.809,
52.832,
52.85,
52.864,
],
[
52.192,
52.236,
52.276,
52.311,
52.343,
52.37,
52.392,
52.41,
52.424,
],
[
51.754,
51.798,
51.837,
51.873,
51.904,
51.93,
51.953,
51.971,
51.984,
],
[
51.316,
51.36,
51.399,
51.434,
51.465,
51.491,
51.513,
51.531,
51.545,
],
[
50.879,
50.922,
50.96,
50.995,
51.025,
51.052,
51.074,
51.091,
51.105,
],
[
50.441,
50.484,
50.522,
50.556,
50.586,
50.612,
50.634,
50.652,
50.665,
],
[
50.003,
50.045,
50.083,
50.117,
50.147,
50.173,
50.194,
50.212,
50.225,
],
],
units="degrees_N",
dtype="f8",
)
c.set_data(data)
f.set_construct(
c,
axes=("domainaxis1", "domainaxis2"),
key="auxiliarycoordinate0",
copy=False,
)
# auxiliary_coordinate
c = AuxiliaryCoordinate()
c.set_properties({"units": "degrees_E", "standard_name": "longitude"})
c.nc_set_variable("longitude_1")
data = Data(
[
[
2.004,
2.747,
3.492,
4.238,
4.986,
5.734,
6.484,
7.234,
7.985,
2.085,
],
[
2.821,
3.558,
4.297,
5.037,
5.778,
6.52,
7.262,
8.005,
2.165,
2.893,
],
[
3.623,
4.355,
5.087,
5.821,
6.555,
7.29,
8.026,
2.243,
2.964,
3.687,
],
[
4.411,
5.136,
5.862,
6.589,
7.317,
8.045,
2.319,
3.033,
3.749,
4.466,
],
[
5.184,
5.903,
6.623,
7.344,
8.065,
2.394,
3.101,
3.81,
4.52,
5.231,
],
[
5.944,
6.656,
7.37,
8.084,
2.467,
3.168,
3.87,
4.573,
5.278,
5.983,
],
[
6.689,
7.395,
8.102,
2.539,
3.233,
3.929,
4.626,
5.323,
6.022,
6.721,
],
[
7.42,
8.121,
2.61,
3.298,
3.987,
4.677,
5.368,
6.059,
6.752,
7.445,
],
[
8.139,
2.679,
3.361,
4.043,
4.727,
5.411,
6.097,
6.783,
7.469,
8.156,
],
],
units="degrees_E",
dtype="f8",
)
c.set_data(data)
f.set_construct(
c,
axes=("domainaxis2", "domainaxis1"),
key="auxiliarycoordinate1",
copy=False,
)
# auxiliary_coordinate
c = AuxiliaryCoordinate()
c.set_properties({"long_name": "Grid latitude name"})
c.nc_set_variable("auxiliary")
data_mask = Data(
[
True,
False,
False,
False,
False,
False,
False,
False,
False,
False,
],
dtype="b1",
)
data = Data(
[
b"",
b"beta",
b"gamma",
b"delta",
b"epsilon",
b"zeta",
b"eta",
b"theta",
b"iota",
b"kappa",
],
dtype="S7",
mask=data_mask,
)
c.set_data(data)
f.set_construct(
c, axes=("domainaxis1",), key="auxiliarycoordinate2", copy=False
)
# dimension_coordinate
c = DimensionCoordinate()
c.set_properties(
{
"computed_standard_name": "altitude",
"standard_name": "atmosphere_hybrid_height_coordinate",
}
)
c.nc_set_variable("atmosphere_hybrid_height_coordinate")
data = Data([1.5], dtype="f8")
c.set_data(data)
b = Bounds()
b.nc_set_variable("atmosphere_hybrid_height_coordinate_bounds")
data = Data([[1.0, 2.0]], dtype="f8")
b.set_data(data)
c.set_bounds(b)
f.set_construct(
c, axes=("domainaxis0",), key="dimensioncoordinate0", copy=False
)
# dimension_coordinate
c = DimensionCoordinate()
c.set_properties(
{"units": "degrees", "standard_name": "grid_latitude"}
)
c.nc_set_variable("y")
data = Data(
[2.2, 1.76, 1.32, 0.88, 0.44, 0.0, -0.44, -0.88, -1.32, -1.76],
units="degrees",
dtype="f8",
)
c.set_data(data)
b = Bounds()
b.nc_set_variable("y_bnds")
data = Data(
[
[2.42, 1.98],
[1.98, 1.54],
[1.54, 1.1],
[1.1, 0.66],
[0.66, 0.22],
[0.22, -0.22],
[-0.22, -0.66],
[-0.66, -1.1],
[-1.1, -1.54],
[-1.54, -1.98],
],
units="degrees",
dtype="f8",
)
b.set_data(data)
c.set_bounds(b)
f.set_construct(
c, axes=("domainaxis1",), key="dimensioncoordinate1", copy=False
)
# dimension_coordinate
c = DimensionCoordinate()
c.set_properties(
{"units": "degrees", "standard_name": "grid_longitude"}
)
c.nc_set_variable("x")
data = Data(
[-4.7, -4.26, -3.82, -3.38, -2.94, -2.5, -2.06, -1.62, -1.18],
units="degrees",
dtype="f8",
)
c.set_data(data)
b = Bounds()
b.nc_set_variable("x_bnds")
data = Data(
[
[-4.92, -4.48],
[-4.48, -4.04],
[-4.04, -3.6],
[-3.6, -3.16],
[-3.16, -2.72],
[-2.72, -2.28],
[-2.28, -1.84],
[-1.84, -1.4],
[-1.4, -0.96],
],
units="degrees",
dtype="f8",
)
b.set_data(data)
c.set_bounds(b)
f.set_construct(
c, axes=("domainaxis2",), key="dimensioncoordinate2", copy=False
)
# dimension_coordinate
c = DimensionCoordinate()
c.set_properties(
{"units": "days since 2018-12-01", "standard_name": "time"}
)
c.nc_set_variable("time")
data = Data([31.0], units="days since 2018-12-01", dtype="f8")
c.set_data(data)
f.set_construct(
c, axes=("domainaxis3",), key="dimensioncoordinate3", copy=False
)
# field_ancillary
c = FieldAncillary()
c.set_properties(
{"units": "K", "standard_name": "air_temperature standard_error"}
)
c.nc_set_variable("air_temperature_standard_error")
data = Data(
[
[0.76, 0.38, 0.68, 0.19, 0.14, 0.52, 0.57, 0.19, 0.81],
[0.59, 0.68, 0.25, 0.13, 0.37, 0.12, 0.26, 0.45, 0.36],
[0.88, 0.4, 0.35, 0.87, 0.24, 0.64, 0.78, 0.28, 0.11],
[0.73, 0.49, 0.69, 0.54, 0.17, 0.6, 0.82, 0.89, 0.71],
[0.43, 0.39, 0.45, 0.74, 0.85, 0.47, 0.37, 0.87, 0.46],
[0.47, 0.31, 0.76, 0.69, 0.61, 0.26, 0.43, 0.75, 0.23],
[0.43, 0.26, 0.5, 0.79, 0.25, 0.63, 0.25, 0.24, 0.74],
[0.33, 0.26, 0.89, 0.48, 0.79, 0.88, 0.41, 0.89, 0.47],
[0.25, 0.42, 0.61, 0.87, 0.58, 0.89, 0.58, 0.8, 0.32],
[0.49, 0.48, 0.49, 0.16, 0.65, 0.66, 0.86, 0.74, 0.32],
],
units="K",
dtype="f8",
)
c.set_data(data)
f.set_construct(
c,
axes=("domainaxis1", "domainaxis2"),
key="fieldancillary0",
copy=False,
)
# cell_method
c = CellMethod()
c.set_method("mean")
c.set_axes(("domainaxis1", "domainaxis2"))
c.set_qualifier("where", "land")
interval0 = Data(0.1, units="degrees", dtype="f8")
c.set_qualifier("interval", [interval0])
f.set_construct(c)
# cell_method
c = CellMethod()
c.set_method("maximum")
c.set_axes("domainaxis3")
f.set_construct(c)
# coordinate_reference
c = CoordinateReference()
c.set_coordinates({"dimensioncoordinate0"})
c.datum.set_parameter("earth_radius", 6371007)
c.coordinate_conversion.set_parameter(
"standard_name", "atmosphere_hybrid_height_coordinate"
)
c.coordinate_conversion.set_parameter(
"computed_standard_name", "altitude"
)
c.coordinate_conversion.set_domain_ancillaries(
{
"a": "domainancillary0",
"b": "domainancillary1",
"orog": "domainancillary2",
}
)
f.set_construct(c)
# coordinate_reference
c = CoordinateReference()
c.nc_set_variable("rotated_latitude_longitude")
c.set_coordinates(
{
"dimensioncoordinate2",
"auxiliarycoordinate1",
"dimensioncoordinate1",
"auxiliarycoordinate0",
}
)
c.datum.set_parameter("earth_radius", 6371007)
c.coordinate_conversion.set_parameter("grid_north_pole_latitude", 38.0)
c.coordinate_conversion.set_parameter(
"grid_north_pole_longitude", 190.0
)
c.coordinate_conversion.set_parameter(
"grid_mapping_name", "rotated_latitude_longitude"
)
f.set_construct(c)
elif n == 3:
f = Field()
f.set_properties(
{
"Conventions": "CF-" + CF(),
"featureType": "timeSeries",
"_FillValue": -999.9,
"standard_name": "precipitation_flux",
"units": "kg m-2 day-1",
}
)
f.nc_set_variable("p")
f.nc_set_global_attributes({"Conventions": None, "featureType": None})
# domain_axis
c = DomainAxis(size=4)
c.nc_set_dimension("station")
f.set_construct(c, key="domainaxis0")
# domain_axis
c = DomainAxis(size=9)
c.nc_set_dimension("timeseries")
f.set_construct(c, key="domainaxis1")
# field data
data_mask = Data(
[
[False, False, False, True, True, True, True, True, True],
[False, False, False, False, False, False, False, True, True],
[False, False, False, False, False, True, True, True, True],
[
False,
False,
False,
False,
False,
False,
False,
False,
False,
],
],
dtype="b1",
)
data = Data(
[
[
3.98,
0.0,
0.0,
9.969209968386869e36,
9.969209968386869e36,
9.969209968386869e36,
9.969209968386869e36,
9.969209968386869e36,
9.969209968386869e36,
],
[
0.0,
0.0,
0.0,
3.4,
0.0,
0.0,
4.61,
9.969209968386869e36,
9.969209968386869e36,
],
[
0.86,
0.8,
0.75,
0.0,
4.56,
9.969209968386869e36,
9.969209968386869e36,
9.969209968386869e36,
9.969209968386869e36,
],
[0.0, 0.09, 0.0, 0.91, 2.96, 1.14, 3.86, 0.0, 0.0],
],
units="kg m-2 day-1",
dtype="f8",
mask=data_mask,
)
f.set_data(data, axes=("domainaxis0", "domainaxis1"))
# auxiliary_coordinate
c = AuxiliaryCoordinate()
c.set_properties(
{
"standard_name": "time",
"long_name": "time of measurement",
"units": "days since 1970-01-01 00:00:00",
}
)
c.nc_set_variable("time")
data_mask = Data(
[
[False, False, False, True, True, True, True, True, True],
[False, False, False, False, False, False, False, True, True],
[False, False, False, False, False, True, True, True, True],
[
False,
False,
False,
False,
False,
False,
False,
False,
False,
],
],
dtype="b1",
)
data = Data(
[
[
-3.0,
-2.0,
-1.0,
9.969209968386869e36,
9.969209968386869e36,
9.969209968386869e36,
9.969209968386869e36,
9.969209968386869e36,
9.969209968386869e36,
],
[
1.0,
2.0,
| |
import numpy as np
from numba import jit
import numpy.lib.recfunctions as rfn
##########################################################################
def count_hsps(blast_out):
"""Iterate over blast output. It considers that the output
is in outfmt 6 and that all the hsp should be one after the
other
Args:
blast_out (string): Path to a blast tabulated output
return:
int: number of hsps
"""
current_pair = ''
with open(blast_out) as r_file:
for line in r_file:
split_line = line.rstrip().split('\t')
if current_pair == '':
current_pair = (split_line[0], split_line[1])
number = 1
elif (split_line[0], split_line[1]) != current_pair:
current_pair = (split_line[0], split_line[1])
number += 1
return number
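# Minimal illustration (hypothetical outfmt 6 content): consecutive lines sharing the same
# query/subject pair count as one group, so a file such as
#   q1<TAB>s1<TAB>...   (HSP 1 of pair q1/s1)
#   q1<TAB>s1<TAB>...   (HSP 2 of pair q1/s1)
#   q1<TAB>s2<TAB>...   (HSP 1 of pair q1/s2)
# makes count_hsps return 2.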
##########################################################################
def iterrator_on_blast_hsp(blast_out):
"""Iterate over blast output. It considers that the output
is in outfmt 6 and that all the hsp should be one after the
other
Args:
blast_out (string): Path to a blast tabulated output
Yields:
pandas.DataFrame: A pandas Dataframe of the two proteins and their hsps
"""
current_pair = ''
list_line = []
with open(blast_out) as r_file:
for line in r_file:
split_line = line.rstrip().split('\t')
if (split_line[0], split_line[1]) == current_pair:
list_line.append(split_line)
elif current_pair == '':
current_pair = (split_line[0], split_line[1])
list_line = [split_line]
else:
yield list_line
current_pair = (split_line[0], split_line[1])
list_line = [split_line]
yield list_line
##########################################################################
@jit(nopython=True)
def get_orientation(sstart, send):
"""Returns the orientation between two points.
Args:
sstart (numpy.array): Start of the sequence
send (numpy.array): End of the sequence
Returns:
numpy.array: 1 or -1 depending on the orientation
"""
return (send - sstart) // np.abs((send - sstart))
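# Example (hypothetical coordinates): a forward-strand HSP with sstart=10 and send=40 gives
# (40 - 10) // abs(40 - 10) = 1, while the reverse orientation with sstart=40 and send=10
# gives -1.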
##########################################################################
@jit(nopython=True)
def update_values(old_values, sens):
"""Update sequence start/end with depending of the orientation.
Args:
old_values (numpy.array): Position in the sequence
sens (numpy.array): 1 or -1 depending on the orientation
Returns:
np.array: Position in the sequence
"""
return old_values * sens
##########################################################################
@jit(nopython=True)
def length_aln_on_sequence(start, end):
"""Return the length of the sequence.
Args:
start (numpy.array): Start of the sequence
end (numpy.array): End of the sequence
Returns:
np.array: Length of the alignment on the sequence
"""
return end - start + 1
##########################################################################
@jit(nopython=True)
def calculateIdentities(percIdentity, length):
"""Return the length of the sequence.
Args:
percIdentity (numpy.array): Percentage of identity from blast
length (numpy.array): length of the alignment
Returns:
np.array: number of identical amino acids in the alignment
"""
return np.floor(percIdentity * length / 100 + 0.5)
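# Worked example: percIdentity=95.0 over an alignment of length 200 gives
# floor(95.0 * 200 / 100 + 0.5) = floor(190.5) = 190 identical positions.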
##########################################################################
@jit(nopython=True)
def calculate_fraction(delta, lgHSP, bitscore, pid, pos):
"""Calculate the new score, identities and positives of the hsp2.
Args:
delta (int): Difference of size between old and new hsp2 length
lgHSP (int): size of the hsp before trimming
bitscore (float): Score of the alignment
pid (int): Number of identical in the alignment
pos (int): Number of positives in the alignment
Returns:
float, int, int, int: The updated values of the score, identities, positives and length
"""
# Calculate new score, id and positive
# Calculation: initial_value * (fraction of length that has been preserved)
fraction = 1 - (delta / lgHSP)
new_score = np.floor(bitscore * fraction)
new_id = np.floor(pid * fraction)
new_pos = np.floor(pos * fraction)
# Calculate new length
new_length = lgHSP - delta
# Set expect value to -1 : this value should not be used after
# having changed HSPs boundaries
# new_evalue = -1
return new_score, new_id, new_pos, new_length
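# Worked example: trimming delta=20 residues from an HSP of length lgHSP=100 keeps a
# fraction of 1 - 20/100 = 0.8, so a bitscore of 250 becomes floor(250 * 0.8) = 200,
# 90 identities become 72, 95 positives become 76, and the new length is 80.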
##########################################################################
def remove_overlap_query(hsp1, hsp2):
"""Remove overlap with the given hsp2 in the query.
Args:
hsp1 (np.array): Blast values of the given hsp (best)
hsp2 (np.array): Blast values of the given hsp (questioning)
Returns:
dict: Dictionnary with the updated values for the hsp2
"""
# Calculate where is the overlap and remove the overlapping part: 'qstart': 6, 'qend': 7, 'sstart': 8, 'send': 9,
if hsp2[6] < hsp1[6]:
new_qstart = hsp2[6]
new_qend = hsp1[6] -1
delta = hsp2[7] - new_qend
new_sstart = hsp2[8]
new_send = hsp2[9] - delta
elif hsp2[7] > hsp1[7]:
new_qstart = hsp1[7] + 1
new_qend = hsp2[7]
delta = new_qstart - hsp2[6]
new_sstart = hsp2[8] + delta
new_send = hsp2[9]
# lgHSP: 17, bitscore: 11, id: 12, pos:13
new_score, new_id, new_pos, new_length = calculate_fraction(delta=delta,
lgHSP=hsp2[17],
bitscore=hsp2[11],
pid=hsp2[12],
pos=hsp2[13])
return {11:new_score, 17:new_length,
7:new_qend, 6:new_qstart, 9:new_send,
8:new_sstart, 13:new_pos, 12:new_id}
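# Illustration with hypothetical coordinates: if hsp1 spans query 100-200 and hsp2 spans
# query 180-260 (subject 300-380), the second branch applies: new_qstart=201, new_qend=260,
# delta=201-180=21, the subject start shifts to 300+21=321 and the subject end stays 380.
# The score, identities, positives and length of hsp2 are then rescaled by calculate_fraction.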
##########################################################################
def remove_overlap_subject(hsp1, hsp2):
"""Remove overlap with the given hsp2 in the subject.
Args:
hsp1 (pandas.Series): Blast values of the given hsp (best)
hsp2 (pandas.Series): Blast values of the given hsp (questioning)
Returns:
dict: Dictionnary with the updated values for the hsp2
"""
# Calculate where is the overlap and remove the overlapping part: 'qstart': 6, 'qend': 7, 'sstart': 8, 'send': 9,
if hsp2[8] < hsp1[8]:
new_sstart = hsp2[8]
new_send = hsp1[8] -1
delta = hsp2[9] - new_send
new_qstart = hsp2[6]
new_qend = hsp2[7] - delta
elif hsp2[9] > hsp1[9]:
new_sstart = hsp1[9] + 1
new_send = hsp2[9]
delta = new_sstart - hsp2[8]
new_qstart = hsp2[6] + delta
new_qend = hsp2[7]
# lgHSP: 17, bitscore: 11, id: 12, pos:13
new_score, new_id, new_pos, new_length = calculate_fraction(delta=delta,
lgHSP=hsp2[17],
bitscore=hsp2[11],
pid=hsp2[12],
pos=hsp2[13])
return {11:new_score, 17:new_length,
7:new_qend, 6:new_qstart, 9:new_send,
8:new_sstart, 13:new_pos, 12:new_id}
##########################################################################
def checkHSPS(hsp1, hsp2, HSPMIN=100):
"""compare two HSPS in the blast output
Args:
hsp1 (np.array): Blast values of the given hsp (best)
hsp2 (np.array): Blast values of the given hsp (questioning)
HSPMIN (int, optional): Minumum length of the HSP. Defaults to 100.
Returns:
dict: Dictionnary with the updated values for the hsp2
"""
dict_update = {18:0}
# Check if the hsp2 is in a different orientation than hsp1: 'sens': 14
if hsp1[14] != hsp2[14]:
# print(f'orientation wrong: {hsp1[14]} != {hsp2[14]}')
return dict_update
# Check if hsp1 is inside hsp2 for the query sequence: 'qstart': 6, 'qend': 7,
if hsp1[6] >= hsp2[6] and hsp1[7] <= hsp2[7]:
# print(f'hsp1 inside hsp2 for query: {hsp1[6]} >= {hsp2[6]} and {hsp1[7]} <= {hsp2[7]}')
return dict_update
# Check if hsp2 is inside hsp1 for the query sequence: 'qstart': 6, 'qend': 7,
elif hsp1[6] <= hsp2[6] and hsp1[7] >= hsp2[7]:
# print(f'hsp2 inside hsp1 for query: {hsp1[6]} <= {hsp2[6]} and {hsp1[7]} >= {hsp2[7]}')
return dict_update
# Check if hsp1 is inside hsp2 for the subject sequence: 'sstart': 8, 'send': 9,
elif hsp1[8] >= hsp2[8] and hsp1[9] <= hsp2[9]:
# print(f'hsp1 inside hsp2 for subject: {hsp1[8]} >= {hsp2[8]} and {hsp1[9]} <= {hsp2[9]}')
return dict_update
# Check if hsp2 is inside hsp1 for the subject sequence: 'sstart': 8, 'send': 9,
elif hsp1[8] <= hsp2[8] and hsp1[9] >= hsp2[9]:
# print(f'hsp2 inside hsp1 for subject: {hsp1[8]} <= {hsp2[8]} and {hsp1[9]} >= {hsp2[9]}')
return dict_update
# reject HSP pairs whose relative order differs between query and subject (crossing HSPs): 'qstart': 6, 'qend': 7, 'sstart': 8, 'send': 9,
# Query: ---- A ---- B ----- A = HSP1
# Sbjct: ---- B ---- A ----- B = HSP2
if (hsp1[7] - hsp2[7]) * (hsp1[9] - hsp2[9]) < 0:
# print(f'HSPs are in different orientation 1: ({hsp1[7]} - {hsp2[7]}) * ({hsp1[9]} - {hsp2[9]}) ===> {(hsp1[7] - hsp2[7]) * (hsp1[9] - hsp2[9])} < 0')
return dict_update
elif (hsp1[6] - hsp2[6]) * (hsp1[8] - hsp2[8]) < 0:
# print(f'HSPs are in different orientation 2: ({hsp1[6]} - {hsp2[6]}) * ({hsp1[8]} - {hsp2[8]}) ===> {(hsp1[6] - hsp2[6]) * (hsp1[8] - hsp2[8])} < 0')
return dict_update
overlap_q = (hsp2[6] - hsp1[7]) * (hsp2[7] - hsp1[6])
overlap_s = (hsp2[8] - hsp1[9]) * (hsp2[9] - hsp1[8])
# Accept non-overlapping HSPs in correct orientation
if overlap_q > 0 and overlap_s > 0 :
# print(f'No overlap in query and subject: {overlap_q} > 0 and {overlap_s} > 0')
dict_update[18] = 1
return dict_update
# Test if the query is overlapping
elif overlap_q < 0:
# print(f'Overlap in query: {overlap_q} > 0')
dict_update = remove_overlap_query(hsp1=hsp1,
hsp2=hsp2)
# update the hsp2 array with the new values
for index_key in dict_update:
hsp2[index_key] = dict_update[index_key]
overlap_s = (hsp2[8] - hsp1[9]) * (hsp2[9] - hsp1[8])
# Test if the subject is overlapping after a possible update of an overlapping query
if overlap_s < 0:
# print(f'Overlap in subject: {overlap_s} > 0')
dict_update = remove_overlap_subject(hsp1=hsp1,
hsp2=hsp2)
# update the hsp2 array with the new values
for index_key in dict_update:
hsp2[index_key] = dict_update[index_key]
# Filter out HSPs | |
from fractions import Fraction as F
>>> mean([F(3, 7), F(1, 21), F(5, 3), F(1, 3)])
Fraction(13, 21)
>>> from decimal import Decimal as D
>>> mean([D("0.5"), D("0.75"), D("0.625"), D("0.375")])
Decimal('0.5625')
If ``data`` is empty, StatisticsError will be raised.
"""
title = 'mean'
type_ = 'statistics'
init_inputs = [
NodeInputBP(label='data'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, statistics.mean(self.input(0)))
class Median_Node(NodeBase):
"""
Return the median (middle value) of numeric data.
When the number of data points is odd, return the middle data point.
When the number of data points is even, the median is interpolated by
taking the average of the two middle values:
>>> median([1, 3, 5])
3
>>> median([1, 3, 5, 7])
4.0
"""
title = 'median'
type_ = 'statistics'
init_inputs = [
NodeInputBP(label='data'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, statistics.median(self.input(0)))
class Median_Grouped_Node(NodeBase):
"""
Return the 50th percentile (median) of grouped continuous data.
>>> median_grouped([1, 2, 2, 3, 4, 4, 4, 4, 4, 5])
3.7
>>> median_grouped([52, 52, 53, 54])
52.5
This calculates the median as the 50th percentile, and should be
used when your data is continuous and grouped. In the above example,
the values 1, 2, 3, etc. actually represent the midpoint of classes
0.5-1.5, 1.5-2.5, 2.5-3.5, etc. The middle value falls somewhere in
class 3.5-4.5, and interpolation is used to estimate it.
Optional argument ``interval`` represents the class interval, and
defaults to 1. Changing the class interval naturally will change the
interpolated 50th percentile value:
>>> median_grouped([1, 3, 3, 5, 7], interval=1)
3.25
>>> median_grouped([1, 3, 3, 5, 7], interval=2)
3.5
This function does not check whether the data points are at least
``interval`` apart.
"""
title = 'median_grouped'
type_ = 'statistics'
init_inputs = [
NodeInputBP(label='data'),
NodeInputBP(label='interval', dtype=dtypes.Data(default=1, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, statistics.median_grouped(self.input(0), self.input(1)))
class Median_High_Node(NodeBase):
"""
Return the high median of data.
When the number of data points is odd, the middle value is returned.
When it is even, the larger of the two middle values is returned.
>>> median_high([1, 3, 5])
3
>>> median_high([1, 3, 5, 7])
5
"""
title = 'median_high'
type_ = 'statistics'
init_inputs = [
NodeInputBP(label='data'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, statistics.median_high(self.input(0)))
class Median_Low_Node(NodeBase):
"""
Return the low median of numeric data.
When the number of data points is odd, the middle value is returned.
When it is even, the smaller of the two middle values is returned.
>>> median_low([1, 3, 5])
3
>>> median_low([1, 3, 5, 7])
3
"""
title = 'median_low'
type_ = 'statistics'
init_inputs = [
NodeInputBP(label='data'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, statistics.median_low(self.input(0)))
class Mode_Node(NodeBase):
"""
Return the most common data point from discrete or nominal data.
``mode`` assumes discrete data, and returns a single value. This is the
standard treatment of the mode as commonly taught in schools:
>>> mode([1, 1, 2, 3, 3, 3, 3, 4])
3
This also works with nominal (non-numeric) data:
>>> mode(["red", "blue", "blue", "red", "green", "red", "red"])
'red'
If there are multiple modes with same frequency, return the first one
encountered:
>>> mode(['red', 'red', 'green', 'blue', 'blue'])
'red'
If *data* is empty, ``mode`` raises StatisticsError.
"""
title = 'mode'
type_ = 'statistics'
init_inputs = [
NodeInputBP(label='data'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, statistics.mode(self.input(0)))
class Multimode_Node(NodeBase):
"""
Return a list of the most frequently occurring values.
Will return more than one result if there are multiple modes
or an empty list if *data* is empty.
>>> multimode('aabbbbbbbbcc')
['b']
>>> multimode('aabbbbccddddeeffffgg')
['b', 'd', 'f']
>>> multimode('')
[]
"""
title = 'multimode'
type_ = 'statistics'
init_inputs = [
NodeInputBP(label='data'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, statistics.multimode(self.input(0)))
class Pstdev_Node(NodeBase):
"""
Return the square root of the population variance.
See ``pvariance`` for arguments and other details.
>>> pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
0.986893273527251
"""
title = 'pstdev'
type_ = 'statistics'
init_inputs = [
NodeInputBP(label='data'),
NodeInputBP(label='mu', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, statistics.pstdev(self.input(0), self.input(1)))
class Pvariance_Node(NodeBase):
"""
Return the population variance of ``data``.
data should be a sequence or iterable of Real-valued numbers, with at least one
value. The optional argument mu, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function to calculate the variance from the entire population.
To estimate the variance from a sample, the ``variance`` function is
usually a better choice.
Examples:
>>> data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25]
>>> pvariance(data)
1.25
If you have already calculated the mean of the data, you can pass it as
the optional second argument to avoid recalculating it:
>>> mu = mean(data)
>>> pvariance(data, mu)
1.25
Decimals and Fractions are supported:
>>> from decimal import Decimal as D
>>> pvariance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
Decimal('24.815')
>>> from fractions import Fraction as F
>>> pvariance([F(1, 4), F(5, 4), F(1, 2)])
Fraction(13, 72)
"""
title = 'pvariance'
type_ = 'statistics'
init_inputs = [
NodeInputBP(label='data'),
NodeInputBP(label='mu', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, statistics.pvariance(self.input(0), self.input(1)))
class Quantiles_Node(NodeBase):
"""
Divide *data* into *n* continuous intervals with equal probability.
Returns a list of (n - 1) cut points separating the intervals.
Set *n* to 4 for quartiles (the default). Set *n* to 10 for deciles.
Set *n* to 100 for percentiles which gives the 99 cuts points that
separate *data* in to 100 equal sized groups.
The *data* can be any iterable containing sample.
The cut points are linearly interpolated between data points.
If *method* is set to *inclusive*, *data* is treated as population
data. The minimum value is treated as the 0th percentile and the
maximum value is treated as the 100th percentile.
"""
title = 'quantiles'
type_ = 'statistics'
init_inputs = [
NodeInputBP(label='data'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, statistics.quantiles(self.input(0)))
class Sqrt_Node(NodeBase):
"""
Return the square root of x."""
title = 'sqrt'
type_ = 'statistics'
init_inputs = [
NodeInputBP(label='x'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, statistics.sqrt(self.input(0)))
class Stdev_Node(NodeBase):
"""
Return the square root of the sample variance.
See ``variance`` for arguments and other details.
>>> stdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
1.0810874155219827
"""
title = 'stdev'
type_ = 'statistics'
init_inputs = [
NodeInputBP(label='data'),
NodeInputBP(label='xbar', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, statistics.stdev(self.input(0), self.input(1)))
class Variance_Node(NodeBase):
"""
Return the sample variance of data.
data should be an iterable of Real-valued numbers, with at least two
values. The optional argument xbar, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function when your data is a sample from a population. To
calculate the variance from the entire population, see ``pvariance``.
Examples:
>>> data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
>>> variance(data)
1.3720238095238095
If you have already calculated the mean of your data, you can pass it as
the optional second argument ``xbar`` to avoid recalculating it:
>>> m = mean(data)
>>> variance(data, m)
1.3720238095238095
This function does not check that ``xbar`` is actually the mean of
``data``. Giving arbitrary values for ``xbar`` may lead to invalid or
impossible results.
Decimals and Fractions are supported:
>>> from decimal import Decimal as D
>>> variance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
Decimal('31.01875')
>>> from fractions import Fraction as F
>>> variance([F(1, 6), F(1, 2), F(5, 3)])
Fraction(67, 108)
"""
| |
# encoding=utf8
"""
Module containing functions for calculating or approximating factorials.
"""
import numpy as np
from decimal import Decimal, localcontext
def factorial(n, prec=100):
r"""
Function for calculating factorials using the standard approach as explained in the
Notes section. For factorials over 100, the decimal package is used to support the
resulting large integers.
Parameters
----------
n : int
The desired integer to calculate the factorial.
prec : int default 100, optional
Defines level of precision for factorials over 100
for use by the decimal package
Returns
-------
float or Decimal
The computed factorial of the given integer.
Notes
-----
Factorials are denoted for a positive integer :math:`x` as :math:`x!` and are
defined as:
.. math::
x! = (x)(x - 1)(x - 2) \cdots (2)(1)
For example, the factorial of 5 is written as:
.. math::
5! = (5)(4)(3)(2)(1) = 120
Examples
--------
>>> factorial(10)
3628800.0
>>> factorial(50)
3.0414093201713376e+64
# Factorials above 100 use the decimal package to handle the resulting large integers
>>> factorial(200)
Decimal('7.886578673647905035523632139321850622951359776871732632947425332443594499634033429203042840119846238E+374')
References
----------
Press, W., <NAME>., <NAME>., & <NAME>. (2007). Numerical recipes (3rd ed.).
Cambridge: Cambridge University Press.
Weisstein, <NAME>. "Factorial." From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/Factorial.html
"""
if n != np.floor(n):
n = np.floor(n)
factor = np.arange(1, n + 1)
if n > 100:
with localcontext() as ctx:
ctx.prec = prec
f = Decimal(1)
for i in reversed(factor):
f = Decimal(f) * i
else:
f = float(1)
for i in reversed(factor):
f = f * i
return f
def stirlingln(n, keep_log=False, prec=100):
r"""
Approximates the factorial of n using the approximation
given by Ramanujan in his lost notebook (Ramanujan 1988,
as cited in Wikipedia). Computing the factorial in logarithmic
form is useful as it helps avoid overflow when n is large. As
values of n increase, the approximation given becomes more
exact.
Parameters
----------
n : int
The desired integer to calculate the factorial.
keep_log : bool default False
If True, the approximation remains in logarithmic
form. If False, converts to exponent form before
returning the factorial approximation.
prec : int default 100, optional
Defines level of precision for factorials over 100.
Returns
-------
int or Decimal
The approximated factorial of the given integer, or its natural log if ``keep_log`` is True.
Notes
-----
It is often useful to compute the logarithmic form of the
factorial and convert it to exponent form to avoid overflow.
The approximation is an alternative approach given by
Srinivasa Ramanujan (Ramanujan 1988).
.. math::
ln n! \approx n ln n - n + \frac{1}{6} ln(n(1 + 4n(1 + 2n))) + \frac{1}{2} ln \pi
Examples
--------
# Difference between actual factorial calculation and Stirling's Approximation
# for low values of n is practically zero
>>> (factorial(5) - stirlingln(5)) / stirlingln(5)
3.8020354295010749e-06
>>> stirlingln(50)
3.041409303897981e+64
>>> stirlingln(100)
9.3326215380340829e+157
# If the keep_log flag is set to True, the output remains in logarithmic form.
>>> stirlingln(100, True)
363.73937555488197
References
----------
Stirling's approximation. (2017, March 8). In Wikipedia, The Free Encyclopedia.
From https://en.wikipedia.org/w/index.php?title=Stirling%27s_approximation&oldid=769328178
"""
if n != np.floor(n):
n = np.floor(n)
n = float(n)
if n > 100:
with localcontext() as ctx:
ctx.prec = prec
f = Decimal(n * np.log(n) - n + (1. / 6.) * np.log(n * (1. + 4. * n * (1. + 2. * n))) + .5 * np.log(np.pi))
else:
f = n * np.log(n) - n + (1. / 6.) * np.log(n * (1. + 4. * n * (1. + 2. * n))) + .5 * np.log(np.pi)
if keep_log is False:
return np.exp(f)
return f
def stirling(n, prec=100):
r"""
Approximates a factorial of an integer :math:`n` using Stirling's Approximation.
Specifically, the approximation is done using a method developed by Gosper.
Parameters
----------
n : int
The desired integer to calculate the factorial.
prec
Defines level of precision for factorials over 100. Default 100. Optional
Returns
-------
int or Decimal
The computed factorial of the given integer.
Notes
-----
Stirling's approximation is a method of approximating a factorial :math:`n!`.
As the value of :math:`n` increases, the more exact the approximation becomes;
however, it still yields almost exact results for small values of :math:`n`.
The approximation used is given by Gosper, which is noted to be a better
approximation to :math:`n!` and also results in a very close approximation to
:math:`0! = 1`.
.. math::
n! \approx \sqrt{(2n + \frac{1}{3})\pi} n^n e^{-n}
Examples
--------
>>> stirling(0)
1.0233267079464885
>>> (factorial(5) - stirling(5)) / stirling(5)
0.00024981097589214563
>>> stirling(5)
119.9700301696855
>>> stirling(50)
3.0414009581300833e+64
References
----------
Stirling's approximation. (2017, March 8). In Wikipedia, The Free Encyclopedia.
From https://en.wikipedia.org/w/index.php?title=Stirling%27s_approximation&oldid=769328178
Weisstein, <NAME>. "Stirling's Approximation." From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/StirlingsApproximation.html
"""
if n != np.floor(n):
n = np.floor(n)
if n >= 100:
with localcontext() as ctx:
ctx.prec = prec
f = Decimal(np.sqrt((2. * n + 1. / 3.) * np.pi) * n ** n * np.exp(-n))
else:
f = np.sqrt((2. * n + 1. / 3.) * np.pi) * n ** n * np.exp(-n)
return f
def ramanujan(n, prec=100):
r"""
Approximates the factorial :math:`n!` given an integer :math:`n` using Ramanujan's formula.
Ramanujan's formula is just as or more accurate than several other factorial approximation
formulas.
Parameters
----------
n
Integer to approximate factorial
prec
Defines level of precision for factorials over 100. Default 100. Optional
Returns
-------
int or Decimal
Factorial of :math:`n` as approximated by Ramanujan's formula.
Notes
-----
Ramanujan's formula is another factorial approximation method known for its accuracy
in comparison to other factorial approximation approaches including Stirling's and
Gosper's approximations. Ramanujan's formula is defined as:
.. math::
n! \approx \sqrt{\pi} \left(\frac{n}{e}\right)^n \sqrt[6]{8n^3 + 4n^2 + n + \frac{1}{30}}
Examples
--------
>>> ramanujan(10)
3628800.3116126074
>>> ramanujan(5)
120.00014706585664
References
----------
Mortici, Cristinel. On Gosper's Formula for the Gamma Function. Valahia University of Targoviste,
Department of Mathematics. Retrieved from http://files.ele-math.com/articles/jmi-05-53.pdf
"""
if n != np.floor(n):
n = np.floor(n)
if n >= 100:
with localcontext() as ctx:
ctx.prec = prec
f = Decimal(
np.sqrt(np.pi) * n ** n * np.exp(-n) * (8. * n ** 3. + 4. * n ** 2. + n + 1. / 30.) ** (1. / 6.))
else:
f = np.sqrt(np.pi) * n ** n * np.exp(-n) * (8. * n ** 3. + 4. * n ** 2. + n + (1. / 30.)) ** (1. / 6.)
return f
def fallingfactorial(x, n):
r"""
Computes the falling factorial.
Parameters
----------
x
Integer. The value will be rounded down if the value is not an integer.
n
Integer. The value will be rounded down if the value is not an integer.
Returns
-------
int or str
The falling factorial for an integer :math:`n`, :math:`(x)_{n}`. If x is a
str, the output is the symbolic representation of the falling factorial.
Notes
-----
The falling factorial, denoted as :math:`(x)_{n}` (or :math:`x^{\underline{n}}`) is
defined as the following:
.. math::
(x)_n = x(x - 1) \cdots (x - (n - 1))
The first few falling factorials are then:
.. math::
(x)_0 = 1
(x)_1 = x
(x)_2 = x(x - 1)
(x)_3 = x(x - 1)(x - 2)
(x)_4 = x(x - 1)(x - 2)(x - 3)
Examples
--------
>>> fallingfactorial(10, 5)
30240
>>> fallingfactorial(10, 2)
90
>>> fallingfactorial('x', 2)
'x*(x - 1)'
>>> fallingfactorial('a', 4)
'a*(a - 1)*(a - 2)*(a - 3)'
References
----------
Falling and rising factorials. (2017, June 8). In Wikipedia, The Free Encyclopedia.
From https://en.wikipedia.org/w/index.php?title=Falling_and_rising_factorials&oldid=784512036
Weisstein, <NAME>. "Falling Factorial." From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/FallingFactorial.html
"""
if n != np.floor(n):
n = np.floor(n)
if isinstance(x, str):
f = x
for i in np.arange(1, np.absolute(n)):
f = f + '*(' + str(x) + ' - ' + str(i) + ')'
if n < 0:
f = '1 /' + f
else:
if x != np.floor(x):
x = np.floor(x)
f = np.uint64(1.0)
for i in np.arange(np.absolute(n)):
f *= (x - i)
if n < 0:
f = 1 / f
return f
def risingfactorial(x, n):
r"""
Computes the rising factorial. Also known | |
{
'storageAccountType': 'Standard_LRS'
},
'name': vm_name,
'createOption': 'FromImage'
}
},
'osProfile': {
'adminUsername': admin_username,
'computerName': vm_name,
'adminPassword': <PASSWORD>
},
'networkProfile': {
'networkInterfaces': [
{
'id': full_nic_id,
'properties': {
'primary': 'true'
}
}
]
}
},
'name': vm_name
}
return vm
def http_request(method, url_suffix=None, data=None, headers=HEADERS,
params=None, codes=None, full_url=None, j_son=None):
"""
A wrapper for requests lib to send our requests and handle requests and responses better
parameter: (string) method
A string denoting the http request method to use.
Can be 'GET', 'POST', 'PUT', 'DELETE', etc.
parameter: (string) url_suffix
The API endpoint that determines which data we are trying to access/create/update
in our call to the API
parameter: (dict) data
The key/value pairs to be form-encoded
parameter: (dict) headers
The headers to use with the request
parameter: (dict) params
The parameters to use with this request
parameter: (set) codes
The set of status codes against which the status code of the response should be checked
parameter: (string) full_url
The full url to make a request to. Only necessary in the case that you need to make
an API request to an endpoint which differs in its base url from the majority of
the API calls in the integration
parameter: (dict) j_son
A JSON serializable Python object to send in the body of the request
returns:
JSON Response Object
"""
update_access_token()
try:
url = full_url if full_url else None
if not url:
url = BASE_URL + url_suffix if url_suffix else BASE_URL
r = requests.request(
method,
url,
headers=headers,
data=data,
params=params,
verify=USE_SSL,
json=j_son
)
green_codes = codes if codes else {200, 201, 202, 204}
if r.status_code not in green_codes:
err_msg = 'Error in API call to Azure Compute Integration [{}] - {}'.format(r.status_code, r.reason)
err = r.json().get('error')
if err:
err_msg1 = '\nError code: {}\nError message: {}'.format(err.get('code'), err.get('message'))
err_msg += err_msg1
raise Exception(err_msg)
response = json.loads(r.content)
except ValueError:
response = r.content
return response
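# Example call (mirrors how list_resource_groups below uses this wrapper; the api-version
# value shown is illustrative):
#   response = http_request('GET', params={'api-version': '2018-05-01'}, codes={200})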
'''MAIN FUNCTIONS / API CALLS'''
# <---------- Test Module ----------> #
def test_module():
# Implicitly will test TENANT_ID, TOKEN and SUBSCRIPTION_ID
list_resource_groups()
demisto.results('ok')
# <-------- Resource Groups --------> #
def list_resource_groups():
parameters = {'api-version': '2018-05-01'}
response = http_request('GET', params=parameters, codes={200})
return response
def list_resource_groups_command():
"""
List all Resource Groups belonging to your Azure subscription
returns:
Resource-Group Objects
"""
response = list_resource_groups()
# Retrieve relevant properties to return to context
value = response.get('value')
resource_groups = []
for resource_group in value:
resource_group_context = {
'Name': resource_group.get('name'),
'ID': resource_group.get('id'),
'Location': resource_group.get('location'),
'ProvisioningState': resource_group.get('properties', {}).get('provisioningState')
}
resource_groups.append(resource_group_context)
title = 'List of Resource Groups'
human_readable = tableToMarkdown(title, resource_groups, removeNull=True)
entry_context = {'Azure.ResourceGroup(val.Name && val.Name === obj.Name)': resource_groups}
demisto.results({
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['text'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': entry_context
})
# <-------- Virtual Machines --------> #
def list_vms(resource_group):
# Construct endpoint URI suffix
url_endpoint = resource_group + '/providers/Microsoft.Compute/virtualMachines'
parameters = {'api-version': API_VERSION}
# Call API
response = http_request('GET', url_endpoint, params=parameters, codes={200})
return response
def list_vms_command():
"""
List the VM instances in the specified Resource Group
demisto parameter: (string) resource_group
Resource Group of the VMs
returns:
Virtual Machine Objects
"""
resource_group = demisto.args().get('resource_group')
response = list_vms(resource_group)
vm_objects_list = response.get('value')
vms = []
for vm_object in vm_objects_list:
vm_name = vm_object.get('name').lower()
location = vm_object.get('location')
properties = vm_object.get('properties')
provisioning_state = properties.get('provisioningState')
os_disk = properties.get('storageProfile', {}).get('osDisk')
datadisk = os_disk.get('diskSizeGB', 'NA')
vm_id = properties.get('vmId')
os_type = os_disk.get('osType')
vm = {
'Name': vm_name,
'ID': vm_id,
'Size': datadisk,
'OS': os_type,
'Location': location,
'ProvisioningState': provisioning_state,
'ResourceGroup': resource_group
}
vms.append(vm)
title = 'Microsoft Azure - List of Virtual Machines in Resource Group "{}"'.format(resource_group)
table_headers = ['Name', 'ID', 'Size', 'OS', 'Location', 'ProvisioningState', 'ResourceGroup']
human_readable = tableToMarkdown(title, vms, headers=table_headers, removeNull=True)
entry_context = {'Azure.Compute(val.Name && val.Name === obj.Name)': vms}
demisto.results({
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['text'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': entry_context
})
def get_vm(args):
# Retrieve relevant command arguments
resource_group = args.get('resource_group')
vm_name = args.get('virtual_machine_name')
# Construct endpoint URI suffix
url_endpoint = resource_group + '/providers/Microsoft.Compute/virtualMachines/' + vm_name
parameters = {'$expand': 'instanceView', 'api-version': API_VERSION}
# Call API
response = http_request('GET', url_endpoint, params=parameters, codes={200})
return response
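# Note (added for clarity): the '$expand': 'instanceView' query parameter asks the API
# to include runtime status entries such as 'PowerState/running'; get_vm_command()
# below parses those statuses to report the VM power state.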
def get_vm_command():
"""
Get the properties of a specified Virtual Machine
demisto parameter: (string) resource_group
Resource Group to which the virtual machine belongs
demisto parameter: (string) virtual_machine_name
Name of the virtual machine you wish to view the details of
returns:
Virtual Machine Object
"""
args = demisto.args()
response = get_vm(args)
# Retrieve relevant properties to return to context
vm_name = response.get('name').lower()
properties = response.get('properties')
os_disk = properties.get('storageProfile', {}).get('osDisk')
datadisk = os_disk.get('diskSizeGB', 'NA')
vm_id = properties.get('vmId')
os_type = os_disk.get('osType')
provisioning_state = properties.get('provisioningState')
location = response.get('location')
statuses = properties.get('instanceView', {}).get('statuses')
power_state = None
for status in statuses:
status_code = status.get('code')
status_code_prefix = status_code[:status_code.find('/')]
if status_code_prefix == 'PowerState':
power_state = status.get('displayStatus')
vm = {
'Name': vm_name,
'ID': vm_id,
'Size': datadisk,
'OS': os_type,
'ProvisioningState': provisioning_state,
'Location': location,
'PowerState': power_state,
'ResourceGroup': args.get('resource_group')
}
title = 'Properties of VM "{}"'.format(vm_name)
table_headers = ['Name', 'ID', 'Size', 'OS', 'ProvisioningState', 'Location', 'PowerState']
human_readable = tableToMarkdown(title, vm, headers=table_headers, removeNull=True)
entry_context = {'Azure.Compute(val.Name && val.Name === obj.Name)': vm}
demisto.results({
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': entry_context
})
def create_vm(args):
# Retrieve relevant command arguments
resource_group = args.get('resource_group')
vm_name = args.get('virtual_machine_name')
# Construct endpoint URI suffix
url_endpoint = resource_group + '/providers/Microsoft.Compute/virtualMachines/' + vm_name
parameters = {'api-version': API_VERSION}
# Construct VM object utilizing parameters passed as command arguments
payload = create_vm_parameters(args)
# Call API
response = http_request('PUT', url_endpoint, params=parameters, j_son=payload)
return response
def create_vm_command():
"""
Create a virtual machine instance with the specified OS image
demisto parameter: (string) resource_group
Resource group to which the new VM will belong
demisto parameter: (string) virtual_machine_name
Name to assign to the new virtual machine
demisto parameter: (string) virtual_machine_location
Region in which the vm will be hosted
demisto parameter: (string) nic_name
The name of the Network Interface to link the VM with. This must be created from the Azure Portal
demisto parameter: (string) vm_size
The name of a VirtualMachineSize which determines the size of the deployed vm
demisto parameter: (string) os_image
Choose the base operating system image of the vm
demisto parameter: (string) sku
SKU of the image to be used
demisto parameter: (string) publisher
Name of the publisher of the image
demisto parameter: (string) version
Version of the image to use
demisto parameter: (string) offer
Specifies the offer of the platform image or marketplace image used
to create the virtual machine
demisto parameter: (string) admin_username
Admin Username to be used when creating the VM
demisto parameter: (string) admin_password
Admin Password to be used when creating the VM
returns:
Virtual Machine Object
"""
args = demisto.args()
response = create_vm(args)
# Retrieve relevant properties to return to context
vm_name = response.get('name').lower()
properties = response.get('properties')
os_disk = properties.get('storageProfile', {}).get('osDisk')
datadisk = os_disk.get('diskSizeGB', 'NA')
vm_id = properties.get('vmId')
os_type = os_disk.get('osType')
provisioning_state = properties.get('provisioningState')
location = response.get('location')
vm = {
'Name': vm_name,
'ID': vm_id,
'Size': datadisk,
'OS': os_type,
'ProvisioningState': provisioning_state,
'Location': location,
'ResourceGroup': args.get('resource_group')
}
title = 'Created Virtual Machine "{}"'.format(vm_name)
human_readable = tableToMarkdown(title, vm, removeNull=True)
entry_context = {'Azure.Compute(val.Name && val.Name === obj.Name)': vm}
demisto.results({
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': entry_context
})
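# Illustrative sketch (assumption, not original code): the args dict consumed by
# create_vm() mirrors the demisto command arguments documented above. Every value
# below is a placeholder; e.g. create_vm(_EXAMPLE_CREATE_VM_ARGS) would PUT the
# corresponding VM definition.
_EXAMPLE_CREATE_VM_ARGS = {
    'resource_group': 'example-rg',
    'virtual_machine_name': 'example-vm',
    'virtual_machine_location': 'westeurope',
    'nic_name': 'example-nic',
    'vm_size': 'Standard_D2s_v3',
    'os_image': 'Ubuntu Server 18.04 LTS',
    'sku': '18.04-LTS',
    'publisher': 'Canonical',
    'version': 'latest',
    'offer': 'UbuntuServer',
    'admin_username': 'exampleadmin',
    'admin_password': 'example-password',
}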
def delete_vm(args):
# Retrieve relevant command arguments
resource_group = args.get('resource_group')
vm_name = args.get('virtual_machine_name')
# Construct endpoint URI suffix (for de-allocation of compute resources)
url_endpoint = resource_group + '/providers/Microsoft.Compute/virtualMachines/' + vm_name + '/deallocate'
parameters = {'api-version': API_VERSION}
# Call API to deallocate compute resources
_ = http_request('POST', url_endpoint, params=parameters, codes={200, 202})
# Construct endpoint URI suffix (for deletion)
url_endpoint = resource_group + '/providers/Microsoft.Compute/virtualMachines/' + vm_name
parameters = {'api-version': API_VERSION}
# Call API to delete
response = http_request('DELETE', url_endpoint, params=parameters, codes={200, 202, 204})
return response
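# Note (added for clarity): deletion above is a two-step operation -- the VM is first
# deallocated so its compute resources are released, and only then is the VM resource
# itself deleted. Both calls may return 202 because they start asynchronous operations.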
def delete_vm_command():
"""
Delete a specified Virtual Machine
demisto parameter: (string) resource_group
Resource Group to which the virtual machine belongs
demisto parameter: (string) virtual_machine_name
Name of the virtual machine to delete
returns:
Success message to the war room
"""
args = demisto.args()
_ = delete_vm(args)
success_msg = '"{}" VM Deletion Successfully Initiated'.format(args.get('virtual_machine_name'))
demisto.results(success_msg)
def start_vm(args):
# Retrieve relevant command arguments
resource_group = args.get('resource_group')
vm_name = args.get('virtual_machine_name')
# Construct endpoint URI suffix
"""
Pytorch modules
"""
from collections import defaultdict
import copy
import json
import logging
from io import open
import torch
from torch import nn
from torch.nn import functional as F
# from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from torch.nn import LayerNorm
from .layer import GELU, BertLayer, BertPooler, BertOnlyMLMHead
from .ot import optimal_transport_dist
logger = logging.getLogger(__name__)
class UniterConfig(object):
"""Configuration class to store the configuration of a `UniterModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_hidden_layers_img=1,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs UniterConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `input_ids` in
`UniterModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer
encoder.
num_attention_heads: Number of attention heads for each attention
layer in the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e.
feed-forward) layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string)
in the encoder and pooler. If string, "gelu", "relu" and
"swish" are supported.
hidden_dropout_prob: The dropout probability for all fully
connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this
model might ever be used with. Typically set this to something
large just in case (e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed
into `UniterModel`.
initializer_range: The stddev of the truncated_normal_initializer
for initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file,
"r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_hidden_layers_img = num_hidden_layers_img
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size "
"(int) or the path to a pretrained model config "
"file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `UniterConfig` from a
Python dictionary of parameters."""
config = UniterConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `UniterConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
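# Illustrative sketch (not part of the original file): UniterConfig can be built either
# from keyword arguments or from a JSON config file. The vocabulary size below is a
# placeholder; the remaining values are simply the class defaults.
def _example_uniter_config():
    config = UniterConfig(vocab_size_or_config_json_file=28996,
                          hidden_size=768,
                          num_hidden_layers=12)
    return config.to_json_string()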
class UniterPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super().__init__()
if not isinstance(config, UniterConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of "
"class `UniterConfig`. To create a model from a Google "
"pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses
# truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0,
std=self.config.initializer_range)
elif isinstance(module, LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, config_file, state_dict, *inputs, **kwargs):
"""
Instantiate a UniterPreTrainedModel from a pre-trained model file or a
pytorch state dict.
Params:
config_file: config json file
state_dict: a state dictionary
*inputs, **kwargs: additional input for the specific Uniter class
"""
# Load config
config = UniterConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = ({} if metadata is None
else metadata.get(prefix[:-1], {}))
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys,
unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.')
for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from "
"pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in "
"{}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for '
'{}:\n\t{}'.format(
model.__class__.__name__,
"\n\t".join(error_msgs)))
return model
class UniterTextEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size,
config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings,
config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size,
config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model
# variable name and be able to load any TensorFlow checkpoint file
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, position_ids, token_type_ids=None):
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = (words_embeddings
+ position_embeddings
+ token_type_embeddings)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class UniterImageEmbeddings(nn.Module):
def __init__(self, config, img_dim):
super().__init__()
self.img_linear = nn.Linear(img_dim, config.hidden_size)
self.img_layer_norm = LayerNorm(config.hidden_size, eps=1e-12)
self.pos_layer_norm = LayerNorm(config.hidden_size, eps=1e-12)
self.pos_linear = nn.Linear(7, config.hidden_size)
self.mask_embedding = nn.Embedding(2, img_dim, padding_idx=0)
# tf naming convention for layer norm
self.LayerNorm = LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, img_feat, img_pos_feat, type_embeddings, img_masks=None):
if img_masks is not None:
self.mask_embedding.weight.data[0, :].fill_(0)
mask = self.mask_embedding(img_masks.long())
img_feat = img_feat + mask
transformed_im = self.img_layer_norm(self.img_linear(img_feat))
transformed_pos = self.pos_layer_norm(self.pos_linear(img_pos_feat))
embeddings = transformed_im + transformed_pos + type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
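# Illustrative sketch (assumption, not original code): img_pos_feat must provide 7
# features per region to match nn.Linear(7, hidden_size) above. A common convention is
# normalized (x1, y1, x2, y2, w, h, area), but the exact layout is not specified in
# this file; the 2048-dim region features are likewise a placeholder.
def _example_dummy_image_inputs(batch=2, num_regions=4, img_dim=2048):
    img_feat = torch.randn(batch, num_regions, img_dim)
    img_pos_feat = torch.rand(batch, num_regions, 7)
    return img_feat, img_pos_feat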
class UniterEncoder(nn.Module):
def __init__(self, config):
super().__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer)
for _ in range(config.num_hidden_layers)])
def forward(self, input_, attention_mask,
output_all_encoded_layers=True):
all_encoder_layers = []
hidden_states = input_
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
def pad_tensor_to_mul(tensor, dim=0, mul=8):
""" pad tensor to multiples (8 for tensor cores) """
# TODO find out whether this helps speed
    return tensor, 0  # NOTE: early return -- padding is currently disabled; the code below is kept for experimentation
t_size = list(tensor.size())
n_pad = mul - t_size[dim] % mul
if n_pad == mul:
n_pad = 0
padded_tensor = tensor
else:
t_size[dim] = n_pad
pad = torch.zeros(*t_size, dtype=tensor.dtype, device=tensor.device)
padded_tensor = torch.cat([tensor, pad], dim=dim)
return padded_tensor, n_pad
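# Illustrative note (assumption): with the early return removed, a tensor of length 10
# padded along dim 0 with mul=8 would come back with length 16 and n_pad == 6. As
# written, the function is a no-op and always reports n_pad == 0.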
class UniterModel(UniterPreTrainedModel):
""" Modification for Joint Vision-Language Encoding
"""
def __init__(self, config, img_dim):
super().__init__(config)
self.embeddings = UniterTextEmbeddings(config)
self.img_embeddings = UniterImageEmbeddings(config, img_dim)
self.encoder = UniterEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_weights)
def _compute_txt_embeddings(self, input_ids, position_ids,
txt_type_ids=None):
output = self.embeddings(input_ids, position_ids, txt_type_ids)
return output
def _compute_img_embeddings(self, img_feat, img_pos_feat, img_masks=None,
img_type_ids=None):
if img_type_ids is None:
img_type_ids = torch.ones_like(img_feat[:, :, 0].long())
img_type_embeddings = self.embeddings.token_type_embeddings(
img_type_ids)
output = self.img_embeddings(img_feat, img_pos_feat,
img_type_embeddings, img_masks)
return output
def _compute_img_txt_embeddings(self, input_ids, position_ids,
img_feat, img_pos_feat,
gather_index, img_masks=None,
txt_type_ids=None, img_type_ids=None):
txt_emb = self._compute_txt_embeddings(
input_ids, position_ids, txt_type_ids)
img_emb = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids)
# align back to most compact input
if gather_index is None:
embedding_output = torch.cat([txt_emb, img_emb], dim=1)
else:
gather_index = gather_index.unsqueeze(-1).expand(
-1, -1, self.config.hidden_size)
embedding_output = torch.gather(torch.cat([txt_emb, img_emb], dim=1),
dim=1, index=gather_index)
return embedding_output
def forward(self, input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index=None, img_masks=None,
output_all_encoded_layers=True,
txt_type_ids=None, img_type_ids=None):
# compute self-attention mask
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
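        # (added for clarity) attended positions keep an additive bias of 0.0 while
        # masked positions get -10000.0; when this bias is added to the raw attention
        # scores inside each BertLayer, masked positions vanish after the softmax.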
# embedding layer
if input_ids is None:
# image only
embedding_output = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids)
elif img_feat is None:
# text only
embedding_output = self._compute_txt_embeddings(
input_ids, position_ids, txt_type_ids)
else:
embedding_output = self._compute_img_txt_embeddings(
input_ids, position_ids,
img_feat, img_pos_feat,
gather_index, img_masks, txt_type_ids, img_type_ids)
encoded_layers = self.encoder(
embedding_output, extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers
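# Illustrative usage sketch (assumption, not original code): loading a checkpoint via
# UniterPreTrainedModel.from_pretrained. The config/checkpoint paths and the 2048-dim
# region-feature size are placeholders.
def _example_load_uniter(config_json='uniter-base.json',
                         ckpt_path='uniter-base.pt',
                         img_dim=2048):
    state_dict = torch.load(ckpt_path, map_location='cpu')
    return UniterModel.from_pretrained(config_json, state_dict, img_dim=img_dim)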
class RegionFeatureRegression(nn.Module):
def __init__(self, hidden_size, feat_dim, img_linear_weight):
super().__init__()
self.net = nn.Sequential(nn.Linear(hidden_size, hidden_size),
GELU(),
LayerNorm(hidden_size, eps=1e-12))
self.weight = img_linear_weight
self.bias = nn.Parameter(torch.zeros(feat_dim))
def forward(self, input_):
hidden = self.net(input_)
output = F.linear(hidden, self.weight.t(), self.bias)
return output
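# Note (added for clarity): the regression head reuses the transposed weight of the
# image embedder's img_linear layer (weight tying), so predicted region features are
# mapped back into the original feature space; only the bias is learned separately.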
class RegionClassification(nn.Module):
def __init__(self, hidden_size, label_dim):
super().__init__()
self.net = nn.Sequential(nn.Linear(hidden_size, hidden_size),
GELU(),
LayerNorm(hidden_size, eps=1e-12),
nn.Linear(hidden_size, label_dim))
def forward(self, input_):
output = self.net(input_)
return output
class UniterForPretraining(UniterPreTrainedModel):
""" MLM + MRM """
def __init__(self, config, img_dim, img_label_dim,
nce_temp=1, ot_pos_only=False):
super().__init__(config)
        self.bert = UniterModel(config, img_dim)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.setWindowModality(QtCore.Qt.ApplicationModal)
MainWindow.resize(466, 700)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(466, 700))
MainWindow.setMaximumSize(QtCore.QSize(466, 700))
MainWindow.setBaseSize(QtCore.QSize(0, 0))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.layoutWidget = QtGui.QWidget(self.centralwidget)
self.layoutWidget.setGeometry(QtCore.QRect(10, 10, 489, 710))
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.gr_input = QtGui.QGroupBox(self.layoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gr_input.sizePolicy().hasHeightForWidth())
self.gr_input.setSizePolicy(sizePolicy)
self.gr_input.setMinimumSize(QtCore.QSize(430, 0))
self.gr_input.setMaximumSize(QtCore.QSize(430, 16777215))
self.gr_input.setObjectName(_fromUtf8("gr_input"))
self.verticalLayout_7 = QtGui.QVBoxLayout(self.gr_input)
self.verticalLayout_7.setSpacing(0)
self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7"))
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.lb_run = QtGui.QLabel(self.gr_input)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_run.sizePolicy().hasHeightForWidth())
self.lb_run.setSizePolicy(sizePolicy)
self.lb_run.setMinimumSize(QtCore.QSize(40, 0))
self.lb_run.setMaximumSize(QtCore.QSize(40, 24))
self.lb_run.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
)
self.lb_run.setObjectName(_fromUtf8("lb_run"))
self.horizontalLayout_4.addWidget(self.lb_run)
self.le_run = QtGui.QLineEdit(self.gr_input)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_run.sizePolicy().hasHeightForWidth())
self.le_run.setSizePolicy(sizePolicy)
self.le_run.setMinimumSize(QtCore.QSize(280, 0))
self.le_run.setMaximumSize(QtCore.QSize(16777215, 24))
self.le_run.setObjectName(_fromUtf8("le_run"))
self.horizontalLayout_4.addWidget(self.le_run)
self.pb_run = QtGui.QPushButton(self.gr_input)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pb_run.sizePolicy().hasHeightForWidth())
self.pb_run.setSizePolicy(sizePolicy)
self.pb_run.setMaximumSize(QtCore.QSize(16777215, 24))
self.pb_run.setObjectName(_fromUtf8("pb_run"))
self.horizontalLayout_4.addWidget(self.pb_run)
self.verticalLayout_7.addLayout(self.horizontalLayout_4)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.lb_map = QtGui.QLabel(self.gr_input)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_map.sizePolicy().hasHeightForWidth())
self.lb_map.setSizePolicy(sizePolicy)
self.lb_map.setMinimumSize(QtCore.QSize(40, 0))
self.lb_map.setMaximumSize(QtCore.QSize(40, 24))
self.lb_map.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
)
self.lb_map.setObjectName(_fromUtf8("lb_map"))
self.horizontalLayout_2.addWidget(self.lb_map)
self.le_map = QtGui.QLineEdit(self.gr_input)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_map.sizePolicy().hasHeightForWidth())
self.le_map.setSizePolicy(sizePolicy)
self.le_map.setMinimumSize(QtCore.QSize(280, 0))
self.le_map.setMaximumSize(QtCore.QSize(16777215, 24))
self.le_map.setObjectName(_fromUtf8("le_map"))
self.horizontalLayout_2.addWidget(self.le_map)
self.pb_map = QtGui.QPushButton(self.gr_input)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pb_map.sizePolicy().hasHeightForWidth())
self.pb_map.setSizePolicy(sizePolicy)
self.pb_map.setMaximumSize(QtCore.QSize(16777215, 24))
self.pb_map.setObjectName(_fromUtf8("pb_map"))
self.horizontalLayout_2.addWidget(self.pb_map)
self.verticalLayout_7.addLayout(self.horizontalLayout_2)
self.verticalLayout.addWidget(self.gr_input)
self.gr_stage = QtGui.QGroupBox(self.layoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gr_stage.sizePolicy().hasHeightForWidth())
self.gr_stage.setSizePolicy(sizePolicy)
self.gr_stage.setMinimumSize(QtCore.QSize(440, 0))
self.gr_stage.setMaximumSize(QtCore.QSize(440, 16777215))
self.gr_stage.setObjectName(_fromUtf8("gr_stage"))
self.horizontalLayout_16 = QtGui.QHBoxLayout(self.gr_stage)
self.horizontalLayout_16.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_16.setSpacing(0)
self.horizontalLayout_16.setObjectName(_fromUtf8("horizontalLayout_16"))
self.tb_align = QtGui.QTabWidget(self.gr_stage)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tb_align.sizePolicy().hasHeightForWidth())
self.tb_align.setSizePolicy(sizePolicy)
self.tb_align.setMinimumSize(QtCore.QSize(220, 190))
self.tb_align.setMaximumSize(QtCore.QSize(220, 190))
self.tb_align.setBaseSize(QtCore.QSize(0, 0))
self.tb_align.setTabPosition(QtGui.QTabWidget.West)
self.tb_align.setObjectName(_fromUtf8("tb_align"))
self.tab_print = QtGui.QWidget()
self.tab_print.setObjectName(_fromUtf8("tab_print"))
self.lb_printpix = QtGui.QLabel(self.tab_print)
self.lb_printpix.setGeometry(QtCore.QRect(0, 0, 191, 181))
self.lb_printpix.setText(_fromUtf8(""))
self.lb_printpix.setPixmap(
QtGui.QPixmap(_fromUtf8(":/alignment/alignprint.png"))
)
self.lb_printpix.setScaledContents(False)
self.lb_printpix.setAlignment(QtCore.Qt.AlignCenter)
self.lb_printpix.setObjectName(_fromUtf8("lb_printpix"))
self.tb_align.addTab(self.tab_print, _fromUtf8(""))
self.tab_wafer = QtGui.QWidget()
self.tab_wafer.setObjectName(_fromUtf8("tab_wafer"))
self.lb_waferpix = QtGui.QLabel(self.tab_wafer)
self.lb_waferpix.setGeometry(QtCore.QRect(0, 0, 191, 181))
self.lb_waferpix.setText(_fromUtf8(""))
self.lb_waferpix.setPixmap(
QtGui.QPixmap(_fromUtf8(":/alignment/alignwafer.png"))
)
self.lb_waferpix.setScaledContents(False)
self.lb_waferpix.setAlignment(QtCore.Qt.AlignCenter)
self.lb_waferpix.setObjectName(_fromUtf8("lb_waferpix"))
self.tb_align.addTab(self.tab_wafer, _fromUtf8(""))
self.horizontalLayout_16.addWidget(self.tb_align)
spacerItem = QtGui.QSpacerItem(
8, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum
)
self.horizontalLayout_16.addItem(spacerItem)
self.verticalLayout_8 = QtGui.QVBoxLayout()
self.verticalLayout_8.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.verticalLayout_8.setSpacing(0)
self.verticalLayout_8.setObjectName(_fromUtf8("verticalLayout_8"))
self.lb_stagcoord = QtGui.QLabel(self.gr_stage)
self.lb_stagcoord.setObjectName(_fromUtf8("lb_stagcoord"))
self.verticalLayout_8.addWidget(self.lb_stagcoord)
spacerItem1 = QtGui.QSpacerItem(
20, 8, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed
)
self.verticalLayout_8.addItem(spacerItem1)
self.tw_stage = QtGui.QTableWidget(self.gr_stage)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tw_stage.sizePolicy().hasHeightForWidth())
self.tw_stage.setSizePolicy(sizePolicy)
self.tw_stage.setMaximumSize(QtCore.QSize(180, 97))
self.tw_stage.setObjectName(_fromUtf8("tw_stage"))
self.tw_stage.setColumnCount(3)
self.tw_stage.setRowCount(3)
item = QtGui.QTableWidgetItem()
self.tw_stage.setVerticalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tw_stage.setVerticalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tw_stage.setVerticalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tw_stage.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tw_stage.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tw_stage.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tw_stage.setItem(0, 0, item)
self.tw_stage.horizontalHeader().setDefaultSectionSize(54)
self.tw_stage.horizontalHeader().setMinimumSectionSize(54)
self.tw_stage.verticalHeader().setVisible(True)
self.tw_stage.verticalHeader().setDefaultSectionSize(24)
self.tw_stage.verticalHeader().setMinimumSectionSize(24)
self.verticalLayout_8.addWidget(self.tw_stage)
spacerItem2 = QtGui.QSpacerItem(
20, 8, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed
)
self.verticalLayout_8.addItem(spacerItem2)
self.horizontalLayout_15 = QtGui.QHBoxLayout()
self.horizontalLayout_15.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_15.setSpacing(0)
self.horizontalLayout_15.setObjectName(_fromUtf8("horizontalLayout_15"))
self.lb_stagz = QtGui.QLabel(self.gr_stage)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_stagz.sizePolicy().hasHeightForWidth())
self.lb_stagz.setSizePolicy(sizePolicy)
self.lb_stagz.setMinimumSize(QtCore.QSize(32, 0))
self.lb_stagz.setMaximumSize(QtCore.QSize(32, 24))
self.lb_stagz.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
)
self.lb_stagz.setObjectName(_fromUtf8("lb_stagz"))
self.horizontalLayout_15.addWidget(self.lb_stagz)
self.le_stagz = QtGui.QLineEdit(self.gr_stage)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_stagz.sizePolicy().hasHeightForWidth())
self.le_stagz.setSizePolicy(sizePolicy)
self.le_stagz.setMinimumSize(QtCore.QSize(54, 24))
self.le_stagz.setMaximumSize(QtCore.QSize(54, 24))
self.le_stagz.setObjectName(_fromUtf8("le_stagz"))
self.horizontalLayout_15.addWidget(self.le_stagz)
spacerItem3 = QtGui.QSpacerItem(
18, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum
)
self.horizontalLayout_15.addItem(spacerItem3)
self.cb_rotonly = QtGui.QCheckBox(self.gr_stage)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cb_rotonly.sizePolicy().hasHeightForWidth())
self.cb_rotonly.setSizePolicy(sizePolicy)
self.cb_rotonly.setMinimumSize(QtCore.QSize(78, 0))
self.cb_rotonly.setMaximumSize(QtCore.QSize(78, 16777215))
self.cb_rotonly.setObjectName(_fromUtf8("cb_rotonly"))
self.horizontalLayout_15.addWidget(self.cb_rotonly)
self.verticalLayout_8.addLayout(self.horizontalLayout_15)
spacerItem4 = QtGui.QSpacerItem(
20, 8, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed
)
self.verticalLayout_8.addItem(spacerItem4)
self.le_waferignore = QtGui.QLabel(self.gr_stage)
self.le_waferignore.setTextFormat(QtCore.Qt.RichText)
self.le_waferignore.setObjectName(_fromUtf8("le_waferignore"))
self.verticalLayout_8.addWidget(self.le_waferignore)
self.lb_rotonly = QtGui.QLabel(self.gr_stage)
self.lb_rotonly.setTextFormat(QtCore.Qt.RichText)
self.lb_rotonly.setObjectName(_fromUtf8("lb_rotonly"))
self.verticalLayout_8.addWidget(self.lb_rotonly)
self.horizontalLayout_16.addLayout(self.verticalLayout_8)
self.verticalLayout.addWidget(self.gr_stage)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.gr_filter = QtGui.QGroupBox(self.layoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gr_filter.sizePolicy().hasHeightForWidth())
self.gr_filter.setSizePolicy(sizePolicy)
self.gr_filter.setMinimumSize(QtCore.QSize(220, 90))
self.gr_filter.setMaximumSize(QtCore.QSize(220, 90))
self.gr_filter.setObjectName(_fromUtf8("gr_filter"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.gr_filter)
self.verticalLayout_2.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setSpacing(0)
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.lb_keepcode = QtGui.QLabel(self.gr_filter)
sizePolicy = QtGui.QSizePolicy(
QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_keepcode.sizePolicy().hasHeightForWidth())
self.lb_keepcode.setSizePolicy(sizePolicy)
self.lb_keepcode.setMinimumSize(QtCore.QSize(60, 0))
self.lb_keepcode.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
)
self.lb_keepcode.setObjectName(_fromUtf8("lb_keepcode"))
self.horizontalLayout_5.addWidget(self.lb_keepcode)
self.le_keepcode = QtGui.QLineEdit(self.gr_filter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_keepcode.sizePolicy().hasHeightForWidth())
self.le_keepcode.setSizePolicy(sizePolicy)
self.le_keepcode.setMinimumSize(QtCore.QSize(103, 24))
self.le_keepcode.setMaximumSize(QtCore.QSize(130, 24))
self.le_keepcode.setObjectName(_fromUtf8("le_keepcode"))
self.horizontalLayout_5.addWidget(self.le_keepcode)
self.verticalLayout_2.addLayout(self.horizontalLayout_5)
self.horizontalLayout_7 = QtGui.QHBoxLayout()
self.horizontalLayout_7.setSpacing(0)
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.lb_omitch = QtGui.QLabel(self.gr_filter)
sizePolicy = QtGui.QSizePolicy(
QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_omitch.sizePolicy().hasHeightForWidth())
self.lb_omitch.setSizePolicy(sizePolicy)
self.lb_omitch.setMinimumSize(QtCore.QSize(60, 0))
self.lb_omitch.setMaximumSize(QtCore.QSize(60, 16777215))
self.lb_omitch.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
)
self.lb_omitch.setObjectName(_fromUtf8("lb_omitch"))
self.horizontalLayout_7.addWidget(self.lb_omitch)
self.le_omitch = QtGui.QLineEdit(self.gr_filter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_omitch.sizePolicy().hasHeightForWidth())
self.le_omitch.setSizePolicy(sizePolicy)
self.le_omitch.setMinimumSize(QtCore.QSize(130, 24))
self.le_omitch.setMaximumSize(QtCore.QSize(130, 24))
self.le_omitch.setObjectName(_fromUtf8("le_omitch"))
self.horizontalLayout_7.addWidget(self.le_omitch)
self.verticalLayout_2.addLayout(self.horizontalLayout_7)
self.horizontalLayout.addWidget(self.gr_filter)
self.gr_platecoord = QtGui.QGroupBox(self.layoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.gr_platecoord.sizePolicy().hasHeightForWidth()
)
self.gr_platecoord.setSizePolicy(sizePolicy)
self.gr_platecoord.setMinimumSize(QtCore.QSize(220, 90))
self.gr_platecoord.setMaximumSize(QtCore.QSize(220, 90))
self.gr_platecoord.setObjectName(_fromUtf8("gr_platecoord"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.gr_platecoord)
self.verticalLayout_4.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.horizontalLayout_11 = QtGui.QHBoxLayout()
self.horizontalLayout_11.setSpacing(0)
self.horizontalLayout_11.setObjectName(_fromUtf8("horizontalLayout_11"))
self.lb_xmin = QtGui.QLabel(self.gr_platecoord)
sizePolicy = QtGui.QSizePolicy(
QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_xmin.sizePolicy().hasHeightForWidth())
self.lb_xmin.setSizePolicy(sizePolicy)
self.lb_xmin.setMinimumSize(QtCore.QSize(42, 0))
self.lb_xmin.setMaximumSize(QtCore.QSize(42, 16777215))
self.lb_xmin.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
)
self.lb_xmin.setObjectName(_fromUtf8("lb_xmin"))
self.horizontalLayout_11.addWidget(self.lb_xmin)
self.le_xmin = QtGui.QLineEdit(self.gr_platecoord)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_xmin.sizePolicy().hasHeightForWidth())
self.le_xmin.setSizePolicy(sizePolicy)
self.le_xmin.setMinimumSize(QtCore.QSize(58, 24))
self.le_xmin.setMaximumSize(QtCore.QSize(58, 24))
self.le_xmin.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor))
self.le_xmin.setObjectName(_fromUtf8("le_xmin"))
self.horizontalLayout_11.addWidget(self.le_xmin)
self.lb_xmax = QtGui.QLabel(self.gr_platecoord)
sizePolicy = QtGui.QSizePolicy(
QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_xmax.sizePolicy().hasHeightForWidth())
self.lb_xmax.setSizePolicy(sizePolicy)
self.lb_xmax.setMinimumSize(QtCore.QSize(42, 0))
self.lb_xmax.setMaximumSize(QtCore.QSize(42, 16777215))
self.lb_xmax.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
)
self.lb_xmax.setObjectName(_fromUtf8("lb_xmax"))
self.horizontalLayout_11.addWidget(self.lb_xmax)
self.le_xmax = QtGui.QLineEdit(self.gr_platecoord)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_xmax.sizePolicy().hasHeightForWidth())
self.le_xmax.setSizePolicy(sizePolicy)
self.le_xmax.setMinimumSize(QtCore.QSize(58, 24))
self.le_xmax.setMaximumSize(QtCore.QSize(58, 24))
self.le_xmax.setObjectName(_fromUtf8("le_xmax"))
self.horizontalLayout_11.addWidget(self.le_xmax)
self.verticalLayout_4.addLayout(self.horizontalLayout_11)
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setSpacing(0)
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.lb_ymin = QtGui.QLabel(self.gr_platecoord)
sizePolicy = QtGui.QSizePolicy(
QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_ymin.sizePolicy().hasHeightForWidth())
self.lb_ymin.setSizePolicy(sizePolicy)
self.lb_ymin.setMinimumSize(QtCore.QSize(42, 0))
self.lb_ymin.setMaximumSize(QtCore.QSize(42, 16777215))
self.lb_ymin.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
)
self.lb_ymin.setObjectName(_fromUtf8("lb_ymin"))
self.horizontalLayout_6.addWidget(self.lb_ymin)
self.le_ymin = QtGui.QLineEdit(self.gr_platecoord)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_ymin.sizePolicy().hasHeightForWidth())
self.le_ymin.setSizePolicy(sizePolicy)
self.le_ymin.setMinimumSize(QtCore.QSize(58, 24))
self.le_ymin.setMaximumSize(QtCore.QSize(58, 24))
self.le_ymin.setObjectName(_fromUtf8("le_ymin"))
self.horizontalLayout_6.addWidget(self.le_ymin)
self.lb_ymax = QtGui.QLabel(self.gr_platecoord)
sizePolicy = QtGui.QSizePolicy(
QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_ymax.sizePolicy().hasHeightForWidth())
self.lb_ymax.setSizePolicy(sizePolicy)
self.lb_ymax.setMinimumSize(QtCore.QSize(42, 0))
self.lb_ymax.setMaximumSize(QtCore.QSize(42, 16777215))
self.lb_ymax.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
)
self.lb_ymax.setObjectName(_fromUtf8("lb_ymax"))
self.horizontalLayout_6.addWidget(self.lb_ymax)
self.le_ymax = QtGui.QLineEdit(self.gr_platecoord)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_ymax.sizePolicy().hasHeightForWidth())
self.le_ymax.setSizePolicy(sizePolicy)
self.le_ymax.setMinimumSize(QtCore.QSize(58, 24))
self.le_ymax.setMaximumSize(QtCore.QSize(58, 24))
self.le_ymax.setObjectName(_fromUtf8("le_ymax"))
self.horizontalLayout_6.addWidget(self.le_ymax)
self.verticalLayout_4.addLayout(self.horizontalLayout_6)
self.horizontalLayout.addWidget(self.gr_platecoord)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_12 = QtGui.QHBoxLayout()
self.horizontalLayout_12.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_12.setSpacing(0)
self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
self.gr_skip = QtGui.QGroupBox(self.layoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gr_skip.sizePolicy().hasHeightForWidth())
self.gr_skip.setSizePolicy(sizePolicy)
self.gr_skip.setMinimumSize(QtCore.QSize(200, 150))
self.gr_skip.setMaximumSize(QtCore.QSize(200, 150))
self.gr_skip.setObjectName(_fromUtf8("gr_skip"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.gr_skip)
self.verticalLayout_5.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.verticalLayout_5.setSpacing(0)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.horizontalLayout_9 = QtGui.QHBoxLayout()
self.horizontalLayout_9.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_9.setSpacing(0)
self.horizontalLayout_9.setObjectName(_fromUtf8("horizontalLayout_9"))
self.le_sampleskip = QtGui.QLineEdit(self.gr_skip)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.le_sampleskip.sizePolicy().hasHeightForWidth()
)
self.le_sampleskip.setSizePolicy(sizePolicy)
self.le_sampleskip.setMinimumSize(QtCore.QSize(60, 24))
self.le_sampleskip.setMaximumSize(QtCore.QSize(60, 24))
self.le_sampleskip.setObjectName(_fromUtf8("le_sampleskip"))
self.horizontalLayout_9.addWidget(self.le_sampleskip)
self.lb_sampleskip = QtGui.QLabel(self.gr_skip)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.lb_sampleskip.sizePolicy().hasHeightForWidth()
)
self.lb_sampleskip.setSizePolicy(sizePolicy)
self.lb_sampleskip.setMinimumSize(QtCore.QSize(50, 0))
self.lb_sampleskip.setAlignment(
QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
)
self.lb_sampleskip.setObjectName(_fromUtf8("lb_sampleskip"))
self.horizontalLayout_9.addWidget(self.lb_sampleskip)
self.verticalLayout_5.addLayout(self.horizontalLayout_9)
self.horizontalLayout_8 = QtGui.QHBoxLayout()
self.horizontalLayout_8.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_8.setSpacing(0)
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
self.le_colskip = QtGui.QLineEdit(self.gr_skip)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_colskip.sizePolicy().hasHeightForWidth())
self.le_colskip.setSizePolicy(sizePolicy)
self.le_colskip.setMinimumSize(QtCore.QSize(60, 24))
self.le_colskip.setMaximumSize(QtCore.QSize(60, 24))
self.le_colskip.setObjectName(_fromUtf8("le_colskip"))
self.horizontalLayout_8.addWidget(self.le_colskip)
self.lb_colskip = QtGui.QLabel(self.gr_skip)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_colskip.sizePolicy().hasHeightForWidth())
self.lb_colskip.setSizePolicy(sizePolicy)
self.lb_colskip.setMinimumSize(QtCore.QSize(50, 0))
self.lb_colskip.setAlignment(
QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
)
self.lb_colskip.setObjectName(_fromUtf8("lb_colskip"))
self.horizontalLayout_8.addWidget(self.lb_colskip)
self.verticalLayout_5.addLayout(self.horizontalLayout_8)
self.horizontalLayout_14 = QtGui.QHBoxLayout()
self.horizontalLayout_14.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_14.setSpacing(0)
self.horizontalLayout_14.setObjectName(_fromUtf8("horizontalLayout_14"))
self.le_rowskip = QtGui.QLineEdit(self.gr_skip)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_rowskip.sizePolicy().hasHeightForWidth())
self.le_rowskip.setSizePolicy(sizePolicy)
self.le_rowskip.setMinimumSize(QtCore.QSize(60, 24))
self.le_rowskip.setMaximumSize(QtCore.QSize(60, 24))
self.le_rowskip.setObjectName(_fromUtf8("le_rowskip"))
self.horizontalLayout_14.addWidget(self.le_rowskip)
self.lb_rowskip = QtGui.QLabel(self.gr_skip)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_rowskip.sizePolicy().hasHeightForWidth())
self.lb_rowskip.setSizePolicy(sizePolicy)
self.lb_rowskip.setMinimumSize(QtCore.QSize(50, 0))
self.lb_rowskip.setAlignment(
QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
)
self.lb_rowskip.setObjectName(_fromUtf8("lb_rowskip"))
self.horizontalLayout_14.addWidget(self.lb_rowskip)
self.verticalLayout_5.addLayout(self.horizontalLayout_14)
self.horizontalLayout_10 = QtGui.QHBoxLayout()
self.horizontalLayout_10.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_10.setSpacing(0)
self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
self.le_atskip = QtGui.QLineEdit(self.gr_skip)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_atskip.sizePolicy().hasHeightForWidth())
self.le_atskip.setSizePolicy(sizePolicy)
self.le_atskip.setMinimumSize(QtCore.QSize(60, 24))
self.le_atskip.setMaximumSize(QtCore.QSize(60, 24))
self.le_atskip.setObjectName(_fromUtf8("le_atskip"))
self.horizontalLayout_10.addWidget(self.le_atskip)
self.lb_atskip = QtGui.QLabel(self.gr_skip)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_atskip.sizePolicy().hasHeightForWidth())
self.lb_atskip.setSizePolicy(sizePolicy)
self.lb_atskip.setMinimumSize(QtCore.QSize(50, 0))
self.lb_atskip.setAlignment(
QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
)
self.lb_atskip.setObjectName(_fromUtf8("lb_atskip"))
self.horizontalLayout_10.addWidget(self.lb_atskip)
self.verticalLayout_5.addLayout(self.horizontalLayout_10)
self.horizontalLayout_12.addWidget(self.gr_skip)
self.gr_sample = QtGui.QGroupBox(self.layoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gr_sample.sizePolicy().hasHeightForWidth())
self.gr_sample.setSizePolicy(sizePolicy)
self.gr_sample.setMinimumSize(QtCore.QSize(285, 136))
self.gr_sample.setMaximumSize(QtCore.QSize(285, 136))
self.gr_sample.setObjectName(_fromUtf8("gr_sample"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.gr_sample)
self.verticalLayout_6.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.verticalLayout_6.setSpacing(0)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.te_samplelist = QtGui.QTextEdit(self.gr_sample)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.te_samplelist.sizePolicy().hasHeightForWidth()
)
self.te_samplelist.setSizePolicy(sizePolicy)
self.te_samplelist.setMinimumSize(QtCore.QSize(280, 68))
self.te_samplelist.setMaximumSize(QtCore.QSize(280, 68))
self.te_samplelist.setObjectName(_fromUtf8("te_samplelist"))
self.verticalLayout_3.addWidget(self.te_samplelist)
spacerItem5 = QtGui.QSpacerItem(
20, 4, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed
)
self.verticalLayout_3.addItem(spacerItem5)
self.horizontalLayout_13 = QtGui.QHBoxLayout()
self.horizontalLayout_13.setSpacing(0)
self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13"))
self.lb_samplemin = QtGui.QLabel(self.gr_sample)
sizePolicy = QtGui.QSizePolicy(
QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_samplemin.sizePolicy().hasHeightForWidth())
self.lb_samplemin.setSizePolicy(sizePolicy)
self.lb_samplemin.setMinimumSize(QtCore.QSize(80, 0))
self.lb_samplemin.setMaximumSize(QtCore.QSize(80, 16777215))
self.lb_samplemin.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
)
self.lb_samplemin.setObjectName(_fromUtf8("lb_samplemin"))
self.horizontalLayout_13.addWidget(self.lb_samplemin)
self.le_samplemin = QtGui.QLineEdit(self.gr_sample)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_samplemin.sizePolicy().hasHeightForWidth())
self.le_samplemin.setSizePolicy(sizePolicy)
self.le_samplemin.setMinimumSize(QtCore.QSize(60, 24))
self.le_samplemin.setMaximumSize(QtCore.QSize(60, 24))
self.le_samplemin.setObjectName(_fromUtf8("le_samplemin"))
self.horizontalLayout_13.addWidget(self.le_samplemin)
self.lb_samplemax = QtGui.QLabel(self.gr_sample)
sizePolicy = QtGui.QSizePolicy(
QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lb_samplemax.sizePolicy().hasHeightForWidth())
self.lb_samplemax.setSizePolicy(sizePolicy)
self.lb_samplemax.setMinimumSize(QtCore.QSize(80, 0))
self.lb_samplemax.setMaximumSize(QtCore.QSize(80, 16777215))
self.lb_samplemax.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter
)
self.lb_samplemax.setObjectName(_fromUtf8("lb_samplemax"))
self.horizontalLayout_13.addWidget(self.lb_samplemax)
self.le_samplemax = QtGui.QLineEdit(self.gr_sample)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.le_samplemax.sizePolicy().hasHeightForWidth())
self.le_samplemax.setSizePolicy(sizePolicy)
self.le_samplemax.setMinimumSize(QtCore.QSize(60, 24))
self.le_samplemax.setMaximumSize(QtCore.QSize(60, 24))
self.le_samplemax.setObjectName(_fromUtf8("le_samplemax"))
self.horizontalLayout_13.addWidget(self.le_samplemax)
self.verticalLayout_3.addLayout(self.horizontalLayout_13)
self.verticalLayout_6.addLayout(self.verticalLayout_3)
self.horizontalLayout_12.addWidget(self.gr_sample)
self.verticalLayout.addLayout(self.horizontalLayout_12)
spacerItem6 = QtGui.QSpacerItem(
20, 8, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed
)
self.verticalLayout.addItem(spacerItem6)
self.horizontalLayout_17 = QtGui.QHBoxLayout()
self.horizontalLayout_17.setObjectName(_fromUtf8("horizontalLayout_17"))
self.pb_preview = QtGui.QPushButton(self.layoutWidget)
self.pb_preview.setObjectName(_fromUtf8("pb_preview"))
self.horizontalLayout_17.addWidget(self.pb_preview)
self.pb_alignsave = QtGui.QPushButton(self.layoutWidget)
self.pb_alignsave.setObjectName(_fromUtf8("pb_alignsave"))
self.horizontalLayout_17.addWidget(self.pb_alignsave)
self.verticalLayout.addLayout(self.horizontalLayout_17)
spacerItem7 = QtGui.QSpacerItem(
20, 8, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed
)
self.verticalLayout.addItem(spacerItem7)
self.br_outputlog = QtGui.QTextBrowser(self.layoutWidget)
sizePolicy = QtGui.QSizePolicy(
QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.br_outputlog.sizePolicy().hasHeightForWidth())
self.br_outputlog.setSizePolicy(sizePolicy)
self.br_outputlog.setMaximumSize(QtCore.QSize(16777215, 110))
self.br_outputlog.setObjectName(_fromUtf8("br_outputlog"))
self.verticalLayout.addWidget(self.br_outputlog)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
originCoordLong, and originCoordName are used to specify a (named) coordinate. For the destination, the corresponding parameters are named either destId or destCoordLat, destCoordLong and destCoordName. It is also possible to define a via-stop/station. This forces the journey planner to search for trips which pass the defined station. The parameter is called viaId. When searching for a trip that goes via a coordinate, rather than a stop, two separate trip requests need to be combined into one.
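For example, two hypothetical synchronous calls (all ids and coordinates below are made-up placeholder values, not real stops):
>>> trips = api.get_trip(origin_id=9021014001760000, dest_id=9021014004830000, via_id=9021014001950000)
>>> trips = api.get_trip(origin_coord_lat=57.70, origin_coord_long=11.97, origin_coord_name='Origin', dest_coord_lat=57.72, dest_coord_long=12.01, dest_coord_name='Destination')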
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_trip_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int origin_id: origin stop id
:param float origin_coord_lat: origin latitude of center coordinate in the WGS84 system
:param float origin_coord_long: origin longitude of center coordinate in the WGS84 system
:param str origin_coord_name: name of the address at the specified origin coordinate
:param int dest_id: destination stop id
:param float dest_coord_lat: destination latitude of center coordinate in the WGS84 system
:param float dest_coord_long: destination longitude of center coordinate in the WGS84 system
:param str dest_coord_name: name of the address at the specified destination coordinate
:param int via_id: via stop/station id
:param date date: date of the trip
:param str time: time of the trip in format HH:MM
:param str search_for_arrival: to specify that the given time and date is not the departure time but the latest time to arrive at the destination, set this parameter to the value 1.
:param str use_vas: to exclude trips with Västtågen, set this parameter to 0.
:param str use_ld_train: to exclude trips with long distance trains, set this parameter to 0.
:param str use_reg_train: to exclude trips with regional trains, set this parameter to 0.
:param str use_bus: to exclude trips with buses, set this parameter to 0.
        :param str use_medical: to include trips with medical transport lines, set this parameter to 1.
:param str origin_medical_con: to search for medical transport connections from the origin, set this parameter to 1.
:param str dest_medical_con: to search for medical transport connections from the destination, set this parameter to 1.
:param str wheel_chair_space: to search for trips where at least one wheelchair space is present in the vehicle, set this parameter to 1.
:param str stroller_space: to search for trips with space for stroller, baby carriage or rollator in the vehicle, set this parameter to 1.
:param str low_floor: to search for trips where the vehicle is equipped with a low floor section, but not necessarily a ramp or lift, set this parameter to 1.
:param str ramp_or_lift: to search for trips where the vehicle is equipped with ramp or lift that allows fully barrier-free boarding and alighting, set this parameter to 1.
:param str use_boat: to exclude trips with boats, set this parameter to 0.
:param str use_tram: to exclude trips with trams, set this parameter to 0.
:param str use_pt: to exclude trips with public transportation, set this parameter to 0.
:param str exclude_dr: to exclude journeys which require tel. registration, set this parameter to 1.
:param int max_walk_dist: maximum walking distance from/to the coordinate in meters
:param str walk_speed: walking speed given in percent of normal speed
:param str origin_walk: to exclude trips with walks from/to coordinates, set this to 0
:param str dest_walk: to exclude trips with walks from/to coordinates, set this to 0
:param str only_walk: to search for walk-only trips, set this to 1
:param str origin_bike: to search for trips with a bike ride from the origin to a nearby stop, where the journey continues using public transport, set this to 1.
:param int max_bike_dist: maximum biking distance from/to the coordinate in meters
        :param str bike_criterion: optimize for either the fastest route or a route that is made up of a larger percentage of bike road, where 'F' is used to indicate the fastest route with minimized travel time, and 'D' is used to indicate dedicated bike roads to maximize use of bike roads.
        :param str bike_profile: determines the altitude profile of the route, based on a setting for how fast the user can bike when it is steep, where 'E' is used to indicate easy with minimized steepness, 'N' is used to indicate normal, and 'P' is used to indicate powerful to allow more steepness.
:param str only_bike: to search for bike-only trips, set this to 1
:param str origin_car: to search for trips where customer travels by car from the origin and is dropped off at a stop to continue the trip using public transport, set this to 1.
:param str origin_car_with_parking: to search for trips where the customer travels by car from the origin, parks at a commuter parking and walks to a nearby stop to continue the trip using public transport, set this to 1.
:param int max_car_dist: maximum car distance from/to the coordinate in meters
:param str only_car: to search for car-only trips, set this to 1
:param int max_changes: maximum number of changes in the trip
:param int additional_change_time: to prolong the minimal change times in minutes between the public transport legs of the returned journeys
:param str disregard_default_change_margin: to ignore the default change margin, set this to 1
        :param str need_journey_detail: if the reference URL for the journey detail service is not needed in the result, set this to 0
:param str need_geo: if a reference link for each leg of the resulting trips, which can be used to request the geometry, is needed, set this to 1
:param str need_itinerary: if a reference link for each leg of the resulting trips, which can be used to request the itinerary, is needed, set this to 1
:param int num_trips: the number of trips in the returned result
:param str format: the required response format
:param str jsonp_callback: If JSONP response format is needed, you can append an additional parameter to specify the name of a callback function, and the JSON object will be wrapped by a function call with this name.
:return: TripList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['origin_id', 'origin_coord_lat', 'origin_coord_long', 'origin_coord_name', 'dest_id', 'dest_coord_lat', 'dest_coord_long', 'dest_coord_name', 'via_id', 'date', 'time', 'search_for_arrival', 'use_vas', 'use_ld_train', 'use_reg_train', 'use_bus', 'use_medical', 'origin_medical_con', 'dest_medical_con', 'wheel_chair_space', 'stroller_space', 'low_floor', 'ramp_or_lift', 'use_boat', 'use_tram', 'use_pt', 'exclude_dr', 'max_walk_dist', 'walk_speed', 'origin_walk', 'dest_walk', 'only_walk', 'origin_bike', 'max_bike_dist', 'bike_criterion', 'bike_profile', 'only_bike', 'origin_car', 'origin_car_with_parking', 'max_car_dist', 'only_car', 'max_changes', 'additional_change_time', 'disregard_default_change_margin', 'need_journey_detail', 'need_geo', 'need_itinerary', 'num_trips', 'format', 'jsonp_callback']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_trip" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'origin_id' in params:
query_params.append(('originId', params['origin_id']))
if 'origin_coord_lat' in params:
query_params.append(('originCoordLat', params['origin_coord_lat']))
if 'origin_coord_long' in params:
query_params.append(('originCoordLong', params['origin_coord_long']))
if 'origin_coord_name' in params:
query_params.append(('originCoordName', params['origin_coord_name']))
if 'dest_id' in params:
query_params.append(('destId', params['dest_id']))
if 'dest_coord_lat' in params:
query_params.append(('destCoordLat', params['dest_coord_lat']))
if 'dest_coord_long' in params:
query_params.append(('destCoordLong', params['dest_coord_long']))
if 'dest_coord_name' in params:
query_params.append(('destCoordName', params['dest_coord_name']))
if 'via_id' in params:
query_params.append(('viaId', params['via_id']))
if 'date' in params:
query_params.append(('date', params['date']))
if 'time' in params:
query_params.append(('time', params['time']))
if 'search_for_arrival' in params:
query_params.append(('searchForArrival', params['search_for_arrival']))
if 'use_vas' in params:
query_params.append(('useVas', params['use_vas']))
if 'use_ld_train' in params:
query_params.append(('useLDTrain', params['use_ld_train']))
if 'use_reg_train' in params:
query_params.append(('useRegTrain', params['use_reg_train']))
if 'use_bus' in params:
query_params.append(('useBus', params['use_bus']))
if 'use_medical' in params:
query_params.append(('useMedical', params['use_medical']))
if 'origin_medical_con' in params:
query_params.append(('originMedicalCon', params['origin_medical_con']))
if 'dest_medical_con' in params:
query_params.append(('destMedicalCon', params['dest_medical_con']))
if 'wheel_chair_space' in params:
query_params.append(('wheelChairSpace', params['wheel_chair_space']))
if 'stroller_space' in params:
query_params.append(('strollerSpace', params['stroller_space']))
if 'low_floor' in params:
query_params.append(('lowFloor', params['low_floor']))
if 'ramp_or_lift' in params:
query_params.append(('rampOrLift', params['ramp_or_lift']))
if 'use_boat' in params:
query_params.append(('useBoat', params['use_boat']))
if 'use_tram' in params:
query_params.append(('useTram', params['use_tram']))
if 'use_pt' in params:
query_params.append(('usePT', params['use_pt']))
cb.solve()
error += cb.close()
if not forsystemhealth:
interpolate_bandpass_solutions(
msname,
sourcename,
thresh=interp_thresh,
polyorder=interp_polyorder,
mode='a'
)
caltables += [
{
'table': '{0}_{1}_bacal'.format(msname, sourcename),
'type': 'B',
'spwmap': spwmap
}
]
cb = cc.calibrater()
error += cb.open('{0}.ms'.format(msname))
error += apply_calibration_tables(cb, caltables)
error += cb.setsolve(
type='B',
combine=combine,
table='{0}_{1}_bpcal'.format(msname, sourcename),
refant=refant,
apmode='p',
t='inf',
solnorm=True
)
error += cb.solve()
error += cb.close()
if not forsystemhealth: # and not keepdelays:
interpolate_bandpass_solutions(
msname,
sourcename,
thresh=interp_thresh,
polyorder=interp_polyorder,
mode='p'
)
if not forsystemhealth and keepdelays:
with table(
'{0}_{1}_kcal'.format(msname, sourcename),
readonly=False
) as tb:
fparam = np.array(tb.FPARAM[:])
newparam = np.round(kcorr[:, np.newaxis, :]/2)*2
print('kcal', fparam.shape, newparam.shape)
tb.putcol('FPARAM', newparam)
with table(
'{0}_{1}_bkcal'.format(msname, sourcename),
readonly=False
) as tb:
bpass = np.array(tb.CPARAM[:])
print(newparam.shape, bpass.shape, fobs.shape)
bpass *= np.exp(
-2j*np.pi*(fobs[:, np.newaxis]-fref_snaps)*
newparam
)
print(bpass.shape)
tb.putcol('CPARAM', bpass)
if forsystemhealth:
caltables += [
{
'table': '{0}_{1}_bpcal'.format(msname, sourcename),
'type': 'B',
'spwmap': spwmap
}
]
cb = cc.calibrater()
error += not cb.open('{0}.ms'.format(msname))
error += apply_calibration_tables(cb, caltables)
error += not cb.setsolve(
type='M' if blbased else 'G',
combine=combine,
table='{0}_{1}_2gcal'.format(msname, sourcename),
refant=refant,
apmode='ap',
t=tbeam
)
error += not cb.solve()
error += not cb.close()
return error
def flag_antenna(msname, antenna, datacolumn='data', pol=None):
"""Flags an antenna in a measurement set using CASA.
Parameters
----------
msname : str
The name of the measurement set. The MS `msname`.ms will be opened.
    antenna : str or int
The antenna to flag. If type *str*, this is the name of the antenna. If
type *int*, the index of the antenna in the measurement set.
datacolumn : str
The column of the measurement set to flag. Options are ``'data'``,
``'model'``, ``'corrected'`` for the uncalibrated visibilities, the
visibility model (used by CASA to calculate calibration solutions), the
calibrated visibilities. Defaults to ``'data'``.
pol : str
The polarization to flag. Must be `'A'` (which is mapped to
polarization 'XX' of the CASA measurement set) or `'B'` (mapped to
polarization 'YY'). Can also be `None`, for which both polarizations
are flagged. Defaults to `None`.
Returns
-------
int
        The number of errors that occurred during flagging.
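    Examples
    --------
    A minimal usage sketch (``'mydata'`` and antenna ``'24'`` are hypothetical
    placeholder values):
    >>> err = flag_antenna('mydata', '24', datacolumn='data', pol='A')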
"""
if isinstance(antenna, int):
antenna = str(antenna)
error = 0
ag = cc.agentflagger()
error += not ag.open('{0}.ms'.format(msname))
error += not ag.selectdata()
rec = {}
rec['mode'] = 'manual'
#rec['clipoutside'] = False
rec['datacolumn'] = datacolumn
rec['antenna'] = antenna
if pol is not None:
rec['correlation'] = 'XX' if pol == 'A' else 'YY'
else:
rec['correlation'] = 'XX,YY'
error += not ag.parseagentparameters(rec)
error += not ag.init()
error += not ag.run()
error += not ag.done()
return error
def flag_manual(msname, key, value, datacolumn='data', pol=None):
"""Flags a measurement set in CASA using a flagging string.
Parameters
----------
msname : str
The name of the measurement set. The MS `msname`.ms will be opened.
    key : str
        The CASA-interpreted flagging parameter to set (e.g. ``'antenna'``,
        ``'spw'`` or ``'scan'``).
    value : str
        The selection value to flag for the given `key`.
datacolumn : str
The column of the measurement set to flag. Options are ``'data'``,
``'model'``, ``'corrected'`` for the uncalibrated visibilities, the
visibility model (used by CASA to calculate calibration solutions), the
calibrated visibilities. Defaults to ``'data'``.
pol : str
The polarization to flag. Must be `'A'` (which is mapped to
polarization 'XX' of the CASA measurement set) or `'B'` (mapped to
polarization 'YY'). Can also be `None`, for which both polarizations
are flagged. Defaults to `None`.
Returns
-------
int
        The number of errors that occurred during flagging.
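    Examples
    --------
    A minimal usage sketch (``'mydata'`` and the spw selection are hypothetical
    placeholder values):
    >>> err = flag_manual('mydata', 'spw', '0:0~100')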
"""
error = 0
ag = cc.agentflagger()
error += not ag.open('{0}.ms'.format(msname))
error += not ag.selectdata()
rec = {}
rec['mode'] = 'manual'
rec['datacolumn'] = datacolumn
rec[key] = value
if pol is not None:
rec['correlation'] = 'XX' if pol == 'A' else 'YY'
else:
rec['correlation'] = 'XX,YY'
error += not ag.parseagentparameters(rec)
error += not ag.init()
error += not ag.run()
error += not ag.done()
return error
def flag_baselines(msname, datacolumn='data', uvrange='2~15m'):
"""Flags an antenna in a measurement set using CASA.
Parameters
----------
msname : str
The name of the measurement set. The MS `msname`.ms will be opened.
datacolumn : str
The column of the measurement set to flag. Options are ``'data'``,
``'model'``, ``'corrected'`` for the uncalibrated visibilities, the
visibility model (used by CASA to calculate calibration solutions), the
calibrated visibilities. Defaults to ``'data'``.
uvrange : str
The uvrange to flag. Should be CASA-interpretable.
Returns
-------
int
        The number of errors that occurred during flagging.
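    Examples
    --------
    A minimal usage sketch (``'mydata'`` is a hypothetical measurement set prefix):
    >>> err = flag_baselines('mydata', datacolumn='data', uvrange='2~15m')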
"""
error = 0
ag = cc.agentflagger()
error += not ag.open('{0}.ms'.format(msname))
error += not ag.selectdata()
rec = {}
rec['mode'] = 'manual'
rec['datacolumn'] = datacolumn
rec['uvrange'] = uvrange
error += not ag.parseagentparameters(rec)
error += not ag.init()
error += not ag.run()
error += not ag.done()
return error
def reset_flags(msname, datacolumn=None):
"""Resets all flags in a measurement set, so that all data is unflagged.
Parameters
----------
msname : str
The name of the measurement set. The MS `msname`.ms will be opened.
datacolumn : str
The column of the measurement set to flag. Options are ``'data'``,
``'model'``, ``'corrected'`` for the uncalibrated visibilities, the
visibility model (used by CASA to calculate calibration solutions), the
calibrated visibilities. Defaults to ``'data'``.
Returns
-------
int
        The number of errors that occurred during flagging.
"""
error = 0
ag = cc.agentflagger()
error += not ag.open('{0}.ms'.format(msname))
error += not ag.selectdata()
rec = {}
rec['mode'] = 'unflag'
if datacolumn is not None:
rec['datacolumn'] = datacolumn
rec['antenna'] = ''
error += not ag.parseagentparameters(rec)
error += not ag.init()
error += not ag.run()
error += not ag.done()
return error
def flag_zeros(msname, datacolumn='data'):
"""Flags all zeros in a measurement set.
Parameters
----------
msname : str
The name of the measurement set. The MS `msname`.ms will be opened.
datacolumn : str
The column of the measurement set to flag. Options are ``'data'``,
``'model'``, ``'corrected'`` for the uncalibrated visibilities, the
visibility model (used by CASA to calculate calibration solutions), the
calibrated visibilities. Defaults to ``'data'``.
Returns
-------
int
        The number of errors that occurred during flagging.
"""
error = 0
ag = cc.agentflagger()
error += not ag.open('{0}.ms'.format(msname))
error += not ag.selectdata()
rec = {}
rec['mode'] = 'clip'
rec['clipzeros'] = True
rec['datacolumn'] = datacolumn
error += not ag.parseagentparameters(rec)
error += not ag.init()
error += not ag.run()
error += not ag.done()
return error
# TODO: Change times to not use mjds, but mjd instead
def flag_badtimes(msname, times, bad, nant, datacolumn='data', verbose=False):
"""Flags bad time bins for each antenna in a measurement set using CASA.
Could use some work to select antennas to flag in a smarter way.
Parameters
----------
msname : str
The name of the measurement set. The MS `msname`.ms will be opened.
times : ndarray
A 1-D array of times, type float, seconds since MJD=0. Times should be
equally spaced and cover the entire time range of the measurement set,
but can be coarser than the resolution of the measurement set.
bad : ndarray
        A 1-D boolean array of length len(`times`). Should have a value of
        ``True`` for timebins that should be flagged (applied to all antennas).
nant : int
The number of antennas in the measurement set (includes ones not in the
visibilities).
datacolumn : str
The column of the measurement set to flag. Options are ``'data'``,
``'model'``, ``'corrected'`` for the uncalibrated visibilities, the
visibility model (used by CASA to calculate calibration solutions), the
calibrated visibilities. Defaults to ``'data'``.
verbose : boolean
If ``True``, will print information about the antenna/time pairs being
flagged. Defaults to ``False``.
Returns
-------
int
        The number of errors that occurred during flagging.
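    Examples
    --------
    A minimal sketch with made-up values (``'mydata'``, the start time ``t0`` and
    the antenna count are placeholders):
    >>> import numpy as np
    >>> t0 = 5.0e9  # seconds since MJD=0
    >>> times = t0 + 60.0*np.arange(10)
    >>> bad = np.zeros(10, dtype=bool)
    >>> bad[3] = True
    >>> err = flag_badtimes('mydata', times, bad, nant=64)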
"""
error = 0
tdiff = np.median(np.diff(times))
ag = cc.agentflagger()
error += not ag.open('{0}.ms'.format(msname))
error += not ag.selectdata()
for i in range(nant):
rec = {}
rec['mode'] = 'clip'
rec['clipoutside'] = False
rec['datacolumn'] = datacolumn
rec['antenna'] = str(i)
rec['polarization_type'] = 'XX'
tstr = ''
for j, timesj in enumerate(times):
if bad[j]:
if len(tstr) > 0:
tstr += '; '
tstr += '{0}~{1}'.format(timesj-tdiff/2, timesj+tdiff/2)
if verbose:
print('For antenna {0}, flagged: {1}'.format(i, tstr))
error += not ag.parseagentparameters(rec)
error += not ag.init()
error += not ag.run()
rec['polarization_type'] = 'YY'
tstr = ''
for j, timesj in enumerate(times):
if bad[j]:
if len(tstr) > 0:
tstr += '; '
tstr += '{0}~{1}'.format(timesj-tdiff/2, timesj+tdiff/2)
if verbose:
print('For antenna {0}, flagged: {1}'.format(i, tstr))
error += not ag.parseagentparameters(rec)
        error += not ag.init()
# Source repository: faradayio/docker-crankshaft (10-100 GitHub stars)
import unittest
import numpy as np
# from mock_plpy import MockPlPy
# plpy = MockPlPy()
#
# import sys
# sys.modules['plpy'] = plpy
from helper import plpy, fixture_file
import crankshaft.space_time_dynamics as std
from crankshaft import random_seeds
import json
class SpaceTimeTests(unittest.TestCase):
"""Testing class for Markov Functions."""
def setUp(self):
plpy._reset()
self.params = {"id_col": "cartodb_id",
"time_cols": ['dec_2013', 'jan_2014', 'feb_2014'],
"subquery": "SELECT * FROM a_list",
"geom_col": "the_geom",
"num_ngbrs": 321}
self.neighbors_data = json.loads(open(fixture_file('neighbors_markov.json')).read())
self.markov_data = json.loads(open(fixture_file('markov.json')).read())
self.time_data = np.array([i * np.ones(10, dtype=float) for i in range(10)]).T
self.transition_matrix = np.array([
[[ 0.96341463, 0.0304878 , 0.00609756, 0. , 0. ],
[ 0.06040268, 0.83221477, 0.10738255, 0. , 0. ],
[ 0. , 0.14 , 0.74 , 0.12 , 0. ],
[ 0. , 0.03571429, 0.32142857, 0.57142857, 0.07142857],
[ 0. , 0. , 0. , 0.16666667, 0.83333333]],
[[ 0.79831933, 0.16806723, 0.03361345, 0. , 0. ],
[ 0.0754717 , 0.88207547, 0.04245283, 0. , 0. ],
[ 0.00537634, 0.06989247, 0.8655914 , 0.05913978, 0. ],
[ 0. , 0. , 0.06372549, 0.90196078, 0.03431373],
[ 0. , 0. , 0. , 0.19444444, 0.80555556]],
[[ 0.84693878, 0.15306122, 0. , 0. , 0. ],
[ 0.08133971, 0.78947368, 0.1291866 , 0. , 0. ],
[ 0.00518135, 0.0984456 , 0.79274611, 0.0984456 , 0.00518135],
[ 0. , 0. , 0.09411765, 0.87058824, 0.03529412],
[ 0. , 0. , 0. , 0.10204082, 0.89795918]],
[[ 0.8852459 , 0.09836066, 0. , 0.01639344, 0. ],
[ 0.03875969, 0.81395349, 0.13953488, 0. , 0.00775194],
[ 0.0049505 , 0.09405941, 0.77722772, 0.11881188, 0.0049505 ],
[ 0. , 0.02339181, 0.12865497, 0.75438596, 0.09356725],
[ 0. , 0. , 0. , 0.09661836, 0.90338164]],
[[ 0.33333333, 0.66666667, 0. , 0. , 0. ],
[ 0.0483871 , 0.77419355, 0.16129032, 0.01612903, 0. ],
[ 0.01149425, 0.16091954, 0.74712644, 0.08045977, 0. ],
[ 0. , 0.01036269, 0.06217617, 0.89637306, 0.03108808],
[ 0. , 0. , 0. , 0.02352941, 0.97647059]]]
)
def test_spatial_markov(self):
"""Test Spatial Markov."""
data = [ { 'id': d['id'],
'attr1': d['y1995'],
'attr2': d['y1996'],
'attr3': d['y1997'],
'attr4': d['y1998'],
'attr5': d['y1999'],
'attr6': d['y2000'],
'attr7': d['y2001'],
'attr8': d['y2002'],
'attr9': d['y2003'],
'attr10': d['y2004'],
'attr11': d['y2005'],
'attr12': d['y2006'],
'attr13': d['y2007'],
'attr14': d['y2008'],
'attr15': d['y2009'],
'neighbors': d['neighbors'] } for d in self.neighbors_data]
print(str(data[0]))
plpy._define_result('select', data)
random_seeds.set_random_seeds(1234)
result = std.spatial_markov_trend('subquery', ['y1995', 'y1996', 'y1997', 'y1998', 'y1999', 'y2000', 'y2001', 'y2002', 'y2003', 'y2004', 'y2005', 'y2006', 'y2007', 'y2008', 'y2009'], 5, 'knn', 5, 0, 'the_geom', 'cartodb_id')
self.assertTrue(result != None)
result = [(row[0], row[1], row[2], row[3], row[4]) for row in result]
        print(result[0])
expected = self.markov_data
for ([res_trend, res_up, res_down, res_vol, res_id],
[exp_trend, exp_up, exp_down, exp_vol, exp_id]
) in zip(result, expected):
self.assertAlmostEqual(res_trend, exp_trend)
def test_get_time_data(self):
"""Test get_time_data"""
data = [ { 'attr1': d['y1995'],
'attr2': d['y1996'],
'attr3': d['y1997'],
'attr4': d['y1998'],
'attr5': d['y1999'],
'attr6': d['y2000'],
'attr7': d['y2001'],
'attr8': d['y2002'],
'attr9': d['y2003'],
'attr10': d['y2004'],
'attr11': d['y2005'],
'attr12': d['y2006'],
'attr13': d['y2007'],
'attr14': d['y2008'],
'attr15': d['y2009'] } for d in self.neighbors_data]
result = std.get_time_data(data, ['y1995', 'y1996', 'y1997', 'y1998', 'y1999', 'y2000', 'y2001', 'y2002', 'y2003', 'y2004', 'y2005', 'y2006', 'y2007', 'y2008', 'y2009'])
## expected was prepared from PySAL example:
### f = ps.open(ps.examples.get_path("usjoin.csv"))
### pci = np.array([f.by_col[str(y)] for y in range(1995, 2010)]).transpose()
### rpci = pci / (pci.mean(axis = 0))
expected = np.array([[ 0.87654416, 0.863147, 0.85637567, 0.84811668, 0.8446154, 0.83271652
, 0.83786314, 0.85012593, 0.85509656, 0.86416612, 0.87119375, 0.86302631
, 0.86148267, 0.86252252, 0.86746356],
[ 0.9188951, 0.91757931, 0.92333258, 0.92517289, 0.92552388, 0.90746978
, 0.89830489, 0.89431991, 0.88924794, 0.89815176, 0.91832091, 0.91706054
, 0.90139505, 0.87897455, 0.86216858],
[ 0.82591007, 0.82548596, 0.81989793, 0.81503235, 0.81731522, 0.78964559
, 0.80584442, 0.8084998, 0.82258551, 0.82668196, 0.82373724, 0.81814804
, 0.83675961, 0.83574199, 0.84647177],
[ 1.09088176, 1.08537689, 1.08456418, 1.08415404, 1.09898841, 1.14506948
, 1.12151133, 1.11160697, 1.10888621, 1.11399806, 1.12168029, 1.13164797
, 1.12958508, 1.11371818, 1.09936775],
[ 1.10731446, 1.11373944, 1.13283638, 1.14472559, 1.15910025, 1.16898201
, 1.17212488, 1.14752303, 1.11843284, 1.11024964, 1.11943471, 1.11736468
, 1.10863242, 1.09642516, 1.07762337],
[ 1.42269757, 1.42118434, 1.44273502, 1.43577571, 1.44400684, 1.44184737
, 1.44782832, 1.41978227, 1.39092208, 1.4059372, 1.40788646, 1.44052766
, 1.45241216, 1.43306098, 1.4174431 ],
[ 1.13073885, 1.13110513, 1.11074708, 1.13364636, 1.13088149, 1.10888138
, 1.11856629, 1.13062931, 1.11944984, 1.12446239, 1.11671008, 1.10880034
, 1.08401709, 1.06959206, 1.07875225],
[ 1.04706124, 1.04516831, 1.04253372, 1.03239987, 1.02072545, 0.99854316
, 0.9880258, 0.99669587, 0.99327676, 1.01400905, 1.03176742, 1.040511
, 1.01749645, 0.9936394, 0.98279746],
[ 0.98996986, 1.00143564, 0.99491, 1.00188408, 1.00455845, 0.99127006
, 0.97925917, 0.9683482, 0.95335147, 0.93694787, 0.94308213, 0.92232874
, 0.91284091, 0.89689833, 0.88928858],
[ 0.87418391, 0.86416601, 0.84425695, 0.8404494, 0.83903044, 0.8578708
, 0.86036185, 0.86107306, 0.8500772, 0.86981998, 0.86837929, 0.87204141
, 0.86633032, 0.84946077, 0.83287146],
[ 1.14196118, 1.14660262, 1.14892712, 1.14909594, 1.14436624, 1.14450183
, 1.12349752, 1.12596664, 1.12213996, 1.1119989, 1.10257792, 1.10491258
, 1.11059842, 1.10509795, 1.10020097],
[ 0.97282463, 0.96700147, 0.96252588, 0.9653878, 0.96057687, 0.95831051
, 0.94480909, 0.94804195, 0.95430286, 0.94103989, 0.92122519, 0.91010201
, 0.89280392, 0.89298243, 0.89165385],
[ 0.94325468, 0.96436902, 0.96455242, 0.95243009, 0.94117647, 0.9480927
, 0.93539182, 0.95388718, 0.94597005, 0.96918424, 0.94781281, 0.93466815
, 0.94281559, 0.96520315, 0.96715441],
[ 0.97478408, 0.98169225, 0.98712809, 0.98474769, 0.98559897, 0.98687073
, 0.99237486, 0.98209969, 0.9877653, 0.97399471, 0.96910087, 0.98416665
, 0.98423613, 0.99823861, 0.99545704],
[ 0.85570269, 0.85575915, 0.85986132, 0.85693406, 0.8538012, 0.86191535
, 0.84981451, 0.85472102, 0.84564835, 0.83998883, 0.83478547, 0.82803648
, 0.8198736, 0.82265395, 0.8399404 ],
[ 0.87022047, 0.85996258, 0.85961813, 0.85689572, 0.83947136, 0.82785597
, 0.86008789, 0.86776298, 0.86720209, 0.8676334, 0.89179317, 0.94202108
, 0.9422231, 0.93902708, 0.94479184],
[ 0.90134907, 0.90407738, 0.90403991, 0.90201769, 0.90399238, 0.90906632
, 0.92693339, 0.93695966, 0.94242697, 0.94338265, 0.91981796, 0.91108804
, 0.90543476, 0.91737138, 0.94793657],
[ 1.1977611, 1.18222564, 1.18439158, 1.18267865, 1.19286723, 1.20172869
, 1.21328691, 1.22624778, 1.22397075, 1.23857042, 1.24419893, 1.23929384
, 1.23418676, 1.23626739, 1.26754398],
[ 1.24919678, 1.25754773, 1.26991161, 1.28020651, 1.30625667, 1.34790023
, 1.34399863, 1.32575181, 1.30795492, 1.30544841, 1.30303302, 1.32107766
, 1.32936244, 1.33001241, 1.33288462],
[ 1.06768004, 1.03799276, 1.03637303, 1.02768449, 1.03296093, 1.05059016
, 1.03405057, 1.02747623, 1.03162734, 0.9961416, 0.97356208, 0.94241549
, 0.92754547, 0.92549227, 0.92138102],
[ 1.09475614, 1.11526796, 1.11654299, 1.13103948, 1.13143264, 1.13889622
, 1.12442212, 1.13367018, 1.13982256, 1.14029944, 1.11979401, 1.10905389
, 1.10577769, 1.11166825, 1.09985155],
[ 0.76530058, 0.76612841, 0.76542451, 0.76722683, 0.76014284, 0.74480073
, 0.76098396, 0.76156903, 0.76651952, 0.76533288, 0.78205934, 0.76842416
, 0.77487118, 0.77768683, 0.78801192],
[ 0.98391336, 0.98075816, 0.98295341, 0.97386015, 0.96913803, 0.97370819
, 0.96419154, 0.97209861, 0.97441313, 0.96356162, 0.94745352, 0.93965462
, 0.93069645, 0.94020973, 0.94358232],
[ 0.83561828, 0.82298088, 0.81738502, 0.81748588, 0.80904801, 0.80071489
, 0.83358256, 0.83451613, 0.85175032, 0.85954307, 0.86790024, 0.87170334
, 0.87863799, 0.87497981, 0.87888675],
[ 0.98845573, 1.02092428, 0.99665283, 0.99141823, 0.99386619, 0.98733195
, 0.99644997, 0.99669587, 1.02559097, 1.01116651, 0.99988024, 0.97906749
, 0.99323123, 1.00204939, 0.99602148],
[ 1.14930913, 1.15241949, 1.14300962, 1.14265542, 1.13984683, 1.08312397
, 1.05192626, 1.04230892, 1.05577278, 1.08569751, 1.12443486, 1.08891079
, 1.08603695, 1.05997314, 1.02160943],
[ 1.11368269, 1.1057147, 1.11893431, 1.13778669, 1.1432272, 1.18257029
, 1.16226243, 1.16009196, 1.14467789, 1.14820235, 1.12386598, 1.12680236
, 1.12357937, 1.1159258, 1.12570828],
[ 1.30379431, 1.30752186, 1.31206366, 1.31532267, 1.30625667, 1.31210239
, 1.29989156, 1.29203193, 1.27183516, 1.26830786, 1.2617743, 1.28656675
, 1.29734097, 1.29390205, 1.29345446],
[ 0.83953719, 0.82701448, 0.82006005, 0.81188876, 0.80294864, 0.78772975
, 0.82848011, 0.8259679, 0.82435705, 0.83108634, 0.84373784, 0.83891093
, 0.84349247, 0.85637272, 0.86539395],
[ 1.23450087, 1.2426022, 1.23537935, 1.23581293, 1.24522626, 1.2256767
, 1.21126648, 1.19377804, 1.18355337, 1.19674434, 1.21536573, 1.23653297
, 1.27962009, 1.27968392, 1.25907738],
[ 0.9769662, 0.97400719, 0.98035944, 0.97581531, 0.95543282, 0.96480308
, 0.94686376, 0.93679073, 0.92540049, 0.92988835, 0.93442917, 0.92100464
, 0.91475304, 0.90249622, 0.9021363 ],
[ 0.84986886, 0.8986851, 0.84295997, 0.87280534, 0.85659368, 0.88937573
, 0.894401, 0.90448993, 0.95495898, 0.92698333, 0.94745352, 0.92562488
, 0.96635366, 1.02520312, 1.0394296 ],
[ 1.01922808, 1.00258203, 1.00974428, 1.00303417, 0.99765073, 1.00759019
, 0.99192968, 0.99747298, 0.99550759, 0.97583768, 0.9610168, 0.94779638
, 0.93759089, 0.93353431, 0.94121705],
[ 0.86367411, 0.85558932, 0.85544346, 0.85103025, 0.84336613, 0.83434854
, 0.85813595, 0.84667961, 0.84374558, 0.85951183, 0.87194227, 0.89455097
, 0.88283929, 0.90349491, 0.90600675],
[ 1.00947534, 1.00411055, 1.00698819, 0.99513687, 0.99291086, 1.00581626
, 0.98850522, 0.99291168, 0.98983209, 0.97511924, 0.96134615, 0.96382634
, 0.95011401, 0.9434686, 0.94637765],
[ 1.05712571, 1.05459419, 1.05753012, 1.04880786, 1.05103857, 1.04800023
, 1.03024941, 1.04200483, 1.0402554, 1.03296979, 1.02191682, 1.02476275
, 1.02347523, 1.02517684, 1.04359571],
[ 1.07084189, 1.06669497, 1.07937623, 1.07387988, 1.0794043, 1.0531801
, 1.07452771, 1.09383478, 1.1052447, 1.10322136, 1.09167939, 1.08772756
, 1.08859544, 1.09177338, 1.1096083 ],
[ 0.86719222, 0.86628896, 0.86675156, 0.86425632, 0.86511809, 0.86287327
, 0.85169796, 0.85411285, 0.84886336, 0.84517414, 0.84843858, 0.84488343
, 0.83374329, 0.82812044, 0.82878599],
[ 0.88389211, 0.92288667, 0.90282398, 0.91229186, 0.92023286, 0.92652175
, 0.94278865, 0.93682452, 0.98655146, 0.992237, 0.9798497, 0.93869677
, 0.96947771, 1.00362626, 0.98102351],
[ 0.97082064, 0.95320233, 0.94534081, 0.94215593, 0.93967, 0.93092109
, 0.92662519, 0.93412152, 0.93501274, 0.92879506, 0.92110542, 0.91035556
, 0.90430364, 0.89994694, 0.90073864],
[ 0.95861858, 0.95774543, 0.98254811, 0.98919472, 0.98684824, 0.98882205
, 0.97662234, 0.95601578, 0.94905385, 0.94934888, 0.97152609, 0.97163004
, 0.9700702, 0.97158948, 0.95884908],
[ 0.83980439, 0.84726737, 0.85747, 0.85467221, 0.8556751, 0.84818516
, 0.85265681, 0.84502402, 0.82645665, 0.81743586, 0.83550406, 0.83338919
, 0.83511679, 0.82136617, 0.80921874],
[ 0.95118156, 0.9466212, 0.94688098, 0.9508583, 0.9512441, 0.95440787
, 0.96364363, 0.96804412, 0.97136214, 0.97583768, 0.95571724, 0.96895368
, 0.97001634, 0.97082733, 0.98782366],
[ 1.08910044, 1.08248968, 1.08492895, 1.08656923, 1.09454249, 1.10558188
, 1.1214086, 1.12292577, 1.13021031, 1.13342735, 1.14686068, 1.14502975
| |
0x47FA
MediaGaugeB251 = 0x47FB
MediaGaugeB252 = 0x47FC
MediaGaugeB253 = 0x47FD
MediaGaugeB254 = 0x47FE
MediaGaugeB255 = 0x47FF
# MediaGaugeC List
MediaGaugeC0 = 0x4800
MediaGaugeC1 = 0x4801
MediaGaugeC2 = 0x4802
MediaGaugeC3 = 0x4803
MediaGaugeC4 = 0x4804
MediaGaugeC5 = 0x4805
MediaGaugeC6 = 0x4806
MediaGaugeC7 = 0x4807
MediaGaugeC8 = 0x4808
MediaGaugeC9 = 0x4809
MediaGaugeC10 = 0x480A
MediaGaugeC11 = 0x480B
MediaGaugeC12 = 0x480C
MediaGaugeC13 = 0x480D
MediaGaugeC14 = 0x480E
MediaGaugeC15 = 0x480F
MediaGaugeC16 = 0x4810
MediaGaugeC17 = 0x4811
MediaGaugeC18 = 0x4812
MediaGaugeC19 = 0x4813
MediaGaugeC20 = 0x4814
MediaGaugeC21 = 0x4815
MediaGaugeC22 = 0x4816
MediaGaugeC23 = 0x4817
MediaGaugeC24 = 0x4818
MediaGaugeC25 = 0x4819
MediaGaugeC26 = 0x481A
MediaGaugeC27 = 0x481B
MediaGaugeC28 = 0x481C
MediaGaugeC29 = 0x481D
MediaGaugeC30 = 0x481E
MediaGaugeC31 = 0x481F
MediaGaugeC32 = 0x4820
MediaGaugeC33 = 0x4821
MediaGaugeC34 = 0x4822
MediaGaugeC35 = 0x4823
MediaGaugeC36 = 0x4824
MediaGaugeC37 = 0x4825
MediaGaugeC38 = 0x4826
MediaGaugeC39 = 0x4827
MediaGaugeC40 = 0x4828
MediaGaugeC41 = 0x4829
MediaGaugeC42 = 0x482A
MediaGaugeC43 = 0x482B
MediaGaugeC44 = 0x482C
MediaGaugeC45 = 0x482D
MediaGaugeC46 = 0x482E
MediaGaugeC47 = 0x482F
MediaGaugeC48 = 0x4830
MediaGaugeC49 = 0x4831
MediaGaugeC50 = 0x4832
MediaGaugeC51 = 0x4833
MediaGaugeC52 = 0x4834
MediaGaugeC53 = 0x4835
MediaGaugeC54 = 0x4836
MediaGaugeC55 = 0x4837
MediaGaugeC56 = 0x4838
MediaGaugeC57 = 0x4839
MediaGaugeC58 = 0x483A
MediaGaugeC59 = 0x483B
MediaGaugeC60 = 0x483C
MediaGaugeC61 = 0x483D
MediaGaugeC62 = 0x483E
MediaGaugeC63 = 0x483F
MediaGaugeC64 = 0x4840
MediaGaugeC65 = 0x4841
MediaGaugeC66 = 0x4842
MediaGaugeC67 = 0x4843
MediaGaugeC68 = 0x4844
MediaGaugeC69 = 0x4845
MediaGaugeC70 = 0x4846
MediaGaugeC71 = 0x4847
MediaGaugeC72 = 0x4848
MediaGaugeC73 = 0x4849
MediaGaugeC74 = 0x484A
MediaGaugeC75 = 0x484B
MediaGaugeC76 = 0x484C
MediaGaugeC77 = 0x484D
MediaGaugeC78 = 0x484E
MediaGaugeC79 = 0x484F
MediaGaugeC80 = 0x4850
MediaGaugeC81 = 0x4851
MediaGaugeC82 = 0x4852
MediaGaugeC83 = 0x4853
MediaGaugeC84 = 0x4854
MediaGaugeC85 = 0x4855
MediaGaugeC86 = 0x4856
MediaGaugeC87 = 0x4857
MediaGaugeC88 = 0x4858
MediaGaugeC89 = 0x4859
MediaGaugeC90 = 0x485A
MediaGaugeC91 = 0x485B
MediaGaugeC92 = 0x485C
MediaGaugeC93 = 0x485D
MediaGaugeC94 = 0x485E
MediaGaugeC95 = 0x485F
MediaGaugeC96 = 0x4860
MediaGaugeC97 = 0x4861
MediaGaugeC98 = 0x4862
MediaGaugeC99 = 0x4863
MediaGaugeC100 = 0x4864
MediaGaugeC101 = 0x4865
MediaGaugeC102 = 0x4866
MediaGaugeC103 = 0x4867
MediaGaugeC104 = 0x4868
MediaGaugeC105 = 0x4869
MediaGaugeC106 = 0x486A
MediaGaugeC107 = 0x486B
MediaGaugeC108 = 0x486C
MediaGaugeC109 = 0x486D
MediaGaugeC110 = 0x486E
MediaGaugeC111 = 0x486F
MediaGaugeC112 = 0x4870
MediaGaugeC113 = 0x4871
MediaGaugeC114 = 0x4872
MediaGaugeC115 = 0x4873
MediaGaugeC116 = 0x4874
MediaGaugeC117 = 0x4875
MediaGaugeC118 = 0x4876
MediaGaugeC119 = 0x4877
MediaGaugeC120 = 0x4878
MediaGaugeC121 = 0x4879
MediaGaugeC122 = 0x487A
MediaGaugeC123 = 0x487B
MediaGaugeC124 = 0x487C
MediaGaugeC125 = 0x487D
MediaGaugeC126 = 0x487E
MediaGaugeC127 = 0x487F
MediaGaugeC128 = 0x4880
MediaGaugeC129 = 0x4881
MediaGaugeC130 = 0x4882
MediaGaugeC131 = 0x4883
MediaGaugeC132 = 0x4884
MediaGaugeC133 = 0x4885
MediaGaugeC134 = 0x4886
MediaGaugeC135 = 0x4887
MediaGaugeC136 = 0x4888
MediaGaugeC137 = 0x4889
MediaGaugeC138 = 0x488A
MediaGaugeC139 = 0x488B
MediaGaugeC140 = 0x488C
MediaGaugeC141 = 0x488D
MediaGaugeC142 = 0x488E
MediaGaugeC143 = 0x488F
MediaGaugeC144 = 0x4890
MediaGaugeC145 = 0x4891
MediaGaugeC146 = 0x4892
MediaGaugeC147 = 0x4893
MediaGaugeC148 = 0x4894
MediaGaugeC149 = 0x4895
MediaGaugeC150 = 0x4896
MediaGaugeC151 = 0x4897
MediaGaugeC152 = 0x4898
MediaGaugeC153 = 0x4899
MediaGaugeC154 = 0x489A
MediaGaugeC155 = 0x489B
MediaGaugeC156 = 0x489C
MediaGaugeC157 = 0x489D
MediaGaugeC158 = 0x489E
MediaGaugeC159 = 0x489F
MediaGaugeC160 = 0x48A0
MediaGaugeC161 = 0x48A1
MediaGaugeC162 = 0x48A2
MediaGaugeC163 = 0x48A3
MediaGaugeC164 = 0x48A4
MediaGaugeC165 = 0x48A5
MediaGaugeC166 = 0x48A6
MediaGaugeC167 = 0x48A7
MediaGaugeC168 = 0x48A8
MediaGaugeC169 = 0x48A9
MediaGaugeC170 = 0x48AA
MediaGaugeC171 = 0x48AB
MediaGaugeC172 = 0x48AC
MediaGaugeC173 = 0x48AD
MediaGaugeC174 = 0x48AE
MediaGaugeC175 = 0x48AF
MediaGaugeC176 = 0x48B0
MediaGaugeC177 = 0x48B1
MediaGaugeC178 = 0x48B2
MediaGaugeC179 = 0x48B3
MediaGaugeC180 = 0x48B4
MediaGaugeC181 = 0x48B5
MediaGaugeC182 = 0x48B6
MediaGaugeC183 = 0x48B7
MediaGaugeC184 = 0x48B8
MediaGaugeC185 = 0x48B9
MediaGaugeC186 = 0x48BA
MediaGaugeC187 = 0x48BB
MediaGaugeC188 = 0x48BC
MediaGaugeC189 = 0x48BD
MediaGaugeC190 = 0x48BE
MediaGaugeC191 = 0x48BF
MediaGaugeC192 = 0x48C0
MediaGaugeC193 = 0x48C1
MediaGaugeC194 = 0x48C2
MediaGaugeC195 = 0x48C3
MediaGaugeC196 = 0x48C4
MediaGaugeC197 = 0x48C5
MediaGaugeC198 = 0x48C6
MediaGaugeC199 = 0x48C7
MediaGaugeC200 = 0x48C8
MediaGaugeC201 = 0x48C9
MediaGaugeC202 = 0x48CA
MediaGaugeC203 = 0x48CB
MediaGaugeC204 = 0x48CC
MediaGaugeC205 = 0x48CD
MediaGaugeC206 = 0x48CE
MediaGaugeC207 = 0x48CF
MediaGaugeC208 = 0x48D0
MediaGaugeC209 = 0x48D1
MediaGaugeC210 = 0x48D2
MediaGaugeC211 = 0x48D3
MediaGaugeC212 = 0x48D4
MediaGaugeC213 = 0x48D5
MediaGaugeC214 = 0x48D6
MediaGaugeC215 = 0x48D7
MediaGaugeC216 = 0x48D8
MediaGaugeC217 = 0x48D9
MediaGaugeC218 = 0x48DA
MediaGaugeC219 = 0x48DB
MediaGaugeC220 = 0x48DC
MediaGaugeC221 = 0x48DD
MediaGaugeC222 = 0x48DE
MediaGaugeC223 = 0x48DF
MediaGaugeC224 = 0x48E0
MediaGaugeC225 = 0x48E1
MediaGaugeC226 = 0x48E2
MediaGaugeC227 = 0x48E3
MediaGaugeC228 = 0x48E4
MediaGaugeC229 = 0x48E5
MediaGaugeC230 = 0x48E6
MediaGaugeC231 = 0x48E7
MediaGaugeC232 = 0x48E8
MediaGaugeC233 = 0x48E9
MediaGaugeC234 = 0x48EA
MediaGaugeC235 = 0x48EB
MediaGaugeC236 = 0x48EC
MediaGaugeC237 = 0x48ED
MediaGaugeC238 = 0x48EE
MediaGaugeC239 = 0x48EF
MediaGaugeC240 = 0x48F0
MediaGaugeC241 = 0x48F1
MediaGaugeC242 = 0x48F2
MediaGaugeC243 = 0x48F3
MediaGaugeC244 = 0x48F4
MediaGaugeC245 = 0x48F5
MediaGaugeC246 = 0x48F6
MediaGaugeC247 = 0x48F7
MediaGaugeC248 = 0x48F8
MediaGaugeC249 = 0x48F9
MediaGaugeC250 = 0x48FA
MediaGaugeC251 = 0x48FB
MediaGaugeC252 = 0x48FC
MediaGaugeC253 = 0x48FD
MediaGaugeC254 = 0x48FE
MediaGaugeC255 = 0x48FF
# MediaGaugeD List
MediaGaugeD0 = 0x4900
MediaGaugeD1 = 0x4901
MediaGaugeD2 = 0x4902
MediaGaugeD3 = 0x4903
MediaGaugeD4 = 0x4904
MediaGaugeD5 = 0x4905
MediaGaugeD6 = 0x4906
MediaGaugeD7 = 0x4907
MediaGaugeD8 = 0x4908
MediaGaugeD9 = 0x4909
MediaGaugeD10 = 0x490A
MediaGaugeD11 = 0x490B
MediaGaugeD12 = 0x490C
MediaGaugeD13 = 0x490D
MediaGaugeD14 = 0x490E
MediaGaugeD15 = 0x490F
MediaGaugeD16 = 0x4910
MediaGaugeD17 = 0x4911
MediaGaugeD18 = 0x4912
MediaGaugeD19 = 0x4913
MediaGaugeD20 = 0x4914
MediaGaugeD21 = 0x4915
MediaGaugeD22 = 0x4916
MediaGaugeD23 = 0x4917
MediaGaugeD24 = 0x4918
MediaGaugeD25 = 0x4919
MediaGaugeD26 = 0x491A
MediaGaugeD27 = 0x491B
MediaGaugeD28 = 0x491C
MediaGaugeD29 = 0x491D
MediaGaugeD30 = 0x491E
MediaGaugeD31 = 0x491F
MediaGaugeD32 = 0x4920
MediaGaugeD33 = 0x4921
MediaGaugeD34 = 0x4922
MediaGaugeD35 = 0x4923
MediaGaugeD36 = 0x4924
MediaGaugeD37 = 0x4925
MediaGaugeD38 = 0x4926
MediaGaugeD39 = 0x4927
MediaGaugeD40 = 0x4928
MediaGaugeD41 = 0x4929
MediaGaugeD42 = 0x492A
MediaGaugeD43 = 0x492B
MediaGaugeD44 = 0x492C
MediaGaugeD45 = 0x492D
MediaGaugeD46 = 0x492E
MediaGaugeD47 = 0x492F
MediaGaugeD48 = 0x4930
MediaGaugeD49 = 0x4931
MediaGaugeD50 = 0x4932
MediaGaugeD51 = 0x4933
MediaGaugeD52 = 0x4934
MediaGaugeD53 = 0x4935
MediaGaugeD54 = 0x4936
MediaGaugeD55 = 0x4937
MediaGaugeD56 = 0x4938
MediaGaugeD57 = 0x4939
MediaGaugeD58 = 0x493A
MediaGaugeD59 = 0x493B
MediaGaugeD60 = 0x493C
MediaGaugeD61 = 0x493D
MediaGaugeD62 = 0x493E
MediaGaugeD63 = 0x493F
MediaGaugeD64 = 0x4940
MediaGaugeD65 = 0x4941
MediaGaugeD66 = 0x4942
MediaGaugeD67 = 0x4943
MediaGaugeD68 = 0x4944
MediaGaugeD69 = 0x4945
MediaGaugeD70 = 0x4946
MediaGaugeD71 = 0x4947
MediaGaugeD72 = 0x4948
MediaGaugeD73 = 0x4949
MediaGaugeD74 = 0x494A
MediaGaugeD75 = 0x494B
MediaGaugeD76 = 0x494C
MediaGaugeD77 = 0x494D
MediaGaugeD78 = 0x494E
MediaGaugeD79 = 0x494F
MediaGaugeD80 = 0x4950
MediaGaugeD81 = 0x4951
MediaGaugeD82 = 0x4952
MediaGaugeD83 = 0x4953
MediaGaugeD84 = 0x4954
MediaGaugeD85 = 0x4955
MediaGaugeD86 = 0x4956
MediaGaugeD87 = 0x4957
MediaGaugeD88 = 0x4958
MediaGaugeD89 = 0x4959
MediaGaugeD90 = 0x495A
MediaGaugeD91 = 0x495B
MediaGaugeD92 = 0x495C
MediaGaugeD93 = 0x495D
MediaGaugeD94 = 0x495E
MediaGaugeD95 = 0x495F
MediaGaugeD96 = 0x4960
MediaGaugeD97 = 0x4961
MediaGaugeD98 = 0x4962
MediaGaugeD99 = 0x4963
MediaGaugeD100 = 0x4964
MediaGaugeD101 = 0x4965
MediaGaugeD102 = 0x4966
MediaGaugeD103 = 0x4967
MediaGaugeD104 = 0x4968
MediaGaugeD105 = 0x4969
MediaGaugeD106 = 0x496A
MediaGaugeD107 = 0x496B
MediaGaugeD108 = 0x496C
MediaGaugeD109 = 0x496D
MediaGaugeD110 = 0x496E
MediaGaugeD111 = 0x496F
MediaGaugeD112 = 0x4970
MediaGaugeD113 = 0x4971
MediaGaugeD114 = 0x4972
MediaGaugeD115 = 0x4973
MediaGaugeD116 = 0x4974
MediaGaugeD117 = 0x4975
MediaGaugeD118 = 0x4976
MediaGaugeD119 = 0x4977
MediaGaugeD120 = 0x4978
MediaGaugeD121 = 0x4979
MediaGaugeD122 = 0x497A
MediaGaugeD123 = 0x497B
MediaGaugeD124 = 0x497C
MediaGaugeD125 = 0x497D
MediaGaugeD126 = 0x497E
MediaGaugeD127 = 0x497F
MediaGaugeD128 = 0x4980
MediaGaugeD129 = 0x4981
MediaGaugeD130 = 0x4982
MediaGaugeD131 = 0x4983
MediaGaugeD132 = 0x4984
MediaGaugeD133 = 0x4985
MediaGaugeD134 = 0x4986
MediaGaugeD135 = 0x4987
MediaGaugeD136 = 0x4988
MediaGaugeD137 = 0x4989
MediaGaugeD138 = 0x498A
MediaGaugeD139 = 0x498B
MediaGaugeD140 = 0x498C
MediaGaugeD141 = 0x498D
MediaGaugeD142 = 0x498E
MediaGaugeD143 = 0x498F
MediaGaugeD144 = 0x4990
MediaGaugeD145 = 0x4991
MediaGaugeD146 = 0x4992
MediaGaugeD147 = 0x4993
MediaGaugeD148 = 0x4994
MediaGaugeD149 = 0x4995
MediaGaugeD150 = 0x4996
MediaGaugeD151 = 0x4997
MediaGaugeD152 = 0x4998
MediaGaugeD153 = 0x4999
MediaGaugeD154 = 0x499A
MediaGaugeD155 = 0x499B
MediaGaugeD156 = 0x499C
MediaGaugeD157 = 0x499D
MediaGaugeD158 = 0x499E
MediaGaugeD159 = 0x499F
MediaGaugeD160 = 0x49A0
MediaGaugeD161 = 0x49A1
MediaGaugeD162 = 0x49A2
    MediaGaugeD163 = 0x49A3
#!/usr/bin/env python
# Source repository: Amrib24/aws-secure-environment-accelerator
import os
import boto3
import botocore
import json
import threading
import time
import sys
import argparse
import base64
import re
from tabulate import tabulate
from os import path
parser = argparse.ArgumentParser(
description="A development script that cleans up resources deployed by the accelerator. Use Administrator AWS credentials in the root account when running this script."
)
parser.add_argument('--AcceleratorPrefix', default='ASEA', help='The value set in AcceleratorPrefix')
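# Example invocation (a sketch: 'cleanup.py' is a placeholder for whatever this script is
# saved as, and Administrator credentials for the Organization management account must
# already be configured in the environment):
#   python cleanup.py --AcceleratorPrefix ASEA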
organizations = boto3.client("organizations")
sts = boto3.client("sts")
def get_accounts():
print("Accounts:")
all_aws_accounts = []
paginator = organizations.get_paginator('list_accounts')
page_iterator = paginator.paginate()
for aws_accounts in page_iterator:
tmp = map(lambda x: [x["Id"], x["Name"]], aws_accounts["Accounts"])
print(tabulate(list(tmp), headers=["Id", "Name"]))
all_aws_accounts = all_aws_accounts + list(aws_accounts["Accounts"])
return all_aws_accounts
def build_stack_data(accounts, regions, admin_role_name, root_account_name):
print("Stacks:")
result = {}
result["Accounts"] = []
result["Regions"] = regions
all_stacks = {}
for account in accounts:
cloudformation = None
roleArn = "arn:aws:iam::{accountId}:role/{roleName}".format(accountId=account["Id"], roleName=admin_role_name)
result["Accounts"].append(
{
"AccountId": account["Id"],
"AccountName": account["Name"],
"AdminRoleArn": roleArn
}
)
credentials = sts.assume_role(
RoleArn=roleArn,
RoleSessionName="AcceleratorCleanupScript"
)
region_stacks = {}
for region in regions:
print("Processing {} - {}".format(account["Name"], region))
cloudformation = boto3.client("cloudformation",
region_name=region,
aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
aws_session_token=credentials["Credentials"]["SessionToken"]
)
stacks = cloudformation.list_stacks(
StackStatusFilter=['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'DELETE_FAILED', 'DELETE_IN_PROGRESS', 'ROLLBACK_COMPLETE', 'ROLLBACK_FAILED']
)
region_stacks[region] = list(map(lambda x: {"StackName":x["StackName"],"StackId":x["StackId"], "StackStatus":x["StackStatus"]}, stacks["StackSummaries"]))
tmp = map(lambda x: [x["StackName"], "True" if "ParentId" in x else "", region, account["Id"], x["StackStatus"]], stacks["StackSummaries"])
print(tabulate(list(tmp), headers=["StackName", "IsNested", "Region", "AccountId", "StackStatus"]))
print()
all_stacks[account["Id"]] = region_stacks
result["AllStacks"] = all_stacks
with open('stacks.json', 'w') as outfile:
json.dump(result, outfile)
return all_stacks
def process_delete(all_stacks):
phases = [
"-Phase5",
"Phase4-HostedZonesAssc1",
"Phase4-RulesAsscociation1",
"-Phase4",
"Phase3-CentralVpcResolverEndpoints",
"-Phase3",
"Phase2-VpcEndpoints1",
"-Phase2",
"-Phase1",
"-Phase0",
"-Phase-1",
"-InitialSetup",
"{}-CDKToolkit".format(AcceleratorPrefix),
"{}-PipelineRole".format(AcceleratorPrefix),
]
# Process one phase at a time, but to all accounts through all regions
# For each Phase
# For each Account
# For each Region
# Look for a stack with name ending in the phase. What status is it in?
# Does it contain any S3 buckets? If yes, delete them first
# Wait until all done
for phase in phases:
print("\n\nProcessing '{}'".format(phase))
threads = list()
try:
print("Waiting for all Phase stack cleanup threads to finish...")
for account in all_stacks["Accounts"]:
for region in all_stacks["Regions"]:
for stack in all_stacks["AllStacks"][account["AccountId"]][region]:
if stack["StackName"].endswith(phase):
t = threading.Thread(target=thread_cloudformation_delete, args=(phase, region, stack["StackId"], account["AdminRoleArn"], account["AccountId"]))
threads.append(t)
t.start()
except:
print("Error!", sys.exc_info()[0], "occurred.")
finally:
for index, thread in enumerate(threads):
thread.join()
print("Done. All Phase stack cleanup threads finished.")
print("Done processing '{}'".format(phase))
def thread_cloudformation_delete(phase, region, stackid, admin_role, accountId):
print("TID-{} - Processing '{}' in {} {}".format(threading.get_ident(), stackid, accountId, region))
sts = boto3.client("sts")
try:
credentials = sts.assume_role(
RoleArn=admin_role,
RoleSessionName="AcceleratorCleanupScript"
)
cloudformation = boto3.client("cloudformation",
region_name=region,
aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
aws_session_token=credentials["Credentials"]["SessionToken"]
)
#Are there any S3 buckets?
resources = cloudformation.describe_stack_resources(StackName=stackid)
for resource in resources["StackResources"]:
if resource["ResourceType"] == "AWS::S3::Bucket" and resource["ResourceStatus"] != "DELETE_COMPLETE":
#delete all bucket contents first
print("TID-{} - S3 Bucket Resource '{}'".format(threading.get_ident(), resource["PhysicalResourceId"]))
delete_s3_bucket(region, credentials["Credentials"], resource["PhysicalResourceId"])
elif resource["ResourceType"] == "AWS::ElasticLoadBalancingV2::LoadBalancer" and resource["ResourceStatus"] != "DELETE_COMPLETE":
print("TID-{} - Checking ELB termination protection '{}'".format(threading.get_ident(), resource["PhysicalResourceId"]))
remove_elb_termination_block(region, credentials["Credentials"], resource["PhysicalResourceId"])
elif resource["ResourceType"] == "AWS::ECR::Repository" and resource["ResourceStatus"] != "DELETE_COMPLETE":
print("TID-{} - ECR Resource '{}".format(threading.get_ident(), resource["PhysicalResourceId"]))
remove_ecr_repository(region, credentials["Credentials"], resource["PhysicalResourceId"])
elif resource["ResourceType"] == "AWS::IAM::Role" and resource["ResourceStatus"] != "DELETE_COMPLETE":
print("TID-{} - IAM Role Permission Boundary check '{}'".format(threading.get_ident(), resource["PhysicalResourceId"]))
remove_permission_boundaries(region, credentials["Credentials"], resource["PhysicalResourceId"])
remove_permissions_special_case(region, credentials["Credentials"], resource["PhysicalResourceId"])
stack = cloudformation.describe_stacks(
StackName=stackid
)
for index, s in enumerate(stack["Stacks"]):
stack_name = s["StackId"]
if s["StackStatus"] != "DELETE_COMPLETE":
print("TID-{} - Deleting Stack Region: {}, StackName: {}, StackStatus: {}".format( threading.get_ident(),region, stack_name, s["StackStatus"]))
cloudformation.update_termination_protection(
EnableTerminationProtection=False,
StackName=stack_name
)
waiter = cloudformation.get_waiter('stack_delete_complete')
cloudformation.delete_stack(StackName=stack_name)
waiter.wait(StackName=stack_name)
#Did the stack delete fail?
stack_failed = stack_exists(cloudformation, stack_name, 'DELETE_FAILED')
print("TID-{} - Done. Deleting Stack Region: {}, StackName: {}, StackStatus: {}".format(threading.get_ident(), region, stack_name, s["StackStatus"]))
except botocore.exceptions.ClientError as err:
print('TID-{} Error Message: {}'.format(threading.get_ident(), err.response['Error']['Message']))
def stack_exists(client, name, required_status = 'CREATE_COMPLETE'):
try:
data = client.describe_stacks(StackName = name)
except botocore.exceptions.ClientError:
return False
return data['Stacks'][0]['StackStatus'] == required_status
def remove_ecr_repository(region, account_credentials, ecr_id):
ecr = boto3.client('ecr',
region_name=region,
aws_access_key_id=account_credentials['AccessKeyId'],
aws_secret_access_key=account_credentials['SecretAccessKey'],
aws_session_token=account_credentials['SessionToken']
)
print("TID-{} - Deleting ECR Repository '{}'".format(threading.get_ident(), ecr_id))
ecr.delete_repository(repositoryName=ecr_id, force=True)
print("TID-{} - Deleted ECR Repository '{}'".format(threading.get_ident(), ecr_id))
def remove_permissions_special_case(region, account_credentials, role_id):
if role_id.endswith("-Rsyslog-Role") or role_id.endswith("Firewall-Role"):
iam = boto3.client('iam',
region_name=region,
aws_access_key_id=account_credentials['AccessKeyId'],
aws_secret_access_key=account_credentials['SecretAccessKey'],
aws_session_token=account_credentials['SessionToken']
)
managed_policies = iam.list_attached_role_policies(RoleName=role_id)
for mpolicy in managed_policies['AttachedPolicies']:
print("TID-{} - Detaching policy {} from {}".format(threading.get_ident(), mpolicy['PolicyName'], role_id))
iam.detach_role_policy(RoleName=role_id, PolicyArn=mpolicy['PolicyArn'])
print("TID-{} - Detached policy {} from {}".format(threading.get_ident(), mpolicy['PolicyName'], role_id))
inline_policies = iam.list_role_policies(RoleName=role_id)
for ipolicy in inline_policies['PolicyNames']:
print("TID-{} - Deleting inline policy {} from {}".format(threading.get_ident(), ipolicy, role_id))
iam.delete_role_policy(RoleName=role_id, PolicyName=ipolicy)
print("TID-{} - Deleted inline policy {} from {}".format(threading.get_ident(), ipolicy, role_id))
def remove_permission_boundaries(region, account_credentials, role_id):
iam = boto3.client('iam',
region_name=region,
aws_access_key_id=account_credentials['AccessKeyId'],
aws_secret_access_key=account_credentials['SecretAccessKey'],
aws_session_token=account_credentials['SessionToken']
)
role = iam.get_role(RoleName=role_id)['Role']
# Is there a permission boundary
if 'PermissionsBoundary' in role:
print("TID-{} - Removing permission boundary from {}".format(threading.get_ident(), role_id))
iam.delete_role_permissions_boundary(RoleName=role_id)
print("TID-{} - Removed permission boundary from {}".format(threading.get_ident(), role_id))
else:
print("TID-{} - Role '{}' has no permission boundary".format(threading.get_ident(), role_id))
def remove_elb_termination_block(region, account_credentials, elb_id):
ec2 = boto3.client('elbv2',
region_name=region,
aws_access_key_id=account_credentials['AccessKeyId'],
aws_secret_access_key=account_credentials['SecretAccessKey'],
aws_session_token=account_credentials['SessionToken']
)
elb_attr = ec2.describe_load_balancer_attributes(LoadBalancerArn=elb_id)
for attr in elb_attr["Attributes"]:
if attr["Key"] == "deletion_protection.enabled" and bool(attr["Value"]) == True:
attr["Value"] = 'false'
ec2.modify_load_balancer_attributes(
LoadBalancerArn=elb_id,
Attributes=[attr]
)
print("TID-{} - Removed deletion protection for {}".format(threading.get_ident(), elb_id))
def delete_s3_bucket(region, account_credentials, bucket_name):
s3 = boto3.resource('s3',
region_name=region,
aws_access_key_id=account_credentials['AccessKeyId'],
aws_secret_access_key=account_credentials['SecretAccessKey'],
aws_session_token=account_credentials['SessionToken']
)
s3_bucket = s3.Bucket(bucket_name)
if s3_bucket in s3.buckets.all():
print("TID-{} - Emptying bucket (this may take a while) {}".format(threading.get_ident(), bucket_name))
s3_bucket.object_versions.all().delete()
print("TID-{} Done. Emptying bucket (this may take a while) {}".format(threading.get_ident(), bucket_name))
print('TID-{} Deleting bucket {}'.format(threading.get_ident(), bucket_name))
try:
s3_bucket.delete()
print('TID-{} Done. Deleting bucket {}'.format(threading.get_ident(), bucket_name))
except botocore.exceptions.ClientError as e:
print("TID-{} Error while trying to delete S3 bucket {}, it should be empty by now so if you see BucketNotEmpty check the bucket in AWS console and delete it manually".format(threading.get_ident(), bucket_name))
print(e)
def delete_scps(credentials, region):
organizations = boto3.client("organizations",
region_name=region,
aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
aws_session_token=credentials["Credentials"]["SessionToken"]
)
scps = organizations.list_policies(
Filter='SERVICE_CONTROL_POLICY'
)
print("Deleting SCPs...")
for scp in scps["Policies"]:
scp_name = scp["Name"]
if scp_name.startswith(AcceleratorPrefix):
print("Detaching SCP '{}'".format(scp["Name"]))
targets = organizations.list_targets_for_policy(PolicyId=scp["Id"])
for target in targets["Targets"]:
organizations.detach_policy(
PolicyId=scp["Id"],
TargetId=target["TargetId"]
)
print("Done. Detaching SCP '{}'".format(scp["Name"]))
print("Deleting SCP '{}'".format(scp["Name"]))
organizations.delete_policy(
PolicyId=scp["Id"]
)
print("Done. Deleting SCP '{}'".format(scp["Name"]))
print("Done. Deleting SCPs...")
def root_cleanup(credentials, region):
print("delete stack sets")
cloudformation = boto3.client("cloudformation",
region_name=region,
aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
aws_session_token=credentials["Credentials"]["SessionToken"]
)
stacksets = cloudformation.list_stack_sets()
for stackset in stacksets["Summaries"]:
name = stackset["StackSetName"]
if name.startswith(AcceleratorPrefix):
instances = cloudformation.list_stack_instances(StackSetName=name)
instances_accounts = list(map(lambda x: x["Account"], instances["Summaries"]))
instances_regions = list(set(map(lambda x: x["Region"], instances["Summaries"])))
if len(instances_accounts) > 0:
cloudformation.delete_stack_instances(
StackSetName=name,
RetainStacks=False,
Accounts=instances_accounts,
Regions=instances_regions
)
while True:
instances = cloudformation.list_stack_instances(StackSetName=name)
num = len(instances["Summaries"])
print("Instances left: {}".format(num))
if num == 0:
break
time.sleep(15)
waiter = cloudformation.get_waiter('stack_delete_complete')
cloudformation.delete_stack(StackName=name)
waiter.wait(StackName=name)
print("Done. Stack {} deleted".format(name))
cloud_trail_name = AcceleratorPrefix + "-Org-Trail"
cloudtrail = boto3.client("cloudtrail",
region_name=region,
aws_access_key_id=credentials["Credentials"]["AccessKeyId"],
aws_secret_access_key=credentials["Credentials"]["SecretAccessKey"],
aws_session_token=credentials["Credentials"]["SessionToken"]
)
print("Deleting {}".format(cloud_trail_name))
try:
cloudtrail.delete_trail(Name=cloud_trail_name)
print("Done. Deleting {}".format(cloud_trail_name))
except botocore.exceptions.ClientError as err:
print('Error Message: {}'.format(err.response['Error']['Message']))
cleanup_ecr(credentials, region)
cleanup_dynamodb(credentials, region)
def cleanup():
print("cleanup")
supported_regions = []
admin_role = ""
root_account_name = ""
root_region = ""
security_account_name = ""
isALZorCT = False
with open('config.json') as json_file:
config = json.load(json_file)
supported_regions = config["global-options"]["supported-regions"]
admin_role = config["global-options"]["organization-admin-role"]
config_root_account_name = config["global-options"]["aws-org-management"]["account"]
root_region = config["global-options"]["aws-org-management"]["region"]
if root_region == "${HOME_REGION}":
my_session = boto3.session.Session()
root_region = my_session.region_name
print("Setting region to '{}'".format(root_region))
config_security_account_name = config["global-options"]["central-security-services"]["account"]
root_account_name = config["mandatory-account-configs"][config_root_account_name]["account-name"]
security_account_name = config["mandatory-account-configs"][config_security_account_name]["account-name"]
isALZorCT = config["global-options"]["ct-baseline"] or ("alz-baseline" in config["global-options"] and config["global-options"]["alz-baseline"])
if isALZorCT:
print("This cleanup script is designed to retract all components deployed in the accelerator and is intended for development use. It isn't tested for cleanup with baseline configurations.")
return
print("RootAccount: {}", root_account_name)
all_stacks = None
if path.exists("stacks.json"):
print("Loading stacks.json...")
with open('stacks.json') as stacks_json_file:
all_stacks = json.load(stacks_json_file)
print("Done")
else:
aws_accounts = get_accounts()
all_stacks = build_stack_data(aws_accounts, supported_regions, admin_role, root_account_name)
print("Review stacks.json")
print("*** SSO must be cleaned up manually before continuing ***")
return
root_admin_arn_role = None
for a in all_stacks["Accounts"]:
if a["AccountName"] == root_account_name:
root_admin_arn_role = a["AdminRoleArn"]
root_credentials = sts.assume_role(
RoleArn=root_admin_arn_role,
RoleSessionName="AcceleratorCleanupScript"
)
delete_scps(root_credentials, root_region)
cleanup_route53_resolver_load_config()
cleanup_directory_sharing_load_config()
process_delete(all_stacks)
root_cleanup(root_credentials, root_region)
security_credentials = None
for a in all_stacks["Accounts"]:
if a["AccountName"] == security_account_name:
security_role_arn = a["AdminRoleArn"]
security_credentials = sts.assume_role(
RoleArn=security_role_arn,
RoleSessionName="AcceleratorCleanupScript"
)
break
if security_credentials is not None:
# Source repository: checkly/pulumi-checkly
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['CheckGroupArgs', 'CheckGroup']
@pulumi.input_type
class CheckGroupArgs:
def __init__(__self__, *,
activated: pulumi.Input[bool],
concurrency: pulumi.Input[int],
locations: pulumi.Input[Sequence[pulumi.Input[str]]],
alert_channel_subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input['CheckGroupAlertChannelSubscriptionArgs']]]] = None,
alert_settings: Optional[pulumi.Input['CheckGroupAlertSettingsArgs']] = None,
api_check_defaults: Optional[pulumi.Input['CheckGroupApiCheckDefaultsArgs']] = None,
double_check: Optional[pulumi.Input[bool]] = None,
environment_variables: Optional[pulumi.Input[Mapping[str, Any]]] = None,
local_setup_script: Optional[pulumi.Input[str]] = None,
local_teardown_script: Optional[pulumi.Input[str]] = None,
muted: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
runtime_id: Optional[pulumi.Input[str]] = None,
setup_snippet_id: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
teardown_snippet_id: Optional[pulumi.Input[int]] = None,
use_global_alert_settings: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a CheckGroup resource.
:param pulumi.Input[bool] activated: Determines if the checks in the group are running or not.
:param pulumi.Input[int] concurrency: Determines how many checks are run concurrently when triggering a check group from CI/CD or through the API.
:param pulumi.Input[Sequence[pulumi.Input[str]]] locations: An array of one or more data center locations where to run the checks.
:param pulumi.Input[bool] double_check: Setting this to `true` will trigger a retry when a check fails from the failing region and another, randomly selected
region before marking the check as failed.
:param pulumi.Input[Mapping[str, Any]] environment_variables: Key/value pairs for setting environment variables during check execution. These are only relevant for browser checks.
Use global environment variables whenever possible.
:param pulumi.Input[str] local_setup_script: A valid piece of Node.js code to run in the setup phase of an API check in this group.
:param pulumi.Input[str] local_teardown_script: A valid piece of Node.js code to run in the teardown phase of an API check in this group.
:param pulumi.Input[bool] muted: Determines if any notifications will be sent out when a check in this group fails and/or recovers.
:param pulumi.Input[str] name: The name of the check group.
:param pulumi.Input[str] runtime_id: The id of the runtime to use for this group.
:param pulumi.Input[int] setup_snippet_id: An ID reference to a snippet to use in the setup phase of an API check.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: Tags for organizing and filtering checks.
:param pulumi.Input[int] teardown_snippet_id: An ID reference to a snippet to use in the teardown phase of an API check.
:param pulumi.Input[bool] use_global_alert_settings: When true, the account level alert settings will be used, not the alert setting defined on this check group.
"""
pulumi.set(__self__, "activated", activated)
pulumi.set(__self__, "concurrency", concurrency)
pulumi.set(__self__, "locations", locations)
if alert_channel_subscriptions is not None:
pulumi.set(__self__, "alert_channel_subscriptions", alert_channel_subscriptions)
if alert_settings is not None:
pulumi.set(__self__, "alert_settings", alert_settings)
if api_check_defaults is not None:
pulumi.set(__self__, "api_check_defaults", api_check_defaults)
if double_check is not None:
pulumi.set(__self__, "double_check", double_check)
if environment_variables is not None:
pulumi.set(__self__, "environment_variables", environment_variables)
if local_setup_script is not None:
pulumi.set(__self__, "local_setup_script", local_setup_script)
if local_teardown_script is not None:
pulumi.set(__self__, "local_teardown_script", local_teardown_script)
if muted is not None:
pulumi.set(__self__, "muted", muted)
if name is not None:
pulumi.set(__self__, "name", name)
if runtime_id is not None:
pulumi.set(__self__, "runtime_id", runtime_id)
if setup_snippet_id is not None:
pulumi.set(__self__, "setup_snippet_id", setup_snippet_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if teardown_snippet_id is not None:
pulumi.set(__self__, "teardown_snippet_id", teardown_snippet_id)
if use_global_alert_settings is not None:
pulumi.set(__self__, "use_global_alert_settings", use_global_alert_settings)
@property
@pulumi.getter
def activated(self) -> pulumi.Input[bool]:
"""
Determines if the checks in the group are running or not.
"""
return pulumi.get(self, "activated")
@activated.setter
def activated(self, value: pulumi.Input[bool]):
pulumi.set(self, "activated", value)
@property
@pulumi.getter
def concurrency(self) -> pulumi.Input[int]:
"""
Determines how many checks are run concurrently when triggering a check group from CI/CD or through the API.
"""
return pulumi.get(self, "concurrency")
@concurrency.setter
def concurrency(self, value: pulumi.Input[int]):
pulumi.set(self, "concurrency", value)
@property
@pulumi.getter
def locations(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
An array of one or more data center locations where to run the checks.
"""
return pulumi.get(self, "locations")
@locations.setter
def locations(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "locations", value)
@property
@pulumi.getter(name="alertChannelSubscriptions")
def alert_channel_subscriptions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CheckGroupAlertChannelSubscriptionArgs']]]]:
return pulumi.get(self, "alert_channel_subscriptions")
@alert_channel_subscriptions.setter
def alert_channel_subscriptions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['CheckGroupAlertChannelSubscriptionArgs']]]]):
pulumi.set(self, "alert_channel_subscriptions", value)
@property
@pulumi.getter(name="alertSettings")
def alert_settings(self) -> Optional[pulumi.Input['CheckGroupAlertSettingsArgs']]:
return pulumi.get(self, "alert_settings")
@alert_settings.setter
def alert_settings(self, value: Optional[pulumi.Input['CheckGroupAlertSettingsArgs']]):
pulumi.set(self, "alert_settings", value)
@property
@pulumi.getter(name="apiCheckDefaults")
def api_check_defaults(self) -> Optional[pulumi.Input['CheckGroupApiCheckDefaultsArgs']]:
return pulumi.get(self, "api_check_defaults")
@api_check_defaults.setter
def api_check_defaults(self, value: Optional[pulumi.Input['CheckGroupApiCheckDefaultsArgs']]):
pulumi.set(self, "api_check_defaults", value)
@property
@pulumi.getter(name="doubleCheck")
def double_check(self) -> Optional[pulumi.Input[bool]]:
"""
Setting this to `true` will trigger a retry when a check fails from the failing region and another, randomly selected
region before marking the check as failed.
"""
return pulumi.get(self, "double_check")
@double_check.setter
def double_check(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "double_check", value)
@property
@pulumi.getter(name="environmentVariables")
def environment_variables(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Key/value pairs for setting environment variables during check execution. These are only relevant for browser checks.
Use global environment variables whenever possible.
"""
return pulumi.get(self, "environment_variables")
@environment_variables.setter
def environment_variables(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "environment_variables", value)
@property
@pulumi.getter(name="localSetupScript")
def local_setup_script(self) -> Optional[pulumi.Input[str]]:
"""
A valid piece of Node.js code to run in the setup phase of an API check in this group.
"""
return pulumi.get(self, "local_setup_script")
@local_setup_script.setter
def local_setup_script(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "local_setup_script", value)
@property
@pulumi.getter(name="localTeardownScript")
def local_teardown_script(self) -> Optional[pulumi.Input[str]]:
"""
A valid piece of Node.js code to run in the teardown phase of an API check in this group.
"""
return pulumi.get(self, "local_teardown_script")
@local_teardown_script.setter
def local_teardown_script(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "local_teardown_script", value)
@property
@pulumi.getter
def muted(self) -> Optional[pulumi.Input[bool]]:
"""
Determines if any notifications will be sent out when a check in this group fails and/or recovers.
"""
return pulumi.get(self, "muted")
@muted.setter
def muted(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "muted", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the check group.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="runtimeId")
def runtime_id(self) -> Optional[pulumi.Input[str]]:
"""
The id of the runtime to use for this group.
"""
return pulumi.get(self, "runtime_id")
@runtime_id.setter
def runtime_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "runtime_id", value)
@property
@pulumi.getter(name="setupSnippetId")
def setup_snippet_id(self) -> Optional[pulumi.Input[int]]:
"""
An ID reference to a snippet to use in the setup phase of an API check.
"""
return pulumi.get(self, "setup_snippet_id")
@setup_snippet_id.setter
def setup_snippet_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "setup_snippet_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Tags for organizing and filtering checks.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="teardownSnippetId")
def teardown_snippet_id(self) -> Optional[pulumi.Input[int]]:
"""
An ID reference to a snippet to use in the teardown phase of an API check.
"""
return pulumi.get(self, "teardown_snippet_id")
@teardown_snippet_id.setter
def teardown_snippet_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "teardown_snippet_id", value)
@property
@pulumi.getter(name="useGlobalAlertSettings")
def use_global_alert_settings(self) -> Optional[pulumi.Input[bool]]:
"""
When true, the account level alert settings will be used, not the alert setting defined on this check group.
"""
return pulumi.get(self, "use_global_alert_settings")
@use_global_alert_settings.setter
def use_global_alert_settings(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_global_alert_settings", value)
@pulumi.input_type
class _CheckGroupState:
def __init__(__self__, *,
activated: Optional[pulumi.Input[bool]] = None,
alert_channel_subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input['CheckGroupAlertChannelSubscriptionArgs']]]] = None,
alert_settings: Optional[pulumi.Input['CheckGroupAlertSettingsArgs']] = None,
api_check_defaults: Optional[pulumi.Input['CheckGroupApiCheckDefaultsArgs']] = None,
concurrency: Optional[pulumi.Input[int]] = None,
double_check: Optional[pulumi.Input[bool]] = None,
environment_variables: Optional[pulumi.Input[Mapping[str, Any]]] = None,
local_setup_script: Optional[pulumi.Input[str]] = None,
local_teardown_script: Optional[pulumi.Input[str]] = None,
locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
muted: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
runtime_id: Optional[pulumi.Input[str]] = None,
setup_snippet_id: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
teardown_snippet_id: Optional[pulumi.Input[int]] = None,
use_global_alert_settings: Optional[pulumi.Input[bool]] = None):
"""
Input properties used for looking up and filtering CheckGroup resources.
:param pulumi.Input[bool] activated: Determines if the checks in the group are running or not.
:param pulumi.Input[int] concurrency: Determines how many checks are run concurrently when triggering a check group from CI/CD or through the API.
:param pulumi.Input[bool] double_check: Setting this to `true` will trigger a retry when a check fails from the failing region and another, randomly selected
region before marking the check as failed.
:param pulumi.Input[Mapping[str, Any]] environment_variables: Key/value pairs for setting environment variables during check execution. These are only relevant for browser checks.
Use global environment variables whenever possible.
:param pulumi.Input[str] local_setup_script: A valid piece of Node.js code to run in the setup phase of an API check in this group.
:param pulumi.Input[str] local_teardown_script: A valid piece of Node.js code to run in the teardown phase of an API check in this group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] locations: An array of one or more data center locations where to run the checks.
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 01-12-2020 #
# Author(s): <NAME>, <NAME> #
# E-mail: <EMAIL> #
# Website: avalanche.continualai.org #
################################################################################
from typing import Optional, Sequence, List, Union
from torch.nn import Module, CrossEntropyLoss
from torch.optim import Optimizer, SGD
from avalanche.models.pnn import PNN
from avalanche.training.plugins.evaluation import default_logger
from avalanche.training.plugins import StrategyPlugin, CWRStarPlugin, \
ReplayPlugin, GDumbPlugin, LwFPlugin, AGEMPlugin, GEMPlugin, EWCPlugin, \
EvaluationPlugin, SynapticIntelligencePlugin, CoPEPlugin, \
GSS_greedyPlugin, LFLPlugin
from avalanche.training.strategies.base_strategy import BaseStrategy
class Naive(BaseStrategy):
""" Naive finetuning.
The simplest (and least effective) Continual Learning strategy. Naive just
incrementally fine-tunes a single model without employing any method
to counteract the catastrophic forgetting of previous knowledge.
This strategy does not use task identities.
Naive is easy to set up and its results are commonly used to show the worst
performing baseline.
"""
def __init__(self, model: Module, optimizer: Optimizer,
criterion=CrossEntropyLoss(),
train_mb_size: int = 1, train_epochs: int = 1,
eval_mb_size: int = None, device=None,
plugins: Optional[List[StrategyPlugin]] = None,
evaluator: EvaluationPlugin = default_logger, eval_every=-1):
"""
Creates an instance of the Naive strategy.
:param model: The model.
:param optimizer: The optimizer to use.
:param criterion: The loss criterion to use.
:param train_mb_size: The train minibatch size. Defaults to 1.
:param train_epochs: The number of training epochs. Defaults to 1.
:param eval_mb_size: The eval minibatch size. Defaults to 1.
:param device: The device to use. Defaults to None (cpu).
:param plugins: Plugins to be added. Defaults to None.
:param evaluator: (optional) instance of EvaluationPlugin for logging
and metric computations.
:param eval_every: the frequency of the calls to `eval` inside the
training loop. -1 disables the evaluation. 0 means `eval` is called
only at the end of the learning experience. Values >0 mean that
`eval` is called every `eval_every` epochs and at the end of the
learning experience.
"""
super().__init__(
model, optimizer, criterion,
train_mb_size=train_mb_size, train_epochs=train_epochs,
eval_mb_size=eval_mb_size, device=device, plugins=plugins,
evaluator=evaluator, eval_every=eval_every)
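# Illustrative usage sketch (not part of this module). `model` and `benchmark` are
# placeholders for a torch Module and an Avalanche benchmark:
#
#   from torch.optim import SGD
#   strategy = Naive(model, SGD(model.parameters(), lr=0.001),
#                    train_mb_size=32, train_epochs=1, eval_mb_size=32)
#   for experience in benchmark.train_stream:
#       strategy.train(experience)
#       strategy.eval(benchmark.test_stream)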
class PNNStrategy(BaseStrategy):
""" Progressive Neural Network strategy. """
def __init__(self, num_layers: int, in_features: int,
hidden_features_per_column: int,
lr: float, momentum=0, dampening=0,
weight_decay=0, nesterov=False, adapter='mlp',
criterion=CrossEntropyLoss(),
train_mb_size: int = 1, train_epochs: int = 1,
eval_mb_size: int = None, device=None,
plugins: Optional[List[StrategyPlugin]] = None,
evaluator: EvaluationPlugin = default_logger, eval_every=-1):
""" Progressive Neural Network strategy.
:param num_layers: Number of layers for the PNN architecture.
:param in_features: Number of input features.
:param hidden_features_per_column: Number of hidden units for
each column of the PNN architecture.
:param lr: learning rate
:param momentum: momentum factor (default: 0)
:param weight_decay: weight decay (L2 penalty) (default: 0)
:param dampening: dampening for momentum (default: 0)
:param nesterov: enables Nesterov momentum (default: False)
:param adapter: adapter type. One of {'linear', 'mlp'} (default='mlp')
:param criterion: The loss criterion to use.
:param train_mb_size: The train minibatch size. Defaults to 1.
:param train_epochs: The number of training epochs. Defaults to 1.
:param eval_mb_size: The eval minibatch size. Defaults to 1.
:param device: The device to use. Defaults to None (cpu).
:param plugins: Plugins to be added. Defaults to None.
:param evaluator: (optional) instance of EvaluationPlugin for logging
and metric computations.
:param eval_every: the frequency of the calls to `eval` inside the
training loop. -1 disables the evaluation. 0 means `eval` is called
only at the end of the learning experience. Values >0 mean that
`eval` is called every `eval_every` epochs and at the end of the
learning experience.
"""
model = PNN(
num_layers=num_layers,
in_features=in_features,
hidden_features_per_column=hidden_features_per_column,
adapter=adapter
)
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum,
weight_decay=weight_decay, dampening=dampening,
nesterov=nesterov)
super().__init__(
model, optimizer, criterion,
train_mb_size=train_mb_size, train_epochs=train_epochs,
eval_mb_size=eval_mb_size, device=device, plugins=plugins,
evaluator=evaluator, eval_every=eval_every)
class CWRStar(BaseStrategy):
""" CWR* Strategy. """
def __init__(self, model: Module, optimizer: Optimizer, criterion,
cwr_layer_name: str, train_mb_size: int = 1,
train_epochs: int = 1, eval_mb_size: int = None, device=None,
plugins: Optional[List[StrategyPlugin]] = None,
evaluator: EvaluationPlugin = default_logger, eval_every=-1):
"""
:param model: The model.
:param optimizer: The optimizer to use.
:param criterion: The loss criterion to use.
:param cwr_layer_name: name of the CWR layer. If set to None, the last
fully connected layer will be used.
:param train_mb_size: The train minibatch size. Defaults to 1.
:param train_epochs: The number of training epochs. Defaults to 1.
:param eval_mb_size: The eval minibatch size. Defaults to 1.
:param device: The device to use. Defaults to None (cpu).
:param plugins: Plugins to be added. Defaults to None.
:param evaluator: (optional) instance of EvaluationPlugin for logging
and metric computations.
:param eval_every: the frequency of the calls to `eval` inside the
training loop. -1 disables the evaluation. 0 means `eval` is called
only at the end of the learning experience. Values >0 mean that
`eval` is called every `eval_every` epochs and at the end of the
learning experience.
"""
cwsp = CWRStarPlugin(model, cwr_layer_name, freeze_remaining_model=True)
if plugins is None:
plugins = [cwsp]
else:
plugins.append(cwsp)
super().__init__(
model, optimizer, criterion,
train_mb_size=train_mb_size, train_epochs=train_epochs,
eval_mb_size=eval_mb_size, device=device, plugins=plugins,
evaluator=evaluator, eval_every=eval_every)
class Replay(BaseStrategy):
""" Experience replay strategy.
See ReplayPlugin for more details.
This strategy does not use task identities.
"""
def __init__(self, model: Module, optimizer: Optimizer, criterion,
mem_size: int = 200,
train_mb_size: int = 1, train_epochs: int = 1,
eval_mb_size: int = None, device=None,
plugins: Optional[List[StrategyPlugin]] = None,
evaluator: EvaluationPlugin = default_logger, eval_every=-1):
""" Init.
:param model: The model.
:param optimizer: The optimizer to use.
:param criterion: The loss criterion to use.
:param mem_size: replay buffer size.
:param train_mb_size: The train minibatch size. Defaults to 1.
:param train_epochs: The number of training epochs. Defaults to 1.
:param eval_mb_size: The eval minibatch size. Defaults to 1.
:param device: The device to use. Defaults to None (cpu).
:param plugins: Plugins to be added. Defaults to None.
:param evaluator: (optional) instance of EvaluationPlugin for logging
and metric computations.
:param eval_every: the frequency of the calls to `eval` inside the
training loop. -1 disables the evaluation. 0 means `eval` is called
only at the end of the learning experience. Values >0 mean that
`eval` is called every `eval_every` epochs and at the end of the
learning experience.
"""
rp = ReplayPlugin(mem_size)
if plugins is None:
plugins = [rp]
else:
plugins.append(rp)
super().__init__(
model, optimizer, criterion,
train_mb_size=train_mb_size,
train_epochs=train_epochs,
eval_mb_size=eval_mb_size, device=device,
plugins=plugins,
evaluator=evaluator, eval_every=eval_every)
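# Note: the wrapper strategies in this module all follow the same pattern: build the
# strategy-specific plugin (here ReplayPlugin), append it to the user-supplied plugin
# list, and delegate everything else to BaseStrategy.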
class GSS_greedy(BaseStrategy):
""" Experience replay strategy.
See ReplayPlugin for more details.
This strategy does not use task identities.
"""
def __init__(self, model: Module, optimizer: Optimizer, criterion,
mem_size: int = 200, mem_strength=1, input_size=[],
train_mb_size: int = 1, train_epochs: int = 1,
eval_mb_size: int = None, device=None,
plugins: Optional[List[StrategyPlugin]] = None,
evaluator: EvaluationPlugin = default_logger, eval_every=-1):
"""Init.
:param model: The model.
:param optimizer: The optimizer to use.
:param criterion: The loss criterion to use.
:param mem_size: replay buffer size.
:param mem_strength: memory strength value passed to GSS_greedyPlugin.
:param input_size: shape of the input samples, passed to GSS_greedyPlugin.
:param train_mb_size: The train minibatch size. Defaults to 1.
:param train_epochs: The number of training epochs. Defaults to 1.
:param eval_mb_size: The eval minibatch size. Defaults to 1.
:param device: The device to use. Defaults to None (cpu).
:param plugins: Plugins to be added. Defaults to None.
:param evaluator: (optional) instance of EvaluationPlugin for logging
and metric computations.
:param eval_every: the frequency of the calls to `eval` inside the
training loop. -1 disables the evaluation. 0 means `eval` is called
only at the end of the learning experience. Values >0 mean that
`eval` is called every `eval_every` epochs and at the end of the
learning experience.
"""
rp = GSS_greedyPlugin(mem_size=mem_size,
mem_strength=mem_strength, input_size=input_size)
if plugins is None:
plugins = [rp]
else:
plugins.append(rp)
super().__init__(
model, optimizer, criterion,
train_mb_size=train_mb_size, train_epochs=train_epochs,
eval_mb_size=eval_mb_size, device=device, plugins=plugins,
evaluator=evaluator, eval_every=eval_every)
class GDumb(BaseStrategy):
""" GDumb strategy.
See GDumbPlugin for more details.
This strategy does not use task identities.
"""
def __init__(self, model: Module, optimizer: Optimizer, criterion,
mem_size: int = 200,
train_mb_size: int = 1, train_epochs: int = 1,
eval_mb_size: int = None, device=None,
plugins: Optional[List[StrategyPlugin]] = None,
evaluator: EvaluationPlugin = default_logger, eval_every=-1):
"""Init.
:param model: The model.
:param optimizer: The optimizer to use.
:param criterion: The loss criterion to use.
:param mem_size: replay buffer size.
:param mem_size: replay buffer size.
The remaining parameters have the same meaning as in the strategies above.
"""
gdumb = GDumbPlugin(mem_size)
if plugins is None:
plugins = [gdumb]
else:
plugins.append(gdumb)
super().__init__(
model, optimizer, criterion,
train_mb_size=train_mb_size, train_epochs=train_epochs,
eval_mb_size=eval_mb_size, device=device, plugins=plugins,
evaluator=evaluator, eval_every=eval_every)
import datetime
import json
from unittest import mock
import pytest
from simple_salesforce import SalesforceMalformedRequest
from cumulusci.tasks.push.push_api import (
BasePushApiObject,
MetadataPackage,
MetadataPackageVersion,
PackagePushError,
PackagePushJob,
PackagePushRequest,
PackageSubscriber,
SalesforcePushApi,
batch_list,
)
NAME = "Chewbacca"
SF_ID = "033xxxxxxxxx"
PUSH_API = "push_api"
NAMESPACE = "namespace"
ORG_KEY = "bar"
@pytest.fixture
def sf_push_api():
return SalesforcePushApi(sf=mock.Mock(), logger=mock.Mock())
@pytest.fixture
def metadata_package():
return MetadataPackage(
push_api=mock.MagicMock(), name=NAME, sf_id=SF_ID, namespace=NAMESPACE
)
@pytest.fixture
def metadata_package_version(metadata_package):
return MetadataPackageVersion(
push_api=mock.MagicMock(),
package=metadata_package,
name=NAME,
sf_id=SF_ID,
state="Beta",
major="1",
minor="2",
patch="3",
build="4",
)
@pytest.fixture
def package_push_job():
return PackagePushJob(
push_api=mock.MagicMock(),
request="",
org="00DS0000003TJJ6MAO",
status="Succeeded",
sf_id=SF_ID,
)
@pytest.fixture
def package_subscriber():
return PackageSubscriber(
push_api=mock.MagicMock(),
version="1.2.3",
status="Succeeded",
org_name="foo",
org_key="bar",
org_status="Succeeded",
org_type="Sandbox",
sf_id=SF_ID,
)
@pytest.fixture
def package_push_error():
return PackagePushError(
push_api="foo",
sf_id=SF_ID,
job="Foo",
severity="high",
error_type="bar",
title="foo_bar",
message="The foo hit the bar",
details="foo bar, foo, foo bar",
)
@pytest.fixture
def package_push_request():
return PackagePushRequest(
push_api=mock.MagicMock(),
version="1.2.3",
start_time="12:03",
status="Succeeded",
sf_id=SF_ID,
)
def test_base_push_format_where():
base_obj = BasePushApiObject()
field_name = "id_field"
sf_id = "006000000XXX000"
where_clause = "id=001000000XXX000"
base_obj.sf_id = sf_id
returned = base_obj.format_where(field_name, where_clause)
assert "{} = '{}' AND ({})".format(field_name, sf_id, where_clause) == returned
returned = base_obj.format_where(field_name, None)
assert "{} = '{}'".format(field_name, sf_id) == returned
def test_metadata_package_init():
package = MetadataPackage(PUSH_API, NAME)
assert package.push_api == PUSH_API
assert package.sf_id is None
assert package.name == NAME
assert package.namespace is None
package = MetadataPackage(PUSH_API, NAME, SF_ID, NAMESPACE)
assert package.push_api == PUSH_API
assert package.sf_id == SF_ID
assert package.name == NAME
assert package.namespace == NAMESPACE
def test_metadata_package_get_versions(metadata_package):
expected = f"MetadataPackageId = '{SF_ID}'"
metadata_package.get_package_versions()
metadata_package.push_api.get_package_versions.assert_called_once_with(
expected, None
)
def test_metadata_package_get_version_objs(metadata_package):
expected = f"MetadataPackageId = '{SF_ID}'"
metadata_package.get_package_version_objs()
metadata_package.push_api.get_package_version_objs.assert_called_once_with(
expected, None
)
def test_metadata_package_get_versions_by_id(metadata_package):
expected = f"MetadataPackageId = '{SF_ID}'"
metadata_package.get_package_versions_by_id()
metadata_package.push_api.get_package_versions_by_id.assert_called_once_with(
expected, None
)
def test_metadata_package_version_version_number(metadata_package_version):
expected = "1.2.3 (Beta 4)"
actual = metadata_package_version.version_number
assert expected == actual
def test_sf_push_return_query_records(sf_push_api):
query = "SELECT Id FROM Account"
records = ["record 1", "record 2", "record 3"]
results = {"totalSize": 10, "records": records}
sf_push_api.sf.query_all.return_value = results
returned = sf_push_api.return_query_records(query)
assert len(records) == len(returned)
results["totalSize"] = 0
sf_push_api.sf.query_all.return_value = results
returned = sf_push_api.return_query_records(query)
assert [] == returned
def test_sf_push_format_where(sf_push_api):
returned = sf_push_api.format_where_clause(None)
assert "" == returned
default_where = "Id='001000000XXX000'"
sf_push_api.default_where = {"Account": default_where}
returned = sf_push_api.format_where_clause(None, "Object__c")
assert "" == returned
returned = sf_push_api.format_where_clause(None, "Account")
assert " WHERE ({})".format(default_where) == returned
where = "IsDeleted=False"
returned = sf_push_api.format_where_clause(where)
assert " WHERE {}".format(where) == returned
# No default where for Object__C
returned = sf_push_api.format_where_clause(where, "Object__c")
assert " WHERE {}".format(where) == returned
returned = sf_push_api.format_where_clause(where, "Account")
assert " WHERE ({}) AND ({})".format(default_where, where) == returned
def test_sf_push_add_query_limit(sf_push_api):
query = "SELECT Id FROM Account"
limit = 100
returned = sf_push_api.add_query_limit(query, limit)
assert "{} LIMIT {}".format(query, limit) == returned
def test_sf_push_add_query_no_limit(sf_push_api):
query = "SELECT Id FROM Account"
returned = sf_push_api.add_query_limit(query, None)
assert f"{query}" == returned
def test_sf_push_get_packages(sf_push_api):
query = "SELECT id, name, namespaceprefix FROM MetadataPackage WHERE Name='foo'"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_packages("Name='foo'", None)
sf_push_api.return_query_records.assert_called_once_with(query)
def test_sf_push_get_package_objs(sf_push_api, metadata_package):
sf_push_api.get_packages = mock.MagicMock()
packages = {
"Id": metadata_package.sf_id,
"Name": metadata_package.name,
"NamespacePrefix": metadata_package.namespace,
}
sf_push_api.get_packages.return_value = [packages]
actual_result_list = sf_push_api.get_package_objs("Name='foo'", None)
assert len(actual_result_list) == 1
actual_result = actual_result_list[0]
assert packages["Id"] == actual_result.sf_id
assert packages["Name"] == actual_result.name
assert packages["NamespacePrefix"] == actual_result.namespace
def test_sf_push_get_packages_by_id(sf_push_api, metadata_package):
sf_push_api.get_package_objs = mock.MagicMock()
sf_push_api.get_package_objs.return_value = [metadata_package]
package_expected = {metadata_package.sf_id: metadata_package}
package_result = sf_push_api.get_packages_by_id("Name='foo'", None)
sf_push_api.get_package_objs.assert_called_with("Name='foo'", None)
assert package_expected == package_result
def test_sf_push_get_package_versions(sf_push_api):
query = "SELECT Id, Name, MetadataPackageId, ReleaseState, MajorVersion, MinorVersion, PatchVersion, BuildNumber FROM MetadataPackageVersion WHERE Name='foo' ORDER BY MajorVersion DESC, MinorVersion DESC, PatchVersion, BuildNumber DESC"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_package_versions("Name='foo'", None)
sf_push_api.return_query_records.assert_called_once_with(query)
def test_sf_push_get_package_version_objs(sf_push_api):
query = "SELECT Id, Name, MetadataPackageId, ReleaseState, MajorVersion, MinorVersion, PatchVersion, BuildNumber FROM MetadataPackageVersion WHERE Name='foo' ORDER BY MajorVersion DESC, MinorVersion DESC, PatchVersion, BuildNumber DESC"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_package_version_objs("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_package_version_by_id(sf_push_api, metadata_package_version):
sf_push_api.get_package_version_objs = mock.MagicMock()
sf_push_api.get_package_version_objs.return_value = [metadata_package_version]
package_expected = {metadata_package_version.sf_id: metadata_package_version}
package_result = sf_push_api.get_package_versions_by_id("Name='foo'", None)
sf_push_api.get_package_version_objs.assert_called_with("Name='foo'", None)
assert package_expected == package_result
def test_sf_push_get_subscribers(sf_push_api):
query = "SELECT Id, MetadataPackageVersionId, InstalledStatus, OrgName, OrgKey, OrgStatus, OrgType from PackageSubscriber WHERE Name='foo'"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_subscribers("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_subscriber_objs(sf_push_api):
query = "SELECT Id, MetadataPackageVersionId, InstalledStatus, OrgName, OrgKey, OrgStatus, OrgType from PackageSubscriber WHERE Name='foo'"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_subscriber_objs("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_subscribers_by_org_key(sf_push_api, package_subscriber):
sf_push_api.get_subscriber_objs = mock.MagicMock()
sf_push_api.get_subscriber_objs.return_value = [package_subscriber]
package_expected = {package_subscriber.org_key: package_subscriber}
package_result = sf_push_api.get_subscribers_by_org_key("Name='foo'", None)
sf_push_api.get_subscriber_objs.assert_called_with("Name='foo'", None)
assert package_expected == package_result
def test_sf_push_get_push_requests(sf_push_api):
query = "SELECT Id, PackageVersionId, ScheduledStartTime, Status FROM PackagePushRequest WHERE Name='foo' ORDER BY ScheduledStartTime DESC"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_push_requests("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_push_request_objs(sf_push_api):
query = "SELECT Id, PackageVersionId, ScheduledStartTime, Status FROM PackagePushRequest WHERE Name='foo' ORDER BY ScheduledStartTime DESC"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_push_request_objs("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_push_requests_by_id(sf_push_api, package_push_request):
sf_push_api.get_push_request_objs = mock.MagicMock()
sf_push_api.get_push_request_objs.return_value = [package_push_request]
package_expected = {package_push_request.sf_id: package_push_request}
package_result = sf_push_api.get_push_requests_by_id("Name='foo'", None)
sf_push_api.get_push_request_objs.assert_called_with("Name='foo'", None)
assert package_expected == package_result
def test_sf_push_get_push_jobs(sf_push_api):
query = "SELECT Id, PackagePushRequestId, SubscriberOrganizationKey, Status FROM PackagePushJob WHERE Name='foo'"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_push_jobs("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_push_job_objs(sf_push_api):
query = "SELECT Id, PackagePushRequestId, SubscriberOrganizationKey, Status FROM PackagePushJob WHERE Name='foo'"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_push_job_objs("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_push_jobs_by_id(sf_push_api, package_push_job):
sf_push_api.get_push_job_objs = mock.MagicMock()
sf_push_api.get_push_job_objs.return_value = [package_push_job]
package_expected = {package_push_job.sf_id: package_push_job}
package_result = sf_push_api.get_push_jobs_by_id("Name='foo'", None)
sf_push_api.get_push_job_objs.assert_called_with("Name='foo'", None)
assert package_expected == package_result
def test_sf_push_get_push_errors(sf_push_api):
query = "SELECT Id, PackagePushJobId, ErrorSeverity, ErrorType, ErrorTitle, ErrorMessage, ErrorDetails FROM PackagePushError WHERE Name='foo'"
sf_push_api.return_query_records = mock.MagicMock()
sf_push_api.get_push_errors("Name='foo'", None)
sf_push_api.return_query_records.assert_called_with(query)
def test_sf_push_get_push_error_objs(sf_push_api, package_push_job, package_push_error):
sf_push_api.get_push_job_objs = mock.MagicMock()
sf_push_api.get_push_job_objs.return_value = [package_push_job]
sf_push_api.lazy = ["jobs"]
sf_push_api.get_push_errors = mock.MagicMock()
record = {
"ErrorSeverity": "high",
"ErrorType": "bar",
"ErrorTitle": "foo_bar",
"ErrorMessage": "The foo hit the bar",
"ErrorDetails": "foo bar, foo, foo bar",
"Id": SF_ID,
"PackagePushJobId": "pkg_push_id",
}
sf_push_api.get_push_errors.return_value = [record]
actual_result_list = sf_push_api.get_push_error_objs("Name='foo'", None)
sf_push_api.get_push_job_objs.assert_called_once_with(where="Id = 'pkg_push_id'")
assert len(actual_result_list) == 1
actual_result = actual_result_list[0]
assert record["ErrorMessage"] == actual_result.message
assert record["ErrorDetails"] == actual_result.details
assert record["Id"] == actual_result.sf_id
assert actual_result.job == package_push_job
def test_sf_push_get_push_errors_by_id(sf_push_api, package_push_error):
sf_push_api.get_push_error_objs = mock.MagicMock()
sf_push_api.get_push_error_objs.return_value = [package_push_error]
push_error_expected = {package_push_error.sf_id: package_push_error}
push_error_result = sf_push_api.get_push_errors_by_id("Name='foo'", None)
sf_push_api.get_push_error_objs.assert_called_with("Name='foo'", None)
assert push_error_expected == push_error_result
def test_sf_push_cancel_push_request(sf_push_api):
ref_id = "12"
sf_push_api.cancel_push_request(ref_id)
sf_push_api.sf.PackagePushRequest.update.assert_called_once_with(
ref_id, {"Status": "Canceled"}
)
def test_sf_push_run_push_request(sf_push_api):
ref_id = "12"
sf_push_api.run_push_request(ref_id)
sf_push_api.sf.PackagePushRequest.update.assert_called_once_with(
ref_id, {"Status": "Pending"}
)
def test_sf_push_create_push_request(sf_push_api, metadata_package_version):
sf_push_api.batch_size = 1
push_request_id = "0DV?xxxxxx?"
version_id = metadata_package_version.sf_id = "0KM?xxxxx?"
orgs = ["00D000000001", "00D000000002"]
batch_0, batch_1 = [orgs[0]], [orgs[1]]
start_time = datetime.datetime.now()
sf_push_api.sf.PackagePushRequest.create.return_value = {"id": push_request_id}
sf_push_api.sf.base_url = "url"
sf_push_api._add_batch = mock.MagicMock(side_effect=[batch_0, batch_1])
actual_id, actual_org_count = sf_push_api.create_push_request(
metadata_package_version, orgs, start_time
)
sf_push_api.sf.PackagePushRequest.create.assert_called_once_with(
{"PackageVersionId": version_id, "ScheduledStartTime": start_time.isoformat()}
)
assert mock.call(batch_0, push_request_id) in sf_push_api._add_batch.call_args_list
assert mock.call(batch_1, push_request_id) in sf_push_api._add_batch.call_args_list
assert push_request_id == actual_id
assert 2 == actual_org_count
def test_sf_push_add_push_batch(sf_push_api, metadata_package_version):
push_request_id = "0DV?xxxxxx?"
metadata_package_version.sf_id = "0KM?xxxxx?"
orgs = ["00D000000001", "00D000000002"]
expected_records_json = json.dumps(
{
"records": [
{
"attributes": {"type": "PackagePushJob", "referenceId": orgs[0]},
"PackagePushRequestId": push_request_id,
"SubscriberOrganizationKey": orgs[0],
},
{
"attributes": {"type": "PackagePushJob", "referenceId": orgs[1]},
"PackagePushRequestId": push_request_id,
"SubscriberOrganizationKey": orgs[1],
},
]
}
)
sf_push_api.sf.base_url = "base_url/"
returned_batch = sf_push_api._add_batch(orgs, push_request_id)
sf_push_api.sf._call_salesforce.assert_called_once_with(
"POST", "base_url/composite/tree/PackagePushJob", data=expected_records_json
)
assert ["00D000000001", "00D000000002"] == returned_batch
def test_sf_push_add_push_batch_retry(sf_push_api, metadata_package_version):
push_request_id = "0DV?xxxxxx?"
orgs = ["00D000000001", "00D000000002", "00D000000003"]
retry_response = {
"results": [
{
"referenceId": orgs[0],
"errors": [
{"message": "Something bad has happened! Whatever could it be?"}
],
}
]
}
duplicate_response = {
"results": [
{
"referenceId": orgs[1],
"errors": [{"message": "", "statusCode": "DUPLICATE_VALUE"}],
}
]
}
invalid_response = {
"results": [
{
"referenceId": orgs[2],
"errors": [{"message": "", "statusCode": "INVALID_OPERATION"}],
}
]
}
sf_push_api.sf.base_url = "base_url/"
sf_push_api.sf._call_salesforce.side_effect = [
SalesforceMalformedRequest(
"base_url/composite/tree/PackagePushJob",
400,
"resource_name",
retry_response,
),
SalesforceMalformedRequest(
"base_url/composite/tree/PackagePushJob",
400,
"resource_name",
duplicate_response,
),
SalesforceMalformedRequest(
"base_url/composite/tree/PackagePushJob",
400,
"resource_name",
invalid_response,
),
[],
]
returned_batch = sf_push_api._add_batch(orgs, push_request_id)
assert [orgs[0]] == returned_batch # only remaining org should be retry-able
assert 4 == sf_push_api.sf._call_salesforce.call_count
def test_push_batch_list():
data = ["zero", "one", "two", "three"]
actual_batch_list = batch_list(data, 1)
expected_batch_list = [["zero"], ["one"], ["two"], ["three"]]
assert expected_batch_list == actual_batch_list
actual_batch_list = batch_list(data, 2)
expected_batch_list = [["zero", "one"], ["two", "three"]]
assert expected_batch_list == actual_batch_list
actual_batch_list = batch_list(data, 3)
expected_batch_list = [["zero", "one", "two"], ["three"]]
assert expected_batch_list == actual_batch_list
actual_batch_list = batch_list(data, 4)
expected_batch_list = [["zero", "one", "two", "three"]]
assert expected_batch_list == actual_batch_list
actual_batch_list = batch_list(data, 5)
assert expected_batch_list == actual_batch_list
actual_batch_list = batch_list([], 2)
expected_batch_list = []
assert expected_batch_list == actual_batch_list
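# A minimal batch_list implementation consistent with the assertions above (the real
# implementation lives in cumulusci.tasks.push.push_api and may differ in details):
#
#   def batch_list(data, batch_size):
#       return [data[i:i + batch_size] for i in range(0, len(data), batch_size)]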
def test_version_init(metadata_package):
package = MetadataPackageVersion(
push_api=PUSH_API,
package=metadata_package,
name=NAME,
sf_id=SF_ID,
state="Beta",
major="1",
minor="2",
patch="3",
build="4",
)
assert package.push_api == PUSH_API
assert package.package == metadata_package
assert package.name == NAME
assert package.sf_id == SF_ID
assert package.state == "Beta"
assert package.major == "1"
assert package.minor == "2"
assert package.patch == "3"
assert package.build == "4"
def test_version_number(metadata_package_version):
actual = metadata_package_version.version_number
expected = "1.2.3 (Beta 4)"
assert actual == expected
def test_metadata_package_get_subscribers(metadata_package_version):
expected = f"MetadataPackageVersionId = '{SF_ID}'"
metadata_package_version.get_subscribers()
metadata_package_version.push_api.get_subscribers.assert_called_once_with(
expected, None
)
def test_metadata_package_get_subscriber_objects(metadata_package_version):
expected = f"MetadataPackageVersionId = '{SF_ID}'"
metadata_package_version.get_subscriber_objs()
metadata_package_version.push_api.get_subscriber_objs.assert_called_once_with(
expected, None
)
# Source repository: formalabstracts/CNL-CIC, file: 2parser/parser_combinator.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 14:39:04 2021
@author: <NAME>
This file contains parser combinators.
The original name was parse.py, but that conflicts with a python lib.
The combinators should preserves tokens; nothings should be discarded.
The bottom part of this module contains
various parsers for words, phrases, and delimited matters.
"""
import logging
from exception import ParseError, ParseNoCatch, DataProcess, ErrorItem
import copy, lib, msg, word_lists
import tokenlib
import inner_lexer
import lexer
from tokenlib import (mk_stream , Etok, Item)
from ply.lex import LexToken
import sample
#import sample
from state import state
logger = logging.getLogger(__name__)
def pstream(p:'Parse',s):
"""For reunning test cases on different parsers.
p is a parser
s:str is input to parser
output is the output of p applied to s.
"""
#try:
return (p.process(mk_stream(s)).acc)
#except ParseError as pe:
# logger.exception(pe)
# raise pe
#raise #traceback.print_stack()
class Parse:
"""base class for parsers.
Many of the standard combinators are given.
process:Item->Item processes one or more tokens from the item stream.
We refer to process as the 'item transformer'.
A sample is a function that returns a random parse
on input tokenlib.random.
It is a function sample:acc -> acc transforming accumulated toks.
"""
def __init__(self,process,nonterminal,production='',sample=None,show_up=('',[])):
"""r:Item->Item, repr:str"""
self._process = process
self.nonterminal = nonterminal
self.production = production
self._sample = sample
self.show_up=show_up
def __repr__(self):
if self.production:
return self.nonterminal + '-' + self.production
else:
return self.nonterminal
empty_item = Item(stream=[],pos=0,acc=None)
def sample(self):
if not(self._sample):
raise ParseError(ErrorItem(item=None,nonterminal=f'sample not installed: {self.nonterminal}-{self.production}'))
return self._sample()
def process(self,item):
# sample mode
if state.mk_sample:
acc = self.sample()
return tokenlib.update(acc,item)
# process mode, the front door.
try:
return self._process(item)
except ParseError as pe:
ei = pe.error_stack
nt = ei.nonterminal
if self.nonterminal or self.production:
nt = f'({nt}; {self})'
#As a last resort try a backdoor, nonterminal as terminal...
#the backdoor should never raise an exception.
if not(tokenlib.eof(item)):
item1 = tokenlib.next_item(item)
if item1.acc.value == self.nonterminal:
acc = Etok.etok(item1.acc)
return tokenlib.update(acc,item1)
raise ParseError(ErrorItem(item=ei.item, nonterminal=nt))
def preprocess(self,pre):
"""Use pre if possible for process."""
def f(item):
try:
return pre(item)
except:
self.process(item)
return Parse(f,nonterminal=self.nonterminal,production=self.production,show_up=self.show_up)
def update(self,d):
for key,value in d.items():
setattr(self,key,value)
return self
def setsample(self,s):
self._sample = s
return self
def name(self,nonterminal,production=''):
self.nonterminal=nonterminal
if production:
self.production=production
#msg.setdefault(production,self.production)
return self
def show(self,level):
"""DEBUG Not implemented. Generate the parse tree (in words) of the parser,
down to given level"""
if (level==0):
return (f'{self}',[])
if self.show_up==('',[]):
return ('',[])
(comb,ps)=self.show_up
if comb[0:4]=='pass':
return ps[0].show(level)
if (level==1):
pass ## DEBUG FIX
def next_token(): # constructor for next token
"""The work horse of parsing.
Parser to fetch the next token.
"""
def f(item):
return tokenlib.next_item(item)
return Parse(f,'.',sample=sample.next_token)
def finished():
"""fails if tokens remain in stream, otherwise do nothing"""
def f(item):
if item.pos < len(item.stream):
raise ParseError(ErrorItem(item=item,nonterminal='$'))
return item
return Parse(f,'$',sample=sample.none)
def identity(): #was nothing
"""Does no parsing, identity parser"""
return Parse(lambda item:item,'identity',sample=sample.none)
def fail():
"""This parser always fails"""
def f(item):
raise ParseError(ErrorItem(item=item,nonterminal='fail'))
return Parse(f,'fail')
# def nil(self):
# """replaces output with nil list"""
# return self.treat(lambda _:[]).name('nil','')
def probe(self):
"""run parser but then undo"""
def f(item):
try:
self.process(item)
return item
except ParseError as pe:
raise ParseError(ErrorItem(item=item,nonterminal='-probe')) from pe
return Parse(f,self.nonterminal,self.production,sample=sample.none,show_up=('pass',[self]))
def nocatch(self,nt=''):
"""No catch error if failure"""
def f(item):
try:
return self.process(item)
except ParseError as pe:
nonterminal= msg.setdefault(nt,'nocatch')
raise ParseNoCatch(ErrorItem(item=item,nonterminal=nonterminal)) from pe
return Parse(f,'nocatch',sample=self.sample,show_up=('pass',[self]))
def commit(self,probe:'Parse',msg='') -> 'Parse':
"""if trial_parse does not fail, discard,
then apply pr without catching"""
def f(item):
probe.process(item)
return self.nocatch(msg).process(item)
return Parse(f,self.nonterminal,self.production,sample=self.sample)
# more directly
# try:
# return pr.process(item)
# except pe:
# try:
# probe.process(item)
# raise NoCatch(pe.args[0])
# except:
# raise pe
def reparse(self,p2):
"""Run parser as a reparser on list of accumulated tokens.
If accumulated tokens == [], then do nothing.
All tokens must be consumed.
"""
def f(item):
try:
item0 = self.process(item)
acc = item0.acc
if not acc:
return item
item1 = tokenlib.Item(stream=acc,pos=0,acc=None)
item2 = (p2 + Parse.finished()).treat(lib.fst).process(item1)
item3 = tokenlib.update(item2.acc,item0)
return item3
except ParseError as pe:
raise ParseError(ErrorItem(item=item,nonterminal='reparse-'+p2.nonterminal)) from pe
return Parse(f,self.nonterminal,self.production,show_up=('reparse',[p2]))
def reparse_list(self,p2):
"""Run parser p2 as reparser on each accumulated list entry.
All tokens must be consumed."""
def f(item):
try:
item1 = self.process(item)
acc = item1.acc
its1 = [tokenlib.Item(stream=a,pos=0,acc=None) for a in acc]
acc2 = [(p2 + Parse.finished()).treat(lib.fst).process(it).acc for it in its1]
item3 = tokenlib.update(acc2,item1)
return item3
except ParseError as pe:
raise ParseError(ErrorItem(item=item,nonterminal='reparse_list-'+p2.nonterminal)) from pe
return Parse(f,self.nonterminal,self.production,show_up=('reparse_list',[p2]))
def __add__(self,other):
"""combine two parsers in succession, returning pair of results."""
def f(item:tokenlib.Item):
item1 = self.process(item)
acc1 = item1.acc
#try:
item2 = other.name(f'(+{other})').process(tokenlib.update(None,item1))
#except ParseError:
# raise ParseError(ErrorItem(item=item1,nonterminal= f'{other.nonterminal}-{other.production}'))
return tokenlib.update((acc1,item2.acc),item2)
if (self.show_up[0]=='add'):
show_up = ('add',self.show_up[1]+[other])
else:
show_up = ('add',[self,other])
return Parse(f,'',sample=sample.add_sample(self,other),show_up=show_up)
def __or__(self,other):
"""try first parser then next. Lower precedence than +"""
def f(item):
try:
return self.process(item)
except ParseError as pe1:
try:
return other.process(item)
except ParseError as pe2:
#debug:print(f'pe1={pe1.args}')
parse_error1 = pe1.error_stack #args[0]
item1 = parse_error1.item
item2 = pe2.error_stack.item # args[0].item
if item2.pos > item1.pos: #raise the most progressed
raise pe2
raise pe1
if (self.show_up[0]=='or'):
show_up = ('or',self.show_up[1]+[other])
else:
show_up = ('or',[self,other])
return Parse(f,'',sample=sample.or_sample(self,other),show_up=show_up)
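# Illustrative sketch: `+` sequences two parsers and accumulates a pair, while `|`
# tries the left parser first and falls back to the right one. The token values
# below are placeholders:
#
#   two = Parse.next_token() + Parse.next_token()   # acc becomes (tok1, tok2)
#   either = Parse.next_token().if_value('x') | Parse.next_token().if_value('y')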
def treat(self,treatment,name=''):
"""apply treatment to parser output.
The treatment transforms the accumulator."""
def f(item):
item1 = self.process(item)
try:
item2 = tokenlib.update(treatment(item1.acc),item1)
return item2
except ParseError as pe:
nonterminal=msg.setdefault(name,'treat')
raise ParseError(ErrorItem(item=item,nonterminal=nonterminal)) from pe
pr = Parse(f,self.nonterminal,self.production,sample=sample.treat_sample(self,treatment),show_up=('pass',[self]))
if name:
return pr.name(name)
return pr
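# Illustrative sketch: treat() post-processes the accumulator, e.g. extracting the
# token value instead of returning the LexToken itself (the input string is a placeholder):
#
#   pstream(Parse.next_token().treat(lambda tok: tok.value), 'True')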
def _many_nosep(self):
"""parse zero or more times"""
def f(item):
try:
item1 = self.process(item)
except (ParseError, StopIteration):
return tokenlib.update([],item) #all returns must be a list
acc1 = item1.acc
#
item2 = f(tokenlib.update(None,item1)) #this doesn't fail
return tokenlib.update([acc1]+item2.acc,item2)
show_up=(f'{self}*',self.show_up[1])
return Parse(f,self.nonterminal+'*',sample=sample.many(self,None),show_up=show_up)
def atleast(self,n):
"""parse at least n times.
Output acc is a list
"""
def f(item):
if n < 1:
item1 = self.many().process(item)
return item1 #acc is a list
else:
item1 = (self + Parse.atleast(self,n-1)).treat(lib.prepend).process(item)
return item1
if n==1:
supp = ''
else:
supp = f'{n}'
return Parse(f,f'{self.nonterminal}+'+supp,sample=sample.atleast(self,n))
def _plus_nosep(self):
"""parse at least once"""
return self.atleast(1)
def plus(self,sep=None):
"""Sequence of at least one parse with
optional separation sep
Output is a list
If sep, then non sep items are obtained by slice [0::2]
"""
if not(sep):
return self._plus_nosep()
def f(acc):
(x,xs) = acc
return [x]+ lib.flatten(xs)
return (self +
(sep + self).many()).treat(f).name('plus',self.nonterminal).setsample(sample.plus(self,sep))
def many(self,sep=None):
"""sequence of parses with optional separation sep
"""
def f(_):
return []
if not(sep):
return self._many_nosep()
return (self.plus(sep) | Parse.identity().treat(f)).setsample(sample.many(self,sep))
def possibly(self):
"""zero or one parses. It never fails.
acc left unchanged (None), if no match."""
def f(item):
try:
return self.process(item)
except (ParseError,StopIteration):
return item
return Parse(f,'?',sample=sample.possibly(self))
def if_test(self,p): #was some
"""Next token passes boolean test or fail"""
def f(item):
item1 = self.process(item)
if p(item1.acc):
return item1
else:
raise ParseError(ErrorItem(item=item,nonterminal=f'{self.nonterminal}-if-{self.production}'))
return Parse(f,'if',sample=sample.if_test(self,p))
def if_value(self,v): #was a
"""parse if next token has value v or fail"""
def p(tok):
return tok.value == v
return self.name(self.nonterminal,v).if_test(p).setsample(sample.if_value(v))
def if_rawvalue(self,v):
"""parse if token has rawvalue v or fail.
>>> pstream(Parse.next_token().if_rawvalue('True'),'True')
LexToken(WORD,'true',1,0)
"""
def p(tok):
return (lexer.rawvalue(tok) == v)
return self.name(self.nonterminal,v).if_test(p).setsample(sample.if_rawvalue(v))
def if_types(self,ts):
"""parse if next type is in ts or fail"""
def p(tok):
return tok.type in ts
return self.name(list(ts)[0]).if_test(p).setsample(sample.if_types(ts))
# class methods
def all(prs):
"""sequentially parse a list of parsers and return list of results"""
def f(item):
if not prs:
return tokenlib.update([],item)
else:
item1 = prs[0].process(item)
acc1 = item1.acc
item2 = Parse.all(prs[1:]).process(tokenlib.update(None,item1))
return tokenlib.update([acc1]+item2.acc,item2)
return Parse(f,'all',sample=sample.all_sample(prs))
def first(prs): #was parse_some
"""parse first in a list that does not fail"""
def f(item):
raise ParseError(ErrorItem(item=item,nonterminal='first-empty'))
if not prs:
return Parse(f,'first')
return Parse.__or__(prs[0],Parse.first(prs[1:])).name('first').setsample(sample.first(prs))
# def gen_first(prs_gen,args):
# """Repeat (lazy) parse generator until first non-failure.
# Yields of generator function prs_gen should be a parser.
# Generator formed by prs_gen(*args).
# Deprecated. Use LazyParser instead.
# """
# def f(item):
# gen = prs_gen(*args)
# #print(f'\nentering first on {item.stream[item.pos].value}\n')
# item_max = item
# while True:
# try:
# prs = next(gen)
# #print(f'{prs}--start on {item.stream[item.pos].value}')
# item1 = prs.process(item)
# del gen
# #print(f'{prs}--works')
# return item1
# except ParseError as pe:
# #print(f'{prs}--fails')
# item_e = pe.args[0]
# if item_e.pos > item_max.pos:
# item_max = item_e
# pass
# except StopIteration:
# #print(f'{prs}--stop')
# del gen
# raise ParseError(ErrorItem(item=item,nonterminal='gen_first'))
# return Parse(f,'gen_first')
# Repository: selinozdas/ObsCo
from helpers import quick_sort,get_variances
from pprint import pprint
from db import mongo
import numpy as np
import pickle
from sklearn.externals import joblib
from sentiment import vect
from util import remove_common_adjectives
filename = 'obsco_model.sav'
model = joblib.load(open(filename, 'rb'))
def fetchUser(name="", userId=-1) -> list:
"""
Fetch user with the given ID from DB.
Keyword arguments:
name -- name of the user which can be used for search (default "")
userId -- unique id of the user (default -1)
Return Value:
results -- list of users with the given credentials
"""
users = mongo.db.users
results = []
# No query for invalid calls
if (name == "" and userId == -1):
return []
# function call with userId
elif (userId != -1):
for entry in users.find({'id':userId},{'_id':0}):
if (int(entry["id"]) == int(userId)):
results.append(entry)
# function call with only name
elif (str(name) != ""):
split_name = "".join(name.split())
split_name = split_name.lower()
for entry in users.find(({},{'_id':0})):
temp_entry = entry["name"].lower()
temp_entry = "".join(temp_entry.split())
if (split_name in temp_entry):
results.append(entry)
# if no result can be found
if (len(results) == 0):
return []
return results
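# Usage sketch (hypothetical data, using the document fields assumed above):
#   fetchUser(userId=3)          -> [{'id': 3, 'name': 'Jane Doe', ...}]
#   fetchUser(name="jane  doe")  -> same match; names are compared lower-cased
#                                   and with all whitespace removed
#   fetchUser()                  -> [] (no credentials supplied)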
def getSkill(userId, skill=-1) -> list:
"""
    Finds the specified skill information of a user; if no skill id is given, returns all skills of the user.
Keyword Arguments:
userId -- unique id of the user (non-optional)
skill -- unique id of the skill (default -1)
Return Value:
skill_temp -- skill information if skill id is given else all skills of the given user
"""
# fetch user
try:
user = fetchUser(userId=userId)
except:
user = []
skill_temp = -1
# get skills if user is found
if (len(user) != 0):
for u in user:
if (skill != -1):
for entry in u["skills"]:
if (skill == entry["id"]):
skill_temp = entry
if (skill_temp == -1):
return "No such skill exist for the given user"
else:
return skill_temp
else:
skill_temp = u["skills"]
for i in skill_temp:
name = getSkillName(i['id'])
i['name'] = name
return skill_temp
def getGroup(group: int, name="") -> list:
"""
Lists the members of the group.
Keyword Arguments:
group -- unique id of the group (non-optional)
name -- name of the group for search queries (default "")
Return Value:
user_list -- list of users in the group
"""
groups = mongo.db.groups.find({'id':group},{'_id':0})
userID_list = []
user_list = []
for entry in groups:
if entry["id"] == group:
userID_list = userID_list + entry["members"]
if len(userID_list) != 0:
for entry in userID_list:
x = fetchUser(userId=entry)
user_list = user_list + x
return user_list
def addSkill(name):
skill_nu = getSkillLength() + 1
entry = {'id':skill_nu, 'name':name,'members':[]}
try:
response = mongo.db.skills.insert_one(entry)
except:
        return 'Yetenek eklenemedi'  # Turkish: "The skill could not be added"
    return 'Yetenek başarıyla eklendi'  # Turkish: "The skill was added successfully"
def getGroupMembers(group):
groups = mongo.db.groups.find({'id':group},{'_id':0})
userID_list = []
user_list = []
for entry in groups:
if entry["id"] == group:
userID_list = userID_list + entry["members"]
if len(userID_list) != 0:
for entry in userID_list:
x = fetchUser(userId=entry)
y = []
for i in x:
temp = {'id':i['id'],'name':i['name']}
y.append(temp)
user_list = user_list + y
return user_list
def canSee(userId,group):
user_list = fetchUser(userId=userId)
val = 0
user = user_list[0]
groups = user['groups']
for g in groups:
if g['id'] == group:
val = g['leaders']
return val
def getGroupName( group:int):
groups = mongo.db.groups.find({'id':group},{'_id':0})
group_list = [i for i in groups]
return group_list[0]['name']
def getSkillName( skill:int):
'''
returns the name of the skill with given id
'''
skills = mongo.db.skills.find({'id':skill},{'_id':0})
skill_list = [i for i in skills]
return skill_list[0]['name']
def analysisInfo(userid:int):
'''
Returns the information that is going to be used for recommender
'''
members = getGroups(userid)
member_list = []
for member in members:
entry = {member['id']: member['skills']}
member_list.append(entry)
return member_list
def getSkillLength():
'''
Returns the total number of skills
'''
skills = mongo.db.skills.find({},{'_id':0})
skills = [skill for skill in skills]
return(len(skills))
def getSkillList():
'''
Returns a list of dictionary of the skills with {id:name} key value pairs
'''
skills = mongo.db.skills.find({},{'_id':0})
result = []
for entry in skills:
temp = {}
temp['id'] = entry['id']
temp['name'] = entry['name']
result.append(temp)
return result
def recommender(info,skills,mem_count):
column_nu = getSkillLength()
member_nu = len(info)
matrix = np.zeros(shape = (member_nu,column_nu),dtype = int)
column_names = list(range(1,column_nu+1))
row_names = []
for entry in info:
row_names= row_names + list([i for i in entry.keys()])
skill_list = []
for entry in info:
skill_list= skill_list + list([i for i in entry.values()])
row = 0
for member in skill_list:
for skill in member:
column = skill['id']-1
matrix[row,column]= skill['value']
row += 1
variances = get_variances(matrix)
column_name_map = {}
for i in range(len(column_names)):
column_name_map[column_names[i]] = i
skill_indexes = []
for i in range(len(skills)):
if skills[i] in column_name_map:
skill_indexes.append(column_name_map[skills[i]])
if len(skill_indexes) == 0:
print("No skills found")
return []
ranks = []
for i in range(member_nu):
rank = 0
for s in skill_indexes:
rank += matrix[i][s] * variances[s]
ranks.append(rank)
rank_index = quick_sort(ranks, is_ascending=False)
if mem_count > member_nu:
mem_count = member_nu
result = []
for i in range(mem_count):
entry = {row_names[rank_index[i]]: ranks[i]}
result.append(entry)
return result
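# Worked sketch of the ranking above (hypothetical numbers): with two members
# and requested skill ids [2, 5], suppose the member x skill matrix holds the
# values 4 and 1 (member A) and 2 and 5 (member B) in those columns, and
# get_variances returns weights 0.8 and 0.3 for them. Then
#   rank(A) = 4*0.8 + 1*0.3 = 3.5
#   rank(B) = 2*0.8 + 5*0.3 = 3.1
# so member A is recommended first; quick_sort(..., is_ascending=False)
# supplies that descending order.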
def recommend(group,skills,mem_nu):
    '''
    Recommends up to mem_nu members, ranked by the requested skill ids,
    using the recommender above.
    '''
info = analysisInfo(group)
recommended = recommender(info,skills,mem_nu)
members = []
for i in recommended:
user_list = fetchUser(userId=list(i.keys())[0])
user = user_list[0]
temp = {}
temp['id'] = user['id']
temp['name'] = user['name']
temp['recommended'] = float(format(list(i.values())[0],'.2f'))
members.append(temp)
return members
def getTotalReputation(id):
relations_list = mongo.db.relations.find({},{'_id':0})
relations = [rel for rel in relations_list if id == rel['voted']]
count = len(relations)
sum_result = 0
for rel in relations:
scores = rel['votes']
sum_scores = sum(scores)
len_scores = len(scores)
if len_scores !=0:
sum_result = sum_result + sum_scores/len_scores
return float(format(sum_result/count,'.2f'))
def getRelation(first,second):
relations_list = mongo.db.relations.find({},{'_id':0})
relations = [rel for rel in relations_list]
for entry in relations:
if first == entry['voter'] and second == entry['voted']:
scores = sum(entry['votes'])
len_scores = len(entry['votes'])
if len_scores != 0:
return scores/len_scores
return 0
def getGroupReputation(voted,group):
relations_list = mongo.db.relations.find({},{'_id':0})
    relations = [rel for rel in relations_list if voted == rel['voted']]  # compare against the 'voted' parameter, not the builtin id
members = getGroupMembers(group)
    member_ids = [member['id'] for member in members if member['id'] != voted]
member_count = len(member_ids)
score_sum = 0
for member in member_ids:
score_sum += getRelation(voted,member)
return float(format(score_sum/member_count,'.2f'))
def getRelations(id,group):
members = getGroupMembers(group)
member_except_id = [member for member in members if member['id'] != id]
for member in member_except_id:
member['score'] = float(format(getRelation(id,member['id']),'.2f'))
return sorted(member_except_id, key = lambda i: i['score'],reverse=True)
def analyzer(entry):
processed,v = remove_common_adjectives(entry)
result = model.predict(vect.transform([processed]))
p = result[0]
print(p,v,processed)
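    # The branches below reduce the pair to a single vote: 1 when the model
    # prediction p and the adjective flag v agree, 0 when they disagree.
    # (The meaning of p and v is assumed from the sentiment model and
    # remove_common_adjectives; only the mapping itself is taken from the code.)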
if (p == 0) and (v == 0):
return 1
elif (p == 0) and (v == 1):
return 0
elif (p == 1) and (v == 0):
return 0
elif (p == 1) and (v == 1):
return 1
else:
return int(p)
def addNLPVote(voter,voted,entry):
nlp_result = analyzer(entry)
relations_list = mongo.db.relations.find({},{'_id':0})
relations = [rel for rel in relations_list if voter == rel['voter'] and voted == rel['voted']]
relations = relations[0]['votes']
relations.append(nlp_result)
relations_list = mongo.db.relations.update({'voter':voter,'voted':voted},{'$set':{'votes':relations}})
relations_list = mongo.db.relations.find({},{'_id':0})
relations = [rel for rel in relations_list if voter == rel['voter'] and voted == rel['voted']]
relations = relations[0]['votes']
return nlp_result
def getGroups(id):
group_list = mongo.db.groups.find({},{'_id':0})
groups = [g['id'] for g in group_list if id in g['leaders'] ]
member_list = []
for i in groups:
member_i = getGroup(i)
for j in member_i:
if j not in member_list:
member_list.append(j)
return member_list
def createGroup(name,owner,member_list):
group_list = mongo.db.groups.find({},{'_id':0})
groups = [x for x in group_list]
new_group_id = len(groups)+1
ids_to_add = member_list.split('_')
members = [int(i) for i in ids_to_add]
name = ' '.join(name.split('_'))
leaders = [owner]
new_group = {'id':new_group_id, 'name':name, 'members':members, 'owner':owner, 'leaders':leaders}
try:
mongo.db.groups.insert_one(new_group)
except:
        return 'Grup olusturulamadi.'  # Turkish: "The group could not be created."
if owner in members:
for mem in members:
g_info = {}
u = fetchUser(userId=mem)[0]
g = u['groups']
if mem == owner:
g_info['id'] = new_group_id
g_info['leaders'] = 1
g_info['owner'] = 1
g_info['members'] = 1
g.append(g_info)
else:
g_info['id'] = new_group_id
g_info['leaders'] = 0
g_info['owner'] = 0
g_info['members'] = 1
g.append(g_info)
mongo.db.users.update_one({'id':mem},{'$set':{'groups':g}})
else:
for mem in members:
g_info = {}
u = fetchUser(userId=mem)[0]
g = u['groups']
g_info['id'] = new_group_id
g_info['leaders'] = 0
g_info['owner'] = 0
g_info['members'] = 1
g.append(g_info)
mongo.db.users.update_one({'id':mem},{'$set':{'groups':g}})
    return 'Grup basariyla eklendi'  # Turkish: "The group was added successfully"
def addLeader(owner,group,members):
can_see = canSee(owner,group)
if can_see == 1:
cursor = mongo.db.groups.find({'id':group},{'_id':0})
group_to_add = [i for i in cursor][0]
to_be_leader = members.split('_')
to_be_leader = [int(i) for i in to_be_leader]
current_leaders = group_to_add['leaders']
to_be_leader = [i for i in to_be_leader if i not in current_leaders]
current_leaders = current_leaders + to_be_leader
mongo.db.groups.update_one({'id':group},{'$set':{'leaders':current_leaders}})
for l in to_be_leader:
user = fetchUser(userId=l)[0]
class shared_parameters(object):
    """
    Context manager to share parameter instances when creating
    multiple Parameterized objects of the same type. Parameter default
values are instantiated once and cached to be reused when another
Parameterized object of the same type is instantiated.
Can be useful to easily modify large collections of Parameterized
objects at once and can provide a significant speedup.
"""
_share = False
_shared_cache = {}
def __enter__(self):
shared_parameters._share = True
def __exit__(self, exc_type, exc_val, exc_tb):
shared_parameters._share = False
shared_parameters._shared_cache = {}
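# Minimal usage sketch (MyParameterized is a hypothetical Parameterized
# subclass): while the context is active, instantiated parameter defaults are
# created once and served from _shared_cache instead of being deep-copied for
# every new instance.
#
#     with shared_parameters():
#         objs = [MyParameterized() for _ in range(10000)]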
def as_uninitialized(fn):
"""
Decorator: call fn with the parameterized_instance's
initialization flag set to False, then revert the flag.
(Used to decorate Parameterized methods that must alter
a constant Parameter.)
"""
@wraps(fn)
def override_initialization(self_,*args,**kw):
parameterized_instance = self_.self
original_initialized=parameterized_instance.initialized
parameterized_instance.initialized=False
fn(parameterized_instance,*args,**kw)
parameterized_instance.initialized=original_initialized
return override_initialization
class Comparator(object):
"""
Comparator defines methods for determining whether two objects
should be considered equal. It works by registering custom
    comparison functions, which may either be registered by type or with
a predicate function. If no matching comparison can be found for
the two objects the comparison will return False.
If registered by type the Comparator will check whether both
objects are of that type and apply the comparison. If the equality
function is instead registered with a function it will call the
function with each object individually to check if the comparison
applies. This is useful for defining comparisons for objects
without explicitly importing them.
To use the Comparator simply call the is_equal function.
"""
equalities = {
numbers.Number: operator.eq,
basestring: operator.eq,
bytes: operator.eq,
type(None): operator.eq
}
@classmethod
def is_equal(cls, obj1, obj2):
for eq_type, eq in cls.equalities.items():
if ((isinstance(eq_type, FunctionType)
and eq_type(obj1) and eq_type(obj2))
or (isinstance(obj1, eq_type) and isinstance(obj2, eq_type))):
return eq(obj1, obj2)
if isinstance(obj2, (list, set, tuple)):
return cls.compare_iterator(obj1, obj2)
elif isinstance(obj2, dict):
return cls.compare_mapping(obj1, obj2)
return False
@classmethod
def compare_iterator(cls, obj1, obj2):
if type(obj1) != type(obj2) or len(obj1) != len(obj2):
return False
for o1, o2 in zip(obj1, obj2):
if not cls.is_equal(o1, o2):
return False
return True
@classmethod
def compare_mapping(cls, obj1, obj2):
if type(obj1) != type(obj2) or len(obj1) != len(obj2): return False
for k in obj1:
if k in obj2:
if not cls.is_equal(obj1[k], obj2[k]):
return False
else:
return False
return True
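# Registration sketch (hypothetical types and objects, not part of this
# module): comparisons can be registered either by type,
#     Comparator.equalities[MyVector] = lambda a, b: a.values == b.values
# or by a predicate function, which is tested against each object separately,
#     Comparator.equalities[lambda o: hasattr(o, 'equals')] = lambda a, b: a.equals(b)
# after which Comparator.is_equal(a, b) applies the first matching rule.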
class Parameters(object):
"""Object that holds the namespace and implementation of Parameterized
methods as well as any state that is not in __slots__ or the
Parameters themselves.
Exists at both the metaclass level (instantiated by the metaclass)
and at the instance level. Can contain state specific to either the
class or the instance as necessary.
"""
_disable_stubs = False # Flag used to disable stubs in the API1 tests
# None for no action, True to raise and False to warn.
def __init__(self_, cls, self=None):
"""
cls is the Parameterized class which is always set.
self is the instance if set.
"""
self_.cls = cls
self_.self = self
@property
def _BATCH_WATCH(self_):
return self_.self_or_cls._parameters_state['BATCH_WATCH']
@_BATCH_WATCH.setter
def _BATCH_WATCH(self_, value):
self_.self_or_cls._parameters_state['BATCH_WATCH'] = value
@property
def _TRIGGER(self_):
return self_.self_or_cls._parameters_state['TRIGGER']
@_TRIGGER.setter
def _TRIGGER(self_, value):
self_.self_or_cls._parameters_state['TRIGGER'] = value
@property
def _events(self_):
return self_.self_or_cls._parameters_state['events']
@_events.setter
def _events(self_, value):
self_.self_or_cls._parameters_state['events'] = value
@property
def _watchers(self_):
return self_.self_or_cls._parameters_state['watchers']
@_watchers.setter
def _watchers(self_, value):
self_.self_or_cls._parameters_state['watchers'] = value
@property
def self_or_cls(self_):
return self_.cls if self_.self is None else self_.self
def __setstate__(self, state):
# Set old parameters state on Parameterized._parameters_state
self_or_cls = state.get('self', state.get('cls'))
for k in self_or_cls._parameters_state:
key = '_'+k
if key in state:
self_or_cls._parameters_state[k] = state.pop(key)
for k, v in state.items():
setattr(self, k, v)
def __getitem__(self_, key):
"""
Returns the class or instance parameter
"""
inst = self_.self
parameters = self_.objects(False) if inst is None else inst.param.objects(False)
p = parameters[key]
if (inst is not None and getattr(inst, 'initialized', False) and p.per_instance and
not getattr(inst, '_disable_instance__params', False)):
if key not in inst._instance__params:
try:
# Do not copy watchers on class parameter
watchers = p.watchers
p.watchers = {}
p = copy.copy(p)
except:
raise
finally:
p.watchers = watchers
p.owner = inst
inst._instance__params[key] = p
else:
p = inst._instance__params[key]
return p
def __dir__(self_):
"""
Adds parameters to dir
"""
return super(Parameters, self_).__dir__() + list(self_)
def __iter__(self_):
"""
Iterates over the parameters on this object.
"""
for p in self_.objects(instance=False):
yield p
def __contains__(self_, param):
return param in list(self_)
def __getattr__(self_, attr):
"""
Extends attribute access to parameter objects.
"""
cls = self_.__dict__.get('cls')
if cls is None: # Class not initialized
raise AttributeError
try:
params = list(getattr(cls, '_%s__params' % cls.__name__))
except AttributeError:
params = [n for class_ in classlist(cls) for n, v in class_.__dict__.items()
if isinstance(v, Parameter)]
if attr in params:
return self_.__getitem__(attr)
elif self_.self is None:
raise AttributeError("type object '%s.param' has no attribute %r" %
(self_.cls.__name__, attr))
else:
raise AttributeError("'%s.param' object has no attribute %r" %
(self_.cls.__name__, attr))
@as_uninitialized
def _set_name(self_, name):
self = self_.param.self
self.name=name
@as_uninitialized
def _generate_name(self_):
self = self_.param.self
self.param._set_name('%s%05d' % (self.__class__.__name__ ,object_count))
@as_uninitialized
def _setup_params(self_,**params):
"""
Initialize default and keyword parameter values.
First, ensures that all Parameters with 'instantiate=True'
(typically used for mutable Parameters) are copied directly
into each object, to ensure that there is an independent copy
(to avoid surprising aliasing errors). Then sets each of the
keyword arguments, warning when any of them are not defined as
parameters.
Constant Parameters can be set during calls to this method.
"""
self = self_.param.self
## Deepcopy all 'instantiate=True' parameters
# (build a set of names first to avoid redundantly instantiating
# a later-overridden parent class's parameter)
params_to_instantiate = {}
for class_ in classlist(type(self)):
if not issubclass(class_, Parameterized):
continue
for (k,v) in class_.__dict__.items():
# (avoid replacing name with the default of None)
if isinstance(v,Parameter) and v.instantiate and k!="name":
params_to_instantiate[k]=v
for p in params_to_instantiate.values():
self.param._instantiate_param(p)
## keyword arg setting
for name,val in params.items():
desc = self.__class__.get_param_descriptor(name)[0] # pylint: disable-msg=E1101
if not desc:
self.param.warning("Setting non-parameter attribute %s=%s using a mechanism intended only for parameters",name,val)
# i.e. if not desc it's setting an attribute in __dict__, not a Parameter
setattr(self,name,val)
@classmethod
def deprecate(cls, fn):
"""
Decorator to issue warnings for API moving onto the param
namespace and to add a docstring directing people to the
appropriate method.
"""
def inner(*args, **kwargs):
if cls._disable_stubs:
raise AssertionError('Stubs supporting old API disabled')
elif cls._disable_stubs is None:
pass
elif cls._disable_stubs is False:
get_logger(name=args[0].__class__.__name__).log(
WARNING, 'Use method %r via param namespace ' % fn.__name__)
return fn(*args, **kwargs)
inner.__doc__= "Inspect .param.%s method for the full docstring" % fn.__name__
return inner
@classmethod
def _changed(cls, event):
"""
Predicate that determines whether a Event object has actually
changed such that old != new.
"""
return not Comparator.is_equal(event.old, event.new)
# CEBALERT: this is a bit ugly
def _instantiate_param(self_,param_obj,dict_=None,key=None):
# deepcopy param_obj.default into self.__dict__ (or dict_ if supplied)
# under the parameter's _internal_name (or key if supplied)
self = self_.self
dict_ = dict_ or self.__dict__
key = key or param_obj._internal_name
param_key = (str(type(self)), param_obj.name)
if shared_parameters._share:
if param_key in shared_parameters._shared_cache:
new_object = shared_parameters._shared_cache[param_key]
else:
new_object = copy.deepcopy(param_obj.default)
shared_parameters._shared_cache[param_key] = new_object
else:
new_object = copy.deepcopy(param_obj.default)
dict_[key]=new_object
if isinstance(new_object,Parameterized):
global object_count
object_count+=1
# CB: writes over name given to the original object;
# should it instead keep the same name?
new_object.param._generate_name()
# Classmethods
def print_param_defaults(self_):
"""Print the default values of all cls's Parameters."""
cls = self_.cls
for key,val in cls.__dict__.items():
if isinstance(val,Parameter):
print(cls.__name__+'.'+key+ '='+ repr(val.default))
def set_default(self_,param_name,value):
"""
Set the default value of param_name.
Equivalent to setting param_name on the class.
"""
cls = self_.cls
setattr(cls,param_name,value)
def _add_parameter(self_, param_name,param_obj):
"""
Add a new Parameter object into this object's class.
Supposed to result in a Parameter equivalent to one declared
in the class's source code.
"""
# CEBALERT: can't we just do
# setattr(cls,param_name,param_obj)? The metaclass's
# __setattr__ is actually written to handle that. (Would also
# need to do something about the params() cache. That cache
# is a pain, but it definitely improved the startup time; it
# would be worthwhile making sure no method except for one
# "add_param()" method has to deal with it (plus any future
# remove_param() method.)
cls = self_.cls
type.__setattr__(cls,param_name,param_obj)
ParameterizedMetaclass._initialize_parameter(cls,param_name,param_obj)
# delete cached params()
try:
delattr(cls,'_%s__params'%cls.__name__)
except AttributeError:
pass
    def params(self_,
timeout):
"""
Sets the timeout limit for an order to the RAPI.
:param timeout: The value of the timeout in seconds.
:type timeout: float
"""
self.timeout_order = float(timeout)
def set_attempts(self, number):
"""
Sets number of attempts to be made to the RAPI before the script
ends.
:param number: The value for the number of attempts.
:type number: int
"""
self.attempts = int(number)
def set_fieldConvention(self, convention):
"""
Sets the naming convention of the output fields.
:param convention: The type of naming convention for the fields.
- ``words``: The label with spaces and words will be returned.
- ``camel`` (default): The format will be lower camel case like 'camelCase'.
- ``upper``: The format will be all uppercase with underscore for spaces.
:type convention: str
"""
self.name_conv = convention
self._update_conv()
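    # Illustrative effect of the three conventions on a field labelled
    # "Collection ID" (hypothetical field; behaviour as described above):
    #   'words' -> 'Collection ID'
    #   'camel' -> 'collectionId'
    #   'upper' -> 'COLLECTION_ID'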
def download_image(self, url, dest_fn, fsize):
"""
        Downloads a single remote image to a local destination, skipping the
        download if a local copy with the expected filesize already exists.
(Adapted from the eodms-api-client (https://pypi.org/project/eodms-api-client/) developed by <NAME>)
:param url: The download URL of the image.
:type url: str
:param dest_fn: The local destination filename for the download.
:type dest_fn: str
:param fsize: The total filesize of the image.
:type fsize: int
"""
# If we have an existing local file, check the filesize against the manifest
if os.path.exists(dest_fn):
# if all-good, continue to next file
if os.stat(dest_fn).st_size == fsize:
msg = "No download necessary. " \
"Local file already exists: %s" % dest_fn
self._log_msg(msg)
return None
# Otherwise, delete the incomplete/malformed local file and redownload
else:
msg = 'Filesize mismatch with %s. Re-downloading...' % \
os.path.basename(dest_fn)
self._log_msg(msg, 'warning')
os.remove(dest_fn)
# Use streamed download so we can wrap nicely with tqdm
with self._session.get(url, stream=True, verify=self.verify) as stream:
with open(dest_fn, 'wb') as pipe:
with tqdm.wrapattr(
pipe,
method='write',
miniters=1,
total=fsize,
desc="%s%s" % (self._header, os.path.basename(dest_fn))
) as file_out:
for chunk in stream.iter_content(chunk_size=1024):
file_out.write(chunk)
msg = '%s has been downloaded.' % dest_fn
self._log_msg(msg)
def download(self, items, dest, wait=10.0):
"""
Downloads a list of order items from the EODMS RAPI.
:param items: A list of order items returned from the RAPI.
Example:
.. code-block:: python
{'items': [
{'recordId': '8023427',
'status': 'SUBMITTED',
'collectionId': 'RCMImageProducts',
'itemId': '346204',
'orderId': '50975'},
...]}
or
.. code-block:: python
[{
'recordId': '8023427',
'status': 'SUBMITTED',
'collectionId': 'RCMImageProducts',
'itemId': '346204',
'orderId': '50975'
}, ...]
:type items: list or dict
:param dest: The local download folder location.
:type dest: str
:param wait: Sets the time to wait before checking the status of all orders.
:type wait: float or int
:return: A list of the download (completed) items.
:rtype: list
"""
msg = "Downloading images..."
self._log_msg(msg, log_indent='\n\n\t', out_indent='\n')
if items is None:
msg = "No images to download."
self._log_msg(msg)
return []
if isinstance(items, dict):
if 'items' in items.keys():
items = items['items']
if len(items) == 0:
msg = "No images to download."
self._log_msg(msg)
return []
complete_items = []
while len(items) > len(complete_items):
time.sleep(wait)
start, end = self._get_dateRange(items)
orders = self.get_orders(dtstart=start, dtend=end)
if len(orders) == 0:
msg = "No orders could be found."
self._log_msg(msg)
return []
new_count = len(complete_items)
for itm in items:
item_id = itm['itemId']
cur_item = self._get_itemFromOrders(item_id, orders)
status = cur_item['status']
record_id = cur_item['recordId']
# Check record is already complete
if self._check_complete(complete_items, record_id):
continue
if status in self.failed_status:
if status == 'FAILED':
# If the order has failed, inform user
status_mess = cur_item.get('statusMessage')
msg = "\n The following Order Item has failed:"
if status_mess is None:
msg += "\n Order Item Id: %s\n" \
" Record Id: %s" \
" Collection: %s\n" % \
(cur_item['itemId'], \
cur_item['recordId'], \
cur_item['collectionId'])
else:
msg += "\n Order Item Id: %s\n" \
" Record Id: %s\n" \
" Collection: %s\n" \
" Reason for Failure: %s" % \
(cur_item['itemId'], cur_item['recordId'], \
cur_item['collectionId'], \
cur_item['statusMessage'])
else:
# If the order was unsuccessful with another status,
# inform user
msg = "\n The following Order Item has status " \
"'%s' and will not be downloaded:" % status
msg += "\n Order Item Id: %s\n" \
" Record Id: %s\n" \
" Collection: %s\n" % \
(cur_item['itemId'], \
cur_item['recordId'], \
cur_item['collectionId'])
self._log_msg(msg)
cur_item['downloaded'] = 'False'
complete_items.append(cur_item)
elif status == 'AVAILABLE_FOR_DOWNLOAD':
cur_item['downloaded'] = 'True'
dests = cur_item['destinations']
manifest_key = list(cur_item['manifest'].keys()).pop()
fsize = int(cur_item['manifest'][manifest_key])
download_paths = []
for d in dests:
# Get the string value of the destination
str_val = d['stringValue']
str_val = str_val.replace('</br>', '')
# Parse the HTML text of the destination string
root = ElementTree.fromstring(str_val)
url = root.text
fn = os.path.basename(url)
# Download the image
msg = "Downloading image with " \
"Record Id %s (%s)." % (record_id, \
os.path.basename(url))
self._log_msg(msg)
# Save the image contents to the 'downloads' folder
out_fn = os.path.join(dest, fn)
full_path = os.path.realpath(out_fn)
if not os.path.exists(dest):
os.mkdir(dest)
self.download_image(url, out_fn, fsize)
print('')
# Record the URL and downloaded file to a dictionary
dest_info = {}
dest_info['url'] = url
dest_info['local_destination'] = full_path
download_paths.append(dest_info)
cur_item['downloadPaths'] = download_paths
complete_items.append(cur_item)
if new_count == 0 and len(complete_items) == 0:
msg = "No items are ready for download yet."
self._log_msg(msg)
elif new_count == len(complete_items):
msg = "No new items are ready for download yet."
self._log_msg(msg)
return complete_items
def get_availableFields(self, collection=None, name_type='all'):
"""
Gets a dictionary of available fields for a collection from the RAPI.
:param collection: The Collection ID.
:type collection: str
:return: A dictionary containing the available fields for the given
collection.
:rtype: dict
"""
if collection is None:
if self.collection is None:
self._log_msg('No collection can be determined.', 'warning')
return None
collection = self.collection
query_url = '%s/collections/%s' % (self.rapi_root, collection)
coll_res = self._submit(query_url, timeout=20.0)
if coll_res is None: return None
# If an error occurred
if isinstance(coll_res, QueryError):
self._log_msg(coll_res._get_msgs(True), 'warning')
return None
# Get a list of the searchFields
fields = {}
if name_type == 'title' or name_type == 'id':
srch_fields = []
for r in coll_res['searchFields']:
srch_fields.append(r[name_type])
fields['search'] = srch_fields
res_fields = []
for r in coll_res['resultFields']:
res_fields.append(r[name_type])
fields['results'] = res_fields
else:
srch_fields = {}
for r in coll_res['searchFields']:
srch_fields[r['title']] = {'id': r['id'], \
'datatype': r['datatype'], \
'choices': r.get('choices')}
fields['search'] = srch_fields
res_fields = {}
for r in coll_res['resultFields']:
res_fields[r['title']] = {'id': r['id'], \
'datatype': r['datatype']}
fields['results'] = res_fields
return fields
def get_fieldChoices(self, collection, field=None):
"""
        Gets the available choices for a specified field. If no choices exist, then the data type is returned.
:param collection: The collection containing the field.
:type collection: str
:param field: The field name or field ID.
:type field: str
:return: Either a list of choices or a string containing the
data type.
:rtype: list or str
"""
fields = self.get_availableFields(collection)
all_fields = {}
for f, v in fields['search'].items():
choices = []
if field is None:
field_choices = v.get('choices')
if field_choices is not None:
for c in field_choices:
value = c['value']
if not value == '':
choices.append(value)
all_fields[f] = choices
else:
all_fields[f] = {'data_type': v.get('datatype')}
else:
if f == field or v['id'] == field:
field_choices = v.get('choices')
if field_choices is not None:
for c in field_choices:
value = c['value']
if not value == '':
choices.append(value)
return choices
else:
return {'data_type': v.get('datatype')}
return all_fields
def get_collections(self, as_list=False, opt='id', redo=False):
"""
Gets a list of available collections for the current user.
:param as_list: Determines the type of return. If False, a dictionary
will be returned. If True, only a list of collection
IDs will be returned.
:type as_list: bool
        :return: Either
    run_data = parse_yaml(paths, fn)
all_run_data = copy.deepcopy(run_data) # all_run_data includes failed jobs
if show_fails:
# remove all jobs that have no PBS info in log file
for jobid in all_run_data:
if all_run_data[jobid]['PBS log']['Run completion date'] is None:
del run_data[jobid]
# (jobid, run completion date) tuples sorted by run completion date
jobid_run_tuples = sorted([(k, v['PBS log']['Run completion date'])
for (k, v) in run_data.items()],
key=lambda t: t[1])
if len(jobid_run_tuples) == 0:
print('\nAborting: no jobs?')
return
# jobid keys into run_data sorted by run completion date
sortedjobids = [k[0] for k in jobid_run_tuples]
else:
# remove failed jobs from run_data
for jobid in all_run_data:
print('.', end='', flush=True)
pbs = all_run_data[jobid]['PBS log']
date = pbs['Run completion date']
if date is None: # no PBS info in log file
del run_data[jobid]
elif pbs['Run number'] is None: # not a model run log file
del run_data[jobid]
elif pbs['Exit Status'] != 0: # output dir belongs to this job only if Exit Status = 0
del run_data[jobid]
elif len(run_data[jobid]['config.yaml']) == 0: # output dir missing
del run_data[jobid]
# (jobid, run number) tuples sorted by run number - re-done below
jobid_run_tuples = sorted([(k, v['PBS log']['Run number'])
for (k, v) in run_data.items()],
key=lambda t: t[1])
if len(jobid_run_tuples) == 0:
print('\nAborting: no successful jobs?')
return
# Remove the older jobid if run number is duplicated - assume run was re-done
# (check by date rather than jobid, since jobid sometimes rolls over)
prev_jobid_run = jobid_run_tuples[0]
for jobid_run in jobid_run_tuples[1:]:
if jobid_run[1] == prev_jobid_run[1]: # duplicated run number
if run_data[jobid_run[0]]['PBS log']['Run completion date']\
> run_data[prev_jobid_run[0]]['PBS log']['Run completion date']:
del run_data[prev_jobid_run[0]]
prev_jobid_run = jobid_run
else:
del run_data[jobid_run[0]]
else:
prev_jobid_run = jobid_run
# re-do (jobid, run number) tuples sorted by run number
jobid_run_tuples = sorted([(k, v['PBS log']['Run number'])
for (k, v) in run_data.items()],
key=lambda t: t[1])
if len(jobid_run_tuples) == 0:
print('\nAborting: no successful jobs?')
return
# jobid keys into run_data sorted by run number
sortedjobids = [k[0] for k in jobid_run_tuples]
# allow referencing by submodel name as well as list index
for jobid in run_data:
run_data[jobid]['config.yaml']['submodels-by-name'] = dict()
for sm in run_data[jobid]['config.yaml']['submodels']:
run_data[jobid]['config.yaml']['submodels-by-name'][sm['name']] = sm
# make a 'timing' entry to contain model timestep and run length for both MATM and YATM runs
# run length is [years, months, days, seconds] to accommodate both MATM and YATM
prevjobid = -1
for jobid in sortedjobids:
r = run_data[jobid]
timing = dict()
if r['namelists']['accessom2.nml'] is None: # non-YATM run
timing['Timestep'] = r['config.yaml']['submodels'][1]['timestep'] # MOM timestep
rt = r['config.yaml']['calendar']['runtime']
timing['Run length'] = [rt['years'], rt['months'], rt['days'], 0] # insert 0 seconds
else:
timing['Timestep'] = r['namelists']['accessom2.nml']['accessom2_nml']['ice_ocean_timestep']
rp = r['namelists']['accessom2.nml']['date_manager_nml']['restart_period']
timing['Run length'] = rp[0:2] + [0] + [rp[2]] # insert 0 days
yrs = r['MOM_time_stamp.out']['Model run length (days)']/365.25 # FUDGE: assumes 365.25-day year
timing['SU per model year'] = r['PBS log']['Service Units']/yrs
timing['Walltime (hr) per model year'] = r['PBS log']['Walltime Used (hr)']/yrs
storagekeys = list(r['storage'].keys())
for k in storagekeys:
timing[k + ' per model year'] = round(r['storage'][k]/yrs, 3)
if prevjobid >= 0: # also record time including wait between runs
d1 = dateutil.parser.parse(run_data[prevjobid]['PBS log']['Run completion date'])
d2 = dateutil.parser.parse(r['PBS log']['Run completion date'])
tot_walltime = (d2-d1).total_seconds()/3600
timing['Walltime (hr) between this completion and previous completion'] = tot_walltime
timing['Wait (hr) between this run and previous'] = tot_walltime - r['PBS log']['Walltime Used (hr)']
timing['SU per calendar day'] = r['PBS log']['Service Units']/tot_walltime*24
timing['Model years per calendar day'] = yrs/tot_walltime*24
for k in storagekeys:
timing[k + ' per calendar day'] = round(r['storage'][k]/tot_walltime*24, 3)
r['timing'] = timing
prevjobid = jobid
# include changes in all git commits since previous run
for i, jobid in enumerate(sortedjobids):
print('.', end='', flush=True)
run_data[jobid]['git diff'] = \
git_diff(basepath,
run_data[sortedjobids[max(i-1, 0)]]['git log']['Commit'],
run_data[jobid]['git log']['Commit'])
# count failed jobs prior to each successful run
# BUG: always have zero count between two successful runs straddling a jobid rollover
# BUG: first run also counts all fails after a rollover
prevjobid = -1
for jobid in sortedjobids:
c = [e for e in all_run_data.keys() if e > prevjobid and e < jobid
and e not in run_data]
c.sort()
run_data[jobid]['PBS log']['Failed previous jobids'] = c
run_data[jobid]['PBS log']['Failed previous jobs'] = len(c)
prevjobid = jobid
if list_available:
print('\nAvailable data which can be tabulated if added to output_format')
print('(but you may need to edit some keys to ensure uniqueness):')
keylist = []
for k in keylistssuperset(run_data):
keylist.append((k[-1], "['" + "', '".join(k) + "']"))
keylist.sort(key = lambda x: x[1])
maxkeywidth = max([len(k[0]) for k in keylist])
for k in keylist:
print(" ('" + k[0] + "', " + " "*(maxkeywidth-len(k[0])) + k[1] + "),")
if dump_all:
dumpoutfile = os.path.splitext(outfile)[0]+'.yaml'
print('\nWriting', dumpoutfile)
with open(dumpoutfile, 'w') as outf:
yaml.dump(run_data, outf, default_flow_style=False)
###########################################################################
# Specify the output format here.
###########################################################################
# output_format is a OrderedDict of (key, value) tuples, one for each column.
# keys are column headers (arbitrary but must be unique)
# values are lists of keys into run_data (omitting job id)
# "run_summary.py --list" will list all available data you can add here
# (but you may need to edit some keys to ensure uniqueness)
output_format = OrderedDict([
('Run', ['PBS log', 'Run number']),
('Run start', ['MOM_time_stamp.out', 'Model start time']),
('Run end', ['MOM_time_stamp.out', 'Model end time']),
('Run length (y, m, d, s)', ['timing', 'Run length']),
('Run length (days)', ['MOM_time_stamp.out', 'Model run length (days)']),
('Control directory', ['paths', 'Control path']),
# ('Archive directory', ['paths', 'Archive path']),
# ('Sync directory', ['paths', 'Sync path']),
('Output directory', ['paths', 'Output path']),
('Output GiB', ['storage', 'Output path GiB']),
('Restart directory', ['paths', 'Restart path']),
('Restart GiB', ['storage', 'Restart path GiB']),
('Run by', ['git log', 'Author']),
('Run completion date', ['PBS log', 'Run completion date']),
('Job Id', ['PBS log', 'Job Id']),
('Failed jobs', ['PBS log', 'Failed previous jobs']),
('Failed jobids', ['PBS log', 'Failed previous jobids']),
('Queue', ['config.yaml', 'queue']),
('Service Units', ['PBS log', 'Service Units']),
('Walltime Used (hr)', ['PBS log', 'Walltime Used (hr)']),
('SU per model year', ['timing', 'SU per model year']),
('Walltime (hr) per model year', ['timing', 'Walltime (hr) per model year']),
('Wait (hr) between runs', ['timing', 'Wait (hr) between this run and previous']),
('SU per calendar day', ['timing', 'SU per calendar day']),
('Model years per calendar day', ['timing', 'Model years per calendar day']),
('Memory Used (Gb)', ['PBS log', 'Memory Used (Gb)']),
('NCPUs Used', ['PBS log', 'NCPUs Used']),
('MOM NCPUs', ['config.yaml', 'submodels-by-name', 'ocean', 'ncpus']),
('CICE NCPUs', ['config.yaml', 'submodels-by-name', 'ice', 'ncpus']),
# ('Max Ocean diagnostics (s)', ['access-om2.out', '(Ocean diagnostics)', 'tmax']),
# ('Max Ocean diagnostics: tracer (s)', ['access-om2.out', '(Ocean diagnostics: tracer)', 'tmax']),
('Fraction of MOM runtime in oasis_recv', ['access-om2.out', 'oasis_recv', 'tfrac']),
('Max MOM wait for oasis_recv (s)', ['access-om2.out', 'oasis_recv', 'tmax']),
('Max CICE wait for coupler (s)', ['ice_diag.d', 'timing', 'waiting_o', 'node', 'max']),
('Max CICE I/O time (s)', ['ice_diag.d', 'timing', 'ReadWrite', 'node', 'max']),
('MOM tile layout', ['namelists', 'ocean/input.nml', 'ocean_model_nml', 'layout']),
('CICE tile distribution', ['namelists', 'ice/cice_in.nml', 'domain_nml', 'distribution_type']),
('CICE block_size_x', ['ice_diag.d', 'block_size_x']),
('CICE block_size_y', ['ice_diag.d', 'block_size_y']),
('Timestep (s)', ['timing', 'Timestep']),
('MOM barotropic split', ['namelists', 'ocean/input.nml', 'ocean_model_nml', 'barotropic_split']),
('CICE dynamic split (ndtd)', ['namelists', 'ice/cice_in.nml', 'setup_nml', 'ndtd']),
# ('ktherm', ['namelists', 'ice/cice_in.nml', 'thermo_nml', 'ktherm']),
# ('Common inputs', ['config.yaml', 'input']),
# ('Atmosphere executable', ['config.yaml', 'submodels-by-name', 'atmosphere', 'exe']),
# ('Atmosphere inputs', ['config.yaml', 'submodels-by-name', 'atmosphere', 'input']),
# ('MOM executable', ['config.yaml', 'submodels-by-name', 'ocean', 'exe']),
# ('MOM inputs', ['config.yaml', 'submodels-by-name', 'ocean', 'input']),
# ('CICE executable', ['config.yaml', 'submodels-by-name', 'ice', 'exe']),
# ('CICE inputs', ['config.yaml', 'submodels-by-name', 'ice', 'input']),
# ('Payu version', ['PBS log', 'payu version']),
('Git hash of run', ['git log', 'Commit']),
('Commit date', ['git log', 'Date']),
('Git-tracked file changes', ['git diff', 'Changed files']),
('Git log messages', ['git diff', 'Messages']),
])
SUdata = [dictget(run_data, [jobid] + ['PBS log', 'Service Units'])
for jobid in sortedjobids]
stats = OrderedDict([ # tuples: (label, function)
('Total', sum),
('Mean', np.mean),
('Median', np.median),
('Min', min),
('Max', max),
('Std dev', np.std),
('SU correlation', lambda x: np.corrcoef(x, SUdata)[0, 1]),
('SU slope', lambda x: np.polyfit(x, SUdata, 1)[0]),
('Dimensionless SU slope', lambda x: np.polyfit(x, SUdata, 1)[0]*np.mean(x)/np.mean(SUdata))
])
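    # Interpretation of the regression-based entries above (a hedged reading of
    # the formulas, not extra functionality): np.polyfit(x, SUdata, 1)[0] is
    # the least-squares slope d(SU)/dx, and multiplying by mean(x)/mean(SU)
    # turns it into a dimensionless elasticity, e.g. a value of 0.5 suggests a
    # 1% increase in x goes with roughly a 0.5% increase in Service Units.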
###########################################################################
if no_stats:
stats = OrderedDict([])
if show_fails:
        # output crash-related info (redefines order of any keys already
    # dict used to convert Edk module type to Edkii module type
_MODULE_TYPE_ = {
"LIBRARY" : "BASE",
"SECURITY_CORE" : "SEC",
"PEI_CORE" : "PEI_CORE",
"COMBINED_PEIM_DRIVER" : "PEIM",
"PIC_PEIM" : "PEIM",
"RELOCATABLE_PEIM" : "PEIM",
"PE32_PEIM" : "PEIM",
"BS_DRIVER" : "DXE_DRIVER",
"RT_DRIVER" : "DXE_RUNTIME_DRIVER",
"SAL_RT_DRIVER" : "DXE_SAL_DRIVER",
"DXE_SMM_DRIVER" : "DXE_SMM_DRIVER",
# "SMM_DRIVER" : "DXE_SMM_DRIVER",
# "BS_DRIVER" : "DXE_SMM_DRIVER",
# "BS_DRIVER" : "UEFI_DRIVER",
"APPLICATION" : "UEFI_APPLICATION",
"LOGO" : "BASE",
}
# regular expression for converting XXX_FLAGS in [nmake] section to new type
_NMAKE_FLAG_PATTERN_ = re.compile("(?:EBC_)?([A-Z]+)_(?:STD_|PROJ_|ARCH_)?FLAGS(?:_DLL|_ASL|_EXE)?", re.UNICODE)
# dict used to convert old tool name used in [nmake] section to new ones
_TOOL_CODE_ = {
"C" : "CC",
"LIB" : "SLINK",
"LINK" : "DLINK",
}
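    # Example of how the pattern and table above are used together (values are
    # illustrative): _NMAKE_FLAG_PATTERN_ applied to an old [nmake] macro name
    # such as 'EBC_C_STD_FLAGS_DLL' captures the tool name 'C', which
    # _TOOL_CODE_ maps to the new tool code 'CC'; 'LIB_FLAGS' would yield
    # 'LIB' -> 'SLINK' in the same way.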
    ## Constructor of InfBuildData
    #
    #  Initialize object of InfBuildData
    #
    #   @param      FilePath        The path of module description (INF) file
    #   @param      RawData         The raw data of INF file
    #   @param      BuildDataBase   Database used to retrieve module/package information
    #   @param      Arch            The target architecture
    #   @param      Platform        The name of platform employing this module
    #   @param      Macros          Macros used for replacement in INF file
#
def __init__(self, FilePath, RawData, BuildDatabase, Arch='COMMON', Target=None, Toolchain=None):
self.MetaFile = FilePath
self._ModuleDir = FilePath.Dir
self._RawData = RawData
self._Bdb = BuildDatabase
self._Arch = Arch
self._Target = Target
self._Toolchain = Toolchain
self._Platform = 'COMMON'
self._SourceOverridePath = None
if FilePath.Key in GlobalData.gOverrideDir:
self._SourceOverridePath = GlobalData.gOverrideDir[FilePath.Key]
self._Clear()
## XXX[key] = value
def __setitem__(self, key, value):
self.__dict__[self._PROPERTY_[key]] = value
## value = XXX[key]
def __getitem__(self, key):
return self.__dict__[self._PROPERTY_[key]]
## "in" test support
def __contains__(self, key):
return key in self._PROPERTY_
## Set all internal used members of InfBuildData to None
def _Clear(self):
self._HeaderComments = None
self._TailComments = None
self._Header_ = None
self._AutoGenVersion = None
self._BaseName = None
self._DxsFile = None
self._ModuleType = None
self._ComponentType = None
self._BuildType = None
self._Guid = None
self._Version = None
self._PcdIsDriver = None
self._BinaryModule = None
self._Shadow = None
self._MakefileName = None
self._CustomMakefile = None
self._Specification = None
self._LibraryClass = None
self._ModuleEntryPointList = None
self._ModuleUnloadImageList = None
self._ConstructorList = None
self._DestructorList = None
self._Defs = None
self._Binaries = None
self._Sources = None
self._LibraryClasses = None
self._Libraries = None
self._Protocols = None
self._ProtocolComments = None
self._Ppis = None
self._PpiComments = None
self._Guids = None
self._GuidsUsedByPcd = sdict()
self._GuidComments = None
self._Includes = None
self._Packages = None
self._Pcds = None
self._PcdComments = None
self._BuildOptions = None
self._Depex = None
self._DepexExpression = None
self.__Macros = None
## Get current effective macros
def _GetMacros(self):
if self.__Macros == None:
self.__Macros = {}
# EDK_GLOBAL defined macros can be applied to EDK module
if self.AutoGenVersion < 0x00010005:
self.__Macros.update(GlobalData.gEdkGlobal)
self.__Macros.update(GlobalData.gGlobalDefines)
return self.__Macros
## Get architecture
def _GetArch(self):
return self._Arch
## Set architecture
#
# Changing the default ARCH to another may affect all other information
# because all information in a platform may be ARCH-related. That's
# why we need to clear all internal used members, in order to cause all
# information to be re-retrieved.
#
# @param Value The value of ARCH
#
def _SetArch(self, Value):
if self._Arch == Value:
return
self._Arch = Value
self._Clear()
## Return the name of platform employing this module
def _GetPlatform(self):
return self._Platform
## Change the name of platform employing this module
#
# Changing the default name of platform to another may affect some information
# because they may be PLATFORM-related. That's why we need to clear all internal
# used members, in order to cause all information to be re-retrieved.
#
def _SetPlatform(self, Value):
if self._Platform == Value:
return
self._Platform = Value
self._Clear()
def _GetHeaderComments(self):
if not self._HeaderComments:
self._HeaderComments = []
RecordList = self._RawData[MODEL_META_DATA_HEADER_COMMENT]
for Record in RecordList:
self._HeaderComments.append(Record[0])
return self._HeaderComments
def _GetTailComments(self):
if not self._TailComments:
self._TailComments = []
RecordList = self._RawData[MODEL_META_DATA_TAIL_COMMENT]
for Record in RecordList:
self._TailComments.append(Record[0])
return self._TailComments
## Retrieve all information in [Defines] section
#
    #   (Retrieving all [Defines] information in one-shot is just to save time.)
#
def _GetHeaderInfo(self):
RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
for Record in RecordList:
Name, Value = Record[1], ReplaceMacro(Record[2], self._Macros, False)
# items defined _PROPERTY_ don't need additional processing
if Name in self:
self[Name] = Value
if self._Defs == None:
self._Defs = sdict()
self._Defs[Name] = Value
self._Macros[Name] = Value
# some special items in [Defines] section need special treatment
elif Name in ('EFI_SPECIFICATION_VERSION', 'UEFI_SPECIFICATION_VERSION', 'EDK_RELEASE_VERSION', 'PI_SPECIFICATION_VERSION'):
if Name in ('EFI_SPECIFICATION_VERSION', 'UEFI_SPECIFICATION_VERSION'):
Name = 'UEFI_SPECIFICATION_VERSION'
if self._Specification == None:
self._Specification = sdict()
self._Specification[Name] = GetHexVerValue(Value)
if self._Specification[Name] == None:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
"'%s' format is not supported for %s" % (Value, Name),
File=self.MetaFile, Line=Record[-1])
elif Name == 'LIBRARY_CLASS':
if self._LibraryClass == None:
self._LibraryClass = []
ValueList = GetSplitValueList(Value)
LibraryClass = ValueList[0]
if len(ValueList) > 1:
SupModuleList = GetSplitValueList(ValueList[1], ' ')
else:
SupModuleList = SUP_MODULE_LIST
self._LibraryClass.append(LibraryClassObject(LibraryClass, SupModuleList))
elif Name == 'ENTRY_POINT':
if self._ModuleEntryPointList == None:
self._ModuleEntryPointList = []
self._ModuleEntryPointList.append(Value)
elif Name == 'UNLOAD_IMAGE':
if self._ModuleUnloadImageList == None:
self._ModuleUnloadImageList = []
if not Value:
continue
self._ModuleUnloadImageList.append(Value)
elif Name == 'CONSTRUCTOR':
if self._ConstructorList == None:
self._ConstructorList = []
if not Value:
continue
self._ConstructorList.append(Value)
elif Name == 'DESTRUCTOR':
if self._DestructorList == None:
self._DestructorList = []
if not Value:
continue
self._DestructorList.append(Value)
elif Name == TAB_INF_DEFINES_CUSTOM_MAKEFILE:
TokenList = GetSplitValueList(Value)
if self._CustomMakefile == None:
self._CustomMakefile = {}
if len(TokenList) < 2:
self._CustomMakefile['MSFT'] = TokenList[0]
self._CustomMakefile['GCC'] = TokenList[0]
else:
if TokenList[0] not in ['MSFT', 'GCC']:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
"No supported family [%s]" % TokenList[0],
File=self.MetaFile, Line=Record[-1])
self._CustomMakefile[TokenList[0]] = TokenList[1]
else:
if self._Defs == None:
self._Defs = sdict()
self._Defs[Name] = Value
self._Macros[Name] = Value
#
# Retrieve information in sections specific to Edk.x modules
#
if self.AutoGenVersion >= 0x00010005:
if not self._ModuleType:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE,
"MODULE_TYPE is not given", File=self.MetaFile)
if self._ModuleType not in SUP_MODULE_LIST:
RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, self._Platform]
for Record in RecordList:
Name = Record[1]
if Name == "MODULE_TYPE":
LineNo = Record[6]
break
EdkLogger.error("build", FORMAT_NOT_SUPPORTED,
"MODULE_TYPE %s is not supported for EDK II, valid values are:\n %s" % (self._ModuleType, ' '.join(l for l in SUP_MODULE_LIST)),
File=self.MetaFile, Line=LineNo)
if (self._Specification == None) or (not 'PI_SPECIFICATION_VERSION' in self._Specification) or (int(self._Specification['PI_SPECIFICATION_VERSION'], 16) < 0x0001000A):
if self._ModuleType == SUP_MODULE_SMM_CORE:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "SMM_CORE module type can't be used in the module with PI_SPECIFICATION_VERSION less than 0x0001000A", File=self.MetaFile)
if (self._Specification == None) or (not 'PI_SPECIFICATION_VERSION' in self._Specification) or (int(self._Specification['PI_SPECIFICATION_VERSION'], 16) < 0x00010032):
if self._ModuleType == SUP_MODULE_MM_CORE_STANDALONE:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "MM_CORE_STANDALONE module type can't be used in the module with PI_SPECIFICATION_VERSION less than 0x00010032", File=self.MetaFile)
if self._ModuleType == SUP_MODULE_MM_STANDALONE:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "MM_STANDALONE module type can't be used in the module with PI_SPECIFICATION_VERSION less than 0x00010032", File=self.MetaFile)
if self._Defs and 'PCI_DEVICE_ID' in self._Defs and 'PCI_VENDOR_ID' in self._Defs \
and 'PCI_CLASS_CODE' in self._Defs and 'PCI_REVISION' in self._Defs:
self._BuildType = 'UEFI_OPTIONROM'
if 'PCI_COMPRESS' in self._Defs:
if self._Defs['PCI_COMPRESS'] not in ('TRUE', 'FALSE'):
EdkLogger.error("build", FORMAT_INVALID, "Expected TRUE/FALSE for PCI_COMPRESS: %s" %self.MetaFile)
elif self._Defs and 'UEFI_HII_RESOURCE_SECTION' in self._Defs \
and self._Defs['UEFI_HII_RESOURCE_SECTION'] == 'TRUE':
self._BuildType = 'UEFI_HII'
else:
self._BuildType = self._ModuleType.upper()
if self._DxsFile:
File = PathClass(NormPath(self._DxsFile), self._ModuleDir, Arch=self._Arch)
# check the file validation
ErrorCode, ErrorInfo = File.Validate(".dxs", CaseSensitive=False)
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo,
File=self.MetaFile, Line=LineNo)
if self.Sources == None:
self._Sources = []
self._Sources.append(File)
else:
if not self._ComponentType:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE,
"COMPONENT_TYPE is not given", File=self.MetaFile)
self._BuildType = self._ComponentType.upper()
if self._ComponentType in self._MODULE_TYPE_:
self._ModuleType = self._MODULE_TYPE_[self._ComponentType]
if self._ComponentType == 'LIBRARY':
self._LibraryClass = [LibraryClassObject(self._BaseName, SUP_MODULE_LIST)]
# make use some [nmake] section macros
Macros = self._Macros
Macros["EDK_SOURCE"] = GlobalData.gEcpSource
Macros['PROCESSOR'] = self._Arch
RecordList = self._RawData[MODEL_META_DATA_NMAKE, self._Arch, self._Platform]
for Name, Value, Dummy, Arch, Platform, ID, LineNo in RecordList:
Value = ReplaceMacro(Value, Macros, True)
if Name == "IMAGE_ENTRY_POINT":
if self._ModuleEntryPointList == None:
self._ModuleEntryPointList = []
self._ModuleEntryPointList.append(Value)
elif Name == "DPX_SOURCE":
File = PathClass(NormPath(Value), self._ModuleDir, Arch=self._Arch)
# check the file validation
ErrorCode, ErrorInfo = File.Validate(".dxs", CaseSensitive=False)
if ErrorCode != 0:
EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo,
File=self.MetaFile, Line=LineNo)
if self.Sources == None:
self._Sources = []
self._Sources.append(File)
else:
ToolList = self._NMAKE_FLAG_PATTERN_.findall(Name)
if len(ToolList) == 0 or len(ToolList) != 1:
pass
# EdkLogger.warn("build", "Don't know how to do with macro [%s]" % Name,
# File=self.MetaFile, Line=LineNo)
else:
if self._BuildOptions == None:
self._BuildOptions = sdict()
if ToolList[0] in self._TOOL_CODE_:
Tool = self._TOOL_CODE_[ToolList[0]]
else:
                            Tool = ToolList[0]
class Cat(AFNICommand):
    """1dcat takes as input one or more 1D files, and writes out a 1D file
    containing the side-by-side concatenation of all or a subset of the
columns from the input files.
For complete details, see the `1dcat Documentation.
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/1dcat.html>`_
Examples
========
>>> from nipype.interfaces import afni
>>> cat1d = afni.Cat()
>>> cat1d.inputs.sel = "'[0,2]'"
>>> cat1d.inputs.in_files = ['f1.1D', 'f2.1D']
>>> cat1d.inputs.out_file = 'catout.1d'
>>> cat1d.cmdline # doctest: +ALLOW_UNICODE
"1dcat -sel '[0,2]' f1.1D f2.1D > catout.1d"
>>> res = cat1d.run() # doctest: +SKIP
"""
_cmd = '1dcat'
input_spec = CatInputSpec
output_spec = AFNICommandOutputSpec
class CopyInputSpec(AFNICommandInputSpec):
in_file = File(
desc='input file to 3dcopy',
argstr='%s',
position=-2,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(
name_template='%s_copy',
desc='output image file name',
argstr='%s',
position=-1,
name_source='in_file')
class Copy(AFNICommand):
"""Copies an image of one type to an image of the same
or different type using 3dcopy command
For complete details, see the `3dcopy Documentation.
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcopy.html>`_
Examples
========
>>> from nipype.interfaces import afni
>>> copy3d = afni.Copy()
>>> copy3d.inputs.in_file = 'functional.nii'
>>> copy3d.cmdline # doctest: +ALLOW_UNICODE
'3dcopy functional.nii functional_copy'
>>> res = copy3d.run() # doctest: +SKIP
>>> from copy import deepcopy
>>> copy3d_2 = deepcopy(copy3d)
>>> copy3d_2.inputs.outputtype = 'NIFTI'
>>> copy3d_2.cmdline # doctest: +ALLOW_UNICODE
'3dcopy functional.nii functional_copy.nii'
>>> res = copy3d_2.run() # doctest: +SKIP
>>> copy3d_3 = deepcopy(copy3d)
>>> copy3d_3.inputs.outputtype = 'NIFTI_GZ'
>>> copy3d_3.cmdline # doctest: +ALLOW_UNICODE
'3dcopy functional.nii functional_copy.nii.gz'
>>> res = copy3d_3.run() # doctest: +SKIP
>>> copy3d_4 = deepcopy(copy3d)
>>> copy3d_4.inputs.out_file = 'new_func.nii'
>>> copy3d_4.cmdline # doctest: +ALLOW_UNICODE
'3dcopy functional.nii new_func.nii'
>>> res = copy3d_4.run() # doctest: +SKIP
"""
_cmd = '3dcopy'
input_spec = CopyInputSpec
output_spec = AFNICommandOutputSpec
class Edge3InputSpec(AFNICommandInputSpec):
in_file = File(
desc='input file to 3dedge3',
argstr='-input %s',
position=0,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(
desc='output image file name',
position=-1,
argstr='-prefix %s')
datum = traits.Enum(
'byte','short','float',
argstr='-datum %s',
desc='specify data type for output. Valid types are \'byte\', '
'\'short\' and \'float\'.')
fscale = traits.Bool(
desc='Force scaling of the output to the maximum integer range.',
argstr='-fscale',
xor=['gscale', 'nscale', 'scale_floats'])
gscale = traits.Bool(
desc='Same as \'-fscale\', but also forces each output sub-brick to '
'to get the same scaling factor.',
argstr='-gscale',
xor=['fscale', 'nscale', 'scale_floats'])
nscale = traits.Bool(
desc='Don\'t do any scaling on output to byte or short datasets.',
argstr='-nscale',
xor=['fscale', 'gscale', 'scale_floats'])
scale_floats = traits.Float(
desc='Multiply input by VAL, but only if the input datum is '
'float. This is needed when the input dataset '
'has a small range, like 0 to 2.0 for instance. '
'With such a range, very few edges are detected due to '
'what I suspect to be truncation problems. '
'Multiplying such a dataset by 10000 fixes the problem '
'and the scaling is undone at the output.',
argstr='-scale_floats %f',
xor=['fscale', 'gscale', 'nscale'])
verbose = traits.Bool(
desc='Print out some information along the way.',
argstr='-verbose')
class Edge3(AFNICommand):
"""Does 3D Edge detection using the library 3DEdge
by <NAME> (<EMAIL>).
For complete details, see the `3dedge3 Documentation.
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dedge3.html>`_
references_ = [{'entry': BibTeX('@article{Deriche1987,'
'author={<NAME>},'
'title={Optimal edge detection using recursive filtering},'
'journal={International Journal of Computer Vision},'
'volume={2},',
'pages={167-187},'
'year={1987},'
'}'),
'tags': ['method'],
},
{'entry': BibTeX('@article{MongaDericheMalandainCocquerez1991,'
'author={<NAME>, <NAME>, <NAME>, <NAME>},'
'title={Recursive filtering and edge tracking: two primary tools for 3D edge detection},'
'journal={Image and vision computing},'
'volume={9},',
'pages={203-214},'
'year={1991},'
'}'),
'tags': ['method'],
},
]
Examples
========
>>> from nipype.interfaces import afni
>>> edge3 = afni.Edge3()
>>> edge3.inputs.in_file = 'functional.nii'
>>> edge3.inputs.out_file = 'edges.nii'
>>> edge3.inputs.datum = 'byte'
>>> edge3.cmdline # doctest: +ALLOW_UNICODE
'3dedge3 -input functional.nii -datum byte -prefix edges.nii'
>>> res = edge3.run() # doctest: +SKIP
"""
_cmd = '3dedge3'
input_spec = Edge3InputSpec
output_spec = AFNICommandOutputSpec
class EvalInputSpec(AFNICommandInputSpec):
in_file_a = File(
desc='input file to 1deval',
argstr='-a %s',
position=0,
mandatory=True,
exists=True)
in_file_b = File(
desc='operand file to 1deval',
argstr='-b %s',
position=1,
exists=True)
in_file_c = File(
desc='operand file to 1deval',
argstr='-c %s',
position=2,
exists=True)
out_file = File(
name_template='%s_calc',
desc='output image file name',
argstr='-prefix %s',
name_source='in_file_a')
out1D = traits.Bool(
desc='output in 1D',
argstr='-1D')
expr = Str(
desc='expr',
argstr='-expr "%s"',
position=3,
mandatory=True)
start_idx = traits.Int(
desc='start index for in_file_a',
requires=['stop_idx'])
stop_idx = traits.Int(
desc='stop index for in_file_a',
requires=['start_idx'])
single_idx = traits.Int(
desc='volume index for in_file_a')
other = File(
desc='other options',
argstr='')
class Eval(AFNICommand):
"""Evaluates an expression that may include columns of data from one or
more text files.
For complete details, see the `1deval Documentation.
<https://afni.nimh.nih.gov/pub/dist/doc/program_help/1deval.html>`_
Examples
========
>>> from nipype.interfaces import afni
>>> eval = afni.Eval()
>>> eval.inputs.in_file_a = 'seed.1D'
>>> eval.inputs.in_file_b = 'resp.1D'
>>> eval.inputs.expr = 'a*b'
>>> eval.inputs.out1D = True
>>> eval.inputs.out_file = 'data_calc.1D'
>>> eval.cmdline # doctest: +ALLOW_UNICODE
'1deval -a seed.1D -b resp.1D -expr "a*b" -1D -prefix data_calc.1D'
>>> res = eval.run() # doctest: +SKIP
"""
_cmd = '1deval'
input_spec = EvalInputSpec
output_spec = AFNICommandOutputSpec
def _format_arg(self, name, trait_spec, value):
if name == 'in_file_a':
arg = trait_spec.argstr % value
if isdefined(self.inputs.start_idx):
arg += '[%d..%d]' % (self.inputs.start_idx,
self.inputs.stop_idx)
if isdefined(self.inputs.single_idx):
arg += '[%d]' % (self.inputs.single_idx)
return arg
return super(Eval, self)._format_arg(name, trait_spec, value)
def _parse_inputs(self, skip=None):
"""Skip the arguments without argstr metadata
"""
return super(Eval, self)._parse_inputs(
skip=('start_idx', 'stop_idx', 'other'))
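# --- Illustrative sketch (our addition, not a nipype doctest) ---
# Eval._format_arg above appends an AFNI-style sub-brick selector to the
# first input when start_idx/stop_idx are set. The expected command string in
# the comment is our reading of that code, and the example assumes seed.1D
# and resp.1D exist in the working directory (as in nipype's doctest data).
def _demo_eval_subbrick_selector():
    ev = Eval()
    ev.inputs.in_file_a = 'seed.1D'
    ev.inputs.in_file_b = 'resp.1D'
    ev.inputs.expr = 'a*b'
    ev.inputs.out_file = 'data_calc.1D'
    ev.inputs.start_idx = 0
    ev.inputs.stop_idx = 10
    # Expected to resemble:
    #   1deval -a seed.1D[0..10] -b resp.1D -expr "a*b" -prefix data_calc.1D
    return ev.cmdline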
class FWHMxInputSpec(CommandLineInputSpec):
in_file = File(
desc='input dataset',
argstr='-input %s',
mandatory=True,
exists=True)
out_file = File(
argstr='> %s',
name_source='in_file',
name_template='%s_fwhmx.out',
position=-1,
keep_extension=False,
desc='output file')
out_subbricks = File(
argstr='-out %s',
name_source='in_file',
name_template='%s_subbricks.out',
keep_extension=False,
desc='output file listing the subbricks FWHM')
mask = File(
desc='use only voxels that are nonzero in mask',
argstr='-mask %s',
exists=True)
automask = traits.Bool(
False,
usedefault=True,
argstr='-automask',
desc='compute a mask from THIS dataset, a la 3dAutomask')
detrend = traits.Either(
traits.Bool(), traits.Int(),
default=False,
argstr='-detrend',
xor=['demed'],
usedefault=True,
desc='instead of demed (0th order detrending), detrend to the '
'specified order. If order is not given, the program picks '
'q=NT/30. -detrend disables -demed, and includes -unif.')
demed = traits.Bool(
False,
argstr='-demed',
xor=['detrend'],
desc='If the input dataset has more than one sub-brick (e.g., has a '
'time axis), then subtract the median of each voxel\'s time '
'series before processing FWHM. This will tend to remove '
'intrinsic spatial structure and leave behind the noise.')
unif = traits.Bool(
False,
argstr='-unif',
desc='If the input dataset has more than one sub-brick, then '
'normalize each voxel\'s time series to have the same MAD before '
'processing FWHM.')
out_detrend = File(
argstr='-detprefix %s',
name_source='in_file',
name_template='%s_detrend',
keep_extension=False,
desc='Save the detrended file into a dataset')
geom = traits.Bool(
argstr='-geom',
xor=['arith'],
desc='if in_file has more than one sub-brick, compute the final '
'estimate as the geometric mean of the individual sub-brick FWHM '
'estimates')
arith = traits.Bool(
argstr='-arith',
xor=['geom'],
desc='if in_file has more than one sub-brick, compute the final '
'estimate as the arithmetic mean of the individual sub-brick '
'FWHM estimates')
combine = traits.Bool(
argstr='-combine',
desc='combine the final measurements along each axis')
compat = traits.Bool(
argstr='-compat',
desc='be compatible with the older 3dFWHM')
acf = traits.Either(
traits.Bool(), File(), traits.Tuple(File(exists=True), traits.Float()),
default=False,
usedefault=True,
argstr='-acf',
desc='computes the spatial autocorrelation')
class FWHMxOutputSpec(TraitedSpec):
out_file = File(
exists=True,
desc='output file')
out_subbricks = File(
exists=True,
desc='output file (subbricks)')
out_detrend = File(
desc='output file, detrended')
fwhm = traits.Either(
traits.Tuple(traits.Float(), traits.Float(), traits.Float()),
traits.Tuple(traits.Float(), traits.Float(), traits.Float(), traits.Float()),
desc='FWHM along each axis')
acf_param = traits.Either(
traits.Tuple(traits.Float(), traits.Float(), traits.Float()),
traits.Tuple(traits.Float(), traits.Float(), traits.Float(), traits.Float()),
desc='fitted ACF model parameters')
out_acf = File(
exists=True,
desc='output acf file')
class FWHMx(AFNICommandBase):
"""
Unlike the older 3dFWHM, this program computes FWHMs for all sub-bricks
in the input dataset, each one separately. The output for each one is
written to the file specified by '-out'. The mean (arithmetic or geometric)
of all the FWHMs along each axis is written to stdout. (A non-positive
output value indicates something bad happened; e.g., FWHM in z is meaningless
for a 2D dataset; the estimation method computed incoherent intermediate results.)
For complete details, see the `3dFWHMx Documentation.
    <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dFWHMx.html>`_
Examples
--------
>>> from nipype.interfaces import afni
>>> fwhm = afni.FWHMx()
>>> fwhm.inputs.in_file = 'functional.nii'
>>> fwhm.cmdline # doctest: +ALLOW_UNICODE
'3dFWHMx -input functional.nii -out functional_subbricks.out > functional_fwhmx.out'
>>> res = fwhm.run() # doctest: +SKIP
(Classic) METHOD:
* Calculate ratio of variance of first differences to data variance.
* Should be the same as 3dFWHM for a 1-brick dataset.
(But the output format is simpler to use in a script.)
.. note:: IMPORTANT NOTE [AFNI > 16]
    """
    _cmd = '3dFWHMx'
    input_spec = FWHMxInputSpec
    output_spec = FWHMxOutputSpec
import numpy as np
# Size of the maze
maze_size = (250, 400)
class Node:
# To save the index of the current node
node_index = 0
# To save the index of the parent node
parent_index = 0
# A list of all the possible actions [North, East, South, West, North-East, South-East, South-West, North-West]
possible_actions = np.zeros(8, dtype = 'bool')
# Cost to this node
cost_to_come = 0
# To save the center location
center_location = (0, 0)
# Count of possible child nodes
child_nodes = 0
# Constructor - To be used later
def __init__(self) -> None:
pass
# Method to print all the stored information of the node
def print_current_values (self) -> None:
print(f'The index of the current node is {self.node_index}')
        print(f'The index of the parent node is {self.parent_index}')
print(f'The cost to come of the current node is {self.cost_to_come}')
print(f'The possible actions of the current node are {self.possible_actions}')
print(f'The location of the center is {self.center_location}')
print(f'The possible child nodes are {self.child_nodes}')
# Updating the node
# Input: The location of the node, index and visited, open and obstacle lists
# Output: 1. updates the values of the center location, parent location, possible actions and number of child nodes
# 2. Returns True if there is a possibility to move
    def update_node (self, location, index, list_1, list_2, list_3):
        self.center_location = location
        self.parent_index = index
        return self.action_checker(list_1, list_2, list_3)
# Method to check all the possible directions of the current node
# Input: The current node and visited, open and obstacle lists
# Output: Returns True if there is a possibility to move
def action_checker (self, list_1, list_2, list_3):
self.possible_actions = [self.check_north(self.center_location, list_1, list_2, list_3),
self.check_east(self.center_location, list_1, list_2, list_3),
self.check_south(self.center_location, list_1, list_2, list_3),
self.check_west(self.center_location, list_1, list_2, list_3),
self.check_north_east(self.center_location, list_1, list_2, list_3),
self.check_south_east(self.center_location, list_1, list_2, list_3),
self.check_south_west(self.center_location, list_1, list_2, list_3),
self.check_north_west(self.center_location, list_1, list_2, list_3)]
self.child_count()
        return self.child_nodes != 0
# Method to update the number of child nodes
# Input: The node
# Output: Updates the child nodes in the node object
def child_count(self):
self.child_nodes = np.count_nonzero(self.possible_actions)
# Method to check if the specified node can move North
# Input: Node location and visited, open and obstacle lists
# Output: True if we can move North
def check_north (self, node, list_1, list_2, list_3):
if ((node[0] - 1) == -1) or \
node_finder(((node[0] - 1), node[1]), list_1) or \
node_finder(((node[0] - 1), node[1]), list_2) or \
node_finder(((node[0] - 1), node[1]), list_3):
return False
else:
return True
# Method to check if the specified node can move East
# Input: Node location and visited, open and obstacle lists
# Output: True if we can move East
def check_east (self, node, list_1, list_2, list_3):
if ((node[1] + 1) >= maze_size[1]) or \
node_finder((node[0], (node[1] + 1)), list_1) or \
node_finder((node[0], (node[1] + 1)), list_2) or \
node_finder((node[0], (node[1] + 1)), list_3):
return False
else:
return True
# Method to check if the specified node can move South
# Input: Node location and visited, open and obstacle lists
# Output: True if we can move South
def check_south (self, node, list_1, list_2, list_3):
if ((node[0] + 1) >= maze_size[0]) or \
node_finder(((node[0] + 1), node[1]), list_1) or \
node_finder(((node[0] + 1), node[1]), list_2) or \
node_finder(((node[0] + 1), node[1]), list_3):
return False
else:
return True
# Method to check if the specified node can move West
# Input: Node location and visited, open and obstacle lists
# Output: True if we can move West
def check_west (self, node, list_1, list_2, list_3):
if ((node[1] - 1) == -1) or \
node_finder((node[0], (node[1] - 1)), list_1) or \
node_finder((node[0], (node[1] - 1)), list_2) or \
node_finder((node[0], (node[1] - 1)), list_3):
return False
else:
return True
# Method to check if the specified node can move North-East
# Input: Node location and visited, open and obstacle lists
# Output: True if we can move North-East
def check_north_east (self, node, list_1, list_2, list_3):
if ((node[0] - 1) == -1) or ((node[1] + 1) >= maze_size[1]) or \
node_finder(((node[0] - 1), (node[1] + 1)), list_1) or \
node_finder(((node[0] - 1), (node[1] + 1)), list_2) or \
node_finder(((node[0] - 1), (node[1] + 1)), list_3):
return False
else:
return True
# Method to check if the specified node can move South-East
# Input: Node location and visited, open and obstacle lists
# Output: True if we can move South-East
def check_south_east (self, node, list_1, list_2, list_3):
if ((node[0] + 1) >= maze_size[0]) or ((node[1] + 1) >= maze_size[1]) or \
node_finder(((node[0] + 1), (node[1] + 1)), list_1) or \
node_finder(((node[0] + 1), (node[1] + 1)), list_2) or \
node_finder(((node[0] + 1), (node[1] + 1)), list_3):
return False
else:
return True
# Method to check if the specified node can move South-West
# Input: Node location and visited, open and obstacle lists
# Output: True if we can move South-West
def check_south_west (self, node, list_1, list_2, list_3):
if ((node[0] + 1) >= maze_size[0]) or ((node[1] - 1) == -1) or \
node_finder(((node[0] + 1), (node[1] - 1)), list_1) or \
node_finder(((node[0] + 1), (node[1] - 1)), list_2) or \
node_finder(((node[0] + 1), (node[1] - 1)), list_3):
return False
else:
return True
# Method to check if the specified node can move North-West
# Input: Node location and visited, open and obstacle lists
# Output: True if we can move North-West
def check_north_west (self, node, list_1, list_2, list_3):
if ((node[0] - 1) == -1) or ((node[1] - 1) == -1) or \
node_finder(((node[0] - 1), (node[1] - 1)), list_1) or \
node_finder(((node[0] - 1), (node[1] - 1)), list_2) or \
node_finder(((node[0] - 1), (node[1] - 1)), list_3):
return False
else:
return True
# Method to move North
# Input: The node, node location, parent cost, cost to reach current node
# Output: Updates the node location and cost to come
def move_north (self, node, parent_cost, current_cost):
self.center_location = (node[0] - 1, node[1])
self.cost_to_come = np.round(parent_cost + current_cost, 2)
# Method to move East
# Input: The node, node location, parent cost, cost to reach current node
# Output: Updates the node location and cost to come
def move_east (self, node, parent_cost, current_cost):
self.center_location = (node[0], node[1] + 1)
self.cost_to_come = np.round(parent_cost + current_cost, 2)
# Method to move South
# Input: The node, node location, parent cost, cost to reach current node
# Output: Updates the node location and cost to come
def move_south (self, node, parent_cost, current_cost):
self.center_location = (node[0] + 1, node[1])
self.cost_to_come = np.round(parent_cost + current_cost, 2)
# Method to move West
# Input: The node, node location, parent cost, cost to reach current node
# Output: Updates the node location and cost to come
def move_west (self, node, parent_cost, current_cost):
self.center_location = (node[0], node[1] - 1)
self.cost_to_come = np.round(parent_cost + current_cost, 2)
# Method to move North-East
# Input: The node, node location, parent cost, cost to reach current node
# Output: Updates the node location and cost to come
def move_north_east (self, node, parent_cost, current_cost):
self.center_location = (node[0] - 1, node[1] + 1)
self.cost_to_come = np.round(parent_cost + current_cost, 2)
# Method to move South-East
# Input: The node, node location, parent cost, cost to reach current node
# Output: Updates the node location and cost to come
def move_south_east (self, node, parent_cost, current_cost):
self.center_location = (node[0] + 1, node[1] + 1)
self.cost_to_come = np.round(parent_cost + current_cost, 2)
# Method to move South-West
# Input: The node, node location, parent cost, cost to reach current node
# Output: Updates the node location and cost to come
def move_south_west (self, node, parent_cost, current_cost):
self.center_location = (node[0] + 1, node[1] - 1)
self.cost_to_come = np.round(parent_cost + current_cost, 2)
# Method to move North-West
    # Input: The node, node location, parent cost, cost to reach current node
    # Output: Updates the node location and cost to come
    def move_north_west (self, node, parent_cost, current_cost):
        self.center_location = (node[0] - 1, node[1] - 1)
        self.cost_to_come = np.round(parent_cost + current_cost, 2)
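# --- Illustrative usage sketch (our addition) ---
# node_finder is referenced by the checks above but is not defined in this
# fragment; the stub below is a stand-in that simply tests membership in a
# list of (row, col) locations, so the Node API can be exercised end to end.
def node_finder(location, node_list):
    return location in node_list

def _demo_node_expansion():
    visited, open_list, obstacles = [], [], [(0, 1)]   # arbitrary example data
    start = Node()
    start.update_node((0, 0), 0, visited, open_list, obstacles)
    start.print_current_values()
    # At (0, 0) only South and South-East are free here, so child_nodes == 2.
    return start.child_nodes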
2, "photos are missing in db"
assert tu.album_exists_in_db("FußÄ-Füße"), "unicode album is not in db"
def test_corrupted(self):
# load 1 album with a corrupted file
tu = TestUtils()
assert tu.is_env_clean(tu.conf['lycheepath']), "env not clean"
# load unicode album name
tu.load_photoset("corrupted_file")
# launch lycheesync
src = tu.conf['testphotopath']
lych = tu.conf['lycheepath']
conf = tu.conf['conf']
# run
runner = CliRunner()
result = runner.invoke(main, [src, lych, conf, '-d', '-v'])
# no crash
assert result.exit_code == 0, "process result is ok"
# no import
assert tu.count_fs_photos() == 0, "there are photos in fs"
assert tu.count_db_photos() == 0, "there are photos in db"
assert tu.album_exists_in_db("corrupted_file"), "corrupted_album not in db"
def test_empty_album(self):
# load 1 empty album
tu = TestUtils()
assert tu.is_env_clean(tu.conf['lycheepath']), "env not clean"
# load unicode album name
tu.load_photoset("empty_album")
# launch lycheesync
src = tu.conf['testphotopath']
lych = tu.conf['lycheepath']
conf = tu.conf['conf']
# run
runner = CliRunner()
result = runner.invoke(main, [src, lych, conf, '-v'])
# no crash
assert result.exit_code == 0, "process result is ok"
# no import
        assert tu.count_fs_photos() == 0, "there are photos in fs"
        assert tu.count_db_photos() == 0, "there are photos in db"
assert not(tu.album_exists_in_db("empty_album")), "empty_album in db"
def test_long_album(self):
tu = TestUtils()
assert tu.is_env_clean(tu.conf['lycheepath']), "env not clean"
# get max_width column album name width
maxwidth = tu.get_column_width("lychee_albums", "title")
logger.info("album title length: " + str(maxwidth))
# create long album name
dest_alb_name = 'a' * (maxwidth + 10)
assert len(dest_alb_name) == (maxwidth + 10)
# copy album with name
tu.load_photoset("album1", dest_alb_name)
# launch lycheesync
src = tu.conf['testphotopath']
lych = tu.conf['lycheepath']
conf = tu.conf['conf']
# run
runner = CliRunner()
result = runner.invoke(main, [src, lych, conf, '-v'])
# no crash
assert result.exit_code == 0, "process result is ok"
# there is a max_width album
albums = tu.get_album_ids_titles()
alb_real_name = albums.pop()["title"]
assert len(alb_real_name) == maxwidth, "album len is not " + str(maxwidth)
def test_sha1(self):
"""
        Should also trigger a warning:
        the 'duplicates' album contains photos from album1
"""
tu = TestUtils()
assert tu.is_env_clean(tu.conf['lycheepath']), "env not clean"
# load 1 album with same photo under different name
tu.load_photoset("album1")
# load 2 album with same photo under different name
tu.load_photoset("duplicates")
# launch lycheesync
src = tu.conf['testphotopath']
lych = tu.conf['lycheepath']
conf = tu.conf['conf']
# run
runner = CliRunner()
result = runner.invoke(main, [src, lych, conf, '-v'])
# no crash
assert result.exit_code == 0, "process result is ok"
# no duplicate
assert tu.count_db_albums() == 2, "two albums not created"
assert tu.count_fs_photos() == 2, "there are duplicate photos in fs"
assert tu.count_db_photos() == 2, "there are duplicate photos in db"
assert tu.count_fs_thumb() == 2, "there are duplicate photos in thumb"
def test_album_keep_original_case(self):
# load 1 album with a mixed case name and spaces
# name in db is equal to directory name
tu = TestUtils()
assert tu.is_env_clean(tu.conf['lycheepath']), "env not clean"
# load 1 album with same photo under different name
tu.load_photoset("album1", "AlBum_One")
# launch lycheesync
src = tu.conf['testphotopath']
lych = tu.conf['lycheepath']
conf = tu.conf['conf']
# run
runner = CliRunner()
result = runner.invoke(main, [src, lych, conf, '-v'])
# no crash
assert result.exit_code == 0, "process result is ok"
assert tu.count_db_albums() == 1, "two albums created"
assert tu.count_fs_photos() == 1, "there are duplicate photos in fs"
assert tu.count_db_photos() == 1, "there are duplicate photos in db"
assert tu.count_fs_thumb() == 1, "there are duplicate photos in thumb"
assert tu.get_album_id("AlBum_One"), 'there is no album with this name'
def test_bad_taketime(self):
# load "bad taketime" album name
tu = TestUtils()
assert tu.is_env_clean(tu.conf['lycheepath']), "env not clean"
# load 1 album with same photo under different name
tu.load_photoset("invalid_takedate")
launch_date = datetime.datetime.now()
time.sleep(1)
# launch lycheesync
src = tu.conf['testphotopath']
lych = tu.conf['lycheepath']
conf = tu.conf['conf']
# run
runner = CliRunner()
result = runner.invoke(main, [src, lych, conf, '-v'])
# no crash
assert result.exit_code == 0, "process result is ok"
assert tu.count_db_albums() == 1, "two albums created"
assert tu.count_fs_photos() == 1, "there are duplicate photos in fs"
assert tu.count_db_photos() == 1, "there are duplicate photos in db"
assert tu.count_fs_thumb() == 1, "there are duplicate photos in thumb"
creation_date = tu.get_album_creation_date("invalid_takedate")
creation_date = datetime.datetime.fromtimestamp(creation_date)
assert creation_date > launch_date, "creation date should be now"
def test_invalid_taketime(self):
# load "bad taketime" album name
tu = TestUtils()
assert tu.is_env_clean(tu.conf['lycheepath']), "env not clean"
# load 1 album with same photo under different name
tu.load_photoset("invalid_taketime")
src = tu.conf['testphotopath']
lych = tu.conf['lycheepath']
conf = tu.conf['conf']
# run
runner = CliRunner()
result = runner.invoke(main, [src, lych, conf, '-v'])
# no crash
assert result.exit_code == 0, "process result is ok"
assert tu.count_db_albums() == 1, "too much albums created"
assert tu.count_fs_photos() == 1, "there are duplicate photos in fs"
assert tu.count_db_photos() == 1, "there are duplicate photos in db"
assert tu.count_fs_thumb() == 1, "there are duplicate photos in thumb"
def test_quotes_in_album_name(self):
# load "bad taketime" album name
tu = TestUtils()
assert tu.is_env_clean(tu.conf['lycheepath']), "env not clean"
# load 1 album with same photo under different name
tu.load_photoset("with'\"quotes")
src = tu.conf['testphotopath']
lych = tu.conf['lycheepath']
conf = tu.conf['conf']
# run
runner = CliRunner()
result = runner.invoke(main, [src, lych, conf, '-v'])
# no crash
assert result.exit_code == 0, "process result is ok"
assert tu.count_db_albums() == 1, "too much albums created"
assert tu.count_fs_photos() == 1, "there are duplicate photos in fs"
assert tu.count_db_photos() == 1, "there are duplicate photos in db"
assert tu.count_fs_thumb() == 1, "there are duplicate photos in thumb"
def test_photoid_equal_timestamp(self):
tu = TestUtils()
assert tu.is_env_clean(tu.conf['lycheepath']), "env not clean"
# load 1 album with same photo under different name
tu.load_photoset("album3")
# launch lycheesync
src = tu.conf['testphotopath']
lych = tu.conf['lycheepath']
conf = tu.conf['conf']
# normal mode
before_launch = datetime.datetime.now()
time.sleep(1.1)
# run
runner = CliRunner()
result = runner.invoke(main, [src, lych, conf, '-v'])
# no crash
assert result.exit_code == 0, "process result is ok"
time.sleep(1.1)
after_launch = datetime.datetime.now()
photos = tu.get_photos(tu.get_album_id('album3'))
for p in photos:
logger.info(p)
            # drop the last 4 characters (the id is a unix timestamp followed by 4 extra digits)
ts = str(p['id'])[:-4]
# timestamp to date
dt = datetime.datetime.fromtimestamp(int(ts))
logger.info(dt)
assert after_launch > dt, "date from id not < date after launch"
assert dt > before_launch, "date from id not > date before launch"
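    # --- Illustrative sketch (our addition) ---
    # The assertions above rely on lychee building each photo id from a unix
    # timestamp followed by 4 extra digits; this hypothetical helper shows the
    # decoding used in test_photoid_equal_timestamp.
    @staticmethod
    def _decode_photo_id(photo_id):
        ts = int(str(photo_id)[:-4])                   # drop the 4 trailing digits
        return datetime.datetime.fromtimestamp(ts)     # photo creation time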
def test_shutter_speed(self):
tu = TestUtils()
assert tu.is_env_clean(tu.conf['lycheepath']), "env not clean"
# load 1 album with same photo under different name
tu.load_photoset("rotation")
# launch lycheesync
src = tu.conf['testphotopath']
lych = tu.conf['lycheepath']
conf = tu.conf['conf']
# run
runner = CliRunner()
result = runner.invoke(main, [src, lych, conf, '-v'])
# no crash
assert result.exit_code == 0, "process result is ok"
photos = tu.get_photos(tu.get_album_id('rotation'))
for p in photos:
if p['title'] == 'P1010319.JPG':
assert p['shutter'] == '1/60 s', "shutter {} not equal 1/60 s".format(p['shutter'])
assert p['focal'] == '4.9 mm', "focal {} not equal 4.9 mm".format(p['focal'])
assert p['iso'] == '100', "iso {} not equal 100".format(p['iso'])
assert p['aperture'] == 'F3.3', "aperture {} not equal F3.3".format(p['aperture'])
if p['title'] == 'P1010328.JPG':
assert p['shutter'] == '1/30 s', "shutter {} not equal 1/30 s".format(p['shutter'])
assert p['focal'] == '4.9 mm', "focal {} not equal 4.9 mm".format(p['focal'])
assert p['iso'] == '400', "iso {} not equal 400".format(p['iso'])
assert p['aperture'] == 'F3.3', "aperture {} not equal F3.3".format(p['aperture'])
def test_rotation(self):
tu = TestUtils()
assert tu.is_env_clean(tu.conf['lycheepath']), "env not clean"
# load 1 album with same photo under different name
tu.load_photoset("rotation")
# launch lycheesync
src = tu.conf['testphotopath']
lych = tu.conf['lycheepath']
conf = tu.conf['conf']
# run
runner = CliRunner()
result = runner.invoke(main, [src, lych, conf, '-v'])
# no crash
assert result.exit_code == 0, "process result is ok"
photos = tu.get_photos(tu.get_album_id('rotation'))
for p in photos:
# rotation tag is gone
pfullpath = os.path.join(lych, "uploads", "big", p['url'])
img = Image.open(pfullpath)
assert "exif" in img.info, "Pas d'info exif"
exif_dict = piexif.load(img.info["exif"])
assert exif_dict["0th"][piexif.ImageIFD.Orientation] == 1, "Exif rotation should be 1"
img.close()
def test_launch_every_test_with_cli_runner(self):
""" conf borg is shared between test and cli, this is potentially bad"""
try:
# load "bad taketime" album name
tu = TestUtils()
tu.load_photoset("album3")
# launch lycheesync
src = tu.conf['testphotopath']
lych = tu.conf['lycheepath']
conf = tu.conf['conf']
# run
runner = CliRunner()
result = runner.invoke(main, [src, lych, conf, '-v'])
# no crash
assert result.exit_code == 0, "process result is ok"
except Exception as e:
logger.exception(e)
            assert False
import argparse
import json
import multiprocessing
import os
import platform
import re
import shutil
import signal
import stat
import subprocess
import time
import traceback
import urllib
import uuid
import zipfile
from os.path import expanduser
import psutil
import requests
import yaml
from fedml.cli.edge_deployment.mqtt_manager import MqttManager
from fedml.cli.edge_deployment.yaml_utils import load_yaml_config
from fedml.mlops import MLOpsMetrics
import click
class FedMLClientRunner:
def __init__(self, args, edge_id, request_json=None):
self.mqtt_mgr = None
self.client_mqtt_mgr = None
self.edge_id = edge_id
self.process = None
self.args = args
self.request_json = request_json
self.version = args.version
self.device_id = args.device_id
self.cloud_region = args.cloud_region
self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
if args.current_running_dir is not None:
self.cur_dir = args.current_running_dir
self.sudo_cmd = ""
self.is_mac = False
if platform.system() == "Darwin":
self.is_mac = True
self.agent_config = None
self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
self.fedml_data_dir = self.fedml_data_base_package_dir
self.fedml_config_dir = os.path.join("/", "fedml", "conf")
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {"${FEDSYS.RUN_ID}": "",
"${FEDSYS.PRIVATE_LOCAL_DATA}": "",
"${FEDSYS.CLIENT_ID_LIST}": "",
"${FEDSYS.SYNTHETIC_DATA_URL}": "",
"${FEDSYS.IS_USING_LOCAL_DATA}": "",
"${FEDSYS.CLIENT_NUM}": "",
"${FEDSYS.CLIENT_INDEX}": "",
"${FEDSYS.CLIENT_OBJECT_LIST}": "",
"${FEDSYS.LOG_SERVER_URL}": ""}
self.container_name = None
self.mlops_metrics = None
click.echo("Current directory of client agent: " + self.cur_dir)
@staticmethod
def generate_yaml_doc(run_config_object, yaml_file):
try:
file = open(yaml_file, 'w', encoding='utf-8')
yaml.dump(run_config_object, file)
file.close()
except Exception as e:
click.echo("Generate yaml file.")
def build_dynamic_constrain_variables(self, run_id, run_config, unzip_package_path):
data_config = run_config["data_config"]
server_edge_id_list = self.request_json["edgeids"]
local_edge_id_list = [1]
local_edge_id_list[0] = self.edge_id
is_using_local_data = 0
private_data_dir = data_config["privateLocalData"]
synthetic_data_url = data_config["syntheticDataUrl"]
edges = self.request_json["edges"]
# if private_data_dir is not None \
# and len(str(private_data_dir).strip(' ')) > 0:
# is_using_local_data = 1
if private_data_dir is None or len(str(private_data_dir).strip(' ')) <= 0:
params_config = run_config.get("parameters", None)
private_data_dir = os.path.join(unzip_package_path, "fedml", "data")
if synthetic_data_url is None or len(str(synthetic_data_url)) <= 0:
synthetic_data_url = private_data_dir
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(' ', '')
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(local_edge_id_list).replace(' ', '')
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(' ', '')
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data)
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list)
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = server_edge_id_list.index(self.edge_id) + 1
client_objects = str(json.dumps(edges))
client_objects = client_objects.replace(" ", "").replace("\n", "").replace('"', '\\"')
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_OBJECT_LIST}"] = client_objects
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.LOG_SERVER_URL}"] = self.agent_config["ml_ops_config"][
"LOG_SERVER_URL"]
def unzip_file(self, zip_file, unzip_file_path):
result = False
if zipfile.is_zipfile(zip_file):
with zipfile.ZipFile(zip_file, 'r') as zipf:
zipf.extractall(unzip_file_path)
result = True
return result
def retrieve_and_unzip_package(self, package_name, package_url):
package_file_no_extension = str(package_name).split('.')[0]
home_dir = expanduser("~")
local_package_path = os.path.join(home_dir, "fedml-client", "fedml_packages")
try:
os.makedirs(local_package_path)
except Exception as e:
click.echo("make dir")
local_package_file = os.path.join(local_package_path, os.path.basename(package_url))
if not os.path.exists(local_package_file):
urllib.request.urlretrieve(package_url, local_package_file)
unzip_package_path = local_package_path
try:
shutil.rmtree(os.path.join(unzip_package_path, package_file_no_extension), ignore_errors=True)
except Exception as e:
pass
self.unzip_file(local_package_file, unzip_package_path)
unzip_package_path = os.path.join(unzip_package_path, package_file_no_extension)
return unzip_package_path
def update_local_fedml_config(self, run_id, run_config):
packages_config = run_config["packages_config"]
# Copy config file from the client
unzip_package_path = self.retrieve_and_unzip_package(packages_config["linuxClient"],
packages_config["linuxClientUrl"])
fedml_local_config_file = unzip_package_path + os.path.join("/", "conf", "fedml.yaml")
# Load the above config to memory
config_from_container = load_yaml_config(fedml_local_config_file)
container_entry_file_config = config_from_container["entry_config"]
container_dynamic_args_config = config_from_container["dynamic_args"]
entry_file = container_entry_file_config["entry_file"]
conf_file = container_entry_file_config["conf_file"]
full_conf_path = os.path.join(unzip_package_path, "fedml", "config", os.path.basename(conf_file))
home_dir = expanduser("~")
fedml_package_home_dir = os.path.join(home_dir, "fedml-client")
# Dynamically build constrain variable with realtime parameters from server
self.build_dynamic_constrain_variables(run_id, run_config, fedml_package_home_dir)
# Update entry arguments value with constrain variable values with realtime parameters from server
# currently we support the following constrain variables:
        # ${FEDSYS.RUN_ID}: a run id representing one entire Federated Learning flow
        # ${FEDSYS.PRIVATE_LOCAL_DATA}: private local data path in the Federated Learning client
        # ${FEDSYS.CLIENT_ID_LIST}: client list in one entire Federated Learning flow
        # ${FEDSYS.SYNTHETIC_DATA_URL}: synthetic data url from the server;
        # if this value is not null, the client will download data from this URL to use it as
        # the federated training data set
        # ${FEDSYS.IS_USING_LOCAL_DATA}: whether to use private local data as the federated training data set
container_dynamic_args_config["data_cache_dir"] = "${FEDSYS.PRIVATE_LOCAL_DATA}"
for constrain_variable_key, constrain_variable_value in self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES.items():
for argument_key, argument_value in container_dynamic_args_config.items():
if argument_value is not None and str(argument_value).find(constrain_variable_key) == 0:
replaced_argument_value = str(argument_value).replace(constrain_variable_key,
str(constrain_variable_value))
container_dynamic_args_config[argument_key] = replaced_argument_value
# Merge all container new config sections as new config dictionary
container_config_to_yaml = dict()
container_config_to_yaml["entry_config"] = container_entry_file_config
container_config_to_yaml["dynamic_args"] = container_dynamic_args_config
container_config_to_yaml["dynamic_args"]["config_version"] = self.args.config_version
container_dynamic_args_config["mqtt_config_path"] = os.path.join(unzip_package_path,
"fedml", "config",
os.path.basename(container_dynamic_args_config[
"mqtt_config_path"]))
container_dynamic_args_config["s3_config_path"] = os.path.join(unzip_package_path,
"fedml", "config",
os.path.basename(container_dynamic_args_config[
"s3_config_path"]))
log_file_dir = os.path.join(fedml_package_home_dir, "fedml", "logs")
try:
os.makedirs(log_file_dir)
except Exception as e:
pass
container_config_to_yaml["dynamic_args"]["log_file_dir"] = log_file_dir
# Save new config dictionary to local file
fedml_updated_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml")
FedMLClientRunner.generate_yaml_doc(container_config_to_yaml, fedml_updated_config_file)
# Build dynamic arguments and set arguments to fedml config object
self.build_dynamic_args(container_config_to_yaml, unzip_package_path)
return unzip_package_path, container_config_to_yaml
def build_dynamic_args(self, package_conf_object, base_dir):
fedml_conf_file = package_conf_object["entry_config"]["conf_file"]
print("fedml_conf_file:" + fedml_conf_file)
fedml_conf_path = os.path.join(base_dir, "fedml", "config", os.path.basename(fedml_conf_file))
fedml_conf_object = load_yaml_config(fedml_conf_path)
package_dynamic_args = package_conf_object["dynamic_args"]
fedml_conf_object["comm_args"]["mqtt_config_path"] = package_dynamic_args["mqtt_config_path"]
fedml_conf_object["comm_args"]["s3_config_path"] = package_dynamic_args["s3_config_path"]
fedml_conf_object["common_args"]["using_mlops"] = True
fedml_conf_object["train_args"]["run_id"] = package_dynamic_args["run_id"]
fedml_conf_object["train_args"]["client_id_list"] = package_dynamic_args["client_id_list"]
fedml_conf_object["train_args"]["client_num_in_total"] = int(package_dynamic_args["client_num_in_total"])
fedml_conf_object["train_args"]["client_num_per_round"] = int(package_dynamic_args["client_num_in_total"])
fedml_conf_object["device_args"]["worker_num"] = int(package_dynamic_args["client_num_in_total"])
fedml_conf_object["data_args"]["data_cache_dir"] = package_dynamic_args["data_cache_dir"]
fedml_conf_object["tracking_args"]["log_file_dir"] = package_dynamic_args["log_file_dir"]
fedml_conf_object["tracking_args"]["log_server_url"] = package_dynamic_args["log_server_url"]
bootstrap_script_file = fedml_conf_object["environment_args"]["bootstrap"]
bootstrap_script_path = os.path.join(base_dir, "fedml", "config", os.path.basename(bootstrap_script_file))
try:
os.makedirs(package_dynamic_args["data_cache_dir"])
except Exception as e:
pass
fedml_conf_object["dynamic_args"] = package_dynamic_args
FedMLClientRunner.generate_yaml_doc(fedml_conf_object, fedml_conf_path)
try:
bootstrap_stat = os.stat(bootstrap_script_path)
os.chmod(bootstrap_script_path, bootstrap_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
os.system(bootstrap_script_path)
except Exception as e:
click.echo("Exception when executing bootstrap.sh: {}", traceback.format_exc())
def build_image_unique_id(self, run_id, run_config):
config_name = str(run_config.get("configName", "run_" + str(run_id)))
config_creater = str(run_config.get("userId", "user_" + str(run_id)))
image_unique_id = re.sub('[^a-zA-Z0-9_-]', '', str(config_name + "_" + config_creater))
image_unique_id = image_unique_id.lower()
return image_unique_id
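    # --- Illustrative sketch (our addition, not part of FedML) ---
    # Mirrors build_image_unique_id for a hypothetical run config to show the
    # sanitisation: characters outside [a-zA-Z0-9_-] are stripped from
    # "<configName>_<userId>" and the result is lower-cased.
    @staticmethod
    def _demo_build_image_unique_id():
        sanitized = re.sub('[^a-zA-Z0-9_-]', '', "My Config!" + "_" + "Alice")
        return sanitized.lower()  # -> 'myconfig_alice'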
def run(self):
click.echo("start_run: " + json.dumps(self.request_json))
run_id = self.request_json["runId"]
run_config = self.request_json["run_config"]
data_config = run_config["data_config"]
packages_config = run_config["packages_config"]
# get training params
private_local_data_dir = data_config.get("privateLocalData", "")
is_using_local_data = 0
# if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0:
# is_using_local_data = 1
# start a run according to the hyper-parameters
# fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + str(run_id) + "_edge_" + str(edge_id)
fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data")
fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config")
if is_using_local_data:
fedml_local_data_dir = private_local_data_dir
self.fedml_data_dir = self.fedml_data_local_package_dir
# update local config with real time parameters from server and dynamically replace variables value
unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config)
entry_file_config = fedml_config_object["entry_config"]
dynamic_args_config = fedml_config_object["dynamic_args"]
entry_file = os.path.basename(entry_file_config["entry_file"])
conf_file = entry_file_config["conf_file"]
FedMLClientRunner.cleanup_edge_learning_process()
os.chdir(os.path.join(unzip_package_path, "fedml"))
python_program = 'python'
python_version_str = os.popen("python --version").read()
if python_version_str.find("Python 3.") == -1:
python_version_str = os.popen("python3 --version").read()
if python_version_str.find("Python 3.") != -1:
python_program = 'python3'
process = subprocess.Popen([python_program, entry_file,
'--cf', conf_file, '--rank', str(dynamic_args_config["rank"])])
FedMLClientRunner.save_edge_learning_process(process.pid)
process.wait()
self.reset_devices_status(self.edge_id)
def reset_devices_status(self, edge_id):
# if self.client_mqtt_mgr is None:
# self.client_mqtt_mgr = MqttManager(
# self.agent_config["mqtt_config"]["BROKER_HOST"],
# self.agent_config["mqtt_config"]["BROKER_PORT"],
# self.agent_config["mqtt_config"]["MQTT_USER"],
# self.agent_config["mqtt_config"]["MQTT_PWD"],
# self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"],
# "FLClient_Agent-Train",
# )
# mlops_logger = MLOpsMetrics()
# mlops_logger.set_messenger(self.client_mqtt_mgr)
self.mlops_metrics.report_client_training_status(edge_id, MqttManager.MSG_MLOPS_CLIENT_STATUS_FINISHED)
time.sleep(3)
def stop_run(self):
if self.process is not None:
try:
self.process.terminate()
self.process.join()
self.process = None
except Exception as e:
pass
FedMLClientRunner.cleanup_edge_learning_process()
FedMLClientRunner.cleanup_edge_run_process()
click.echo("Stop run successfully.")
self.mlops_metrics.report_client_training_status(self.edge_id, MqttManager.MSG_MLOPS_CLIENT_STATUS_FINISHED)
def callback_start_train(self, topic, payload):
click.echo("callback_start_train: topic = %s, payload = %s" % (topic, payload))
# get training params
request_json = json.loads(payload)
run_id = request_json["runId"]
# Terminate previous process about starting or stopping run command
if self.process is not None:
try:
self.process.terminate()
self.process.join()
self.process = None
except Exception as e:
pass
FedMLClientRunner.cleanup_edge_run_process()
# Start cross-silo server with multi processing mode
self.request_json = request_json
self.container_name = "fedml_container_run_" + str(run_id) + "_edge_" + str(self.edge_id)
self.process = multiprocessing.Process(target=self.run)
self.process.start()
FedMLClientRunner.save_edge_run_process(self.process.pid)
#self.run()
def callback_stop_train(self, topic, payload):
click.echo("callback_stop_train: topic = %s, payload = %s" % (topic, payload))
# Notify MLOps with the stopping message
self.mlops_metrics.report_client_training_status(self.edge_id,
MqttManager.MSG_MLOPS_CLIENT_STATUS_STOPPING)
request_json = json.loads(payload)
run_id = request_json["runId"]
click.echo("Stopping run...")
click.echo("Stop run with multiprocessing.")
# Stop cross-silo server with multi processing mode
self.request_json = request_json
self.container_name = "fedml_container_run_" + str(run_id) + "_edge_" + str(self.edge_id)
multiprocessing.Process(target=self.stop_run).start()
self.mlops_metrics.report_client_training_status(self.edge_id, MqttManager.MSG_MLOPS_CLIENT_STATUS_FINISHED)
def cleanup_client_with_finished_status(self):
self.stop_run()
@staticmethod
def cleanup_edge_run_process():
try:
home_dir = expanduser("~")
local_pkg_data_dir = os.path.join(home_dir, "fedml-client", "fedml", "data")
edge_process_id_file = os.path.join(local_pkg_data_dir, "edge-sub-process.id")
edge_process_info = load_yaml_config(edge_process_id_file)
edge_process_id = edge_process_info.get('process_id', None)
if edge_process_id is not None:
try:
edge_process = psutil.Process(edge_process_id)
for edge_sub_process in edge_process.children():
os.kill(edge_sub_process.pid, signal.SIGTERM)
if edge_process is not None:
os.kill(edge_process.pid, signal.SIGTERM)
except Exception as e:
pass
yaml_object = {}
yaml_object['process_id'] = -1
FedMLClientRunner.generate_yaml_doc(yaml_object, edge_process_id_file)
except Exception as e:
pass
@staticmethod
def save_edge_run_process(edge_process_id):
try:
home_dir = expanduser("~")
local_pkg_data_dir = os.path.join(home_dir, "fedml-client", "fedml", "data")
edge_process_id_file = os.path.join(local_pkg_data_dir, "edge-sub-process.id")
yaml_object = {}
yaml_object['process_id'] = edge_process_id
FedMLClientRunner.generate_yaml_doc(yaml_object, edge_process_id_file)
except Exception as e:
pass
@staticmethod
def cleanup_edge_learning_process():
try:
home_dir = expanduser("~")
local_pkg_data_dir = os.path.join(home_dir, "fedml-client", "fedml", "data")
edge_process_id_file = os.path.join(local_pkg_data_dir, "edge-learning-process.id")
edge_process_info = load_yaml_config(edge_process_id_file)
edge_process_id = edge_process_info.get('process_id', None)
if edge_process_id is not None:
try:
edge_process = psutil.Process(edge_process_id)
for edge_sub_process in edge_process.children():
os.kill(edge_sub_process.pid, signal.SIGTERM)
if edge_process is not None:
os.kill(edge_process.pid, signal.SIGTERM)
except Exception as e:
pass
yaml_object = {}
yaml_object['process_id'] = -1
FedMLClientRunner.generate_yaml_doc(yaml_object, edge_process_id_file)
except Exception as e:
pass
@staticmethod
def save_edge_learning_process(edge_learning_id):
try:
            home_dir = expanduser("~")
            local_pkg_data_dir = os.path.join(home_dir, "fedml-client", "fedml", "data")
            edge_process_id_file = os.path.join(local_pkg_data_dir, "edge-learning-process.id")
            yaml_object = {}
            yaml_object['process_id'] = edge_learning_id
            FedMLClientRunner.generate_yaml_doc(yaml_object, edge_process_id_file)
        except Exception as e:
            pass
"""
Copyright (c) 2014 NavPy Developers. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in
LICENSE.txt
"""
import numpy as np
from . import wgs84
from ..utils import input_check_Nx3 as _input_check_Nx3
from ..utils import input_check_Nx3x3 as _input_check_Nx3x3
from ..utils import input_check_Nx1 as _input_check_Nx1
def angle2dcm(rotAngle1, rotAngle2, rotAngle3, input_unit='rad',
rotation_sequence='ZYX', output_type='ndarray'):
"""
This function converts Euler Angle into Direction Cosine Matrix (DCM).
    The DCM is described by three successive rotations rotAngle1, rotAngle2, and
rotAngle3 about the axes described by the rotation_sequence.
The default rotation_sequence='ZYX' is the aerospace sequence and rotAngle1
is the yaw angle, rotAngle2 is the pitch angle, and rotAngle3 is the roll
angle. In this case DCM transforms a vector from the locally level
coordinate frame (i.e. the NED frame) to the body frame.
This function can batch process a series of rotations (e.g., time series
of Euler angles).
Parameters
----------
rotAngle1, rotAngle2, rotAngle3 : angles {(N,), (N,1), or (1,N)}
They are a sequence of angles about successive axes described by
rotation_sequence.
input_unit : {'rad', 'deg'}, optional
Rotation angles. Default is 'rad'.
rotation_sequence : {'ZYX'}, optional
Rotation sequences. Default is 'ZYX'.
output_type : {'ndarray','matrix'}, optional
Output type. Default is 'ndarray'.
Returns
--------
C : {3x3} Direction Cosine Matrix
Notes
-----
Programmer: <NAME>
Created: May 03, 2011
Last Modified: January 12, 2016
"""
rotAngle1, N1 = _input_check_Nx1(rotAngle1)
rotAngle2, N2 = _input_check_Nx1(rotAngle2)
rotAngle3, N3 = _input_check_Nx1(rotAngle3)
if(N1 != N2 or N1 != N3):
raise ValueError('Inputs are not of same dimensions')
if(N1 > 1 and output_type != 'ndarray'):
raise ValueError('Matrix output requires scalar inputs')
R3 = np.zeros((N1, 3, 3))
R2 = np.zeros((N1, 3, 3))
R1 = np.zeros((N1, 3, 3))
if(input_unit == 'deg'):
rotAngle1 = np.deg2rad(rotAngle1)
rotAngle2 = np.deg2rad(rotAngle2)
rotAngle3 = np.deg2rad(rotAngle3)
R3[:, 2, 2] = 1.0
R3[:, 0, 0] = np.cos(rotAngle1)
R3[:, 0, 1] = np.sin(rotAngle1)
R3[:, 1, 0] = -np.sin(rotAngle1)
R3[:, 1, 1] = np.cos(rotAngle1)
R2[:, 1, 1] = 1.0
R2[:, 0, 0] = np.cos(rotAngle2)
R2[:, 0, 2] = -np.sin(rotAngle2)
R2[:, 2, 0] = np.sin(rotAngle2)
R2[:, 2, 2] = np.cos(rotAngle2)
R1[:, 0, 0] = 1.0
R1[:, 1, 1] = np.cos(rotAngle3)
R1[:, 1, 2] = np.sin(rotAngle3)
R1[:, 2, 1] = -np.sin(rotAngle3)
R1[:, 2, 2] = np.cos(rotAngle3)
if rotation_sequence == 'ZYX':
try:
# Equivalent to C = R1.dot(R2.dot(R3)) for each of N inputs but
# implemented efficiently in C extension
C = np.einsum('nij, njk, nkm -> nim', R1, R2, R3)
except AttributeError:
# Older NumPy without einsum
C = np.zeros((N1, 3, 3))
            for i, (r1, r2, r3) in enumerate(zip(R1, R2, R3)):
                C[i] = r1.dot(r2.dot(r3))
else:
raise NotImplementedError('Rotation sequences other than ZYX are not currently implemented')
if(N1 == 1):
C = C[0]
if(output_type == 'matrix'):
C = np.matrix(C)
return C
def dcm2angle(C, output_unit='rad', rotation_sequence='ZYX'):
"""
This function converts a Direction Cosine Matrix (DCM) into the three
rotation angles.
    The DCM is described by three successive rotations rotAngle1, rotAngle2, and
rotAngle3 about the axes described by the rotation_sequence.
The default rotation_sequence='ZYX' is the aerospace sequence and rotAngle1
is the yaw angle, rotAngle2 is the pitch angle, and rotAngle3 is the roll
angle. In this case DCM transforms a vector from the locally level
coordinate frame (i.e. the NED frame) to the body frame.
This function can batch process a series of rotations (e.g., time series
of direction cosine matrices).
Parameters
----------
C : {(3,3), (N,3,3), or (3,3,N)}
direction consine matrix that rotates the vector from the first frame
to the second frame according to the specified rotation_sequence.
output_unit : {'rad', 'deg'}, optional
Rotation angles. Default is 'rad'.
rotation_sequence : {'ZYX'}, optional
Rotation sequences. Default is 'ZYX'.
Returns
-------
rotAngle1, rotAngle2, rotAngle3 : angles
They are a sequence of angles about successive axes described by
rotation_sequence.
Notes
-----
    The returned rotAngle1 and rotAngle3 will be between +/- 180 deg (+/- pi rad).
In contrast, rotAngle2 will be in the interval +/- 90 deg (+/- pi/2 rad).
In the 'ZYX' or '321' aerospace sequence, that means the pitch angle
returned will always be inside the closed interval +/- 90 deg (+/- pi/2 rad).
Applications where pitch angles near or larger than 90 degrees in magnitude
    are expected should use alternate attitude parameterizations like
quaternions.
"""
C, N = _input_check_Nx3x3(C)
if(rotation_sequence == 'ZYX'):
rotAngle1 = np.arctan2(C[..., 0, 1], C[..., 0, 0]) # Yaw
rotAngle2 = -np.arcsin(C[..., 0, 2]) # Pitch
rotAngle3 = np.arctan2(C[..., 1, 2], C[..., 2, 2]) # Roll
else:
raise NotImplementedError('Rotation sequences other than ZYX are not currently implemented')
if(output_unit == 'deg'):
rotAngle1 = np.rad2deg(rotAngle1)
rotAngle2 = np.rad2deg(rotAngle2)
rotAngle3 = np.rad2deg(rotAngle3)
return rotAngle1, rotAngle2, rotAngle3
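# --- Illustrative sketch (our addition, not part of NavPy) ---
# Round-trips an arbitrary ZYX Euler-angle set through angle2dcm and
# dcm2angle; the angle values below are made-up example numbers well inside
# the principal ranges, so the original angles should be recovered.
def _demo_angle_roundtrip():
    yaw, pitch, roll = np.deg2rad(30.0), np.deg2rad(10.0), np.deg2rad(-5.0)
    C = angle2dcm(yaw, pitch, roll)        # rotates NED-frame vectors into the body frame
    yaw2, pitch2, roll2 = dcm2angle(C)     # recover the angles from the DCM
    assert np.allclose([yaw, pitch, roll], [yaw2, pitch2, roll2])
    return C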
def omega2rates(pitch, roll, input_unit='rad',
euler_angles_order='roll_pitch_yaw', output_type='ndarray'):
"""
This function is used to create the transformation matrix to go from:
[p, q, r] --> [roll_rate, pitch_rate, yaw_rate]
where pqr are xyz body rotation-rate measurements expressed in body frame.
Yaw, pitch, and roll are the Euler angles. We assume the Euler angles are
3-2-1 (i.e Yaw -> Pitch -> Roll) transformations that go from navigation-
frame to body-frame.
Parameters
----------
pitch : pitch angle, units of input_unit.
roll : roll angle , units of input_unit.
input_unit : units for input angles {'rad', 'deg'}, optional
euler_angles_order : {'roll_pitch_yaw', 'yaw_pitch_roll'}, optional
Assumed order of Euler Angles attitude state vector (see ``Notes``).
output_type : {'ndarray' or 'matrix'}, optional
Numpy array (default) or matrix
Returns
-------
R : transformation matrix, from xyz body-rate to Euler angle-rates
numpy 'output_type' 3x3 (Note: default return variable is an ARRAY,
not a matrix)
Notes
-----
Since the returned transformation matrix is used to transform one vector
to another, the assumed attitude variables order matters.
The ``euler_angles_order`` parameter can be used to specify the assumed
order.
The difference is demonstrated by example:
By default euler_angles_order='roll_pitch_yaw'
R = omega2rates(pitch, roll)
[ roll_rate] [omega_x]
[pitch_rate] = dot(R,[omega_y])
[ yaw_rate] [omega_z]
Now assume our attitude state is [yaw, pitch, roll].T
R = omega2rates(pitch, roll, euler_angles_order='yaw_pitch_roll')
[ yaw_rate] [omega_x]
[pitch_rate] = dot(R,[omega_y])
[ roll_rate] [omega_z]
References
----------
[1] Equation 2.74, Aided Navigation: GPS with High Rate Sensors,
<NAME> 2008
[2] omega2rates.m function at:
http://www.gnssapplications.org/downloads/chapter7/Chapter7_GNSS_INS_Functions.tar.gz
"""
# Apply necessary unit transformations.
if input_unit == 'rad':
pitch_rad, roll_rad = pitch, roll
elif input_unit == 'deg':
pitch_rad, roll_rad = np.radians([pitch, roll])
# Build transformation matrix.
s_r, c_r = np.sin( roll_rad), np.cos( roll_rad)
s_p, c_p = np.sin(pitch_rad), np.cos(pitch_rad)
# Check for singularities (i.e. pitch near 90 degrees)
    singular_tol = 1e-2  # flags anything between [90 +/- .5 deg]
if abs(c_p) < singular_tol:
print('WARNING (omega2rates): Operating near pitch = 90 deg singularity. NaN returned. ')
return np.nan
if euler_angles_order == 'roll_pitch_yaw':
R = np.array(
[[ 1, s_r*s_p/c_p, c_r*s_p/c_p],
[ 0, c_r , -s_r ],
[ 0, s_r/c_p , c_r/c_p ]], dtype=float)
elif euler_angles_order == 'yaw_pitch_roll':
R = np.array(
[[ 0, s_r/c_p , c_r/c_p ],
[ 0, c_r , -s_r ],
[ 1, s_r*s_p/c_p, c_r*s_p/c_p]], dtype=float)
if output_type == 'ndarray':
pass
elif output_type=='matrix':
R = np.matrix(R)
else:
print("WARNING (omega2rates): Unrecognized 'output_type' requested.")
print("NaN is returned.")
return np.nan
return R
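# --- Illustrative sketch (our addition, not part of NavPy) ---
# Maps body rates [p, q, r] to Euler angle rates with omega2rates; the pitch,
# roll and rate values are arbitrary small-angle examples in radians.
def _demo_body_rates_to_euler_rates():
    R = omega2rates(pitch=0.1, roll=0.05)              # default euler_angles_order='roll_pitch_yaw'
    pqr = np.array([0.01, 0.02, -0.005])               # body rates [rad/s]
    roll_rate, pitch_rate, yaw_rate = R.dot(pqr)
    return roll_rate, pitch_rate, yaw_rate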
def angle2quat(rotAngle1,rotAngle2,rotAngle3,
input_unit='rad',rotation_sequence='ZYX'):
"""
Convert a sequence of rotation angles to an equivalent unit quaternion
This function can take inputs in either degree or radians, and can also
batch process a series of rotations (e.g., time series of Euler angles).
By default this function assumes aerospace rotation sequence but can be
changed using the ``rotation_sequence`` keyword argument.
Parameters
----------
rotAngle1, rotAngle2, rotAngle3 : {(N,), (N,1), or (1,N)}
They are a sequence of angles about successive axes described by rotation_sequence.
input_unit : {'rad', 'deg'}, optional
Rotation angles. Default is 'rad'.
rotation_sequence : {'ZYX'}, optional
Rotation sequences. Default is 'ZYX'.
Returns
-------
    q0 : {(N,)} array like scalar component of the quaternion
qvec : {(N,3)} array like vector component of the quaternion
Notes
-----
Convert rotation angles to unit quaternion that transforms a vector in F1 to
F2 according to
    :math:`v_q^{F2} = q^{-1} \otimes v_q^{F1} \otimes q`
# TRO-HIT/nipype: nipype/interfaces/freesurfer/preprocess.py
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Provides interfaces to various commands provided by FreeSurfer
"""
import os
import os.path as op
from glob import glob
import shutil
import sys
import numpy as np
from nibabel import load
from ... import logging, LooseVersion
from ...utils.filemanip import fname_presuffix, check_depends
from ..io import FreeSurferSource
from ..base import (
TraitedSpec,
File,
traits,
Directory,
InputMultiPath,
OutputMultiPath,
CommandLine,
CommandLineInputSpec,
isdefined,
)
from .base import FSCommand, FSTraitedSpec, FSTraitedSpecOpenMP, FSCommandOpenMP, Info
from .utils import copy2subjdir
__docformat__ = "restructuredtext"
iflogger = logging.getLogger("nipype.interface")
# Keeping this to avoid breaking external programs that depend on it, but
# this should not be used internally
FSVersion = Info.looseversion().vstring
class ParseDICOMDirInputSpec(FSTraitedSpec):
dicom_dir = Directory(
exists=True,
argstr="--d %s",
mandatory=True,
desc="path to siemens dicom directory",
)
dicom_info_file = File(
"dicominfo.txt",
argstr="--o %s",
usedefault=True,
desc="file to which results are written",
)
sortbyrun = traits.Bool(argstr="--sortbyrun", desc="assign run numbers")
summarize = traits.Bool(
argstr="--summarize", desc="only print out info for run leaders"
)
class ParseDICOMDirOutputSpec(TraitedSpec):
dicom_info_file = File(exists=True, desc="text file containing dicom information")
class ParseDICOMDir(FSCommand):
"""Uses mri_parse_sdcmdir to get information from dicom directories
Examples
--------
>>> from nipype.interfaces.freesurfer import ParseDICOMDir
>>> dcminfo = ParseDICOMDir()
>>> dcminfo.inputs.dicom_dir = '.'
>>> dcminfo.inputs.sortbyrun = True
>>> dcminfo.inputs.summarize = True
>>> dcminfo.cmdline
'mri_parse_sdcmdir --d . --o dicominfo.txt --sortbyrun --summarize'
"""
_cmd = "mri_parse_sdcmdir"
input_spec = ParseDICOMDirInputSpec
output_spec = ParseDICOMDirOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.dicom_info_file):
outputs["dicom_info_file"] = os.path.join(
os.getcwd(), self.inputs.dicom_info_file
)
return outputs
class UnpackSDICOMDirInputSpec(FSTraitedSpec):
source_dir = Directory(
exists=True,
argstr="-src %s",
mandatory=True,
desc="directory with the DICOM files",
)
output_dir = Directory(
argstr="-targ %s", desc="top directory into which the files will be unpacked"
)
run_info = traits.Tuple(
traits.Int,
traits.Str,
traits.Str,
traits.Str,
mandatory=True,
argstr="-run %d %s %s %s",
xor=("run_info", "config", "seq_config"),
desc="runno subdir format name : spec unpacking rules on cmdline",
)
config = File(
exists=True,
argstr="-cfg %s",
mandatory=True,
xor=("run_info", "config", "seq_config"),
desc="specify unpacking rules in file",
)
seq_config = File(
exists=True,
argstr="-seqcfg %s",
mandatory=True,
xor=("run_info", "config", "seq_config"),
desc="specify unpacking rules based on sequence",
)
dir_structure = traits.Enum(
"fsfast",
"generic",
argstr="-%s",
desc="unpack to specified directory structures",
)
no_info_dump = traits.Bool(argstr="-noinfodump", desc="do not create infodump file")
scan_only = File(
exists=True,
argstr="-scanonly %s",
desc="only scan the directory and put result in file",
)
    log_file = File(exists=True, argstr="-log %s", desc="explicitly set log file")
spm_zeropad = traits.Int(
argstr="-nspmzeropad %d", desc="set frame number zero padding width for SPM"
)
no_unpack_err = traits.Bool(
argstr="-no-unpackerr", desc="do not try to unpack runs with errors"
)
class UnpackSDICOMDir(FSCommand):
"""Use unpacksdcmdir to convert dicom files
Call unpacksdcmdir -help from the command line to see more information on
using this command.
Examples
--------
>>> from nipype.interfaces.freesurfer import UnpackSDICOMDir
>>> unpack = UnpackSDICOMDir()
>>> unpack.inputs.source_dir = '.'
>>> unpack.inputs.output_dir = '.'
>>> unpack.inputs.run_info = (5, 'mprage', 'nii', 'struct')
>>> unpack.inputs.dir_structure = 'generic'
>>> unpack.cmdline
'unpacksdcmdir -generic -targ . -run 5 mprage nii struct -src .'
"""
_cmd = "unpacksdcmdir"
input_spec = UnpackSDICOMDirInputSpec
class MRIConvertInputSpec(FSTraitedSpec):
read_only = traits.Bool(argstr="--read_only", desc="read the input volume")
no_write = traits.Bool(argstr="--no_write", desc="do not write output")
in_info = traits.Bool(argstr="--in_info", desc="display input info")
out_info = traits.Bool(argstr="--out_info", desc="display output info")
in_stats = traits.Bool(argstr="--in_stats", desc="display input stats")
out_stats = traits.Bool(argstr="--out_stats", desc="display output stats")
in_matrix = traits.Bool(argstr="--in_matrix", desc="display input matrix")
out_matrix = traits.Bool(argstr="--out_matrix", desc="display output matrix")
in_i_size = traits.Int(argstr="--in_i_size %d", desc="input i size")
in_j_size = traits.Int(argstr="--in_j_size %d", desc="input j size")
in_k_size = traits.Int(argstr="--in_k_size %d", desc="input k size")
force_ras = traits.Bool(
argstr="--force_ras_good", desc="use default when orientation info absent"
)
in_i_dir = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
argstr="--in_i_direction %f %f %f",
desc="<R direction> <A direction> <S direction>",
)
in_j_dir = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
argstr="--in_j_direction %f %f %f",
desc="<R direction> <A direction> <S direction>",
)
in_k_dir = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
argstr="--in_k_direction %f %f %f",
desc="<R direction> <A direction> <S direction>",
)
_orientations = [
"LAI",
"LIA",
"ALI",
"AIL",
"ILA",
"IAL",
"LAS",
"LSA",
"ALS",
"ASL",
"SLA",
"SAL",
"LPI",
"LIP",
"PLI",
"PIL",
"ILP",
"IPL",
"LPS",
"LSP",
"PLS",
"PSL",
"SLP",
"SPL",
"RAI",
"RIA",
"ARI",
"AIR",
"IRA",
"IAR",
"RAS",
"RSA",
"ARS",
"ASR",
"SRA",
"SAR",
"RPI",
"RIP",
"PRI",
"PIR",
"IRP",
"IPR",
"RPS",
"RSP",
"PRS",
"PSR",
"SRP",
"SPR",
]
# _orientations = [comb for comb in itertools.chain(*[[''.join(c) for c in itertools.permutations(s)] for s in [a+b+c for a in 'LR' for b in 'AP' for c in 'IS']])]
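    # Illustrative sanity check (our addition, not part of nipype): the codes
    # above are one letter from each of the L/R, A/P and I/S pairs in every
    # order, so the list holds 48 distinct entries, exactly what the commented
    # itertools expression would generate.
    assert len(_orientations) == len(set(_orientations)) == 48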
in_orientation = traits.Enum(
_orientations,
argstr="--in_orientation %s",
desc="specify the input orientation",
)
in_center = traits.List(
traits.Float,
maxlen=3,
argstr="--in_center %s",
desc="<R coordinate> <A coordinate> <S coordinate>",
)
sphinx = traits.Bool(argstr="--sphinx", desc="change orientation info to sphinx")
out_i_count = traits.Int(
argstr="--out_i_count %d", desc="some count ?? in i direction"
)
out_j_count = traits.Int(
argstr="--out_j_count %d", desc="some count ?? in j direction"
)
out_k_count = traits.Int(
argstr="--out_k_count %d", desc="some count ?? in k direction"
)
vox_size = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
argstr="-voxsize %f %f %f",
desc="<size_x> <size_y> <size_z> specify the size (mm) - useful for upsampling or downsampling",
)
out_i_size = traits.Int(argstr="--out_i_size %d", desc="output i size")
out_j_size = traits.Int(argstr="--out_j_size %d", desc="output j size")
out_k_size = traits.Int(argstr="--out_k_size %d", desc="output k size")
out_i_dir = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
argstr="--out_i_direction %f %f %f",
desc="<R direction> <A direction> <S direction>",
)
out_j_dir = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
argstr="--out_j_direction %f %f %f",
desc="<R direction> <A direction> <S direction>",
)
out_k_dir = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
argstr="--out_k_direction %f %f %f",
desc="<R direction> <A direction> <S direction>",
)
out_orientation = traits.Enum(
_orientations,
argstr="--out_orientation %s",
desc="specify the output orientation",
)
out_center = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
argstr="--out_center %f %f %f",
desc="<R coordinate> <A coordinate> <S coordinate>",
)
out_datatype = traits.Enum(
"uchar",
"short",
"int",
"float",
argstr="--out_data_type %s",
desc="output data type <uchar|short|int|float>",
)
resample_type = traits.Enum(
"interpolate",
"weighted",
"nearest",
"sinc",
"cubic",
argstr="--resample_type %s",
desc="<interpolate|weighted|nearest|sinc|cubic> (default is interpolate)",
)
no_scale = traits.Bool(argstr="--no_scale 1", desc="don't rescale values for COR")
no_change = traits.Bool(
argstr="--nochange", desc="don't change type of input to that of template"
)
tr = traits.Int(argstr="-tr %d", desc="TR in msec")
te = traits.Int(argstr="-te %d", desc="TE in msec")
ti = traits.Int(argstr="-ti %d", desc="TI in msec (note upper case flag)")
autoalign_matrix = File(
exists=True, argstr="--autoalign %s", desc="text file with autoalign matrix"
)
unwarp_gradient = traits.Bool(
argstr="--unwarp_gradient_nonlinearity", desc="unwarp gradient nonlinearity"
)
apply_transform = File(
exists=True, argstr="--apply_transform %s", desc="apply xfm file"
)
apply_inv_transform = File(
exists=True,
argstr="--apply_inverse_transform %s",
desc="apply inverse transformation xfm file",
)
devolve_transform = traits.Str(argstr="--devolvexfm %s", desc="subject id")
crop_center = traits.Tuple(
traits.Int,
traits.Int,
traits.Int,
argstr="--crop %d %d %d",
desc="<x> <y> <z> crop to 256 around center (x, y, z)",
)
crop_size = traits.Tuple(
traits.Int,
traits.Int,
traits.Int,
argstr="--cropsize %d %d %d",
desc="<dx> <dy> <dz> crop to size <dx, dy, dz>",
)
cut_ends = traits.Int(
argstr="--cutends %d", desc="remove ncut slices from the ends"
)
slice_crop = traits.Tuple(
traits.Int,
traits.Int,
argstr="--slice-crop %d %d",
desc="s_start s_end : keep slices s_start to s_end",
)
slice_reverse = traits.Bool(
argstr="--slice-reverse", desc="reverse order of slices, update vox2ras"
)
slice_bias = traits.Float(
argstr="--slice-bias %f", desc="apply half-cosine bias field"
)
fwhm = traits.Float(argstr="--fwhm %f", desc="smooth input volume by fwhm mm")
_filetypes = [
"cor",
"mgh",
"mgz",
"minc",
"analyze",
"analyze4d",
"spm",
"afni",
"brik",
"bshort",
"bfloat",
"sdt",
"outline",
"otl",
"gdf",
"nifti1",
"nii",
"niigz",
]
_infiletypes = ["ge", "gelx", "lx", "ximg", "siemens", "dicom", "siemens_dicom"]
in_type = traits.Enum(
_filetypes + _infiletypes, argstr="--in_type %s", desc="input file type"
)
out_type = traits.Enum(_filetypes, argstr="--out_type %s", desc="output file type")
ascii = traits.Bool(
argstr="--ascii", desc="save output as ascii col>row>slice>frame"
)
reorder = traits.Tuple(
traits.Int,
traits.Int,
traits.Int,
argstr="--reorder %d %d %d",
desc="olddim1 olddim2 olddim3",
)
invert_contrast = traits.Float(
argstr="--invert_contrast %f", desc="threshold for inversting contrast"
)
in_file = File(
exists=True,
mandatory=True,
position=-2,
argstr="--input_volume %s",
desc="File to read/convert",
)
out_file = File(
argstr="--output_volume %s",
position=-1,
genfile=True,
desc="output filename or True to generate one",
)
conform = traits.Bool(
argstr="--conform",
desc="conform to 1mm voxel size in coronal slice direction with 256^3 or more",
)
conform_min = traits.Bool(argstr="--conform_min", desc="conform to smallest size")
conform_size = traits.Float(
argstr="--conform_size %s", desc="conform to size_in_mm"
)
cw256 = traits.Bool(argstr="--cw256", desc="conform to dimensions of 256^3")
parse_only = traits.Bool(argstr="--parse_only", desc="parse input only")
subject_name = traits.Str(argstr="--subject_name %s", desc="subject name ???")
reslice_like = File(
exists=True, argstr="--reslice_like %s", desc="reslice output to match file"
)
template_type = traits.Enum(
_filetypes + _infiletypes,
argstr="--template_type %s",
desc="template file type",
)
split = traits.Bool(
argstr="--split", desc="split output frames into separate output files"
)
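# Hedged usage sketch for the spec above: in nipype this input spec is paired
# with an MRIConvert FSCommand class (nipype.interfaces.freesurfer). File names
# below are illustrative only, and the exact argument order of the generated
# command line may differ.
# >>> from nipype.interfaces.freesurfer import MRIConvert
# >>> mc = MRIConvert()
# >>> mc.inputs.in_file = 'structural.nii'
# >>> mc.inputs.out_file = 'outfile.mgz'
# >>> mc.inputs.out_type = 'mgz'
# >>> mc.cmdline
# 'mri_convert --out_type mgz --input_volume structural.nii --output_volume outfile.mgz'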
(before_colon, after_docstring), colon, docstring, body = tokens
else:
raise CoconutInternalException("invalid match def joining tokens", tokens)
# after_docstring and body are their own self-contained suites, but we
# expect them to both be one suite, so we have to join them together
after_docstring, dedent = split_trailing_indent(after_docstring)
indent, body = split_leading_indent(body)
indentation = collapse_indents(dedent + indent)
return (
before_colon
+ colon + "\n"
+ (openindent + docstring + closeindent if docstring is not None else "")
+ after_docstring
+ indentation
+ body
)
def where_handle(tokens):
"""Process where statements."""
final_stmt, init_stmts = tokens
return "".join(init_stmts) + final_stmt + "\n"
def kwd_err_msg_handle(tokens):
"""Handle keyword parse error messages."""
internal_assert(len(tokens) == 1, "invalid keyword err msg tokens", tokens)
return 'invalid use of the keyword "' + tokens[0] + '"'
def string_atom_handle(tokens):
"""Handle concatenation of string literals."""
internal_assert(len(tokens) >= 1, "invalid string literal tokens", tokens)
if any(s.endswith(")") for s in tokens): # has .format() calls
return "(" + " + ".join(tokens) + ")"
else:
return " ".join(tokens)
string_atom_handle.ignore_one_token = True
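# Illustrative behavior of string_atom_handle (token values are hypothetical):
# string_atom_handle(['"foo"', '"bar"']) -> '"foo" "bar"' (plain implicit concatenation)
# string_atom_handle(['"{x}".format(x=1)', '"!"']) -> '("{x}".format(x=1) + "!")'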
def alt_ternary_handle(tokens):
"""Handle if ... then ... else ternary operator."""
cond, if_true, if_false = tokens
return "{if_true} if {cond} else {if_false}".format(cond=cond, if_true=if_true, if_false=if_false)
def yield_funcdef_handle(tokens):
"""Handle yield def explicit generators."""
internal_assert(len(tokens) == 1, "invalid yield def tokens", tokens)
return tokens[0] + openindent + handle_indentation(
"""
if False:
yield
""",
add_newline=True,
) + closeindent
# end: HANDLERS
# -----------------------------------------------------------------------------------------------------------------------
# MAIN GRAMMAR:
# -----------------------------------------------------------------------------------------------------------------------
class Grammar(object):
"""Coconut grammar specification."""
timing_info = None
comma = Literal(",")
dubstar = Literal("**")
star = ~dubstar + Literal("*")
at = Literal("@")
arrow = Literal("->") | fixto(Literal("\u2192"), "->")
colon_eq = Literal(":=")
unsafe_dubcolon = Literal("::")
unsafe_colon = Literal(":")
colon = ~unsafe_dubcolon + ~colon_eq + unsafe_colon
semicolon = Literal(";") | invalid_syntax("\u037e", "invalid Greek question mark instead of semicolon", greedy=True)
eq = Literal("==")
equals = ~eq + Literal("=")
lbrack = Literal("[")
rbrack = Literal("]")
lbrace = Literal("{")
rbrace = Literal("}")
lbanana = ~Literal("(|)") + ~Literal("(|>") + ~Literal("(|*") + ~Literal("(|?") + Literal("(|")
rbanana = Literal("|)")
lparen = ~lbanana + Literal("(")
rparen = Literal(")")
unsafe_dot = Literal(".")
dot = ~Literal("..") + unsafe_dot
plus = Literal("+")
minus = ~Literal("->") + Literal("-")
dubslash = Literal("//")
slash = ~dubslash + Literal("/")
pipe = Literal("|>") | fixto(Literal("\u21a6"), "|>")
star_pipe = Literal("|*>") | fixto(Literal("*\u21a6"), "|*>")
dubstar_pipe = Literal("|**>") | fixto(Literal("**\u21a6"), "|**>")
back_pipe = Literal("<|") | fixto(Literal("\u21a4"), "<|")
back_star_pipe = Literal("<*|") | ~Literal("\u21a4**") + fixto(Literal("\u21a4*"), "<*|")
back_dubstar_pipe = Literal("<**|") | fixto(Literal("\u21a4**"), "<**|")
none_pipe = Literal("|?>") | fixto(Literal("?\u21a6"), "|?>")
none_star_pipe = Literal("|?*>") | fixto(Literal("?*\u21a6"), "|?*>")
none_dubstar_pipe = Literal("|?**>") | fixto(Literal("?**\u21a6"), "|?**>")
dotdot = (
~Literal("...") + ~Literal("..>") + ~Literal("..*") + Literal("..")
| ~Literal("\u2218>") + ~Literal("\u2218*>") + fixto(Literal("\u2218"), "..")
)
comp_pipe = Literal("..>") | fixto(Literal("\u2218>"), "..>")
comp_back_pipe = Literal("<..") | fixto(Literal("<\u2218"), "<..")
comp_star_pipe = Literal("..*>") | fixto(Literal("\u2218*>"), "..*>")
comp_back_star_pipe = Literal("<*..") | fixto(Literal("<*\u2218"), "<*..")
comp_dubstar_pipe = Literal("..**>") | fixto(Literal("\u2218**>"), "..**>")
comp_back_dubstar_pipe = Literal("<**..") | fixto(Literal("<**\u2218"), "<**..")
amp = Literal("&") | fixto(Literal("\u2227") | Literal("\u2229"), "&")
caret = Literal("^") | fixto(Literal("\u22bb") | Literal("\u2295"), "^")
unsafe_bar = ~Literal("|>") + ~Literal("|*") + Literal("|") | fixto(Literal("\u2228") | Literal("\u222a"), "|")
bar = ~rbanana + unsafe_bar
percent = Literal("%")
dollar = Literal("$")
lshift = Literal("<<") | fixto(Literal("\xab"), "<<")
rshift = Literal(">>") | fixto(Literal("\xbb"), ">>")
tilde = Literal("~") | fixto(~Literal("\xac=") + Literal("\xac"), "~")
underscore = Literal("_")
pound = Literal("#")
unsafe_backtick = Literal("`")
dubbackslash = Literal("\\\\")
backslash = ~dubbackslash + Literal("\\")
dubquestion = Literal("??")
questionmark = ~dubquestion + Literal("?")
except_star_kwd = combine(keyword("except") + star)
except_kwd = ~except_star_kwd + keyword("except")
lambda_kwd = keyword("lambda") | fixto(keyword("\u03bb", explicit_prefix=colon), "lambda")
async_kwd = keyword("async", explicit_prefix=colon)
await_kwd = keyword("await", explicit_prefix=colon)
data_kwd = keyword("data", explicit_prefix=colon)
match_kwd = keyword("match", explicit_prefix=colon)
case_kwd = keyword("case", explicit_prefix=colon)
cases_kwd = keyword("cases", explicit_prefix=colon)
where_kwd = keyword("where", explicit_prefix=colon)
addpattern_kwd = keyword("addpattern", explicit_prefix=colon)
then_kwd = keyword("then", explicit_prefix=colon)
ellipsis = Forward()
ellipsis_ref = Literal("...") | Literal("\u2026")
lt = ~Literal("<<") + ~Literal("<=") + ~Literal("<|") + ~Literal("<..") + ~Literal("<*") + Literal("<")
gt = ~Literal(">>") + ~Literal(">=") + Literal(">")
le = Literal("<=") | fixto(Literal("\u2264"), "<=")
ge = Literal(">=") | fixto(Literal("\u2265"), ">=")
ne = Literal("!=") | fixto(Literal("\xac=") | Literal("\u2260"), "!=")
mul_star = star | fixto(Literal("\xd7"), "*")
exp_dubstar = dubstar | fixto(Literal("\u2191"), "**")
neg_minus = (
minus
| fixto(Literal("\u207b"), "-")
| invalid_syntax("\u2212", "U+2212 is only for negation, not subtraction")
)
sub_minus = (
minus
| fixto(Literal("\u2212"), "-")
| invalid_syntax("\u207b", "U+207b is only for subtraction, not negation")
)
div_slash = slash | fixto(Literal("\xf7") + ~slash, "/")
div_dubslash = dubslash | fixto(combine(Literal("\xf7") + slash), "//")
matrix_at_ref = at | fixto(Literal("\u22c5"), "@")
matrix_at = Forward()
test = Forward()
test_no_chain, dubcolon = disable_inside(test, unsafe_dubcolon)
test_no_infix, backtick = disable_inside(test, unsafe_backtick)
base_name_regex = r""
for no_kwd in keyword_vars + const_vars:
base_name_regex += r"(?!" + no_kwd + r"\b)"
base_name_regex += r"(?![0-9])\w+\b"
base_name = (
regex_item(base_name_regex)
| backslash.suppress() + any_keyword_in(reserved_vars)
)
name = Forward()
dotted_name = condense(name + ZeroOrMore(dot + name))
must_be_dotted_name = condense(name + OneOrMore(dot + name))
integer = combine(Word(nums) + ZeroOrMore(underscore.suppress() + Word(nums)))
binint = combine(Word("01") + ZeroOrMore(underscore.suppress() + Word("01")))
octint = combine(Word("01234567") + ZeroOrMore(underscore.suppress() + Word("01234567")))
hexint = combine(Word(hexnums) + ZeroOrMore(underscore.suppress() + Word(hexnums)))
imag_j = CaselessLiteral("j") | fixto(CaselessLiteral("i"), "j")
basenum = combine(
integer + dot + Optional(integer)
| Optional(integer) + dot + integer,
) | integer
sci_e = combine(CaselessLiteral("e") + Optional(plus | neg_minus))
numitem = ~(Literal("0") + Word(nums + "_", exact=1)) + combine(basenum + Optional(sci_e + integer))
imag_num = combine(numitem + imag_j)
bin_num = combine(CaselessLiteral("0b") + Optional(underscore.suppress()) + binint)
oct_num = combine(CaselessLiteral("0o") + Optional(underscore.suppress()) + octint)
hex_num = combine(CaselessLiteral("0x") + Optional(underscore.suppress()) + hexint)
number = addspace(
(
bin_num
| oct_num
| hex_num
| imag_num
| numitem
)
+ Optional(condense(dot + name)),
)
moduledoc_item = Forward()
unwrap = Literal(unwrapper)
comment = Forward()
comment_ref = combine(pound + integer + unwrap)
string_item = (
combine(Literal(strwrapper) + integer + unwrap)
| invalid_syntax(("\u201c", "\u201d", "\u2018", "\u2019"), "invalid unicode quotation mark; strings must use \" or '", greedy=True)
)
passthrough = combine(backslash + integer + unwrap)
passthrough_block = combine(fixto(dubbackslash, "\\") + integer + unwrap)
endline = Forward()
endline_ref = condense(OneOrMore(Literal("\n")))
lineitem = combine(Optional(comment) + endline)
newline = condense(OneOrMore(lineitem))
end_simple_stmt_item = FollowedBy(semicolon | newline)
start_marker = StringStart()
moduledoc_marker = condense(ZeroOrMore(lineitem) - Optional(moduledoc_item))
end_marker = StringEnd()
indent = Literal(openindent)
dedent = Literal(closeindent)
u_string = Forward()
f_string = Forward()
bit_b = Optional(CaselessLiteral("b"))
raw_r = Optional(CaselessLiteral("r"))
b_string = combine((bit_b + raw_r | raw_r + bit_b) + string_item)
unicode_u = CaselessLiteral("u").suppress()
u_string_ref = combine((unicode_u + raw_r | raw_r + unicode_u) + string_item)
format_f = CaselessLiteral("f").suppress()
f_string_ref = combine((format_f + raw_r | raw_r + format_f) + string_item)
string = trace(b_string | u_string | f_string)
moduledoc = string + newline
docstring = condense(moduledoc)
pipe_augassign = (
combine(pipe + equals)
| combine(star_pipe + equals)
| combine(dubstar_pipe + equals)
| combine(back_pipe + equals)
| combine(back_star_pipe + equals)
| combine(back_dubstar_pipe + equals)
| combine(none_pipe + equals)
| combine(none_star_pipe + equals)
| combine(none_dubstar_pipe + equals)
)
augassign = (
pipe_augassign
| combine(comp_pipe + equals)
| combine(dotdot + equals)
| combine(comp_back_pipe + equals)
| combine(comp_star_pipe + equals)
| combine(comp_back_star_pipe + equals)
| combine(comp_dubstar_pipe + equals)
| combine(comp_back_dubstar_pipe + equals)
| combine(unsafe_dubcolon + equals)
| combine(div_dubslash + equals)
| combine(div_slash + equals)
| combine(exp_dubstar + equals)
| combine(mul_star + equals)
| combine(plus + equals)
| combine(sub_minus + equals)
| combine(percent + equals)
| combine(amp + equals)
| combine(bar + equals)
| combine(caret + equals)
| combine(lshift + equals)
| combine(rshift + equals)
| combine(matrix_at + equals)
| combine(dubquestion + equals)
)
comp_op = (
le | ge | ne | lt | gt | eq
| addspace(keyword("not") + keyword("in"))
| keyword("in")
| addspace(keyword("is") + keyword("not"))
| keyword("is")
)
expr = Forward()
star_expr = Forward()
dubstar_expr = Forward()
comp_for = Forward()
test_no_cond = Forward()
namedexpr_test = Forward()
# for namedexpr locations only supported in Python 3.10
new_namedexpr_test = Forward()
testlist = trace(itemlist(test, comma, suppress_trailing=False))
testlist_has_comma = trace(addspace(OneOrMore(condense(test + comma)) + Optional(test)))
new_namedexpr_testlist_has_comma = trace(addspace(OneOrMore(condense(new_namedexpr_test + comma)) + Optional(test)))
testlist_star_expr = Forward()
testlist_star_expr_ref = tokenlist(Group(test) | star_expr, comma, suppress=False)
testlist_star_namedexpr = Forward()
testlist_star_namedexpr_tokens = tokenlist(Group(namedexpr_test) | star_expr, comma, suppress=False)
yield_from = Forward()
dict_comp = Forward()
dict_literal = Forward()
yield_classic = addspace(keyword("yield") + Optional(testlist))
yield_from_ref = | |
# common/rtt_worker.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>, ph4r05, 2018
# pip install shellescape sarge
import logging
import signal
import threading
import time
import sys
import os
import random
import socket
import typing
import shutil
import tempfile
import paramiko
import sshtunnel
from shlex import quote
import shellescape
from sarge import Capture, Feeder, run
from . import rtt_sftp_conn
from . import rtt_utils
logger = logging.getLogger(__name__)
SARGE_FILTER_INSTALLED = False
RTT_BATTERIES = {
'Dieharder': 'dieharder',
'NIST Statistical Testing Suite': 'nist_sts',
'TestU01 Alphabit': 'tu01_alphabit',
'TestU01 Block Alphabit': 'tu01_blockalphabit',
'TestU01 Crush': 'tu01_crush',
'TestU01 Rabbit': 'tu01_rabbit',
'TestU01 Small Crush': 'tu01_smallcrush',
}
def job_battery_to_experiment(bat):
for keys in RTT_BATTERIES:
if RTT_BATTERIES[keys] == bat:
return keys
raise ValueError('Key not found: %s' % bat)
def experiment_battery_to_job(bat):
return RTT_BATTERIES[bat]
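# Round-trip sketch for the two battery-name mappings above (values come from RTT_BATTERIES):
# experiment_battery_to_job('Dieharder') -> 'dieharder'
# job_battery_to_experiment('dieharder') -> 'Dieharder'
# job_battery_to_experiment('unknown') -> raises ValueError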
def try_fnc(fnc):
try:
return fnc()
except:
pass
class SargeLogFilter(logging.Filter):
"""Filters out debugging logs generated by sarge - output capture. It is way too verbose for debug"""
def __init__(self, name="", *args, **kwargs):
self.namex = name
logging.Filter.__init__(self, *args, **kwargs)
def filter(self, record):
if record.levelno != logging.DEBUG:
return 1
try:
# Parse messages are too verbose, skip.
if record.name == "sarge.parse":
return 0
# Disable output processing message - length of one character.
msg = record.getMessage()
if "queued chunk of length 1" in msg:
return 0
return 1
except Exception as e:
logger.error("Exception in log filtering: %s" % (e,), exc_info=e)
return 1
def install_sarge_filter():
"""
Installs Sarge log filter to avoid long 1char debug dumps
:return:
"""
global SARGE_FILTER_INSTALLED
if SARGE_FILTER_INSTALLED:
return
for handler in logging.getLogger().handlers:
handler.addFilter(SargeLogFilter("hnd"))
logging.getLogger().addFilter(SargeLogFilter("root"))
SARGE_FILTER_INSTALLED = True
def sarge_sigint(proc, sig=signal.SIGTERM):
"""
Sends sigint to sarge process
:return:
"""
proc.process_ready.wait()
p = proc.process
if not p: # pragma: no cover
raise ValueError("There is no subprocess")
p.send_signal(sig)
def escape_shell(inp):
"""
Shell-escapes input param
:param inp:
:return:
"""
try:
inp = inp.decode("utf8")
except:
pass
try:
return shellescape.quote(inp)
except:
pass
return quote(inp)
class AsyncRunner:
def __init__(self, cmd, args=None, stdout=None, stderr=None, cwd=None, shell=True, env=None):
self.cmd = cmd
self.args = args
self.on_finished = None
self.on_output = None
self.on_tick = None
self.no_log_just_write = False
self.log_out_during = True
self.log_out_after = True
self.stdout = stdout
self.stderr = stderr
self.cwd = cwd
self.shell = shell
self.env = env
self.preexec_setgrp = False
self.using_stdout_cap = True
self.using_stderr_cap = True
self.ret_code = None
self.out_acc = []
self.err_acc = []
self.feeder = None
self.proc = None
self.is_running = False
self.was_running = False
self.terminating = False
self.thread = None
def run(self):
try:
self.run_internal()
except Exception as e:
self.is_running = False
logger.error("Unexpected exception in runner: %s" % (e,), exc_info=e)
finally:
self.was_running = True
def __del__(self):
self.deinit()
def deinit(self):
rtt_utils.try_fnc(lambda: self.feeder.close())
if not self.proc:
return
if self.using_stdout_cap:
rtt_utils.try_fnc(lambda: self.proc.stdout.close())
if self.using_stderr_cap:
rtt_utils.try_fnc(lambda: self.proc.stderr.close())
rtt_utils.try_fnc(lambda: self.proc.close())
def drain_stream(self, s, block=False, timeout=0.15):
ret = []
while True:
rs = s.read(-1, block, timeout)
if not rs:
break
ret.append(rs)
return ret
def run_internal(self):
def preexec_function():
os.setpgrp()
cmd = self.cmd
if self.shell:
args_str = (
" ".join(self.args) if isinstance(self.args, (list, tuple)) else self.args
)
if isinstance(cmd, (list, tuple)):
cmd = " ".join(cmd)
if args_str and len(args_str) > 0:
cmd += " " + args_str
else:
if self.args and not isinstance(self.args, (list, tuple)):
raise ValueError("!Shell requires array of args")
if self.args:
cmd += self.args
self.using_stdout_cap = self.stdout is None
self.using_stderr_cap = self.stderr is None
self.feeder = Feeder()
logger.debug("Starting command %s in %s" % (cmd, self.cwd))
run_args = {}
if self.preexec_setgrp:
run_args['preexec_fn'] = preexec_function
p = run(
cmd,
input=self.feeder,
async_=True,
stdout=self.stdout or Capture(timeout=0.1, buffer_size=1),
stderr=self.stderr or Capture(timeout=0.1, buffer_size=1),
cwd=self.cwd,
env=self.env,
shell=self.shell,
**run_args
)
self.proc = p
self.ret_code = 1
self.out_acc, self.err_acc = [], []
out_cur, err_cur = [""], [""]
def process_line(line, is_err=False):
dst = self.err_acc if is_err else self.out_acc
dst.append(line)
if self.log_out_during:
if self.no_log_just_write:
dv = sys.stderr if is_err else sys.stdout
dv.write(line + "\n")
dv.flush()
else:
logger.debug("Out: %s" % line.strip())
if self.on_output:
self.on_output(self, line, is_err)
def add_output(buffers, is_err=False, finish=False):
buffers = [x.decode("utf8") for x in buffers if x is not None and x != ""]
lines = [""]
if not buffers and not finish:
return
dst_cur = err_cur if is_err else out_cur
for x in buffers:
clines = [v.strip("\r") for v in x.split("\n")]
lines[-1] += clines[0]
lines.extend(clines[1:])
nlines = len(lines)
dst_cur[0] += lines[0]
if nlines > 1:
process_line(dst_cur[0], is_err)
dst_cur[0] = ""
for line in lines[1:-1]:
process_line(line, is_err)
if not finish and nlines > 1:
dst_cur[0] = lines[-1] or ""
if finish:
cline = dst_cur[0] if nlines == 1 else lines[-1]
if cline:
process_line(cline, is_err)
try:
while len(p.commands) == 0:
time.sleep(0.15)
logger.debug("Program started, progs: %s" % len(p.commands))
if p.commands[0] is None:
self.is_running = False
self.was_running = True
logger.error("Program could not be started")
return
self.is_running = True
self.on_change()
out = None
err = None
while p.commands[0] and p.commands[0].returncode is None:
if self.using_stdout_cap:
out = p.stdout.read(-1, False)
add_output([out], is_err=False)
if self.using_stderr_cap:
err = p.stderr.read(-1, False)
add_output([err], is_err=True)
if self.on_tick:
self.on_tick(self)
p.commands[0].poll()
if self.terminating and p.commands[0].returncode is None:
logger.debug("Terminating by sigint %s" % p.commands[0])
sarge_sigint(p.commands[0], signal.SIGTERM)
sarge_sigint(p.commands[0], signal.SIGINT)
logger.debug("Sigint sent")
logger.debug("Process closed")
# If there is data, consume it right away.
if (self.using_stdout_cap and out) or (self.using_stderr_cap and err):
continue
time.sleep(0.15)
logger.debug("Runner while ended")
p.wait()
self.ret_code = p.commands[0].returncode if p.commands[0] else -1
if self.using_stdout_cap:
try_fnc(lambda: p.stdout.close())
add_output(self.drain_stream(p.stdout, True), finish=True)
if self.using_stderr_cap:
try_fnc(lambda: p.stderr.close())
add_output(self.drain_stream(p.stderr, True), is_err=True, finish=True)
self.was_running = True
self.is_running = False
self.on_change()
logger.debug("Program ended with code: %s" % self.ret_code)
logger.debug("Command: %s" % cmd)
if self.log_out_after:
logger.debug("Std out: %s" % "\n".join(self.out_acc))
logger.debug("Error out: %s" % "\n".join(self.err_acc))
except Exception as e:
self.is_running = False
logger.error("Exception in async runner: %s" % (e,))
finally:
self.was_running = True
rtt_utils.try_fnc(lambda: self.feeder.close())
rtt_utils.try_fnc(lambda: self.proc.close())
if self.on_finished:
self.on_finished(self)
def on_change(self):
pass
def shutdown(self):
if not self.is_running:
return
self.terminating = True
time.sleep(1)
# Terminating with sigint
logger.info("Waiting for program to terminate...")
while self.is_running:
time.sleep(0.1)
logger.info("Program terminated")
self.deinit()
def start(self):
install_sarge_filter()
self.thread = threading.Thread(target=self.run, args=())
self.thread.setDaemon(False)
self.thread.start()
self.terminating = False
self.is_running = False
while not self.is_running and not self.was_running:
time.sleep(0.1)
return self
def get_rtt_runner(rtt_args, cwd=None):
rtt_env = {'LD_LIBRARY_PATH': rtt_utils.extend_lib_path(cwd)}
async_runner = AsyncRunner(rtt_args, cwd=cwd, shell=False, env=rtt_env)
async_runner.log_out_after = False
async_runner.preexec_setgrp = True
return async_runner
def get_booltest_rtt_runner(rtt_args, cwd=None):
async_runner = AsyncRunner(rtt_args, cwd=cwd, shell=False)
async_runner.log_out_after = False
async_runner.preexec_setgrp = True
return async_runner
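# Minimal usage sketch for the runner factories above; the binary path and
# battery arguments are illustrative, the real caller builds them from job data:
# runner = get_rtt_runner(['./rtt', '--battery', 'nist_sts'], cwd='/opt/rtt')
# runner.start()     # spawns the worker thread and blocks until the process is up
# ...
# runner.shutdown()  # sends SIGTERM/SIGINT via sarge and waits for termination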
class SSHForwarder:
def __init__(self, ssh_params: rtt_sftp_conn.SSHParams, remote_server: str, remote_port: int, local_port=None):
self.ssh_params = ssh_params
self.remote_server = remote_server
self.remote_port = remote_port
self.local_port = local_port
def start(self):
raise ValueError('Not implemented')
def shutdown(self):
raise ValueError('Not implemented')
class SSHForwarderPython(SSHForwarder):
def __init__(self, ssh_params: rtt_sftp_conn.SSHParams, remote_server: str, remote_port: int, local_port=None):
super().__init__(ssh_params, remote_server, remote_port, local_port)
self.is_running = False
self.terminating = False
self.thread = None
def run(self):
logger.info("Establishing SSH tunnel...")
local_args = {} if not self.local_port else {'local_bind_address': ('0.0.0.0', self.local_port)}
with sshtunnel.open_tunnel(
(self.ssh_params.host, self.ssh_params.port),
ssh_username=self.ssh_params.user,
ssh_pkey=self.ssh_params.pkey_file,
ssh_private_key_password=self.ssh_params.pkey_pass,
remote_bind_address=(self.remote_server, self.remote_port),
**local_args
) as tunnel:
self.local_port = tunnel.local_bind_port
self.is_running = True
logger.info("SSH tunnel established, port: %s" % self.local_port)
while not self.terminating:
time.sleep(0.5)
self.is_running = False
logger.info("Closing SSH tunnel")
def start(self):
self.thread = threading.Thread(target=self.run, args=())
self.thread.setDaemon(False)
self.thread.start()
self.terminating = False
self.is_running = False
while not self.is_running:
time.sleep(0.1)
return self
def shutdown(self):
if not self.is_running:
return
self.terminating = True
time.sleep(1)
# Terminating with sigint
logger.info("Waiting for ssh tunnel to terminate...")
while self.is_running:
time.sleep(0.1)
def bind_random_port():
for _ in range(5000):
port = random.randrange(20000, 65535)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('127.0.0.1', port))
return s, port
except socket.error as e:
s.close()
raise ValueError('Binding took too long')
def try_to_connect(host, port, timeout=15):
tstart = time.time()
while True:
if time.time() - tstart > timeout:
raise ValueError('Could not connect in time')
s = socket.socket()
s.settimeout(5)
try:
s.connect((host, port))
return s
except socket.error as exc:
time.sleep(0.1)
continue
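# Usage sketch tying the two helpers together (host and timeout are illustrative):
# sock, port = bind_random_port()   # reserve a free local port
# sock.close()                      # release it just before the tunnel rebinds it
# conn = try_to_connect('127.0.0.1', port, timeout=15)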
class SSHForwarderLinux(SSHForwarder):
def __init__(self, ssh_params: rtt_sftp_conn.SSHParams, remote_server: str, remote_port: int, local_port=None):
super().__init__(ssh_params, remote_server, remote_port, local_port)
self.on_bind_error = None
self.do_setsid = True
self.reservation_socket = None
self.runner = None # type: typing.Optional[AsyncRunner]
self.ssh_passwd_asked = False
self.ssh_passwd_entered = False
self.bind_error = False
self.ask_pass_path = None
self.first_tick = None
self.script_path = None
self.pid_path = None
def __del__(self):
logger.info("SSH shutdown on delete (dirty)")
self.shutdown()
def create_runner(self):
if self.local_port is None:
self.reservation_socket, self.local_port = bind_random_port()
logger.info("Reserving random local port: %s" % self.local_port)
args = [
'-i', '\'%s\'' % self.ssh_params.pkey_file,
'-L', '%s:%s:%s' % (self.local_port, self.remote_server, self.remote_port),
'-N',
'-oLogLevel=error',
'-oStrictHostKeyChecking=no',
'-oUserKnownHostsFile=/dev/null',
'-o ConnectTimeout=30',
'-p', '%s' % self.ssh_params.port,
'\'%s\'@%s' % (self.ssh_params.user, self.ssh_params.host),
]
args_str = ' '.join(args)
cmd = 'ssh %s' % args_str
if self.do_setsid:
self.create_shell_run_script(cmd)
cmd = | |
#!/usr/bin/env python3
# module checking
try:
import os
import sys
import argparse
import getpass
import configparser
except:
print("Missing modules detected!")
sys.exit(1)
# Testing rich existence
try:
from rich import print
from rich.table import Table
except:
print("Error: >rich< module not found.")
sys.exit(1)
# Testing puremagic existence
try:
import puremagic as pr
except:
print("Error: >puremagic< module not found.")
sys.exit(1)
# Testing pyaxmlparser existence
try:
import pyaxmlparser
except:
print("Error: >pyaxmlparser< module not found.")
sys.exit(1)
try:
from colorama import Fore, Style
except:
print("Error: >colorama< module not found.")
sys.exit(1)
# Disabling pyaxmlparser's logs
pyaxmlparser.core.log.disabled = True
# Colors
red = Fore.LIGHTRED_EX
cyan = Fore.LIGHTCYAN_EX
white = Style.RESET_ALL
green = Fore.LIGHTGREEN_EX
# Legends
infoC = f"{cyan}[{red}*{cyan}]{white}"
infoS = f"[bold cyan][[bold red]*[bold cyan]][white]"
foundS = f"[bold cyan][[bold red]+[bold cyan]][white]"
errorS = f"[bold cyan][[bold red]![bold cyan]][white]"
# Gathering username
username = getpass.getuser()
# Is Qu1cksc0pe installed??
if os.path.exists("/usr/bin/qu1cksc0pe") == True and os.path.exists(f"/etc/qu1cksc0pe.conf") == True:
# Parsing new path and write into handler
sc0peConf = configparser.ConfigParser()
sc0peConf.read(f"/etc/qu1cksc0pe.conf")
sc0pe_path = str(sc0peConf["Qu1cksc0pe_PATH"]["sc0pe"])
path_handler = open(".path_handler", "w")
path_handler.write(sc0pe_path)
path_handler.close()
else:
# Parsing current path and write into handler
sc0pe_path = str(os.getcwd())
path_handler = open(".path_handler", "w")
path_handler.write(sc0pe_path)
path_handler.close()
libscan = configparser.ConfigParser()
# Parsing android libscanner configuration file
libscan.read("Systems/Android/libScanner.conf")
libscan["Rule_PATH"]["rulepath"] = f"{sc0pe_path}/Systems/Android/YaraRules/"
with open("Systems/Android/libScanner.conf", "w") as ff:
libscan.write(ff)
# Banner
os.system(f"python3 {sc0pe_path}/Modules/banners.py")
# User home detection
homeD = "/home"
if sys.platform == "darwin":
homeD = "/Users"
# Argument creation, parsing and handling
args = []
parser = argparse.ArgumentParser()
parser.add_argument("--file", required=False,
help="Specify a file to scan or analyze.")
parser.add_argument("--folder", required=False,
help="Specify a folder to scan or analyze.")
parser.add_argument("--analyze", required=False,
help="Analyze target file.", action="store_true")
parser.add_argument("--console", required=False,
help="Use Qu1cksc0pe on interactive shell.", action="store_true")
parser.add_argument("--db_update", required=False,
help="Update malware hash database.", action="store_true")
parser.add_argument("--docs", required=False, help="Analyze document files.",
action="store_true")
parser.add_argument("--domain", required=False,
help="Extract URLs and IP addresses from file.",
action="store_true")
parser.add_argument("--hashscan", required=False,
help="Scan target file's hash in local database.",
action="store_true")
parser.add_argument("--health", required=False,
help="Check for dependencies and configurations.",
action="store_true")
parser.add_argument("--install", required=False,
help="Install or Uninstall Qu1cksc0pe.", action="store_true")
parser.add_argument("--key_init", required=False,
help="Enter your VirusTotal API key.", action="store_true")
parser.add_argument("--lang", required=False,
help="Detect programming language.",
action="store_true")
parser.add_argument("--metadata", required=False,
help="Get exif/metadata information.",
action="store_true")
parser.add_argument("--mitre", required=False,
help="Generate MITRE ATT&CK table for target sample (Windows samples for now.).",
action="store_true")
parser.add_argument("--packer", required=False,
help="Check if your file is packed with common packers.",
action="store_true")
parser.add_argument("--resource", required=False,
help="Analyze resources in target file", action="store_true")
parser.add_argument("--report", required=False,
help="Export analysis reports into a file (JSON Format for now).", action="store_true")
parser.add_argument("--runtime", required=False,
help="Analyze APK files dynamically.", action="store_true")
parser.add_argument("--sigcheck", required=False,
help="Scan file signatures in target file.", action="store_true")
parser.add_argument("--vtFile", required=False,
help="Scan your file with VirusTotal API.",
action="store_true")
args = parser.parse_args()
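# Example invocations (the script name and sample paths are illustrative):
# python3 qu1cksc0pe.py --file sample.exe --analyze
# python3 qu1cksc0pe.py --file app.apk --analyze --report
# python3 qu1cksc0pe.py --folder samples/ --hashscan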
# Basic analyzer function that handles single and multiple scans
def BasicAnalyzer(analyzeFile):
print(f"{infoS} Analyzing: [bold green]{analyzeFile}[white]")
fileType = str(pr.magic_file(analyzeFile))
# Windows Analysis
if "Windows Executable" in fileType or ".msi" in fileType or ".dll" in fileType or ".exe" in fileType:
print(f"{infoS} Target OS: [bold green]Windows[white]\n")
if args.report:
command = f"python3 {sc0pe_path}/Modules/winAnalyzer.py {analyzeFile} True"
else:
command = f"python3 {sc0pe_path}/Modules/winAnalyzer.py {analyzeFile} False"
os.system(command)
# Linux Analysis
elif "ELF" in fileType:
print(f"{infoS} Target OS: [bold green]Linux[white]\n")
if args.report:
command = f"python3 {sc0pe_path}/Modules/linAnalyzer.py {analyzeFile} True"
else:
command = f"python3 {sc0pe_path}/Modules/linAnalyzer.py {analyzeFile} False"
os.system(command)
# MacOSX Analysis
elif "Mach-O" in fileType:
print(f"{infoS} Target OS: [bold green]OSX[white]\n")
command = f"python3 {sc0pe_path}/Modules/osXAnalyzer.py {analyzeFile}"
os.system(command)
# Android Analysis
elif "PK" in fileType and "Java archive" in fileType:
look = pyaxmlparser.APK(analyzeFile)
if look.is_valid_APK() == True:
print(f"{infoS} Target OS: [bold green]Android[white]")
command = f"apkid -j {args.file} > apkid.json"
os.system(command)
if args.report:
command = f"python3 {sc0pe_path}/Modules/apkAnalyzer.py {analyzeFile} True"
else:
command = f"python3 {sc0pe_path}/Modules/apkAnalyzer.py {analyzeFile} False"
os.system(command)
if os.path.exists("apkid.json"):
os.remove("apkid.json")
# APP Security
choice = str(input(f"\n{infoC} Do you want to check target app\'s security? This process will take a while.[Y/n]: "))
if choice == "Y" or choice == "y":
os.system(f"python3 {sc0pe_path}/Modules/apkSecCheck.py")
else:
pass
else:
print("\n[bold white on red]Qu1cksc0pe doesn\'t support archive analysis for now ;)\n")
sys.exit(1)
else:
print("\n[bold white on red]File type not supported. Make sure you are analyze executable files or document files.")
print("[bold]>>> If you want to scan document files try [bold green][i]--docs[/i] [white]argument.")
sys.exit(1)
# Main function
def Qu1cksc0pe():
# Getting all strings from the file if the target file exists.
if args.file:
if os.path.exists(args.file):
if os.path.exists("/usr/bin/strings"):
allA = "--all"
if sys.platform == "darwin":
allA = "-a"
command = f"strings {allA} {args.file} > temp.txt"
os.system(command)
else:
print("[bold white on red][blink]strings[/blink] command not found. You need to install it.")
sys.exit(1)
else:
print("[bold white on red]Target file not found!\n")
sys.exit(1)
# Analyze the target file
if args.analyze:
# Handling --file argument
if args.file is not None:
BasicAnalyzer(analyzeFile=args.file)
# Handling --folder argument
if args.folder is not None:
print("[bold white on red][blink]--analyze[/blink] argument is not supported for folder analyzing!\n")
sys.exit(1)
# Analyze document files
if args.docs:
# Handling --file argument
if args.file is not None:
print(f"{infoS} Analyzing: [bold green]{args.file}[white]")
command = f"python3 {sc0pe_path}/Modules/nonExecAnalyzer.py {args.file}"
os.system(command)
# Handling --folder argument
if args.folder is not None:
print("[bold white on red][blink]--docs[/blink] argument is not supported for folder analyzing!\n")
sys.exit(1)
# Hash Scanning
if args.hashscan:
# Handling --file argument
if args.file is not None:
command = f"python3 {sc0pe_path}/Modules/hashScanner.py {args.file} --normal"
os.system(command)
# Handling --folder argument
if args.folder is not None:
command = f"python3 {sc0pe_path}/Modules/hashScanner.py {args.folder} --multiscan"
os.system(command)
# File signature scanner
if args.sigcheck:
# Handling --file argument
if args.file is not None:
command = f"python3 {sc0pe_path}/Modules/sigChecker.py {args.file}"
os.system(command)
# Handling --folder argument
if args.folder is not None:
print("[bold white on red][blink]--sigcheck[/blink] argument is not supported for folder analyzing!\n")
sys.exit(1)
# Resource analyzer
if args.resource:
# Handling --file argument
if args.file is not None:
command = f"python3 {sc0pe_path}/Modules/resourceChecker.py {args.file}"
os.system(command)
# Handling --folder argument
if args.folder is not None:
print("[bold white on red][blink]--resource[/blink] argument is not supported for folder analyzing!\n")
sys.exit(1)
# metadata
if args.metadata:
# Handling --file argument
if args.file is not None:
command = f"python3 {sc0pe_path}/Modules/metadata.py {args.file}"
os.system(command)
# Handling --folder argument
if args.folder is not None:
print("[bold white on red][blink]--metadata[/blink] argument is not supported for folder analyzing!\n")
sys.exit(1)
# MITRE ATT&CK
if args.mitre:
# Handling --file argument
if args.file is not None:
command = f"python3 {sc0pe_path}/Modules/mitre.py {args.file}"
os.system(command)
# Handling --folder argument
if args.folder is not None:
print("[bold white on red][blink]--mitre[/blink] argument is not supported for folder analyzing!\n")
sys.exit(1)
# Language detection
if args.lang:
# Handling --file argument
if args.file is not None:
command = f"python3 {sc0pe_path}/Modules/languageDetect.py {args.file}"
os.system(command)
# Handling --folder argument
if args.folder is not None:
print("[bold white on red][blink]--lang[/blink] argument is not supported for folder analyzing!\n")
sys.exit(1)
# VT File scanner
if args.vtFile:
# Handling --file argument
if args.file is not None:
# if there is no key quit
try:
directory = f"{homeD}/{username}/sc0pe_Base/sc0pe_VT_apikey.txt"
apik = open(directory, "r").read().split("\n")
except:
print("[bold white on red]Use [blink]--key_init[/blink] to enter your key!\n")
sys.exit(1)
# if key is not valid quit
if apik[0] == '' or apik[0] is None or len(apik[0]) != 64:
print("[bold]Please get your API key from -> [bold green][a]https://www.virustotal.com/[/a]\n")
sys.exit(1)
else:
command = f"python3 {sc0pe_path}/Modules/VTwrapper.py {apik[0]} {args.file}"
os.system(command)
# Handling --folder argument
if args.folder is not None:
print("[bold white on red]If you want to get banned from VirusTotal then do that :).\n")
sys.exit(1)
# packer detection
if args.packer:
# Handling --file argument
if args.file is not None:
command = f"python3 {sc0pe_path}/Modules/packerAnalyzer.py {args.file} --single"
os.system(command)
# Handling --folder argument
if args.folder is not None:
command = f"python3 {sc0pe_path}/Modules/packerAnalyzer.py {args.folder} --multiscan"
os.system(command)
# domain extraction
if args.domain:
# Handling --file argument
if args.file is not None:
command = f"python3 {sc0pe_path}/Modules/domainCatcher.py"
os.system(command)
# Handling --folder argument
if args.folder is not None:
print("[bold white on red][blink]--domain[/blink] argument is not supported for folder analyzing!\n")
sys.exit(1)
# Dynamic APK analyzer
if args.runtime:
command = f"python3 {sc0pe_path}/Modules/androidRuntime.py"
os.system(command)
# Interactive shell
if args.console:
command = f"python3 {sc0pe_path}/Modules/console.py"
os.system(command)
# Dependency checker
if args.health:
command = f"python3 {sc0pe_path}/Modules/checkHealth.py"
os.system(command)
# Database update
if args.db_update:
command = f"python3 {sc0pe_path}/Modules/hashScanner.py --db_update"
os.system(command)
# entering VT API key
if args.key_init:
try:
if os.path.exists(f"{homeD}/{username}/sc0pe_Base/"):
pass
else:
os.system(f"mkdir {homeD}/{username}/sc0pe_Base/")
apikey = str(input(f"{infoC} Enter your VirusTotal API key: "))
apifile = open(f"{homeD}/{username}/sc0pe_Base/sc0pe_VT_apikey.txt", "w")
apifile.write(apikey)
print(f"{foundS} Your VirusTotal API key saved.")
except KeyboardInterrupt:
print("\n[bold white on red]Program terminated by user.\n")
# Install Qu1cksc0pe on your system!!
if args.install:
print(f"{infoS} Checking permissions...")
if os.getuid() == 0:
print(f"{infoS} User: [bold green]root[white]\n")
print(f"[bold cyan][[bold red]1[bold cyan]][white] Install Qu1cksc0pe.")
print(f"[bold cyan][[bold red]2[bold cyan]][white] Uninstall Qu1cksc0pe")
choose = int(input(f"\n{green}>>>>{white} "))
if choose == 1:
print(f"\n{infoS} Looks like we have permission to | |
### Expected results for parse.py unit tests
# coding: utf-8
from collections import OrderedDict
book_1 = {
'book': {'filename': '1En', 'title': '1 Enoch', 'textStructure': ''},
'version': [
{
'attributes': {'title': 'Ethiopic 1', 'author': 'Anonymous', 'fragment': '', 'language': 'Ethiopic',},
'organisation_levels': 2,
'divisions': {'labels': ['Chapter', 'Verse'], 'delimiters': [':'], 'text': ['', '']},
'resources': [
[
{'attributes': {'name': 'Resource1'}, 'info': ['Info 1', 'Info 2'], 'url': 'www.example.com'},
{'attributes': {'name': 'Resource2'}, 'info': ['Info 3', 'Info 4'], 'url': ''}
],
[
{'attributes': {'name': 'Resource3'}, 'info': ['Info 5', 'Info 6'], 'url': 'www.example.com'},
{'attributes': {'name': 'Resource4'}, 'info': ['Info 7', 'Info 8'], 'url': ''}
],
],
'manuscripts': [
{
'p1': {
'attributes': {'abbrev': 'p1', 'language': 'Ethiopic', 'show': 'yes'},
'name': {'text': 'John Rylands Library Ethiopic 23', 'sup': ['Sup 1', 'Sup 2'],},
'bibliography': [],
},
'p2': {
'attributes': { 'abbrev': 'p2', 'language': 'Ethiopic', 'show': 'yes'},
'name': {'text': 'John Rylands Library Ethiopic 23', 'sup': [],},
'bibliography': [],
},
'p3': {
'attributes': {'abbrev': 'p3', 'language': 'Ethiopic', 'show': 'yes'},
'name': {'text': '', 'sup': ['Sup 1', 'Sup 2'],},
'bibliography': [
{'text': '<NAME>, The Ethiopic book of Enoch : A New Edition in the Light of the Aramaic Dead Sea Fragments. Oxford: Clarendon, 1978.', 'booktitle': ['Booktitle 1', 'Booktitle 2',]},
{'text': '<NAME>, The Ethiopic book of Enoch : A New Edition in the Light of the Aramaic Dead Sea Fragments. Oxford: Clarendon, 1978.', 'booktitle': []},
{'text': '', 'booktitle': ['Booktitle 3', 'Booktitle 4',]},
{'text': '', 'booktitle': []},
],
},
'Bertalotto ': {
'attributes': {'abbrev': 'Bertalotto ', 'language': 'Ethiopic', 'show': 'yes'},
'name': {'text': "Bertalotto's correction of orthographic tendencies in ms p", 'sup': [],},
'bibliography': [
{'text': '<NAME>, The Ethiopic book of Enoch : A New Edition in the Light of the Aramaic Dead Sea Fragments. Oxford: Clarendon, 1978.', 'booktitle': []},
]
},
}
],
'text_structure': {
'1:1': {
'attributes': [
{'number': '1', 'fragment': 'Fragment 1',},
{'number': '1', 'fragment': 'Fragment 2',},
],
'units': [
{'id': '1', 'group': '1', 'parallel': 'Parallel 1'},
],
'readings': {
'1': {
'Bertalotto p': {
'attributes': {'option': '0', 'mss': 'Bertalotto p ', 'linebreak': '', 'indent': '',},
'text': unicode('ቃለ፡በረከት፡ዘሄኖከ፡ዘከመ፡ባረከ፡ኅሩያነ፡ወጻድቃነ፡እለ፡ሀለዉ፡ይኩኑ፡በዕለተ፡ምንዳቤ፡ለአሰስሎ፡ኵሎ፡እኩያን፡ወረሲዓን፡', encoding='utf-8'),
'w': []
}
}
}
},
'1:2': {
'attributes': [
{'number': '1', 'fragment': 'Fragment 1',},
{'number': '2', 'fragment': '',},
],
'units': [
{'id': '2', 'group': '2', 'parallel': ''},
{'id': '3', 'group': '0', 'parallel': 'Parallel 2'},
{'id': '4', 'group': '0', 'parallel': ''},
],
'readings': {
'2': {
'Bertalotto p': {
'attributes': {'option': '0', 'mss': 'Bertalotto p ', 'linebreak': 'following', 'indent': '',},
'text': unicode('ወአውሥአ፡ሄኖክ፡ወይቤ፡ብእሲ፡ጻድቅ፡ዘእምኀበ፡እግዚአብሔር፡እንዘ፡አዕይንቲሁ፡ክሡታት፡ወይሬኢ፡', encoding='utf-8'),
'w': [
{
'attributes': {'morph': 'morph', 'lex': 'lex', 'style': 'style', 'lang': 'lang'},
'text': 'w 1'
},
]
}
},
'3': {
'p': {
'attributes': {'option': '0', 'mss': 'p ', 'linebreak': '', 'indent': '',},
'text': unicode('ራዕየ፡', encoding='utf-8'),
'w': [
{
'attributes': {'morph': 'morph', 'lex': 'lex', 'style': 'style', 'lang': ''},
'text': 'w 2'
},
]
},
'Bertalotto': {
'attributes': {'option': '1', 'mss': 'Bertalotto ', 'linebreak': '', 'indent': 'yes',},
'text': unicode('ራእየ፡', encoding='utf-8'),
'w': [
{
'attributes': {'morph': 'morph', 'lex': 'lex', 'style': '', 'lang': 'lang'},
'text': 'w 3'
},
]
}
},
'4': {
'Bertalotto p': {
'attributes': {'option': '0', 'mss': 'Bertalotto p ', 'linebreak': '', 'indent': 'no',},
'text': unicode('ቅዱሰ፡ዘበሰማያት፡ዘአርአዩኒ፡መላእክት፡ወሰማዕኩ፡እምኀቤሆሙ፡ኵሎ፡ወአእመርኩ፡አነ፡ዘእሬኢ፡ወአኮ፡ለዝ፡ትውልድ፡አላ፡ለዘ፡ይመጽኡ፡ትውልድ፡ርሑቃን፡', encoding='utf-8'),
'w': [
{
'attributes': {'morph': 'morph', 'lex': 'lex', 'style': '', 'lang': ''},
'text': 'w 4'
},
]
}
},
}
},
'1:3': {
'attributes': [
{'number': '1', 'fragment': 'Fragment 1',},
{'number': '3', 'fragment': '',},
],
'units': [
{'id': '5', 'group': '0', 'parallel': ''},
{'id': '6', 'group': '0', 'parallel': ''},
{'id': '7', 'group': '0', 'parallel': ''},
],
'readings': {
'5': {
'Bertalotto p': {
'attributes': {'option': '0', 'mss': 'Bertalotto p ', 'linebreak': 'following', 'indent': 'yes',},
'text': unicode('በእንተ፡ኅሩያን፡እቤ፡ወአውሣእኩ፡በእንቲአሆሙ፡ምስለ፡ዘይወጽእ፡ቅዱስ፡', encoding='utf-8'),
'w': [
{
'attributes': {'morph': 'morph', 'lex': '', 'style': 'style', 'lang': 'lang'},
'text': 'w 5'
},
]
}
},
'6': {
'p': {
'attributes': {'option': '0', 'mss': 'p ', 'linebreak': '', 'indent': '',},
'text': unicode('ወዓቢይ፡', encoding='utf-8'),
'w': [
{
'attributes': {'morph': 'morph', 'lex': '', 'style': 'style', 'lang': ''},
'text': 'w 6'
},
]
},
'Bertalotto': {
'attributes': {'option': '1', 'mss': 'Bertalotto ', 'linebreak': '', 'indent': '',},
'text': unicode('ወዐቢይ፡', encoding='utf-8'),
'w': [
{
'attributes': {'morph': 'morph', 'lex': '', 'style': '', 'lang': 'lang'},
'text': 'w 7'
},
]
}
},
'7': {
'Bertalotto p': {
'attributes': {'option': '0', 'mss': 'Bertalotto p ', 'linebreak': '', 'indent': '',},
'text': unicode('እማኅደሩ', encoding='utf-8'),
'w': [
{
'attributes': {'morph': 'morph', 'lex': '', 'style': '', 'lang': ''},
'text': 'w 8'
},
]
}
},
}
}, # end of <div>
'2:1': {
'attributes': [
{'number': '2', 'fragment': '',},
{'number': '1', 'fragment': 'Fragment 3',},
],
'units': [
{'id': '25', 'group': '0', 'parallel': ''},
{'id': '26', 'group': '0', 'parallel': ''},
{'id': '27', 'group': '0', 'parallel': ''},
{'id': '28', 'group': '0', 'parallel': ''},
{'id': '29', 'group': '0', 'parallel': ''},
],
'readings': {
'25': {
'Bertalotto p': {
'attributes': {'option': '0', 'mss': 'Bertalotto p ', 'linebreak': '', 'indent': '',},
'text': unicode('ጠየቁ፡ኵሎ፡ዘውስተ፡ሰማይ፡ግብረ፡እፎ፡ኢይመይጡ፡ፍናዊሆሙ፡ብርሃናት፡ዘውስተ፡ሰማይ፡ከመ፡ኵሉ፡ይሠርቅ፡', encoding='utf-8'),
'w': [
{
'attributes': {'morph': '', 'lex': 'lex', 'style': 'style', 'lang': 'lang'},
'text': 'w 9'
},
]
}
},
'26': {
'Bertalotto': {
'attributes': {'option': '0', 'mss': 'Bertalotto ', 'linebreak': '', 'indent': '',},
'text': unicode('ወየዐርብ፡', encoding='utf-8'),
'w': [
{
'attributes': {'morph': '', 'lex': 'lex', 'style': 'style', 'lang': ''},
'text': 'w 10'
},
]
},
'p': {
'attributes': {'option': '1', 'mss': 'p ', 'linebreak': '', 'indent': '',},
'text': unicode('ወየዓርብ፡', encoding='utf-8'),
'w': [
{
'attributes': {'morph': '', 'lex': 'lex', 'style': '', 'lang': 'lang'},
'text': 'w 11'
},
]
}
},
'27': {
'Bertalotto p': {
'attributes': {'option': '0', 'mss': 'Bertalotto p ', 'linebreak': '', 'indent': '',},
'text': unicode('ሥሩዕ፡ኵሉ፡በዘመኑ፡', encoding='utf-8'),
'w': [
{
'attributes': {'morph': '', 'lex': 'lex', 'style': '', 'lang': ''},
'text': 'w 12'
},
]
}
},
'28': {
'Bertalotto': {
'attributes': {'option': '0', 'mss': 'Bertalotto ', 'linebreak': '', 'indent': '',},
'text': unicode('ወኢይትዐደዉ፡', encoding='utf-8'),
'w': [
{
'attributes': {'morph': '', 'lex': '', 'style': 'style', 'lang': 'lang'},
'text': 'w 13'
},
]
},
'p': {
'attributes': {'option': '1', 'mss': 'p ', 'linebreak': '', 'indent': '',},
'text': unicode('ወኢይትዓደዉ፡', encoding='utf-8'),
'w': [
{
'attributes': {'morph': '', 'lex': '', 'style': 'style', 'lang': ''},
'text': 'w 14'
},
]
}
},
'29': {
'Bertalotto': {
'attributes': {'option': '0', 'mss': 'Bertalotto ', 'linebreak': '', 'indent': '',},
'text': unicode('እምትእዛዞሙ።', encoding='utf-8'),
'w': [
{
'attributes': {'morph': '', 'lex': '', 'style': '', 'lang': 'lang'},
'text': 'w 15'
},
]
},
'p': {
'attributes': {'option': '1', 'mss': 'p ', 'linebreak': '', 'indent': '',},
'text': unicode('እምትዛዞሙ።', encoding='utf-8'),
'w': [
{
'attributes': {'morph': '', 'lex': '', 'style': '', 'lang': ''},
'text': 'w 16'
},
]
}
},
}
}, # end of <div>
} # end of text_structure
}, # end of version
{
'attributes': {'title': 'Ethiopic 2', 'author': 'Anonymous', 'fragment': 'Fragment 1', 'language': '',},
'organisation_levels': 2,
'divisions': {'labels': ['Chapter', 'Verse'], 'delimiters': [':'], 'text': ['', '']},
'resources': [],
'manuscripts': [
{
'p1': {
'attributes': {'abbrev': 'p1', 'language': 'Ethiopic', 'show': 'yes'},
'name': {'text': 'John Rylands Library Ethiopic 23', 'sup': [],},
'bibliography': [],
},
}
],
'text_structure': {
'1:1': {
'attributes': [
{'number': '1', 'fragment': '',},
{'number': '1', 'fragment': '',},
],
'units': [
{'id': '1', 'group': '0', 'parallel': ''},
],
'readings': {
'1': {
'Bertalotto p': {
'attributes': {'option': '0', 'mss': 'Bertalotto p ', 'linebreak': '', 'indent': '',},
'text': unicode('ቃለ፡በረከት፡ዘሄኖከ፡ዘከመ፡ባረከ፡ኅሩያነ፡ወጻድቃነ፡እለ፡ሀለዉ፡ይኩኑ፡በዕለተ፡ምንዳቤ፡ለአሰስሎ፡ኵሎ፡እኩያን፡ወረሲዓን፡', encoding='utf-8'),
'w': []
}
}
}
},
} # end of text_structure
}, # end of version
{
'attributes': {'title': 'Ethiopic 3', 'author': 'Anonymous', 'fragment': '', 'language': '',},
'organisation_levels': 2,
'divisions': {'labels': ['Chapter', 'Verse'], 'delimiters': [':'], 'text': ['', '']},
'resources': [],
'manuscripts': [
{
'p1': {
'attributes': {'abbrev': 'p1', 'language': 'Ethiopic', 'show': 'yes'},
'name': {'text': 'John Rylands Library Ethiopic 23', 'sup': [],},
'bibliography': [],
},
}
],
'text_structure': {
'1:1': {
'attributes': [
{'number': '1', 'fragment': '',},
{'number': '1', 'fragment': '',},
],
'units': [
{'id': '1', 'group': '0', 'parallel': ''},
],
'readings': {
'1': {
'Bertalotto p': {
'attributes': {'option': '0', 'mss': 'Bertalotto p ', 'linebreak': '', 'indent': '',},
'text': unicode('ቃለ፡በረከት፡ዘሄኖከ፡ዘከመ፡ባረከ፡ኅሩያነ፡ወጻድቃነ፡እለ፡ሀለዉ፡ይኩኑ፡በዕለተ፡ምንዳቤ፡ለአሰስሎ፡ኵሎ፡እኩያን፡ወረሲዓን፡', encoding='utf-8'),
'w': []
}
}
}
},
} # end of text_structure
}, # end of version
],
}
book_2 =
import re, socket, json, sys, six
from xbmcswift2 import Plugin, xbmc, xbmcaddon, xbmcgui, xbmcplugin
from resources.lib.hamivideo.api import Hamivideo
import base64, time, os
try:
from multiprocessing.dummy import Pool as ThreadPool
threadpool_imported = True
except:
threadpool_imported = False
#import web_pdb
socket.setdefaulttimeout(180)
plugin = Plugin()
addon = xbmcaddon.Addon()
addonname = addon.getAddonInfo('name')
addonid = xbmcaddon.Addon().getAddonInfo('id')
plugin_storagepath = plugin.storage_path
fakemediaurl_suffix = 'index.m3u8'
try:
import multiprocessing
workers = multiprocessing.cpu_count()
except:
workers = 4
#project https://www.9900.com.tw/B003.htm
#project https://www.gdaily.org/22554/2020-watch-video
settings = {
'chromedriver_path': plugin.get_setting('chromedriver_path', six.text_type),
'chromebinary_location': plugin.get_setting('chromebinary_location', six.text_type),
'geckodriver_path': plugin.get_setting('geckodriver_path', six.text_type),
'firefoxbinary_location': plugin.get_setting('firefoxbinary_location', six.text_type),
'docker_remote_selenium_addr': plugin.get_setting('docker_remote_selenium_addr', six.text_type),
'browser_type': plugin.get_setting('browser_type', six.text_type),
'chromeublockpath': plugin.get_setting('chromeublockpath', six.text_type),
'firefoxublockpath': plugin.get_setting('firefoxublockpath', six.text_type),
'seleniumlogpath': plugin.get_setting('seleniumlogpath', six.text_type),
'ptsplusloginidpw': (plugin.get_setting('ptsplusid', six.text_type),plugin.get_setting('ptspluspw', six.text_type))
}
@plugin.route('/')
def index():
hamichlst = [{
'label': 'Hamivideo channels',
'path': plugin.url_for('list_hamichannels'),
'is_playable': False
}]
linetodaylst = [{
'label': 'Line Today channels',
'path': plugin.url_for('list_linetodaychannels'),
'is_playable': False
}]
maplestagelst = [{
'label': 'MapleStage channels',
'path': plugin.url_for('list_maplestagechs', churl="default", type='parent'),
'is_playable': False
}]
linetvlst = [{
'label': 'Linetv channels',
'path': plugin.url_for('list_linetvchannels', churl="default", type='parent', total_eps='default'),
'is_playable': False
}]
ptspluslst = [{
'label': 'PTS plus channels',
'path': plugin.url_for('list_ptspluschannels', churl="default", type='parent', total_eps='default'),
'is_playable': False
}]
viutvlst = [{
'label': 'Viutv channels',
'path': plugin.url_for('list_viutvchannels', churl="default", type='parent'),
'is_playable': False
}]
pokulst = [{
'label': 'Poku channels',
'path': plugin.url_for('list_pokuchannels', churl="default", type='parent'),
'is_playable': False
}]
dramaqlst = [{
'label': 'Dramaq channels',
'path': plugin.url_for('list_dramaq', drama_name="None"),
'is_playable': False
}]
directplaylst = [{
'label': 'Play Streaming URL Directly',
'path': plugin.url_for('playchannel', churl=fakemediaurl_suffix, type='direct'),
'is_playable': True,
}]
backgroundinfolist = [{
'label': 'Background info',
'path': plugin.url_for('backgroundinfo'),
'is_playable': False
},{
'label': 'Next view mode',
'path': plugin.url_for('nextviewmode'),
'is_playable': False
},]
#linetodaylst+maplestagelst+viutvlst+pokulst+
return plugin.finish(hamichlst+linetvlst+ptspluslst+dramaqlst+directplaylst) #view_mode=50
@plugin.route('/listhamichannels/')
def list_hamichannels():
hamic = Hamivideo(settings)
channels = hamic.return_hamichannels()
channels = [{
'label': '%s %s %s' % (c['name'], c['programtime'], c['program']),
'label2': '%s' % (c['program']),
'path': plugin.url_for('playchannel', churl=c['link'].replace('.do', '.m3u8'), type='hami'),
'icon': c['icon'],
'thumbnail': c['icon'],
'is_playable': True,
} for c in channels]
length_of_ch = str(len(channels))
return plugin.finish(channels)
@plugin.route('/listlinetodaychannels/')
def list_linetodaychannels():
hamic = Hamivideo(settings)
channels = hamic.return_linetodaychs()
channels = [{
'label': c['name'],
'path': plugin.url_for('playchannel', churl=c['link'].replace('.do', '.m3u8')+fakemediaurl_suffix, type='linetoday'),
'icon': c['icon'],
'thumbnail': c['icon'],
'is_playable': True,
} for c in channels]
length_of_ch = str(len(channels))
return plugin.finish(channels)
@plugin.route('/listlinetvchannels/<type>/<churl>')
def list_linetvchannels(churl="", type="parent"):
hamic = Hamivideo(settings)
if type=="parent":
channels = hamic.ret_linetv_main_menu_catgs(hamic.linetv_host_url)
channels = [{
'label': k,
'path': plugin.url_for('list_linetvchannels', churl=v, type='listsubcatgs'),
'icon': '',
'thumbnail': '',
'is_playable': False,
} for k,v in channels.items()]
if type=="listsubcatgs":
channels = hamic.ret_linetv_main_menu_catgs(churl)
channels = [{
'label': k,
'path': plugin.url_for('list_linetvchannels', churl=v, type='listdramas'),
'icon': '',
'thumbnail': '',
'is_playable': False,
} for k,v in channels.items()]
if type=="listdramas":
#channels = hamic.ret_linetv_dramas_with_description_of_a_catg(churl)
channels = hamic.ret_linetv_dramas_of_a_catg(churl)
channels = [{
'label': c['name'],
'label2': c['description'],
'path': plugin.url_for('list_linetvchannels', churl=c['id'], type='listeps'),
'icon': c['posterUrl'],
'thumbnail': c['verticalPosterUrl'],
'info': c['info'],
'is_playable': False,
} for c in channels]
if type=='listeps':
drama = hamic.ret_linetv_drama(int(churl))
episode_args = [(int(churl), c) for c in range(1, drama['total_eps']+1)]
episodedatas = hamic.try_multi_run(hamic.ret_linetv_episode_data_multi_run_wrapper, episode_args)
descriptions = hamic.try_multi_run(hamic.ret_linetv_drama_description_multi_run_wrapper, episode_args)
episodedatas = {int(d['episode']):d for d in episodedatas}
descriptions = {int(d['drama_episode']):d['drama_description'] for d in descriptions}
channels = list()
for c in range(1, int(drama['total_eps'])+1):
reqheaders = episodedatas[c]['reqheaders']
reqheaders_strs = episodedatas[c]['reqheaders_strs']
channel = {
'label': episodedatas[c]['epsInfo']['eps_title'],
'label2': '',
'path': episodedatas[c]['multibitrateplaylist'],
'icon': drama['poster_url'],
'thumbnail': drama['vertical_poster'],
'info': {'plot': descriptions[c]},
'properties': {
'inputstream': 'inputstream.adaptive',
'inputstream.adaptive.license_type': 'com.widevine.alpha', #, 'com.microsoft.playready'
'inputstream.adaptive.manifest_type': 'hls',
'inputstream.adaptive.license_key': 'time='+str(round(time.time(),3)).replace('.','').ljust(13, '0')+'|'+reqheaders_strs+'||R', #str(int(time.time() ) )
#'inputstream.adaptive.stream_headers': reqheaders_strs,
},
'is_playable': True,
#'setsubtitles': episodedatas[c]['subtitle_url']
}
channels.append(channel)
return plugin.finish(channels)
@plugin.route('/listptspluschannels/<type>/<churl>')
def list_ptspluschannels(churl="", type="parent"):
hamic = Hamivideo(settings)
if type=="parent":
channels = hamic.ret_ptsplus_main_menu_catgs()
channels = [{
'label': v['genreName'],
'path': plugin.url_for('list_ptspluschannels', churl=v['genreId'], type='listprograms'),
'icon': '',
'thumbnail': '',
'is_playable': False,
} for v in channels]
if type=="listprograms":
channels = hamic.ret_ptsplus_programs_under_a_catg(churl)
plugin.log.info('genreId is: '+churl)
channels = [{
'label': v['titleLocal'],
'label2': '',
'path': plugin.url_for('list_ptspluschannels', churl=v['seasonId'], type='listeps'),
'info': {'plot': v['synopsisLocal']},
'thumbnail': v['artWorkImagesDict']['Season_KeyVisual'],
'icon': v['artWorkImagesDict']['Season_Post'],
'is_playable': False,
} for v in channels]
if type=="listeps":
plugin.log.info('seasonId is: '+churl)
channels = hamic.ret_ptsplus_episodes_under_a_program(churl)
channels = [{
'label': '第'+'{0:03d}'.format(v['episodeNumber'])+'集',
'label2': '',
'path': v['m3u8url'],
'info': {'plot': v['synopsisLocal']},
'thumbnail': v['artWorkImagesDict']['Episode_KeyVisual'],
'icon': v['artWorkImagesDict']['Episode_Post'],
'is_playable': True,
} for v in channels]
channels = sorted(channels, key=lambda k: k['label'])
return plugin.finish(channels)
#https://ewcdn14.nowe.com/session/p8-5-f9faefbc85c-318d3fad569f91c/Content/DASH_VOS3/Live/channel(VOS_CH099)/manifest.mpd?token=<PASSWORD>
#https://ewcdn13.nowe.com/session/p8-3-37e14e40349-c4ae989921b1c8d/Content/DASH_VOS3/Live/channel(VOS_CH099)/manifest.mpd?token=<PASSWORD>
#listitem.setProperty('inputstreamaddon', 'inputstream.adaptive')
#listitem.setProperty('inputstream.adaptive.manifest_type', 'mpd')
#listitem.setMimeType('application/dash+xml')
#listitem.setProperty('inputstream.adaptive.stream_headers', 'Referer=blah&User-Agent=Blah')
#listitem.setContentLookup(False)
@plugin.route('/listviutvchannels/<type>/<churl>')
def list_viutvchannels(churl="", type="parent"):
hamic = Hamivideo(settings)
channels = [hamic.ret_viutv(chid) for chid in ["096","099"]]
channels = [{
'label': c['name'],
'label2': c['description'],
'path': plugin.url_for('playchannel', churl=c['mpdurl'], type='viutv'),
'icon': c['icon'],
'thumbnail': c['icon'],
'info': c['info'],
'properties': {
'inputstreamaddon': 'inputstream.adaptive',
'inputstream.adaptive.license_type': 'com.widevine.alpha', #, 'com.widevine.alpha'
'inputstream.adaptive.manifest_type': 'mpd',
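# Note (my reading of the inputstream.adaptive wiki, not documented by this code itself): the license_key value
# built below follows the add-on's "license URL|request headers|post data|response handling" format - the wrapped
# Widevine license server URL, JSON request headers, a JSON body carrying the viu.tv drmToken, and "B" as the
# response-handling flag.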
'inputstream.adaptive.license_key': "|".join([
"https://fwp.nowe.com/wrapperWV",
json.dumps({
'Host':'fwp.nowe.com',
'Origin':'https://viu.tv',
'Referer':'https://viu.tv/',
'TE':'Trailers',
}),
json.dumps({
"rawLicenseRequestBase64":"CAQ=",
"drmToken":c['drmToken'],
}),
"B", #[Response]
#'inputstream.adaptive.stream_headers': reqheaders_strs,
])
},
'is_playable': True,
} for c in channels]
return plugin.finish(channels)
@plugin.route('/listmaplestagedramas/<type>/<churl>')
def list_maplestagechs(churl="", type="parent"):
hamic = Hamivideo(settings)
if type=="parent":
channels = hamic.ret_maplestage_parent_catgs()
channels = [{
'label': c['name'], #+xbmc.executebuiltin('Container.SetViewMode(%s)' % view_mode_id)
'path': plugin.url_for('list_maplestagechs', churl=c['link'], type='underparent'),
'icon': c['icon'],
'thumbnail': c['icon'],
'is_playable': False,
} for c in channels]
if type=="underparent":
channels = hamic.ret_maplestage_dramas_of_a_parent(churl)
channels = [{
'label': c['name'],
'path': plugin.url_for('list_maplestagechs', churl=c['link'], type='underdrama'),
'icon': c['icon'],
'thumbnail': c['icon'],
'is_playable': False,
} for c in channels]
if type=="underdrama":
channels = hamic.ret_episode_links_of_a_maplestage_drama(churl)
drama_name = channels[0]['program']
channels = [{
'label': c['name'],
'label2': c['info']['plot'],
'path': plugin.url_for('playchannel', churl=c['link']+fakemediaurl_suffix, type='maplestage'),
'icon': c['icon'],
'thumbnail': c['icon'],
'info': c['info'],
'is_playable': True,
} for c in channels]
channels.append({
'label': 'Switch to DramaQ sources for '+six.ensure_str(drama_name),
'path': plugin.url_for('list_dramaq', drama_name=six.ensure_str(base64.b64encode(six.ensure_binary(drama_name))) ),
'is_playable': False,
})
length_of_ch = str(len(channels))
return plugin.finish(channels, sort_methods = ['label', 'title'])
@plugin.route('/list_dramaq/<drama_name>')
def list_dramaq(drama_name="None"):
hamic = Hamivideo(settings)
if drama_name=="None":
drama_name = plugin.keyboard(six.ensure_str(''), heading="搜尋Dramaq/Qdrama")
else:
drama_name = six.ensure_str(base64.b64decode(drama_name))
channels = hamic.ret_dramaq_episodes(drama_name)
channels = [{
'label': c['name'],
'path': plugin.url_for('playchannel', churl=c['link']+fakemediaurl_suffix, type='dramaq'),
'icon': c['icon'],
'thumbnail': c['icon'],
'info': c['info'],
'is_playable': True,
} for c in channels]
return plugin.finish(channels, sort_methods = ['label'])
@plugin.route('/nextviewmode/')
def nextviewmode():
#xbmc.executebuiltin('Container.NextViewMode()')
#xbmcplugin.setContent(plugin.handle, 'movies')
plugin.set_content('guide')
@plugin.route('/backgroundinfo')
def backgroundinfo():
import platform, os
hamic = Hamivideo(settings)
htmlsrc = "test"
items = [
{'label': plugin_storagepath},
{'label': platform.machine()},
{'label': settings['chromebinary_location']},
{'label': settings['chromedriver_path']},
{'label': htmlsrc}
]
return plugin.finish(items)
@plugin.route('/channeldetail/<churl>')
def show_channel_detail(churl):
#hamic = Hamivideo()
#xbmc.executebuiltin(streamingurl)
#xbmcgui.Dialog().ok(addonname,streamingurl)
plugin.log.info('now in addonname '+addonname+" and addon id is "+addonid)
#plugin.log.info('parsed result: %s' % streamingurl)
pluginplaybackurl = 'plugin://%s/play/%s' % (addonid, churl)
#plugin.set_resolved_url(pluginplaybackurl)
#plugin.log.info('Playing url: %s' % streamingurl)
#return plugin.finish([{"label": 'test', 'path': 'test'}])
#print(streamingurl)
#pass
#items = [{
# 'label': 'play',
# 'path': streamingurl,
# 'is_playable': True
#}]
#return plugin.finish(items)
@plugin.route('/list_pokuchannels/<churl>/<type>')
def list_pokuchannels(churl='default', type='parent'):
hamic = Hamivideo(settings)
nextmode = {
'parent': 'drama',
'drama': 'listepisodes',
'search': 'listepisodes',
'listepisodes': 'poku',
}
nextpluginurl = {
'parent': 'list_pokuchannels',
'drama': 'list_pokuchannels',
'search': 'list_pokuchannels',
'listepisodes': 'playchannel',
}
if type=='parent':
channels = {
'電視劇': 'tvseries',
'電視劇美劇': 'us',
'電視劇韓劇': 'kr',
'電視劇陸劇': 'cn',
'電視劇台劇': 'tw',
'電視劇日劇': 'jp',
'電影': 'movies',
'電影劇情電影': 'dramamovie',
'電影動作': 'action',
'電影喜劇': 'comedy',
'電影科幻': 'scifi',
'電影愛情': 'romance',
'電影動漫': 'anime',
'電影戰爭': 'war',
'電影恐怖': 'horror',
'電影動畫': 'cartoon',
'電影紀錄片': 'documentary',
'綜藝': 'tvshow',
'動漫': 'anime',
}
channels = {k:"https://poku.tv/vodtype/"+v+".html" for k,v in channels.items()}
additional_channels = {
'綜藝中國1': 'https://poku.tv/vodshow/tvshow-%E5%A4%A7%E9%99%B8----------.html',
'綜藝臺灣': 'https://poku.tv/vodshow/tvshow-%E8%87%BA%E7%81%A3----------.html',
'綜藝香港': 'https://poku.tv/vodshow/tvshow-%E9%A6%99%E6%B8%AF----------.html',
'綜藝韓國': 'https://poku.tv/vodshow/tvshow-%E9%9F%93%E5%9C%8B----------.html',
'綜藝日本': 'https://poku.tv/vodshow/tvshow-%E6%97%A5%E6%9C%AC----------.html',
'綜藝中國2': 'https://poku.tv/vodshow/tvshow-%E4%B8%AD%E5%9C%8B----------.html',
'綜藝歐美': 'https://poku.tv/vodshow/tvshow-%E6%AD%90%E7%BE%8E----------.html',
'動漫中國': 'https://poku.tv/vodshow/anime-%E5%A4%A7%E9%99%B8----------.html',
'動漫臺灣': 'https://poku.tv/vodshow/anime-%E8%87%BA%E7%81%A3----------.html',
'動漫香港': 'https://poku.tv/vodshow/anime-%E9%A6%99%E6%B8%AF----------.html',
'動漫韓國': 'https://poku.tv/vodshow/anime-%E9%9F%93%E5%9C%8B----------.html',
'動漫日本': 'https://poku.tv/vodshow/anime-%E6%97%A5%E6%9C%AC----------.html',
'動漫美國': 'https://poku.tv/vodshow/anime-%E7%BE%8E%E5%9C%8B----------.html',
}
channels = hamic.merge_two_dicts(channels, additional_channels)
channels = [{
'label': six.ensure_str(k),
'path': plugin.url_for(nextpluginurl[type], churl=v, type=nextmode[type]),
'icon': '',
'thumbnail': '',
'info': '',
'is_playable': False,
} for k,v in channels.items()]
channels.append({
'label': six.ensure_str('搜尋poku.tv'),
'path': plugin.url_for(nextpluginurl[type], churl='search', type='search'),
'icon': '',
'thumbnail': '',
'info': '',
'is_playable': False,
})
if type=='search':
searchkwd = plugin.keyboard(six.ensure_str(''), heading="搜尋Poku.tv")
searchurl = 'https://poku.tv/vodsearch/-------------.html?submit=&wd='+searchkwd
results = hamic.get_poku_dramas([searchurl,'search'])
channels = [{
'label': v['title'],
'path': plugin.url_for(nextpluginurl[type], churl=v['link'], type=nextmode[type]),
'icon': v['thumbnail'],
'thumbnail': v['thumbnail'],
'info': {
'plot': v['description'],
},
'is_playable': False,
} for v in results]
if type in ['drama','listepisodes']:
if type=='drama':
allpagesnum_info = hamic.get_poku_dramas([churl,'allnum'])
if allpagesnum_info['allpageslink']!=None:
iterargs = [[x, type] for x in allpagesnum_info['allpageslink']]
if threadpool_imported:
pool = ThreadPool(workers)
results = pool.map(hamic.get_poku_dramas, iterargs)
pool.close()
pool.join()
else:
results = [hamic.get_poku_dramas(iterarg) for iterarg in iterargs]
results = reduce(lambda x,y: x+y, results)
else:
results = hamic.get_poku_dramas([churl,type])
results = hamic.unique(results)
is_playable = False
else:
results = hamic.get_poku_dramas([churl,type])
is_playable = True
channels = [{
'label': v['title'],
'path': plugin.url_for(nextpluginurl[type], churl=(v['link']+fakemediaurl_suffix if type=='listepisodes' else v['link']), type=nextmode[type]),
'icon': v['thumbnail'],
'thumbnail': v['thumbnail'],
'info': {
'plot': (v['description']+v['metadata'] if type=='listepisodes' else v['description']),
},
'is_playable': is_playable,
} for v in results]
return plugin.finish(channels, sort_methods = ['label'])
@plugin.route('/play/<type>/<churl>')
def playchannel(churl, type="hami"):
hamic = Hamivideo(settings)
#hamic.clear_other_browser_processed()
if type in ['linetoday','maplestage','linetv','dramaq','poku']:
cchurl = churl.replace(fakemediaurl_suffix,'')
elif type=='direct':
cchurl = plugin.keyboard(six.ensure_str(''), heading="輸入串流網址").strip()
else:
cchurl = churl.replace('.m3u8','.do')
plugin.log.info('starting parsing '+cchurl+' by '+type)
if type=='maplestage':
streamingurl = hamic.ret_maplestage_streamingurl_by_req(cchurl)
#streamingurl = hamic.get_streamingurl_of_ch(cchurl, type=type, logtype='performancelogs')
subtitleurl = None
# HLS AES-128 key handling references (a standalone decryption sketch follows after this function):
# decoding discussion: https://www.52pojie.cn/thread-944303-1-1.html
# EXT-X-KEY spec: https://tools.ietf.org/html/rfc8216#section-4.3.2.4
# https://github.com/peak3d/inputstream.adaptive/wiki/Integration
# Python key-decryption example: https://www.52pojie.cn/thread-986218-1-1.html
# https://www.52pojie.cn/thread-1123891-1-1.html
if type=='dramaq':
streamingurl = hamic.ret_dramaq_streaming_url_by_req(cchurl)
subtitleurl = None
elif type=='hami':
channelid = os.path.basename(cchurl).replace('.do','')
streamingurl = hamic.ret_hami_streaming_url_by_req(channelid)
streamingurl = hamic.get_better_q_streamingsrc(streamingurl)
streamingurl = streamingurl+"|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36&referer=https://hamivideo.hinet.net&origin=https://hamivideo.hinet.net"
subtitleurl = None
#elif type=='linetv':
# epi_data = hamic.ret_linetv_episode_data(url=cchurl)
# streamingurl = epi_data['multibitrateplaylist']
# subtitleurl = epi_data['epsInfo']['source'][0]['links'][0]['subtitle']
elif type=='viutv':
streamingurl = cchurl #hamic.ret_viutv(churl)['mpdurl']
subtitleurl = None
elif type=='poku':
streamingurl = hamic.get_poku_dramas([cchurl, 'findstreamingurl'])
streamingurl = streamingurl['videourl']+'|'+streamingurl['req_header_str']
subtitleurl = None
elif type=='direct':
if re.search(r'youtube\.com/watch\?v', cchurl) is not None:
plugin.log.info('matching youtube url!')
youtube_video_id = re.match(r".+youtube\.com/.+v=([\w\d]+)", cchurl).group(1)
cchurl = "plugin://plugin.video.youtube/play/?video_id="+youtube_video_id
plugin.log.info('transform youtube url to '+cchurl)
streamingurl = cchurl
subtitleurl = None
elif type=='linetoday':
streamingurl = hamic.get_streamingurl_of_ch(cchurl, type=type, logtype='networklogs')
subtitleurl = None
if re.search('(timed out|timeout|unknown error|connection refused)', streamingurl)!=None:
#hamic.clear_other_browser_processed()
pass
plugin.log.info('parsing result is: '+streamingurl)
plugin.set_resolved_url(streamingurl, subtitles=subtitleurl)
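# Illustrative sketch (not part of the original add-on): the reference links inside playchannel() above discuss
# HLS AES-128 key handling (RFC 8216, section 4.3.2.4). The helper below shows the minimal decryption flow they
# describe. It assumes the media playlist carries an "#EXT-X-KEY:METHOD=AES-128,URI=...,IV=0x..." tag and that the
# 'requests' and 'pycryptodome' packages are available; neither assumption comes from this add-on's own code.
def _decrypt_hls_segment_sketch(playlist_text, segment_bytes, media_sequence=0):
    import re as _re
    import requests
    from Crypto.Cipher import AES  # pycryptodome (assumed dependency)
    key_tag = _re.search(r'#EXT-X-KEY:METHOD=AES-128,URI="([^"]+)"(?:,IV=0x([0-9A-Fa-f]+))?', playlist_text)
    if key_tag is None:
        return segment_bytes  # no encryption declared; nothing to do
    key = requests.get(key_tag.group(1)).content  # 16-byte AES key
    if key_tag.group(2):
        iv = bytes.fromhex(key_tag.group(2))
    else:
        # RFC 8216: without an explicit IV, the media sequence number is used
        iv = media_sequence.to_bytes(16, 'big')
    return AES.new(key, AES.MODE_CBC, iv).decrypt(segment_bytes)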
# Suggested view codes for each type from different skins (initial list thanks to xbmcswift2 library)
ALL_VIEW_CODES = {
'list': {
'skin.confluence': 50, # List
'skin.aeon.nox': 50, # List
'skin.droid': 50, # List
'skin.quartz': 50, # List
'skin.re-touched': 50, # List
'skin.estuary': 50,
# 50 = List, 51 = Poster, 52 = Lists,53 = Shift, 54 = InfoWall 55 = Wide list, 500 = Wall,501= List, 502 = Fanart
},
'thumbnail': {
'skin.confluence': 501, # Thumbnail
'skin.aeon.nox': 500, # Wall
'skin.droid': 51, # Big icons
'skin.quartz': 51, # Big icons
'skin.re-touched': 500, # Thumbnail
'skin.estuary': 500,
# 50 = List, 51 = Poster, 52 = Lists, 53 = Shift, 54 = InfoWall 55 = Wide list, 500 = Wall,501= List, 502 = Fanart
},
'movies': {
'skin.confluence': 500, # Thumbnail 515, # Media Info 3
'skin.aeon.nox': 500, # Wall
'skin.droid': 51, # Big icons
'skin.quartz': 52, # Media info
'skin.re-touched': 500, # Thumbnail
'skin.estuary': 52,
# 50 = List, 51 = Poster,52 = Lists,53 = Shift, 54 = InfoWall 55 = Wide list, 500 = Wall,501= List, 502 = Fanart
},
'tvshows': {
'skin.confluence': 500, # Thumbnail 515, # Media Info 3
'skin.aeon.nox': 500, # Wall
'skin.droid': 51, # Big icons
'skin.quartz': 52, # Media info
'skin.re-touched': 500, # Thumbnail
'skin.estuary': 54,
# 50 = List, 51 = Poster,52 = Lists, 53 = Shift, 54 = InfoWall 55 = Wide list, 500 = Wall,501= List, 502 = Fanart
},
'seasons': {
'skin.confluence': 50, # List
'skin.aeon.nox': 50, # List
'skin.droid': 50, # List
'skin.quartz': 52, # Media info
'skin.re-touched': 50, # List
'skin.estuary': 53,
# 50 = List, 51 = Poster,52 = Lists, 53 = Shift, 54 = InfoWall 55 = Wide list, 500 = Wall,501= List, 502 = Fanart
},
'episodes': {
'skin.confluence': 500, # Media Info
'skin.aeon.nox': 518, # Infopanel
'skin.droid': 50, # List
'skin.quartz': 52, # Media info
'skin.re-touched': 550, # Wide
'skin.estuary': 55,
# 50 = List, 51 = Poster,52 = Lists,53 = Shift,54 = InfoWall 55 = Wide list, 500 = Wall,501= List, 502 = Fanart
},
'sets': {
'skin.confluence': 500, # List
'skin.aeon.nox': 50, # List
'skin.droid': 50, # List
'skin.quartz': 50, # List
'skin.re-touched': 50, # List
'skin.estuary': 55,
# 50 = List, 51 = Poster,52 = Lists,53 = Shift,54 = InfoWall 55 = Wide list, 500 = Wall,501= List, 502 = Fanart
},
}
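# Illustrative sketch (an assumed helper, not part of the original file): how one of the ALL_VIEW_CODES entries is
# typically applied for the skin that is currently active.
def _apply_view_code_sketch(view_mode):
    import xbmc
    skin = xbmc.getSkinDir()  # e.g. 'skin.estuary'
    view_code = ALL_VIEW_CODES.get(view_mode, {}).get(skin)
    if view_code:
        xbmc.executebuiltin('Container.SetViewMode(%d)' % view_code)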
def set_view(view_mode, view_code=0):
if get_setting('auto-view') == 'true':
# Set the content for
# -*- coding: utf-8 -*-
"""Тесты модуля matan"""
import unittest
import random
from fem.matan import Matrix
class TestMatrixOperations(unittest.TestCase):
"""Тестирование операций с матрицами"""
def test_create_col(self):
"""Test creating a column vector"""
col1 = Matrix([[1], [2], [3], [4]])
col2 = Matrix([1, 2, 3, 4])
msg = f"Columns\n{col1} and\n{col2} must be equal"
self.assertEqual(col1, col2, msg)
def test_create_row(self):
"""Test creating a row vector"""
row1 = Matrix([[1], [2], [3], [4]], to_row=True)
row2 = Matrix([1, 2, 3, 4], to_row=True)
msg = f"Rows\n{row1} and\n{row2} must be equal"
self.assertEqual(row1, row2, msg)
def test_add_cols(self):
"""Add two columns"""
A = Matrix([1, 2])
B = Matrix([3, 4])
C = Matrix([4, 6])
msg = f"\n{A} + \n{B} IS NOT EQUAL TO \n{C}"
self.assertEqual(A+B, C, msg)
def test_add_rows(self):
"""Add two rows"""
A = Matrix([1, 2], to_row=True)
B = Matrix([3, 4], to_row=True)
C = Matrix([4, 6], to_row=True)
msg = f"\n{A} + {B} IS NOT EQUAL TO {C}"
self.assertEqual(A+B, C, msg)
def test_add_matrix(self):
"""Add two matrices"""
A = Matrix([[1, 2], [3, 4]])
B = Matrix([[5, 6], [7, 8]])
C = Matrix([[6, 8], [10, 12]])
msg = f"\n{A} + \n{B} IS NOT EQUAL TO \n{C}"
self.assertEqual(A+B, C, msg)
def test_add_col_and_row(self):
"""Add a column and a row"""
col = Matrix([1, 2])
row = Matrix([3, 4], to_row=True)
with self.assertRaises(Exception):
# a column and a row cannot be added together
col + row
def test_add_row_and_col(self):
"""Add a row and a column"""
row = Matrix([1, 2], to_row=True)
col = Matrix([3, 4])
with self.assertRaises(Exception):
# a row and a column cannot be added together
row + col
def test_mult_row_and_col(self):
"""Multiply a row by a column"""
row = Matrix([1, 2], to_row=True)
col = Matrix([3, 4])
res = Matrix([11])
msg = f"\n{row * col} IS NOT EQUAL TO {res}"
self.assertEqual(row * col, res, msg)
def test_mult_col_and_row(self):
"""Multiply a column by a row"""
col = Matrix([1, 2])
row = Matrix([3, 4], to_row=True)
res = Matrix([[3, 4], [6, 8]])
msg = f"\n{col * row} IS NOT EQUAL TO\n{res}"
self.assertEqual(col * row, res, msg)
def test_mult_cols(self):
"""Multiply a column by a column"""
col = Matrix([1, 2])
with self.assertRaises(Exception):
# a column cannot be multiplied by a column
# incompatible dimensions
col * col
def test_mult_rows(self):
"""Multiply a row by a row"""
row = Matrix([1, 2], to_row=True)
with self.assertRaises(Exception):
# a row cannot be multiplied by a row
# incompatible dimensions
row * row
def test_mult_bad_matrix(self):
"""Multiply matrices with incompatible dimensions"""
A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
B = Matrix([[1, 2], [3, 4]])
with self.assertRaises(Exception):
# matrices with incompatible dimensions cannot be multiplied
A * B
def test_mult_good_matrix(self):
"""Multiply matrices with compatible dimensions"""
A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
res = Matrix([[30, 36, 42], [66, 81, 96], [102, 126, 150]])
msg = f"\n{A * A} IS NOT EQUAL TO\n{res}"
self.assertEqual(A * A, res, msg)
def test_mult_col_and_num(self):
"""Multiply a column by a number"""
col = Matrix([1, 2, 3])
num = 2
res = Matrix([2, 4, 6])
msg = f"\n{col} multiplied by {num} IS NOT EQUAL TO\n{res}"
self.assertEqual(col * num, res, msg)
def test_mult_row_and_num(self):
"""Multiply a row by a number"""
row = Matrix([1, 2, 3], to_row=True)
num = 2
res = Matrix([2, 4, 6], to_row=True)
msg = f"\n{row} multiplied by {num} IS NOT EQUAL TO {res}"
self.assertEqual(row * num, res, msg)
def test_mult_matrix_and_num(self):
"""Multiply a matrix by a number"""
m = Matrix([[1, 2], [3, 4]])
num = 2
res = Matrix([[2, 4], [6, 8]])
msg = f"\n{m} multiplied by {num} IS NOT EQUAL TO\n{res}"
self.assertEqual(m * num, res, msg)
def test_assign_number_to_column_row(self):
"""Assign a number to a row of a column vector"""
col = Matrix([1, 2, 3])
col[0] = 0
res = Matrix([0, 2, 3])
msg = f"\n{col} IS NOT EQUAL TO \n{res}"
self.assertEqual(col, res, msg)
def test_assign_number_to_row_column(self):
"""Assign a number to a column of a row vector"""
row = Matrix([1, 2, 3], to_row=True)
row[0] = 0
res = Matrix([0, 2, 3], to_row=True)
msg = f"\n{row} IS NOT EQUAL TO \n{res}"
self.assertEqual(row, res, msg)
def test_assign_good_one_matrix_to_matrix_row(self):
"""Assign a valid single-row matrix to a row of a matrix"""
m = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
one_m = Matrix([10, 11, 12], to_row=True)
m[0] = one_m
res = Matrix([[10, 11, 12], [4, 5, 6], [7, 8, 9]])
msg = f"\n{m} IS NOT EQUAL TO \n{res}"
self.assertEqual(m, res, msg)
def test_assign_bad_one_matrix_to_matrix_row(self):
"""Assign an invalid single-row matrix to a row of a matrix"""
m = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
one_m = Matrix([10, 11, 12, 14], to_row=True)
with self.assertRaises(Exception):
# the dimensions do not match
m[0] = one_m
def test_assign_same_row_to_matrix_row(self):
"""Assign one of the matrix's own rows to another of its rows"""
m = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
m[0] = m[2]
res = Matrix([[7, 8, 9], [4, 5, 6], [7, 8, 9]])
msg = f"\n{m} IS NOT EQUAL TO \n{res}"
self.assertEqual(m, res, msg)
def test_mult_row_of_matrix_by_num(self):
"""Multiply a matrix row by a number"""
m = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
m[0] *= 2
res = Matrix([[2, 4, 6], [4, 5, 6], [7, 8, 9]])
msg = f"\n{m} IS NOT EQUAL TO \n{res}"
self.assertEqual(m, res, msg)
def test_sub_row_of_matrix_by_num(self):
"""Divide a matrix row by a number"""
m = Matrix([[2, 4, 6], [4, 5, 6], [7, 8, 9]])
m[0] /= 2
res = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
msg = f"\n{m} IS NOT EQUAL TO \n{res}"
self.assertEqual(m, res, msg)
def test_compare_matrix_row_and_separate_row(self):
"""Compare a matrix row with a standalone row"""
m = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
row = Matrix([1, 2, 3], to_row=True)
msg = f"\n{m[0]} IS NOT EQUAL TO {row}"
self.assertTrue(m[0] == row, msg)
def test_get_col_elemet(self):
"""Test getting an element of a column vector"""
# A column element can be accessed:
# either with a single bracket and two indices [i, j]
# or with a single bracket and one index [k] - if it is a row or a column
col = Matrix([1, 2, 3, 4])
# Check element retrieval
for i in range(col.rows):
msg = f'\n{col}col[{i}]={col[i]} is not equal to col[{i}, 0]={col[i, 0]}'
self.assertEqual(col[i], col[i, 0], msg)
def test_get_row_elemet(self):
"""Test getting an element of a row vector"""
# A row element can be accessed:
# either with a single bracket and two indices [i, j]
# or with a single bracket and one index [k] - if it is a row or a column
row = Matrix([1, 2, 3, 4], to_row=True)
# Check element retrieval
for i in range(row.cols):
msg = f'\n{row}row[{i}]={row[i]} is not equal to row[0, {i}]={row[0, i]}'
self.assertEqual(row[i], row[0, i], msg)
def test_set_col_elemet(self):
"""Test setting an element of a column vector"""
# A column element can be set:
# either with a single bracket and two indices [i, j]
# or with a single bracket and one index [k] - if it is a row or a column
col = Matrix([1, 2, 3, 4])
# check element assignment
for i in range(col.rows):
# Set the element through the single index
num = random.random()
col[i] = num
# Check through the single index
msg = f'\n{col} [i] num={num} is not equal to col[{i}]={col[i]}'
self.assertEqual(num, col[i], msg)
# Check through the double index
msg = f'\n{col} [i] num={num} is not equal to col[{i}, 0]={col[i, 0]}'
self.assertEqual(num, col[i, 0], msg)
# Set the element through the double index
num = random.random()
col[i, 0] = num
# Check through the double index
msg = f'\n{col}[i, j] num={num} is not equal to col[{i}, 0]={col[i, 0]}'
self.assertEqual(num, col[i, 0], msg)
# Check through the single index
msg = f'\n{col}[i, j] num={num} is not equal to col[{i}]={col[i]}'
self.assertEqual(num, col[i], msg)
def test_set_row_elemet(self):
"""Test setting an element of a row vector"""
# A row element can be set:
# either with a single bracket and two indices [i, j]
# or with a single bracket and one index [k] - if it is a row or a column
row = Matrix([1, 2, 3, 4], to_row=True)
# check element assignment
for i in range(row.cols):
# Set the element through the single index
num = random.random()
row[i] = num
# Check through the single index
msg = f'\n{row} [i] num={num} is not equal to row[{i}]={row[i]}'
self.assertEqual(num, row[i], msg)
# Check through the double index
msg = f'\n{row} [i] num={num} is not equal to row[0, {i}]={row[0, i]}'
self.assertEqual(num, row[0, i], msg)
# Set the element through the double index
num = random.random()
row[0, i] = num
# Check through the double index
msg = f'\n{row}[i, j] num={num} is not equal to row[0, {i}]={row[0, i]}'
self.assertEqual(num, row[0, i], msg)
# Check through the single index
msg = f'\n{row}[i, j] num={num} is not equal to row[{i}]={row[i]}'
self.assertEqual(num, row[i], msg)
def test_get_matrix_element(self):
"""Test getting a matrix element"""
# A matrix element can be accessed:
# either with a single bracket [i, j]
# or with double brackets [i][j]
m = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# Walk over every matrix element and check
# that it is returned correctly
for i in range(m.rows):
for j in range(m.cols):
msg = f'\n{m}m[{i}, {j}]={m[i, j]} is not equal to m[{i}][{j}]={m[i][j]}'
self.assertEqual(m[i, j], m[i][j], msg)
def test_set_matrix_element(self):
"""Test setting a matrix element"""
# A matrix element
"""
Classic cart-pole system implemented by <NAME> et al.
Copied from http://incompleteideas.net/sutton/book/code/pole.c
permalink: https://perma.cc/C9ZM-652R
"""
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
from scipy.integrate import ode
g = 9.8 # gravity
force_mag = 10.0
tau = 0.02 # seconds between state updates
# cart
m_cart = 1
# pole 1
l_1 = 1 # length
m_1 = 0.1 # mass
# pole 2
l_2 = 1 # length
m_2 = 0.1 # mass
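# Illustrative sketch (an assumption, not part of the original file): the dynamics function f(time, state, input)
# defined below has the signature expected by scipy's ODE integrator, and one control step of length tau can be
# advanced like this.
def _step_dynamics_sketch(dynamics, state, force):
    solver = ode(dynamics).set_integrator('dopri5')
    solver.set_initial_value(state, 0.0).set_f_params(force)
    return solver.integrate(tau)  # state after tau seconds of constant force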
def f(time, state, input):
x = state[0]
x_dot = state[1]
theta_1 = state[2]
theta_1_dot = state[3]
theta_2 = state[4]
theta_2_dot = state[5]
x_dot_dot = ((l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_1_dot ** 2
+ g * l_2 * m_2 * np.sin(theta_2)) * (m_1 * np.cos(theta_2) + m_2 * np.cos(theta_2)
- m_1 * np.cos(theta_1 - theta_2) * np.cos(theta_1)
- m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1))) / (l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2
- l_2 * m_2 ** 2 - l_2 * m_1 ** 2 - 2 * l_2 * m_1 * m_2 - l_2 * m_1 * m_cart - l_2 * m_2 * m_cart
+ l_2 * m_1 ** 2 * np.cos(theta_1) ** 2 + l_2 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_2 * m_2 ** 2 * np.cos(theta_2) ** 2
+ l_2 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2 + l_2 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2
+ 2 * l_2 * m_1 * m_2 * np.cos(theta_1) ** 2 + l_2 * m_1 * m_2 * np.cos(theta_2) ** 2
- 2 * l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * l_2 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \
+ ((- l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_2_dot ** 2
+ g * l_1 * np.sin(theta_1) * (m_1 + m_2)) * (m_1 * np.cos(theta_1) + m_2 * np.cos(theta_1)
- m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_2))) / (l_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2
- l_1 * m_2 ** 2 - l_1 * m_1 ** 2 - 2 * l_1 * m_1 * m_2 - l_1 * m_1 * m_cart - l_1 * m_2 * m_cart
+ l_1 * m_1 ** 2 * np.cos(theta_1) ** 2 + l_1 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_1 * m_2 ** 2 * np.cos(theta_2) ** 2
+ l_1 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2 + l_1 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2
+ 2 * l_1 * m_1 * m_2 * np.cos(theta_1) ** 2 + l_1 * m_1 * m_2 * np.cos(theta_2) ** 2
- 2 * l_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * l_1 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \
- ((- m_2 * np.cos(theta_1 - theta_2) ** 2 + m_1 + m_2) *(l_1 * np.sin(theta_1) * (m_1 + m_2) * theta_1_dot ** 2
+ l_2 * m_2 * np.sin(theta_2) * theta_2_dot ** 2 + input)) / (m_1 ** 2 * np.cos(theta_1) ** 2 - m_1 * m_cart
- m_2 * m_cart - 2 * m_1 * m_2 + m_2 ** 2 * np.cos(theta_1) ** 2 + m_2 ** 2 * np.cos(theta_2) ** 2
+ m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 - m_1 ** 2 - m_2 ** 2 + 2 * m_1 * m_2 * np.cos(theta_1) ** 2
+ m_1 * m_2 * np.cos(theta_2) ** 2 + m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2
+ m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2 - 2 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2))
theta_1_dot_dot = ((m_1 * np.cos(theta_1) + m_2 * np.cos(theta_1)
- m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_2)) * (l_1 * np.sin(theta_1) * (m_1 + m_2) * theta_1_dot ** 2
+ l_2 * m_2 * np.sin(theta_2) * theta_2_dot ** 2 + input)) \
/ (l_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 - l_1 * m_2 ** 2 - l_1 * m_1 ** 2 - 2 * l_1 * m_1 * m_2
- l_1 * m_1 * m_cart - l_1 * m_2 * m_cart + l_1 * m_1 ** 2 * np.cos(theta_1) ** 2
+ l_1 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_1 * m_2 ** 2 * np.cos(theta_2) ** 2
+ l_1 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2 + l_1 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2
+ 2 * l_1 * m_1 * m_2 * np.cos(theta_1) ** 2 + l_1 * m_1 * m_2 * np.cos(theta_2) ** 2
- 2 * l_1 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * l_1 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \
- ((- l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_2_dot ** 2
+ g * l_1 * np.sin(theta_1) * (m_1 + m_2)) * (- m_2 * np.cos(theta_2) ** 2 + m_1 + m_2 + m_cart)) \
/ (l_1 ** 2 * m_1 ** 2 * np.cos(theta_1) ** 2 - l_1 ** 2 * m_2 ** 2
- 2 * l_1 ** 2 * m_1 * m_2 - l_1 ** 2 * m_1 * m_cart - l_1 ** 2 * m_2 * m_cart - l_1 ** 2 * m_1 ** 2
+ l_1 ** 2 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_1 ** 2 * m_2 ** 2 * np.cos(theta_2) ** 2
+ l_1 ** 2 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 + 2 * l_1 ** 2 * m_1 * m_2 * np.cos(theta_1) ** 2
+ l_1 ** 2 * m_1 * m_2 * np.cos(theta_2) ** 2 + l_1 ** 2 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2
+ l_1 ** 2 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2
- 2 * l_1 ** 2 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * l_1 ** 2 * m_1 * m_2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)) \
+ ((l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_1_dot ** 2
+ g * l_2 * m_2 * np.sin(theta_2)) * (m_1 * np.cos(theta_1 - theta_2) + m_2 * np.cos(theta_1 - theta_2)
+ m_cart * np.cos(theta_1 - theta_2) - m_1 * np.cos(theta_1) * np.cos(theta_2)
- m_2 * np.cos(theta_1) * np.cos(theta_2))) / (l_1 * l_2 * m_1 ** 2 * np.cos(theta_1) ** 2 - l_1 * l_2 * m_2 ** 2
- 2 * l_1 * l_2 * m_1 * m_2 - l_1 * l_2 * m_1 * m_cart - l_1 * l_2 * m_2 * m_cart - l_1 * l_2 * m_1 ** 2
+ l_1 * l_2 * m_2 ** 2 * np.cos(theta_1) ** 2 + l_1 * l_2 * m_2 ** 2 * np.cos(theta_2) ** 2
+ l_1 * l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) ** 2 + 2 * l_1 * l_2 * m_1 * m_2 * np.cos(theta_1) ** 2
+ l_1 * l_2 * m_1 * m_2 * np.cos(theta_2) ** 2 + l_1 * l_2 * m_1 * m_2 * np.cos(theta_1 - theta_2) ** 2
+ l_1 * l_2 * m_2 * m_cart * np.cos(theta_1 - theta_2) ** 2
- 2 * l_1 * l_2 * m_2 ** 2 * np.cos(theta_1 - theta_2) * np.cos(theta_1) * np.cos(theta_2)
- 2 * l_1 * l_2 * m_1 * m_2 * np.cos(theta_1
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from vdp.model.v1alpha import healthcheck_pb2 as vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2
from vdp.model.v1alpha import model_definition_pb2 as vdp_dot_model_dot_v1alpha_dot_model__definition__pb2
from vdp.model.v1alpha import model_pb2 as vdp_dot_model_dot_v1alpha_dot_model__pb2
class ModelServiceStub(object):
"""Model service responds to incoming model requests
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Liveness = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/Liveness',
request_serializer=vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2.LivenessRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2.LivenessResponse.FromString,
)
self.Readiness = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/Readiness',
request_serializer=vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2.ReadinessRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2.ReadinessResponse.FromString,
)
self.ListModelDefinition = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/ListModelDefinition',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__definition__pb2.ListModelDefinitionRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__definition__pb2.ListModelDefinitionResponse.FromString,
)
self.GetModelDefinition = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/GetModelDefinition',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__definition__pb2.GetModelDefinitionRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__definition__pb2.GetModelDefinitionResponse.FromString,
)
self.ListModel = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/ListModel',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.ListModelRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.ListModelResponse.FromString,
)
self.CreateModel = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/CreateModel',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.CreateModelRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.CreateModelResponse.FromString,
)
self.CreateModelBinaryFileUpload = channel.stream_unary(
'/vdp.model.v1alpha.ModelService/CreateModelBinaryFileUpload',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.CreateModelBinaryFileUploadRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.CreateModelBinaryFileUploadResponse.FromString,
)
self.GetModel = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/GetModel',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelResponse.FromString,
)
self.UpdateModel = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/UpdateModel',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.UpdateModelRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.UpdateModelResponse.FromString,
)
self.DeleteModel = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/DeleteModel',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.DeleteModelRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.DeleteModelResponse.FromString,
)
self.LookUpModel = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/LookUpModel',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.LookUpModelRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.LookUpModelResponse.FromString,
)
self.RenameModel = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/RenameModel',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.RenameModelRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.RenameModelResponse.FromString,
)
self.PublishModel = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/PublishModel',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.PublishModelRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.PublishModelResponse.FromString,
)
self.UnpublishModel = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/UnpublishModel',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.UnpublishModelRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.UnpublishModelResponse.FromString,
)
self.ListModelInstance = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/ListModelInstance',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.ListModelInstanceRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.ListModelInstanceResponse.FromString,
)
self.GetModelInstance = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/GetModelInstance',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelInstanceRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelInstanceResponse.FromString,
)
self.LookUpModelInstance = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/LookUpModelInstance',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.LookUpModelInstanceRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.LookUpModelInstanceResponse.FromString,
)
self.DeployModelInstance = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/DeployModelInstance',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.DeployModelInstanceRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.DeployModelInstanceResponse.FromString,
)
self.UndeployModelInstance = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/UndeployModelInstance',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.UndeployModelInstanceRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.UndeployModelInstanceResponse.FromString,
)
self.GetModelInstanceCard = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/GetModelInstanceCard',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelInstanceCardRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelInstanceCardResponse.FromString,
)
self.TriggerModelInstance = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/TriggerModelInstance',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.TriggerModelInstanceRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.TriggerModelInstanceResponse.FromString,
)
self.TestModelInstance = channel.unary_unary(
'/vdp.model.v1alpha.ModelService/TestModelInstance',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.TestModelInstanceRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.TestModelInstanceResponse.FromString,
)
self.TestModelInstanceBinaryFileUpload = channel.stream_unary(
'/vdp.model.v1alpha.ModelService/TestModelInstanceBinaryFileUpload',
request_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.TestModelInstanceBinaryFileUploadRequest.SerializeToString,
response_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.TestModelInstanceBinaryFileUploadResponse.FromString,
)
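# Illustrative usage sketch (not generated code): a minimal client call against the stub defined above.
# The target address is an assumption made only for this example.
def _liveness_check_example(target='localhost:8080'):
    with grpc.insecure_channel(target) as channel:
        stub = ModelServiceStub(channel)
        request = vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2.LivenessRequest()
        return stub.Liveness(request)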
class ModelServiceServicer(object):
"""Model service responds to incoming model requests
"""
def Liveness(self, request, context):
"""Liveness method receives a LivenessRequest message and returns a
LivenessResponse message.
See https://github.com/grpc/grpc/blob/master/doc/health-checking.md
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Readiness(self, request, context):
"""Readiness method receives a ReadinessRequest message and returns a
ReadinessResponse message.
See https://github.com/grpc/grpc/blob/master/doc/health-checking.md
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListModelDefinition(self, request, context):
"""ListModelDefinition method receives a ListModelDefinitionRequest message
and returns a ListModelDefinitionResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetModelDefinition(self, request, context):
"""GetModelDefinition method receives a GetModelDefinitionRequest message and
returns a GetModelDefinitionResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListModel(self, request, context):
"""ListModel method receives a ListModelRequest message and returns a
ListModelResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateModel(self, request, context):
"""CreateModel method receives a CreateModelRequest message and returns a
CreateModelResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateModelBinaryFileUpload(self, request_iterator, context):
"""CreateModelBinaryFileUpload method receives a
CreateModelBinaryFileUploadRequest message and returns a
CreateModelBinaryFileUploadResponse message.
Endpoint: "POST /v1alpha/models:multipart"
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetModel(self, request, context):
"""GetModel method receives a GetModelRequest message and returns a
GetModelResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateModel(self, request, context):
"""UpdateModel method receives a UpdateModelRequest message and returns a
UpdateModelResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteModel(self, request, context):
"""DeleteModel method receives a DeleteModelRequest message and returns a
DeleteModelResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LookUpModel(self, request, context):
"""LookUpModel method receives a LookUpModelRequest message and returns a
LookUpModelResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RenameModel(self, request, context):
"""RenameModel method rename a model
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PublishModel(self, request, context):
"""PublishModel method receives a PublishModelRequest message and returns a
PublishModelResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UnpublishModel(self, request, context):
"""UnpublishModel method receives a UnpublishModelRequest message and returns
a UnpublishModelResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListModelInstance(self, request, context):
"""ListModelInstance method receives a ListModelInstanceRequest message and
returns a ListModelInstanceResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetModelInstance(self, request, context):
"""GetModelInstance method receives a GetModelInstanceRequest message and
returns a GetModelInstanceResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LookUpModelInstance(self, request, context):
"""LookUpModelInstance method receives a LookUpModelInstanceRequest message
and returns a
LookUpModelInstanceResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeployModelInstance(self, request, context):
"""DeployModelInstance deploy a model instance to online state
TODO: should use [Long-running operations](https://google.aip.dev/151)
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UndeployModelInstance(self, request, context):
"""UndeployModelInstance undeploy a model instance to offline state
TODO: should use [Long-running operations](https://google.aip.dev/151)
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetModelInstanceCard(self, request, context):
"""GetModelInstanceCard method receives a GetModelInstanceCardRequest message
and returns a GetModelInstanceCardResponse
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TriggerModelInstance(self, request, context):
"""/////////////////////////////////////////////////////
TriggerModelInstance method receives a TriggerModelInstanceRequest message
and returns a TriggerModelInstanceResponse message.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TestModelInstance(self, request, context):
"""TestModelInstance method receives a TestModelInstanceRequest message
and returns a TestModelInstanceResponse message.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TestModelInstanceBinaryFileUpload(self, request_iterator, context):
"""TestModelInstanceBinaryFileUpload method receives a
TestModelInstanceBinaryFileUploadRequest message and returns a
TestModelInstanceBinaryFileUploadResponse message.
Endpoint: "POST/v1alpha/{name=models/*/instances/*}:test-multipart"
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ModelServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Liveness': grpc.unary_unary_rpc_method_handler(
servicer.Liveness,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2.LivenessRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2.LivenessResponse.SerializeToString,
),
'Readiness': grpc.unary_unary_rpc_method_handler(
servicer.Readiness,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2.ReadinessRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2.ReadinessResponse.SerializeToString,
),
'ListModelDefinition': grpc.unary_unary_rpc_method_handler(
servicer.ListModelDefinition,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__definition__pb2.ListModelDefinitionRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__definition__pb2.ListModelDefinitionResponse.SerializeToString,
),
'GetModelDefinition': grpc.unary_unary_rpc_method_handler(
servicer.GetModelDefinition,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__definition__pb2.GetModelDefinitionRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__definition__pb2.GetModelDefinitionResponse.SerializeToString,
),
'ListModel': grpc.unary_unary_rpc_method_handler(
servicer.ListModel,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.ListModelRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.ListModelResponse.SerializeToString,
),
'CreateModel': grpc.unary_unary_rpc_method_handler(
servicer.CreateModel,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.CreateModelRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.CreateModelResponse.SerializeToString,
),
'CreateModelBinaryFileUpload': grpc.stream_unary_rpc_method_handler(
servicer.CreateModelBinaryFileUpload,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.CreateModelBinaryFileUploadRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.CreateModelBinaryFileUploadResponse.SerializeToString,
),
'GetModel': grpc.unary_unary_rpc_method_handler(
servicer.GetModel,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelResponse.SerializeToString,
),
'UpdateModel': grpc.unary_unary_rpc_method_handler(
servicer.UpdateModel,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.UpdateModelRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.UpdateModelResponse.SerializeToString,
),
'DeleteModel': grpc.unary_unary_rpc_method_handler(
servicer.DeleteModel,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.DeleteModelRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.DeleteModelResponse.SerializeToString,
),
'LookUpModel': grpc.unary_unary_rpc_method_handler(
servicer.LookUpModel,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.LookUpModelRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.LookUpModelResponse.SerializeToString,
),
'RenameModel': grpc.unary_unary_rpc_method_handler(
servicer.RenameModel,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.RenameModelRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.RenameModelResponse.SerializeToString,
),
'PublishModel': grpc.unary_unary_rpc_method_handler(
servicer.PublishModel,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.PublishModelRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.PublishModelResponse.SerializeToString,
),
'UnpublishModel': grpc.unary_unary_rpc_method_handler(
servicer.UnpublishModel,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.UnpublishModelRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.UnpublishModelResponse.SerializeToString,
),
'ListModelInstance': grpc.unary_unary_rpc_method_handler(
servicer.ListModelInstance,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.ListModelInstanceRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.ListModelInstanceResponse.SerializeToString,
),
'GetModelInstance': grpc.unary_unary_rpc_method_handler(
servicer.GetModelInstance,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelInstanceRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelInstanceResponse.SerializeToString,
),
'LookUpModelInstance': grpc.unary_unary_rpc_method_handler(
servicer.LookUpModelInstance,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.LookUpModelInstanceRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.LookUpModelInstanceResponse.SerializeToString,
),
'DeployModelInstance': grpc.unary_unary_rpc_method_handler(
servicer.DeployModelInstance,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.DeployModelInstanceRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.DeployModelInstanceResponse.SerializeToString,
),
'UndeployModelInstance': grpc.unary_unary_rpc_method_handler(
servicer.UndeployModelInstance,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.UndeployModelInstanceRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.UndeployModelInstanceResponse.SerializeToString,
),
'GetModelInstanceCard': grpc.unary_unary_rpc_method_handler(
servicer.GetModelInstanceCard,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelInstanceCardRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelInstanceCardResponse.SerializeToString,
),
'TriggerModelInstance': grpc.unary_unary_rpc_method_handler(
servicer.TriggerModelInstance,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.TriggerModelInstanceRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.TriggerModelInstanceResponse.SerializeToString,
),
'TestModelInstance': grpc.unary_unary_rpc_method_handler(
servicer.TestModelInstance,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.TestModelInstanceRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.TestModelInstanceResponse.SerializeToString,
),
'TestModelInstanceBinaryFileUpload': grpc.stream_unary_rpc_method_handler(
servicer.TestModelInstanceBinaryFileUpload,
request_deserializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.TestModelInstanceBinaryFileUploadRequest.FromString,
response_serializer=vdp_dot_model_dot_v1alpha_dot_model__pb2.TestModelInstanceBinaryFileUploadResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'vdp.model.v1alpha.ModelService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
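# Illustrative wiring sketch (not generated code): in practice a concrete subclass of ModelServiceServicer
# overrides the methods above and is registered through add_ModelServiceServicer_to_server. The port and worker
# count below are assumptions made only for this example.
def _serve_example(port=8080, max_workers=4):
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
    add_ModelServiceServicer_to_server(ModelServiceServicer(), server)
    server.add_insecure_port('[::]:%d' % port)
    server.start()
    server.wait_for_termination()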
# This class is part of an EXPERIMENTAL API.
class ModelService(object):
"""Model service responds to incoming model requests
"""
@staticmethod
def Liveness(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.model.v1alpha.ModelService/Liveness',
vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2.LivenessRequest.SerializeToString,
vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2.LivenessResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Readiness(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.model.v1alpha.ModelService/Readiness',
vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2.ReadinessRequest.SerializeToString,
vdp_dot_model_dot_v1alpha_dot_healthcheck__pb2.ReadinessResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListModelDefinition(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.model.v1alpha.ModelService/ListModelDefinition',
vdp_dot_model_dot_v1alpha_dot_model__definition__pb2.ListModelDefinitionRequest.SerializeToString,
vdp_dot_model_dot_v1alpha_dot_model__definition__pb2.ListModelDefinitionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetModelDefinition(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.model.v1alpha.ModelService/GetModelDefinition',
vdp_dot_model_dot_v1alpha_dot_model__definition__pb2.GetModelDefinitionRequest.SerializeToString,
vdp_dot_model_dot_v1alpha_dot_model__definition__pb2.GetModelDefinitionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListModel(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.model.v1alpha.ModelService/ListModel',
vdp_dot_model_dot_v1alpha_dot_model__pb2.ListModelRequest.SerializeToString,
vdp_dot_model_dot_v1alpha_dot_model__pb2.ListModelResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateModel(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.model.v1alpha.ModelService/CreateModel',
vdp_dot_model_dot_v1alpha_dot_model__pb2.CreateModelRequest.SerializeToString,
vdp_dot_model_dot_v1alpha_dot_model__pb2.CreateModelResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateModelBinaryFileUpload(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_unary(request_iterator, target, '/vdp.model.v1alpha.ModelService/CreateModelBinaryFileUpload',
vdp_dot_model_dot_v1alpha_dot_model__pb2.CreateModelBinaryFileUploadRequest.SerializeToString,
vdp_dot_model_dot_v1alpha_dot_model__pb2.CreateModelBinaryFileUploadResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetModel(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.model.v1alpha.ModelService/GetModel',
vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelRequest.SerializeToString,
vdp_dot_model_dot_v1alpha_dot_model__pb2.GetModelResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateModel(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.model.v1alpha.ModelService/UpdateModel',
vdp_dot_model_dot_v1alpha_dot_model__pb2.UpdateModelRequest.SerializeToString,
vdp_dot_model_dot_v1alpha_dot_model__pb2.UpdateModelResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteModel(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.model.v1alpha.ModelService/DeleteModel',
vdp_dot_model_dot_v1alpha_dot_model__pb2.DeleteModelRequest.SerializeToString,
vdp_dot_model_dot_v1alpha_dot_model__pb2.DeleteModelResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def LookUpModel(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.model.v1alpha.ModelService/LookUpModel',
vdp_dot_model_dot_v1alpha_dot_model__pb2.LookUpModelRequest.SerializeToString,
vdp_dot_model_dot_v1alpha_dot_model__pb2.LookUpModelResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RenameModel(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.model.v1alpha.ModelService/RenameModel',
vdp_dot_model_dot_v1alpha_dot_model__pb2.RenameModelRequest.SerializeToString,
vdp_dot_model_dot_v1alpha_dot_model__pb2.RenameModelResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PublishModel(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/vdp.model.v1alpha.ModelService/PublishModel',
vdp_dot_model_dot_v1alpha_dot_model__pb2.PublishModelRequest.SerializeToString,
vdp_dot_model_dot_v1alpha_dot_model__pb2.PublishModelResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UnpublishModel(request,
target,
options=(),
| |
'''
@FileName : init_guess.py
@EditTime : 2021-12-13 13:37:50
@Author : <NAME>
@Email : <EMAIL>
@Description :
'''
from core.utils.recompute3D import recompute3D
import torch
import numpy as np
from core.utils.umeyama import umeyama
import cv2
from core.utils.visualization3d import Visualization
from core.affinity.affinity import ComposedAffinity
from core.assignment.associate import simple_associate
from scipy import signal
def joint_interpolation(poses, n_joints):
""" Interpolate poses to a complete motion, the empty frame is None """
start = 0
# If the first frame is None
if poses[start] is None:
poses[start] = np.zeros((n_joints, 3))
for n, joint in enumerate(poses):
if joint is not None:
if n >= len(poses)-1:
break
if poses[start+1] is not None:
start += 1
if n != start:
j1 = poses[start]
start_t = start
det = (joint - j1) / (n - start_t)
for i in range(n - start_t):
poses[start] = j1 + det * i
start += 1
t = poses[start]
# If the last frame is None
while(start<n+1):
poses[start] = t
start += 1
return poses
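# Illustrative sketch (not part of the original pipeline): minimal call pattern for
# joint_interpolation. The pose values are invented; frames with no reliable
# reconstruction are passed as None. Intermediate gaps are filled by linear
# interpolation and a trailing gap is copied from the last valid frame.
def _joint_interpolation_example():
    n_joints = 12
    poses = [np.zeros((n_joints, 3)), None, None, np.ones((n_joints, 3)), None]
    # Frames 1 and 2 become 1/3 and 2/3 of the way between frames 0 and 3;
    # frame 4 is copied from frame 3.
    return joint_interpolation(poses, n_joints)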
def rec_3D_joints(keypoints, extris, intris, idx, filter_joints_idx, first_frame=False):
keypoints = np.array(keypoints, dtype=np.float32)
keypoints = keypoints[:,:,idx,filter_joints_idx]
n_views, n_frames, n_joints = keypoints.shape[:3]
joints = []
for f in range(n_frames):
if first_frame and f > 0:
break
# Filter out unreliable detection
pack = [[keypoints[v][f], extris[v], intris[v]] for v in range(n_views) if keypoints[v][f][:,2].max() > 0.2]
if len(pack) < 2: # Do not process single view case
joints.append(None)
continue
keps = np.array([p[0] for p in pack])
cam_extris = np.array([p[1] for p in pack])
cam_intris = np.array([p[2] for p in pack])
rec_joints3d = recompute3D(cam_extris, cam_intris, keps.copy())
joints.append(rec_joints3d)
# Interpolation
joints = joint_interpolation(joints, n_joints)
return joints
def physics_geometry_filter(keypoints, extris, intris, frames_seq, flags, img_paths, dataset_obj, filter_joints_idx):
"""
    Filter out noisy detections and recompute 3D joints using the filtered keypoints.
"""
# Calculate the joints in first frame
last_js = []
for idx in range(dataset_obj.num_people):
rec_joints3d = rec_3D_joints(keypoints, extris, intris, idx, filter_joints_idx, first_frame=True)
last_js.append(rec_joints3d[0])
joints = []
affinity_model = ComposedAffinity(cameras=[extris, intris])
n_views = len(extris)
n_people = dataset_obj.num_people
n_joints = len(filter_joints_idx)
total_n_joints = dataset_obj.num_joints
Pall = np.array([intri @ extri[:3] for extri, intri in zip(extris, intris)])
# Container to save filtered results
filterd_keypoints = [[[np.zeros((total_n_joints,3)) for n in range(n_people)] for f in range(frames_seq)] for v in range(n_views)]
last_2d = [[keypoints[v][0][idx] for v in range(n_views)] for idx in range(n_people)]
total_joints = []
for i in range(frames_seq):
keyps = [keypoints[v][i] for v in range(n_views)]
joint = []
for idx, last_j in enumerate(last_js):
# Filter
affinity, dimGroups = affinity_model(keyps, None, last_2d[idx], last_j, images=img_paths)
keyps, output = simple_associate(keyps, affinity, dimGroups, Pall, idx)
# Recompute 3D joints from the filtered keypoints and the initial cameras
pack = [[k, extris[i], intris[i]] for i, k in enumerate(output) if k is not None]
if len(pack) < 2: # do not process single view case
joint.append(None)
continue
keps = np.array([p[0][filter_joints_idx] for p in pack])
cam_extris = np.array([p[1] for p in pack])
cam_intris = np.array([p[2] for p in pack])
rec_joints3d = recompute3D(cam_extris, cam_intris, keps.copy())
joint.append(rec_joints3d)
# Save the filtered keypoints
for v in range(n_views):
filterd_keypoints[v][i][idx] = output[v] if output[v] is not None else np.zeros((total_n_joints,3))
if output[v] is not None:
last_2d[idx][v] = output[v]
if len(cam_extris) > 2:
last_js[idx] = rec_joints3d
total_joints.append(joint)
# Interpolation
interpolated_joints = []
for idx in range(n_people):
joints = [j[idx] for j in total_joints]
joints = joint_interpolation(joints, n_joints)
interpolated_joints.append(np.array(joints))
return filterd_keypoints, flags, interpolated_joints
def init_guess(setting, data, dataset_obj, frames_seq=1, use_torso=False, **kwargs):
models = setting['model']
dtype = setting['dtype']
keypoints = data['keypoints']
flags = data['flags']
device = setting['device']
est_scale = not setting['fix_scale']
fixed_scale = 1. if setting['fixed_scale'] is None else setting['fixed_scale']
extris = setting['extrinsics']
intris = setting['intrinsics']
# The joints that are used for calculating consistency
# (LS,RS,LE,RE,LW,RW,LH,RH,LK,RK,LA,RA)
filter_joints_idx = [5,6,7,8,9,10,11,12,13,14,15,16]
    # Step 1: Get initial joint positions for the SMPL model
init_SMPL_joints = []
for idx in range(dataset_obj.num_people):
# Reset the SMPL model with initial parameters
init_t = torch.zeros((frames_seq, 3), dtype=dtype)
init_r = torch.zeros((frames_seq, 3), dtype=dtype)
init_s = torch.tensor(fixed_scale, dtype=dtype)
init_shape = torch.zeros((1, 10), dtype=dtype)
models[idx].reset_params(transl=init_t, global_orient=init_r, scale=init_s, betas=init_shape)
init_pose = torch.zeros((frames_seq, 69), dtype=dtype, device=device)
with torch.no_grad():
model_output = models[idx](return_verts=False, return_full_pose=False, body_pose=init_pose)
output_joints = model_output.joints.cpu().numpy()
init_SMPL_joints.append(output_joints)
    # Step 2: Get reconstructed joint positions from 2D detections and initial cameras
use_filter = False
if use_filter:
keypoints, flags, init_rec_joints = physics_geometry_filter(keypoints, extris, intris, frames_seq, flags, data['img_path'], dataset_obj, filter_joints_idx)
else:
init_rec_joints = []
for idx in range(dataset_obj.num_people):
# Recompute 3D joints with the initial cameras
rec_joints3ds = rec_3D_joints(keypoints, extris, intris, idx, filter_joints_idx)
init_rec_joints.append(np.array(rec_joints3ds))
    # Step 3: Align the SMPL models to the reconstructed joints
for idx in range(dataset_obj.num_people):
rec_joints3ds = init_rec_joints[idx]
model_joint3ds = init_SMPL_joints[idx]
rotations, translations = [], []
        # Filter out noisy reconstructions with a Butterworth filter
b, a = signal.butter(3, 0.05, 'lowpass')
filtered_joints = signal.filtfilt(b, a, rec_joints3ds.T).T.copy()
# Calculate the global rotations and translations for SMPL models
for joints, joints3d in zip(model_joint3ds, filtered_joints):
# We align the SMPL to the joints in torso (L_Shoulder, R_Shoulder, L_Hip, R_Hip)
joints = joints[[5,6,11,12]]
joints3d = joints3d[[0,1,6,7]]
if abs(joints3d).max() < 0.1:
rotations.append(np.zeros((3,)))
translations.append(np.zeros((3,)))
continue
            # Estimate the similarity transform (rotation, translation, scale) that
            # aligns the SMPL torso joints to the reconstructed torso joints (Umeyama)
rot, trans, scale = umeyama(joints, joints3d, est_scale)
rot = cv2.Rodrigues(rot)[0].reshape(3,)
rotations.append(rot)
translations.append(trans)
# Apply the estimated results
if est_scale:
init_s = torch.tensor(scale, dtype=dtype)
else:
init_s = torch.tensor(fixed_scale, dtype=dtype)
init_t = torch.tensor(translations, dtype=dtype)
init_r = torch.tensor(rotations, dtype=dtype)
models[idx].reset_params(transl=init_t, global_orient=init_r, scale=init_s)
if kwargs.get('use_vposer') or kwargs.get('use_motionprior'):
with torch.no_grad():
setting['pose_embedding'][idx].fill_(0)
# Visualize the initialized results
if False:
import os
from core.utils.render import Renderer
if kwargs.get('use_vposer'):
vposer = setting['vposer']
init_pose = vposer.decode(
setting['pose_embedding'][idx], output_type='aa').view(
frames_seq, -1)
elif kwargs.get('use_motionprior'):
vposer = setting['vposer']
init_pose = vposer.decode(
setting['pose_embedding'][idx], t=setting['pose_embedding'][idx].shape[0]).view(
setting['pose_embedding'][idx].shape[0], -1)
else:
init_pose = torch.zeros((frames_seq, 69), dtype=dtype, device=device)
model_output = models[idx](return_verts=True, return_full_pose=True, body_pose=init_pose)
for i, (joints, verts) in enumerate(zip(model_output.joints.detach().cpu().numpy(), model_output.vertices.detach().cpu().numpy())):
for v in range(1):
img = cv2.imread(os.path.join(dataset_obj.img_folder, data['img_path'][v][i]))
render = Renderer(resolution=(img.shape[1], img.shape[0]))
img = render(verts, models[idx].faces, extris[v][:3,:3].copy(), extris[v][:3,3].copy(), intris[v].copy(), img.copy(), color=[1,1,0.9], viz=False)
render.vis_img("img", img)
render.renderer.delete()
del render
del model_output
torch.cuda.empty_cache()
data['keypoints'] = keypoints
data['flags'] = flags
return data
def fix_params(setting, scale=None, shape=None):
"""
Use the fixed shape and scale parameters.
"""
dtype = setting['dtype']
models = setting['model']
for model in models:
init_t = model.transl
init_r = model.global_orient
init_s = model.scale
init_shape = model.betas
if scale is not None:
init_s = torch.tensor(scale, dtype=dtype)
model.scale.requires_grad = False
if shape is not None:
init_shape = torch.tensor(shape, dtype=dtype)
model.betas.requires_grad = False
model.reset_params(transl=init_t, global_orient=init_r, scale=init_s, betas=init_shape)
def appearance_align(data, dataset_obj):
'''
We assume the first frame is aligned.
'''
torso = [i for i in range(26)] #[5,6,11,12,18,19]
n_views = len(data['img_path'])
appearance_buffer = []
appearances = data['appearances']
keypoints = data['keypoints']
aligned_keypoints = [[[None for n in range(dataset_obj.num_people)] for f in range(dataset_obj.frames)] for v in range(n_views)]
aligned_appearances= [[[None for n in range(dataset_obj.num_people)] for f in range(dataset_obj.frames)] for v in range(n_views)]
# Load initial appearance for each person based on the first frame.
for idx in range(dataset_obj.num_people):
app_t = []
for v in range(n_views):
if appearances[v][0][idx] is not None:
app_t.append(appearances[v][0][idx])
app_t = np.array(app_t)
app_t = np.mean(app_t, axis=0)
appearance_buffer.append(app_t)
appearance_buffer = np.array(appearance_buffer)
# Align each frame
for f in range(dataset_obj.frames):
if f == 105:
print(1)
for v in range(n_views):
app_t = [[i, app] for i, app in enumerate(appearances[v][f]) if app is not None]
if len(app_t) < 1:
continue
apps = np.array([ap[1] for ap in app_t])[None,:] * 100
temp_buffer = appearance_buffer.copy()[:,None] * 100
loss = np.linalg.norm(apps - temp_buffer, axis=-1)
if f > 0:
kep_lt = np.ones((dataset_obj.num_people, len(torso), 3)) * 100
non_idx = []
for i, kep in enumerate(aligned_keypoints[v][f-1]):
if kep is not None:
kep_lt[i] = kep[torso]
else:
non_idx.append(i)
#kep_lt = [[i, kep] for i, kep in enumerate(keypoints[v][f-1]) if kep is not None]
kep_t = [[i, kep[torso]] for i, kep in enumerate(keypoints[v][f]) if kep is not None]
keplt = kep_lt[:,None]
kept = np.array([kep[1] for kep in kep_t])[None,:]
loss_kp = np.linalg.norm(keplt[...,:2] - kept[...,:2], axis=-1)
conf = np.sqrt(keplt[...,-1] * kept[...,-1])
loss_kp = (loss_kp * conf).sum(axis=-1)/conf.sum(axis=-1)
# if len(non_idx) > 0:
# loss_kp[non_idx] = 0
# loss = loss + loss_kp
for igt, gt in enumerate(appearance_buffer):
if loss[igt].min() > 100:
continue
bestid = np.argmin(loss[igt])
loss[:,bestid] = 1000
if f > 0 and aligned_keypoints[v][f-1][igt] is not None:
dis = np.linalg.norm(aligned_keypoints[v][f-1][igt][:,:2] - keypoints[v][f][app_t[bestid][0]][:,:2], axis=-1)
conf | |
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import time
import traceback
from github import GithubException, GithubObject
from issue_finder import CommitFinder, IssueFinder
from mantisdump import MantisDump, MantisSchema
# Python 3 removed 'unicode'; alias it to 'str' for compatibility
if sys.version_info[0] >= 3:
unicode = str
def pluralize(number):
"Trivial method to return an 's' if 'number' is not equal to 1"
return "" if number == 1 else "s"
class MantisConverter(object):
LABEL_COLORS = {
"new": "fcbdbd",
"feedback": "e3b7eb",
"acknowledged": "ffcd85",
"confirmed": "fff494",
"assigned": "c2dfff",
"resolved": "d2f5b0",
}
def __init__(self, mantis_dump, svndb, gitrepo, project_names=None,
verbose=False):
# GitHub or local repo
if gitrepo is None:
raise Exception("Please specify a Git repo object")
self.__gitrepo = gitrepo
if mantis_dump is None:
raise Exception("Please specify the Mantis dump file")
if not os.path.exists(mantis_dump):
raise Exception("Mantis dump file \"%s\" does not exist" %
(mantis_dump, ))
# list of Mantis projects associated with the Subversion project
self.__project_names = project_names
self.__close_resolved = False
self.__preserve_all_status = False
self.__preserve_resolved = False
# cached lists of GitHub issue labels and milestones
# (loaded when needed)
self.__labels = None
self.__milestones = None
# dictionary mapping Mantis issue numbers to GitHub issue numbers
self.__mantis2github = {}
# list of missing issues
self.__missing = []
# get the list of all Mantis issues
self.__all_issues = self.load_issues(mantis_dump, verbose=False)
# get the list of Mantis issues referenced in SVN log messages
# and SVN commits referenced in Mantis notes
if svndb is None:
self.__svn_issues = None
else:
self.__svn_issues = self.find_svn_issues(svndb)
# ordered list of Mantis issues for this project
self.__project_issue_numbers = \
self.__create_issue_order(verbose=verbose)
if verbose:
if self.__project_names is None:
pstr = "all projects"
else:
pstr = ", ".join(self.__project_names)
num_issues = len(self.__project_issue_numbers)
print("Found %d issue%s (out of %d total) for %s" %
(num_issues, pluralize(num_issues),
len(self.__all_issues), pstr))
def __len__(self):
return len(self.__project_issue_numbers)
def __add_github_label(self, label_name):
if self.__labels is None:
tmplist = {}
for label in self.__gitrepo.get_labels():
tmplist[label.name] = label
self.__labels = tmplist
if label_name in self.__labels:
return self.__labels[label_name]
if label_name not in self.LABEL_COLORS:
raise Exception("No color found for issue status \"%s\"" %
label_name)
color = self.LABEL_COLORS[label_name]
description = "Mantis status %s" % label_name
try:
label = self.__gitrepo.create_label(label_name, color, description)
except GithubException:
raise Exception("Cannot create label %s color %s (%s)" %
(label_name, color, description))
self.__labels[label.name] = label
return label
def __add_github_milestone(self, milestone_name):
if self.__milestones is None:
tmplist = {}
for milestone in self.__gitrepo.get_milestones():
tmplist[milestone.title] = milestone
self.__milestones = tmplist
if milestone_name in self.__milestones:
return self.__milestones[milestone_name]
description = milestone_name
try:
milestone = self.__gitrepo.create_milestone(milestone_name,
state="open",
description=description)
except GithubException:
raise Exception("Cannot create milestone %s (%s)" %
(milestone_name, description))
self.__milestones[milestone.title] = milestone
return milestone
def __create_issue_order(self, verbose=False):
"""
Create an ordered list of Mantis issue numbers for this project
"""
# find Mantis issues referenced in SVN commits
references = {}
if self.__svn_issues is not None:
for rev, numlist in self.__svn_issues.items():
for inum in numlist:
if inum is None:
print("ERROR: Ignoring rev%d issue number set"
" to None" % (rev, ), file=sys.stderr)
else:
references[inum] = 1
if verbose:
print("Found %d referenced Mantis issues" % len(references))
# find Mantis issues for the specified project(s)
for issue in self.__all_issues.values():
if self.__project_names is None or \
issue.project in self.__project_names:
if issue.id is None:
print("ERROR: Found ID set to None in issue %s" %
(issue, ), file=sys.stderr)
else:
references[issue.id] = 0
if verbose:
num_refs = len(references)
print("Found %d total Mantis issue%s" %
(num_refs, pluralize(num_refs)))
mantis_ids = list(references.keys())
mantis_ids.sort()
return mantis_ids
@classmethod
def __mantis_issue_to_strings(cls, issue, foreign_project=None):
"""
Convert a Mantis issue to a title string and a body string for GitHub
"""
title_prefix = None
title = None
message = None
for text in (issue.summary, issue.description):
if text is not None and isinstance(text, bytes):
text = text.decode("utf-8", "ignore")
if text is not None and text != "":
if title is None:
title_prefix = "[%s on %s] " % \
(issue.reporter, issue.date_submitted)
title = text
else:
message = "\n" + text
if title is None:
print("WARNING: No summary/description for issue #%d" %
(issue.id, ), file=sys.stderr)
title = "Mantis issue %d" % issue.id
if foreign_project is not None:
title = "%s: %s" % (foreign_project, title)
if title_prefix is not None:
title = "%s: %s" % (title_prefix, title)
for fld, text in (("Steps to Reproduce", issue.steps_to_reproduce),
("Additional Information",
issue.additional_information)):
if text is not None and text != "":
if message is None:
message = "%s: %s" % (fld, text)
else:
message += "\n\n%s: %s" % (fld, text)
return title, message
@classmethod
def __mantis_note_to_string(cls, note, database=None):
text = "[%s on %s]" % (note.reporter, note.last_modified)
if not isinstance(note.text, bytes):
fixed = note.text
else:
fixed = note.text.decode("utf-8", "ignore")
# if there's no SVN commit database,
        # we can't map SVN revisions to Git hashes
if database is None:
return text + "\n" + fixed
# add Git hashes to all notes which refer to SVN revision numbers
for line in fixed.split("\n"):
svn_rev = CommitFinder.find_in_text(line)
# if we find an SVN revision, link it to the Git hash
cstr = "\n"
if svn_rev is not None:
result = database.find_hash_from_revision(revision=svn_rev)
if result is not None and result[1] is not None:
cstr = "\n[commit %s] " % (result[1], )
# add this line to the note
text += cstr + line
return text
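    # Worked example (invented note text): if a note line such as "Merged in r1234"
    # is recognized by CommitFinder.find_in_text() as SVN revision 1234, and the
    # commit database maps revision 1234 to a Git hash like abc123, the converted
    # comment line becomes "[commit abc123] Merged in r1234".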
def __open_issue(self, issue, database=None):
"Open a GitHub issue which copies the Mantis issue"
if self.__project_names is None or \
issue.project in self.__project_names:
foreign_project = None
else:
foreign_project = issue.project
labels = GithubObject.NotSet
if not issue.is_closed:
if self.__preserve_all_status or \
(self.__preserve_resolved and issue.is_resolved):
label_name = issue.status
label = self.__add_github_label(label_name)
labels = (label, )
ms_name = None
if issue.fixed_in_version != "":
ms_name = issue.fixed_in_version
elif issue.target_version != "":
ms_name = issue.target_version
if ms_name is None:
milestone = GithubObject.NotSet
else:
milestone = self.__add_github_milestone(ms_name)
title, message = \
self.__mantis_issue_to_strings(issue, foreign_project)
retries = 6
sleep_secs = 60
while retries > 0:
try:
gh_issue = self.__gitrepo.create_issue(title, message,
milestone=milestone,
labels=labels)
break
except GithubException as gex:
if gex.status != 403:
raise
retries -= 1
if retries <= 0:
print("WARNING: Failed to open issue for Mantis #%s" %
(issue.id, ), file=sys.stderr)
return None
print("\r%s\rMantis #%s sleeping for %s seconds ." %
(" "*60, issue.id, sleep_secs), end="")
sys.stdout.flush()
time.sleep(sleep_secs)
sleep_secs += 60
print(".. retrying \"%s\"\r" % (title, ))
sys.stdout.flush()
continue
for note in issue.notes:
message = self.__mantis_note_to_string(note, database=database)
try:
gh_issue.create_comment(message)
except GithubException:
raise Exception("Cannot create GitHub issue #%d"
" comment \"%s\"" % (gh_issue.number, message))
self.__mantis2github[issue.id] = gh_issue
return gh_issue
def add_issues(self, mantis_id=None, add_after=False, database=None,
pause_count=None, pause_seconds=None, report_progress=None,
verbose=False):
"""
If 'mantis_id' is None, add all issues
If `add_after` is False, add issues with IDs less than `mantis_id`.
If `add_after` is True, add issues with IDs greater than `mantis_id`.
"""
issues = []
for inum in self.__project_issue_numbers:
if mantis_id is not None:
if not add_after and inum >= mantis_id:
# we've added all issues before 'mantis_id', exit the loop
break
if add_after and inum <= mantis_id:
# we haven't started adding yet, keep looking
continue
if inum in self.__mantis2github:
# issue has been added, continue looking
continue
if inum not in self.__all_issues:
if inum not in self.__missing:
# add to list of missing issues and complain
self.__missing.append(inum)
if mantis_id is None:
extra = ""
else:
extra = " (before adding #%s)" % (mantis_id, )
print("ERROR: Cannot add missing issue #%s%s" %
(inum, extra), file=sys.stderr)
continue
issues.append(self.__all_issues[inum])
if verbose:
print("\nOpening %d issues%s" %
(len(issues), "" if mantis_id is None
else " preceeding Mantis #%s" % (mantis_id, )))
else:
# start the Mantis progress on a new line
print()
# attempt to create all the preceding issues
for count, issue in enumerate(issues):
if report_progress is not None:
report_progress(count, len(issues), "Mantis", "issue",
issue.id)
try:
gh_issue = self.__open_issue(issue, database=database)
if gh_issue is not None:
if issue.is_closed or (self.__close_resolved and
issue.is_resolved):
gh_issue.edit(body="No associated GitHub commit",
state="closed")
except KeyboardInterrupt:
raise
except:
print("Failed to open & close issue #%s (%d of %d)" %
(issue.id, count, len(issues)), file=sys.stderr)
traceback.print_exc()
gh_issue = None
# if requested, pause a bit after adding the number of issues
# specified by 'pause_count'
            if pause_count is not None and pause_seconds is not None and \
                    (count + 1) % pause_count == 0:
                time.sleep(pause_seconds)
# Repository: leozz37/makani
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import os
import string
import tempfile
import unittest
import jsmin
import makani
from makani.analysis.checks import base_check
from makani.analysis.checks import check_range
from makani.analysis.checks import gradebook
from makani.analysis.checks import gradebook_base_check
from makani.analysis.checks import log_util
from makani.gs.monitor2.apps.layout import stoplights
from makani.gs.monitor2.apps.receiver import aio_util
from makani.gs.monitor2.project import settings
from makani.lib.python import struct_tree
from makani.lib.python.h5_utils import h5_io
import mock
import numpy
_GRADEBOOK_FILE = os.path.join(
makani.HOME, 'gs/monitor2/apps/plugins/layouts/gradebook.json')
def _Multiply(v, multiplier):
return v * multiplier
def IsDevicePopulated(populated, device_id): # pylint: disable=unused-argument
return True
class PatchCheckers(object):
def __init__(self):
self._patchers = [
mock.patch(
('makani.analysis.checks.avionics_util.'
'IsDevicePopulated'),
IsDevicePopulated),
]
def __enter__(self):
for p in self._patchers:
p.start()
def __exit__(self, exc_type, exc_value, traceback):
for p in self._patchers:
p.stop()
class TestAnalysis(unittest.TestCase):
"""Test utilities in the analysis module."""
def _GradebookFromString(self, gradebook_string):
return gradebook.Gradebook(json.loads(jsmin.jsmin(gradebook_string)),
settings.NETWORK_YAML)
def testSeparateFieldIndex(self):
segments, slices = base_check.SeparateFieldIndex('a[:].b[0].c')
self.assertEqual(segments, ['a', 'b', 'c'])
self.assertEqual(slices, [':', '0'])
segments, slices = base_check.SeparateFieldIndex('ab[:][0].c')
self.assertEqual(segments, ['ab', 'c'])
self.assertEqual(slices, [':', '0'])
segments, slices = base_check.SeparateFieldIndex('a.bc[:][0]')
self.assertEqual(segments, ['a', 'bc'])
self.assertEqual(slices, [':', '0'])
def testIndicesToOrderAndDedup(self):
sequence = numpy.array([1, 10, 10, 2, 2, 3, 4, 5, 4, 3, 7, 7, 7, 8, 8, 1])
prefix_template = string.Template('$message_type.$aio_node')
data = {'a': {'b': {'aio_header': {'sequence': sequence}}}}
indices = log_util.MessageOrderedIndices(
struct_tree.StructTree(data), 'a', 'b', prefix_template, wraparound=10)
reference = numpy.array([1, 0, 3, 5, 6, 7, 10, 13, 15])
output = numpy.array([10, 1, 2, 3, 4, 5, 7, 8, 1])
self.assertTrue(numpy.array_equal(indices, reference))
self.assertTrue(numpy.array_equal(sequence[indices], output))
reference = numpy.array([1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1])
bitmask = log_util.MessageDedupSelection(
struct_tree.StructTree(data), 'a', 'b', prefix_template, wraparound=10)
self.assertTrue(numpy.array_equal(bitmask, reference))
def testRangeClasses(self):
array = numpy.array([0, 1, 2, 3, 4])
crange = check_range.Singleton(1)
self.assertIn(1, crange)
self.assertNotIn(3, crange)
self.assertIn(numpy.array([1, 1]), crange)
self.assertNotIn(numpy.array([1, 2]), crange)
self.assertTrue(numpy.array_equal(
crange.Select(array), numpy.array([0, 1, 0, 0, 0])))
self.assertEqual(str(crange), '1')
crange = check_range.Container({1, 'a'})
self.assertIn(1, crange)
self.assertIn('a', crange)
self.assertNotIn(3, crange)
self.assertNotIn('b', crange)
self.assertIn(numpy.array([1, 1]), crange)
self.assertNotIn(numpy.array([1, 2]), crange)
self.assertTrue(numpy.array_equal(
crange.Select(array), numpy.array([0, 1, 0, 0, 0])))
self.assertEqual(str(crange), "{1, 'a'}")
crange = check_range.Container({1: 2, 'a': 'b'})
self.assertIn(1, crange)
self.assertIn('a', crange)
self.assertNotIn(3, crange)
self.assertNotIn('b', crange)
self.assertIn(numpy.array([1, 1]), crange)
self.assertNotIn(numpy.array([1, 2]), crange)
self.assertTrue(numpy.array_equal(
crange.Select(array), numpy.array([0, 1, 0, 0, 0])))
self.assertEqual(str(crange), "{1, 'a'}")
crange = check_range.Container([1, 2, 'a', 'b'])
self.assertIn(1, crange)
self.assertIn('a', crange)
self.assertNotIn(3, crange)
self.assertNotIn('c', crange)
self.assertIn(numpy.array([1, 2]), crange)
self.assertNotIn(numpy.array([1, 3]), crange)
self.assertTrue(numpy.array_equal(
crange.Select(array), numpy.array([0, 1, 1, 0, 0])))
self.assertEqual(str(crange), "{1, 2, 'a', 'b'}")
crange = check_range.Interval([1, 3])
self.assertIn(1, crange)
self.assertIn(3, crange)
self.assertIn(2, crange)
self.assertNotIn(5, crange)
self.assertNotIn(0, crange)
self.assertIn(numpy.array([1, 2, 3]), crange)
self.assertNotIn(numpy.array([1, 2, 3, 4]), crange)
self.assertTrue(numpy.array_equal(
crange.Select(array), numpy.array([0, 1, 1, 1, 0])))
self.assertEqual(str(crange), '[1, 3]')
crange = check_range.Interval([1, 3], inclusiveness=[False, False])
self.assertIn(1.1, crange)
self.assertIn(2.9, crange)
self.assertIn(2, crange)
self.assertNotIn(1, crange)
self.assertNotIn(3, crange)
self.assertIn(numpy.array([2, 2]), crange)
self.assertNotIn(numpy.array([1, 2, 3]), crange)
self.assertTrue(numpy.array_equal(
crange.Select(array), numpy.array([0, 0, 1, 0, 0])))
self.assertEqual(str(crange), '(1, 3)')
crange = check_range.Interval([3, None])
self.assertIn(3, crange)
self.assertNotIn(2, crange)
self.assertIn(numpy.array([3, 4]), crange)
self.assertNotIn(numpy.array([1, 2, 3]), crange)
self.assertTrue(numpy.array_equal(
crange.Select(array), numpy.array([0, 0, 0, 1, 1])))
self.assertEqual(str(crange), '[3, inf]')
crange = check_range.Interval([None, 3])
self.assertIn(3, crange)
self.assertNotIn(4, crange)
self.assertIn(numpy.array([2, 3]), crange)
self.assertNotIn(numpy.array([2, 3, 4]), crange)
self.assertTrue(numpy.array_equal(
crange.Select(array), numpy.array([1, 1, 1, 1, 0])))
self.assertEqual(str(crange), '[-inf, 3]')
crange = check_range.Interval([None, None])
self.assertIn(4, crange)
self.assertIn(numpy.array([2, 2]), crange)
self.assertTrue(numpy.array_equal(
crange.Select(array), numpy.array([1, 1, 1, 1, 1])))
self.assertEqual(str(crange), '[-inf, inf]')
crange = check_range.Container([])
self.assertNotIn(4, crange)
self.assertNotIn(numpy.array([2, 2]), crange)
self.assertTrue(numpy.array_equal(
crange.Select(array), numpy.array([0, 0, 0, 0, 0])))
self.assertEqual(str(crange), '{}')
with self.assertRaises(AssertionError):
check_range.Singleton(None)
def testRangeChecker(self):
array = numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
cranges = check_range.RangeChecker([{3, 4}, 5, [7], [8, None], [None, 1]])
self.assertIn(0, cranges)
self.assertIn(1, cranges)
self.assertNotIn(2, cranges)
self.assertIn(3, cranges)
self.assertIn(4, cranges)
self.assertIn(5, cranges)
self.assertNotIn(6, cranges)
self.assertIn(7, cranges)
self.assertIn(8, cranges)
self.assertIn(9, cranges)
self.assertTrue(numpy.array_equal(
cranges.Select(array), numpy.array([1, 1, 0, 1, 1, 1, 0, 1, 1, 1])))
cranges = check_range.RangeChecker([])
self.assertNotIn(4, cranges)
self.assertTrue(numpy.array_equal(
cranges.Select(array), numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])))
with self.assertRaises(TypeError):
check_range.RangeChecker(None)
cranges = check_range.RangeChecker([[None, None]])
self.assertIn(123, cranges)
self.assertIn('a', cranges)
cranges = check_range.RangeChecker([None])
self.assertNotIn(123, cranges)
self.assertNotIn('a', cranges)
def testRolling(self):
array = numpy.array([0, 1, 2, 3, 4, 5, 6])
rolled = numpy.array([
[0, 1, 2, 3],
[2, 3, 4, 5],
])
self.assertTrue(numpy.array_equal(log_util.Rolling(array, 4, 2), rolled))
rolled = numpy.array([
[1, 2, 3, 4],
[3, 4, 5, 6],
])
self.assertTrue(numpy.array_equal(log_util.Rolling(array, 4, 2, False),
rolled))
def testAioCheckByRange(self):
normal_ranges = check_range.RangeChecker([[5, 8], [15, 18]])
warning_ranges = check_range.RangeChecker([0, [3, 20]])
self.assertEqual(
aio_util.CheckByRange(
5, normal_ranges, warning_ranges, 'test')[0]['stoplight'],
stoplights.STOPLIGHT_NORMAL)
self.assertEqual(
aio_util.CheckByRange(
0, normal_ranges, warning_ranges, 'test')[0]['stoplight'],
stoplights.STOPLIGHT_WARNING)
self.assertEqual(
aio_util.CheckByRange(
1, normal_ranges, warning_ranges, 'test')[0]['stoplight'],
stoplights.STOPLIGHT_ERROR)
array = numpy.array([5, 15, 18])
self.assertEqual(
aio_util.CheckByRange(
array, normal_ranges, warning_ranges, '')[0]['stoplight'],
stoplights.STOPLIGHT_NORMAL)
array = numpy.array([0, 5, 18])
self.assertEqual(
aio_util.CheckByRange(
array, normal_ranges, warning_ranges, '')[0]['stoplight'],
stoplights.STOPLIGHT_WARNING)
array = numpy.array([1, 5, 10, 18])
self.assertEqual(
aio_util.CheckByRange(
array, normal_ranges, warning_ranges, '')[0]['stoplight'],
stoplights.STOPLIGHT_ERROR)
def testLogCheckByRange(self):
normal_ranges = check_range.RangeChecker([[5, 8], [15, 18]])
warning_ranges = check_range.RangeChecker([0, [3, 20]])
array = numpy.array([0, 1, 5, 10, 18])
results = log_util.CheckByRange(
array, normal_ranges, warning_ranges, min_gap=1000)
self.assertEqual(
results['warning'],
{'total': 5, 'count': 2, 'range': [0, 10], 'sections': [(0, 4)],
'expecting': '[[5, 8], [15, 18]]'})
self.assertEqual(
results['error'],
{'total': 5, 'count': 1, 'range': [1, 1], 'sections': [(1, 2)],
'expecting': '[0, [3, 20]]'})
def _GetValidSampleGradebook(self):
gradebook_string = """
{
"imports": {
"motor_thermal_types":
"makani.avionics.common.motor_thermal_types",
"analysis_tests":
"makani.analysis.checks.tests"
},
"checks": {
"ServoStatus": {
"ServoA4": {
"angle_desired": {
"normal_ranges": [[0, 90]],
"warning_ranges": "any"
}
}
},
"MotorStatus": {
"(.*)": {
"temps[motor_thermal_types.kMotorThermalChannelBoard]": {
"normal_ranges": [[60, 70], 0],
"warning_ranges": "any",
// Callback to preprocess the data.
"callback": "analysis_tests._Multiply",
"callback_args": [2],
"name": "Board Temperature"
},
"temps[motor_thermal_types.kMotorThermalChannelControllerAir]": {
"normal_ranges": [[60, 70], 0],
"warning_ranges": [[60, 90]],
// Callback to preprocess the data.
"callback": "analysis_tests._Multiply",
"callback_args": [2]
}
}
}
}
}
"""
return self._GradebookFromString(gradebook_string)
def testGradebook(self):
book = self._GetValidSampleGradebook()
criteria = book.GetCriteria(['ServoStatus', 'ServoA4', 'angle_desired'])
self.assertIn(80, criteria.normal_ranges)
self.assertNotIn(100, criteria.normal_ranges)
self.assertIn(100, criteria.warning_ranges)
criteria = book.GetCriteria(['MotorStatus', 'MotorPti', 'temps[0]'])
self.assertIn(0, criteria.normal_ranges)
self.assertIn(60, criteria.normal_ranges)
self.assertIn(70, criteria.normal_ranges)
self.assertNotIn(20, criteria.normal_ranges)
field_map = book.GetFieldMap('$message_type.')
self.assertEqual(set(field_map.keys()), {'ServoStatus', 'MotorStatus'})
self.assertEqual(set(field_map['MotorStatus'].keys()), {
''.join(x)
for x in itertools.product(('DynoMotor', 'Motor'), 'PS', 'bt', 'io')})
self.assertEqual(field_map['MotorStatus']['MotorPti'],
{
'temps[0]': ['MotorStatus.temps[0]'],
'temps[1]': ['MotorStatus.temps[1]'],
})
def testGradebookOnAio(self):
book = self._GetValidSampleGradebook()
messages = struct_tree.StructTree({
'MotorStatus': {
'MotorPti': {
'temps': [30, 40, 50, 60]
}
}
}, True)
checks = gradebook_base_check.GradebookChecks()
checks.Initialize(book, for_log=False, use_full_name=False)
for item in checks.List():
item.Check(*item.Populate(messages))
if item.FieldIndex() == 'MotorStatus.MotorPti.temps[0]':
self.assertEqual(item.GetResults(), [{
'name': 'Value',
'value': 60,
'stoplight': 3
}])
elif item.FieldIndex() == 'MotorStatus.MotorPti.temps[1]':
self.assertEqual(item.GetResults(), [{
'name': 'Value',
'value': 80,
'stoplight': 2
}])
else:
self.assertFalse(item.GetResults())
def testGradebookOnLog(self):
book = self._GetValidSampleGradebook()
message = numpy.array(
[(((30., 31.),), (1,)),
(((50., 51.),), (3,)),
(((40., 41.),), (2,)),
(((60., 61.),), (4,))],
dtype=[
('message', [('temps', 'f', (2,))]),
('aio_header', [('sequence', '>u2'),])
]
)
dataset = {
'messages': {
'kAioNodeMotorPti': {
'kMessageTypeMotorStatus': message
}
}
}
with tempfile.NamedTemporaryFile() as temp_hdf5:
h5_io.H5Dump(temp_hdf5.name, dataset)
checks = gradebook_base_check.GradebookChecks()
checks.Initialize(book, for_log=True, use_full_name=True)
for item in checks.List():
item.Check(*item.Populate(struct_tree.StructTree(temp_hdf5.name)))
# After ordering the sequence and applying the callback, temps[0]
# becomes [60.0, 80.0, 100.0, 120.0] and temps[1] becomes
# [62.0, 82.0, 102.0, 122.0].
if item.FieldIndex() == 'MotorStatus.MotorPti.temps[0]':
self.assertEqual(item.GetResults(), {
'MotorPti.Board Temperature (Value)': {
'warning': {'count': 3, 'range': [80.0, 120.0], 'total': 4,
'sections': [(1, 4)],
'expecting': '[[60, 70], 0]'}
}
})
self.assertTrue(item.HasWarnings())
self.assertFalse(item.HasErrors())
elif item.FieldIndex() == 'MotorStatus.MotorPti.temps[1]':
self.assertEqual(item.GetResults(), {
'MotorPti.temps[1] (Value)': {
'warning': {'count': 1, 'range': [82.0, 82.0], 'total': 4,
'sections': [(1, 2)],
'expecting': '[[60, 70], 0]'},
'error': {'count': 2, 'range': [102.0, 122.0], 'total': 4,
'sections': [(2, 4)],
'expecting': '[[60, 90]]'}
}
})
self.assertTrue(item.HasWarnings())
self.assertTrue(item.HasErrors())
else:
self.assertFalse(item.GetResults())
self.assertFalse(item.HasWarnings())
self.assertFalse(item.HasErrors())
def testBadGradebook(self):
gradebook_string = """
{
"imports": {
"bad_module": "bad_package.bad_module"
}
}
"""
with self.assertRaises(gradebook.GradebookParserError):
self._GradebookFromString(gradebook_string)
gradebook_string = """
{
"checks": {
"BadMessage": {}
}
}
"""
with self.assertRaises(gradebook.GradebookParserError):
self._GradebookFromString(gradebook_string)
gradebook_string = """
{
"imports": {
"analysis_tests":
"makani.analysis.checks.tests"
},
"checks": {
"ServoStatus": {
"ServoA4": {
"angle_bias": {
"normal_ranges": [[0, 90]],
"warning_ranges": "any",
// A missing function from a module.
"callback": "analysis_tests._Divide"
}
}
}
}
}
"""
with self.assertRaises(gradebook.GradebookParserError):
self._GradebookFromString(gradebook_string)
gradebook_string = """
{
"checks": {
"ServoStatus": {
| |
import pytest
import matplotlib.pyplot as plt
import numpy as np
import popkinmocks as pkm
@pytest.fixture
def my_component():
ssps = pkm.model_grids.milesSSPs()
ssps.logarithmically_resample(dv=100.)
ssps.calculate_fourier_transform()
ssps.get_light_weights()
cube = pkm.ifu_cube.IFUCube(ssps=ssps, nx=9, ny=10)
gc1 = pkm.components.growingDisk(cube=cube, rotation=0., center=(0,0))
gc1.set_p_t(lmd=2., phi=0.8)
gc1.set_p_x_t(sig_x_lims=(0.5, 0.2),
sig_y_lims=(0.03, 0.1),
alpha_lims=(1.2, 0.8))
gc1.set_t_dep(sig_x=0.5,
sig_y=0.1,
alpha=3.,
t_dep_in=0.5,
t_dep_out=5.)
gc1.set_p_z_tx()
gc1.set_mu_v(sig_x_lims=(0.5, 0.1),
sig_y_lims=(0.1, 0.1),
rmax_lims=(0.5, 1.1),
vmax_lims=(250., 100.),
vinf_lims=(60., 40.))
gc1.set_sig_v(sig_x_lims=(0.7, 0.1),
sig_y_lims=(0.2, 0.1),
alpha_lims=(3.0, 2.5),
sig_v_in_lims=(70., 50.),
sig_v_out_lims=(80., 60.))
gc1.evaluate_ybar()
return ssps, cube, gc1
@pytest.fixture
def my_kinematic_maps():
delta_E_v_x = np.array([
[-2.63231254, -2.91232087, -2.98508593, -1.98673529, 3.78808848,
3.78808848, -1.98673529, -2.98508593, -2.91232087, -2.63231254],
[-2.13502906, -2.45075383, -2.69793969, -2.22245885, 3.19109946,
3.19109946, -2.22245885, -2.69793969, -2.45075383, -2.13502906],
[-1.51721452, -1.80255015, -2.1309258 , -2.17208163, 1.81371172,
1.81371172, -2.17208163, -2.1309258 , -1.80255015, -1.51721452],
[-0.79161243, -0.96531765, -1.21070541, -1.49427586, -0.1610911 ,
-0.1610911 , -1.49427586, -1.21070541, -0.96531765, -0.79161243],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ],
[ 0.79161243, 0.96531765, 1.21070541, 1.49427586, 0.1610911 ,
0.1610911 , 1.49427586, 1.21070541, 0.96531765, 0.79161243],
[ 1.51721452, 1.80255015, 2.1309258 , 2.17208163, -1.81371172,
-1.81371172, 2.17208163, 2.1309258 , 1.80255015, 1.51721452],
[ 2.13502906, 2.45075383, 2.69793969, 2.22245885, -3.19109946,
-3.19109946, 2.22245885, 2.69793969, 2.45075383, 2.13502906],
[ 2.63231254, 2.91232087, 2.98508593, 1.98673529, -3.78808848,
-3.78808848, 1.98673529, 2.98508593, 2.91232087, 2.63231254]])
delta_var_v_x = np.array([
[-280.21425525, -291.19986619, -291.65748034, -244.40588447,
-293.61176514, -293.61176514, -244.40588447, -291.65748034,
-291.19986619, -280.21425525],
[-262.32619221, -272.2404863 , -278.53941553, -250.2600659 ,
-257.36865076, -257.36865076, -250.2600659 , -278.53941553,
-272.2404863 , -262.32619221],
[-245.37601092, -251.26140478, -257.12608689, -246.94594179,
-202.56176355, -202.56176355, -246.94594179, -257.12608689,
-251.26140478, -245.37601092],
[-232.85496802, -233.77421875, -233.61601769, -227.79587279,
-181.7993307 , -181.7993307 , -227.79587279, -233.61601769,
-233.77421875, -232.85496802],
[-228.18799987, -226.78761345, -222.59783462, -211.84505418,
-195.8310754 , -195.8310754 , -211.84505418, -222.59783462,
-226.78761345, -228.18799987],
[-232.85496802, -233.77421875, -233.61601769, -227.79587279,
-181.7993307 , -181.7993307 , -227.79587279, -233.61601769,
-233.77421875, -232.85496802],
[-245.37601092, -251.26140478, -257.12608689, -246.94594179,
-202.56176355, -202.56176355, -246.94594179, -257.12608689,
-251.26140478, -245.37601092],
[-262.32619221, -272.2404863 , -278.53941553, -250.2600659 ,
-257.36865076, -257.36865076, -250.2600659 , -278.53941553,
-272.2404863 , -262.32619221],
[-280.21425525, -291.19986619, -291.65748034, -244.40588447,
-293.61176514, -293.61176514, -244.40588447, -291.65748034,
-291.19986619, -280.21425525]])
delta_skew_v_x = np.array([
[-0.14324404, -0.15862658, -0.16467698, -0.11463442, 0.18223122,
0.18223122, -0.11463442, -0.16467698, -0.15862658, -0.14324404],
[-0.1171153 , -0.13410869, -0.14820029, -0.1258044 , 0.15951882,
0.15951882, -0.1258044 , -0.14820029, -0.13410869, -0.1171153 ],
[-0.08405618, -0.09957583, -0.11747619, -0.121175 , 0.09641303,
0.09641303, -0.121175 , -0.11747619, -0.09957583, -0.08405618],
[-0.04424465, -0.05391409, -0.0675346 , -0.08357988, -0.009748 ,
-0.009748 , -0.08357988, -0.0675346 , -0.05391409, -0.04424465],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ],
[ 0.04424465, 0.05391409, 0.0675346 , 0.08357988, 0.009748 ,
0.009748 , 0.08357988, 0.0675346 , 0.05391409, 0.04424465],
[ 0.08405618, 0.09957583, 0.11747619, 0.121175 , -0.09641303,
-0.09641303, 0.121175 , 0.11747619, 0.09957583, 0.08405618],
[ 0.1171153 , 0.13410869, 0.14820029, 0.1258044 , -0.15951882,
-0.15951882, 0.1258044 , 0.14820029, 0.13410869, 0.1171153 ],
[ 0.14324404, 0.15862658, 0.16467698, 0.11463442, -0.18223122,
-0.18223122, 0.11463442, 0.16467698, 0.15862658, 0.14324404]])
delta_kurt_v_x = np.array([
[-0.05086873, -0.05405702, -0.05568311, -0.04437223, -0.03793468,
-0.03793468, -0.04437223, -0.05568311, -0.05405702, -0.05086873],
[-0.04656647, -0.04921657, -0.05156603, -0.0461324 , -0.03620234,
-0.03620234, -0.0461324 , -0.05156603, -0.04921657, -0.04656647],
[-0.04239277, -0.04396597, -0.04576849, -0.04462888, -0.03099179,
-0.03099179, -0.04462888, -0.04576849, -0.04396597, -0.04239277],
[-0.03917829, -0.03946778, -0.03966134, -0.03926559, -0.03064167,
-0.03064167, -0.03926559, -0.03966134, -0.03946778, -0.03917829],
[-0.03794008, -0.03760283, -0.03672604, -0.03514706, -0.03743389,
-0.03743389, -0.03514706, -0.03672604, -0.03760283, -0.03794008],
[-0.03917829, -0.03946778, -0.03966134, -0.03926559, -0.03064167,
-0.03064167, -0.03926559, -0.03966134, -0.03946778, -0.03917829],
[-0.04239277, -0.04396597, -0.04576849, -0.04462888, -0.03099179,
-0.03099179, -0.04462888, -0.04576849, -0.04396597, -0.04239277],
[-0.04656647, -0.04921657, -0.05156603, -0.0461324 , -0.03620234,
-0.03620234, -0.0461324 , -0.05156603, -0.04921657, -0.04656647],
[-0.05086873, -0.05405702, -0.05568311, -0.04437223, -0.03793468,
-0.03793468, -0.04437223, -0.05568311, -0.05405702, -0.05086873]])
return delta_E_v_x, delta_var_v_x, delta_skew_v_x, delta_kurt_v_x
@pytest.fixture
def my_ybar():
ybar = np.array([[
[1.66503907e-07, 2.50790314e-07, 5.14674482e-07, 3.34792026e-07,
2.00169632e-07],
[1.72432910e-07, 2.73377303e-07, 6.41443532e-07, 3.87723843e-07,
2.11101778e-07],
[1.73808494e-07, 2.81770920e-07, 7.99925189e-07, 4.16908084e-07,
2.14249485e-07],
[1.69921633e-07, 2.67298718e-07, 6.14766128e-07, 3.76492694e-07,
2.07368985e-07],
[1.62462075e-07, 2.42726446e-07, 4.79021326e-07, 3.21478885e-07,
1.94621366e-07]],
[[1.82074881e-07, 2.76814846e-07, 5.81555277e-07, 3.73077668e-07,
2.19687251e-07],
[1.87718002e-07, 2.99395561e-07, 7.30464312e-07, 4.28388405e-07,
2.30306513e-07],
[1.88770248e-07, 3.06619783e-07, 8.85958564e-07, 4.54873360e-07,
2.32873273e-07],
[1.84696877e-07, 2.91174753e-07, 6.67850264e-07, 4.10589086e-07,
2.25614454e-07],
[1.76838721e-07, 2.64512976e-07, 5.25959705e-07, 3.50300927e-07,
2.11999609e-07]],
[[1.63627114e-07, 2.48525524e-07, 5.25187306e-07, 3.34836620e-07,
1.97338003e-07],
[1.68708135e-07, 2.68573140e-07, 6.52644330e-07, 3.83549190e-07,
2.06820312e-07],
[1.69921000e-07, 2.75567210e-07, 7.84323467e-07, 4.07850387e-07,
2.09491566e-07],
[1.66822897e-07, 2.63473972e-07, 6.13428884e-07, 3.72590034e-07,
2.03899524e-07],
[1.60325493e-07, 2.40759492e-07, 4.88627858e-07, 3.20425681e-07,
1.92487762e-07]],
[[1.70426217e-07, 2.57397205e-07, 5.38202276e-07, 3.45194536e-07,
2.05046627e-07],
[1.76854175e-07, 2.80221904e-07, 6.73254196e-07, 3.98333138e-07,
2.16412619e-07],
[1.79803094e-07, 2.91937009e-07, 8.44428120e-07, 4.32949595e-07,
2.21778297e-07],
[1.78412976e-07, 2.84434418e-07, 7.01377895e-07, 4.07341196e-07,
2.18828490e-07],
[1.73084470e-07, 2.63494388e-07, 5.58817773e-07, 3.56047725e-07,
2.08916742e-07]],
[[1.77229738e-07, 2.68601732e-07, 5.62901991e-07, 3.61334368e-07,
2.13535044e-07],
[1.83396155e-07, 2.91427877e-07, 7.09885256e-07, 4.15813189e-07,
2.24664803e-07],
[1.85709465e-07, 3.01607892e-07, 8.79930876e-07, 4.47673038e-07,
2.29084998e-07],
[1.83427421e-07, 2.91425538e-07, 7.06680623e-07, 4.15565366e-07,
2.24695361e-07],
[1.77224931e-07, 2.68370911e-07, 5.59722173e-07, 3.60511928e-07,
2.13473486e-07]],
[[1.75398267e-07, 2.65386099e-07, 5.51247442e-07, 3.56184177e-07,
2.11202651e-07],
[1.81699796e-07, 2.88524375e-07, 6.98556961e-07, 4.11228126e-07,
2.22533778e-07],
[1.84145868e-07, 2.99137885e-07, 8.77273315e-07, 4.44270026e-07,
2.27175489e-07],
[1.81950349e-07, 2.89169549e-07, 7.02702028e-07, 4.12528744e-07,
2.22915144e-07],
[1.75818229e-07, 2.66301210e-07, 5.55043114e-07, 3.57770563e-07,
2.11801036e-07]],
[[1.66938646e-07, 2.53055653e-07, 5.26550036e-07, 3.40034134e-07,
2.01183284e-07],
[1.72418890e-07, 2.74223204e-07, 6.66385134e-07, 3.91467558e-07,
2.11296351e-07],
[1.73990411e-07, 2.82320301e-07, 8.21770638e-07, 4.18706520e-07,
2.14549991e-07],
[1.71174183e-07, 2.70973774e-07, 6.47855433e-07, 3.84829848e-07,
2.09390566e-07],
[1.64896074e-07, 2.48648524e-07, 5.13397507e-07, 3.32670755e-07,
1.98279413e-07]]])
return ybar
@pytest.fixture
def my_second_component(my_component):
ssps, cube, gc1 = my_component
gc2 = pkm.components.growingDisk(cube=cube,
rotation=10.,
center=(0.05,-0.07))
gc2.set_p_t(lmd=1.6, phi=0.3)
gc2.set_p_x_t(sig_x_lims=(0.9, 0.5),
sig_y_lims=(0.9, 0.5),
alpha_lims=(1.1, 1.1))
gc2.set_t_dep(sig_x=0.9,
sig_y=0.8,
alpha=1.1,
t_dep_in=6.,
t_dep_out=1.)
gc2.set_p_z_tx()
gc2.set_mu_v(sig_x_lims=(0.5, 0.4),
sig_y_lims=(0.4, 0.6),
rmax_lims=(0.7, 0.2),
vmax_lims=(-150., -190.),
vinf_lims=(-70., -30.))
gc2.set_sig_v(sig_x_lims=(0.4, 0.3),
sig_y_lims=(0.3, 0.4),
alpha_lims=(2.0, 1.5),
sig_v_in_lims=(70., 90.),
sig_v_out_lims=(10., 20.))
gc2.evaluate_ybar()
return gc2
@pytest.fixture
def my_two_component_data():
ybar_trim = np.array([[
[6.78795924e-07, 7.01686348e-07, 8.01859822e-07, 6.54575404e-07,
5.86147750e-07],
[8.93118591e-07, 1.01044963e-06, 1.08760627e-06, 9.08970737e-07,
8.04216872e-07],
[1.09005990e-06, 1.51395591e-06, 2.18895041e-06, 1.56156125e-06,
1.08249656e-06],
[1.06030173e-06, 1.35771822e-06, 1.73337573e-06, 1.51595824e-06,
1.12148005e-06],
[8.87440824e-07, 1.02287996e-06, 1.23583244e-06, 1.13249444e-06,
9.56997325e-07]],
[[6.95057611e-07, 8.35432206e-07, 1.06261004e-06, 8.99305095e-07,
7.32804325e-07],
[8.09742717e-07, 1.05492919e-06, 1.43766915e-06, 1.16205100e-06,
8.66742670e-07],
[8.75172239e-07, 1.23894490e-06, 2.14608013e-06, 1.40958716e-06,
9.45855918e-07],
[8.29082597e-07, 1.10288440e-06, 1.52975025e-06, 1.21627303e-06,
8.86178288e-07],
[7.18016977e-07, 8.75130099e-07, 1.10430275e-06, 9.41110691e-07,
7.56015945e-07]],
[[6.49843531e-07, 7.89512647e-07, 1.03427326e-06, 8.65798455e-07,
6.91205926e-07],
[7.53867148e-07, 9.89815363e-07, 1.40081086e-06, 1.10819279e-06,
8.11863590e-07],
[8.12491049e-07, 1.15577039e-06, 2.06803774e-06, 1.33052216e-06,
8.82837321e-07],
[7.69889623e-07, 1.03204022e-06, 1.48874368e-06, 1.15180122e-06,
8.27565395e-07],
[6.69875308e-07, 8.26185161e-07, 1.08094531e-06, 8.98515696e-07,
7.08868996e-07]],
[[5.38070313e-07, 6.61388554e-07, 8.92535864e-07, 7.39803311e-07,
5.77997480e-07],
[6.23192516e-07, 8.22422661e-07, 1.20670143e-06, 9.37011543e-07,
6.72947869e-07],
[6.77164279e-07, 9.65679392e-07, 1.75427227e-06, 1.11299841e-06,
7.32227156e-07],
[6.48558116e-07, 8.74125093e-07, 1.27402740e-06, 9.79381998e-07,
6.95077317e-07],
[5.66898459e-07, 7.01068275e-07, 9.27462212e-07, 7.67217788e-07,
6.00170529e-07]]])
kurtosis_map = np.array([
[1.91380693, 1.55782066, 1.27171119, 1.08833282, 1.02749551,
1.07150682, 1.11635444, 1.23115968, 1.405092 , 1.62503012],
[2.30940445, 1.83576031, 1.42308581, 1.15675259, 1.0759083 ,
1.13154697, 1.18255044, 1.34491972, 1.59086255, 1.88417317],
[2.97029226, 2.39380564, 1.76191518, 1.29496091, 1.1713133 ,
1.21681466, 1.29511235, 1.57516296, 1.94876758, 2.32885887],
[3.82246852, 3.4337086 , 2.71641518, 1.76578055, 1.34413923,
1.33416537, 1.6430093 , 2.19152103, 2.68168558, 3.05339968],
[4.02059459, 3.94261714, 3.80437848, 3.50847935, 2.6304971 ,
2.73780921, 3.23658831, 3.55078133, 3.74262018, 3.86515134],
[3.16015004, 2.77293542, 2.23467474, 1.62505916, 1.40562588,
1.31127397, 2.49819678, 3.38667439, 3.80341415, 3.99499944],
[2.31046908, 1.90151357, 1.52427507, 1.30081999, 1.23387369,
1.08291611, 1.42886235, 2.13711064, 2.82258727, 3.32086012],
[1.83368963, 1.54303228, 1.32668847, 1.21687229, 1.11168503,
1.03704828, 1.18182108, 1.57250558, 2.08491235, 2.59049802],
[1.58092215, 1.37653744, 1.23203566, 1.15021943, 1.04524221,
1.01358309, 1.09786527, 1.34131655, 1.69539686, 2.09911566]])
return ybar_trim, kurtosis_map
def test_component_normalisation(my_component):
"""Tests the normalisations of all densities evaluated for a component
"""
ssps, cube, gc1 = my_component
v_edg = np.linspace(-900, 900, 20)
dv = v_edg[1] - v_edg[0]
na = np.newaxis
# check p_t
a = gc1.get_p_t(density=False, light_weighted=False)
assert np.isclose(np.sum(a), 1)
a = gc1.get_p_t(density=True, light_weighted=False)
assert np.isclose(np.sum(a*ssps.delta_t), 1)
a = gc1.get_p_t(density=False, light_weighted=True)
assert np.isclose(np.sum(a), 1)
a = gc1.get_p_t(density=True, light_weighted=True)
assert np.isclose(np.sum(a*ssps.delta_t), 1)
# check p_x_t
a = gc1.get_p_x_t(density=False, light_weighted=False)
assert np.allclose(np.sum(a, (0,1)), 1.)
a = gc1.get_p_x_t(density=True, light_weighted=False)
assert np.allclose(np.sum(a*cube.dx*cube.dy, (0,1)), 1.)
a = gc1.get_p_x_t(density=False, light_weighted=True)
assert np.allclose(np.sum(a, (0,1)), 1.)
a = gc1.get_p_x_t(density=True, light_weighted=True)
assert np.allclose(np.sum(a*cube.dx*cube.dy, (0,1)), 1.)
# check p_tx
a = gc1.get_p_tx(density=False, light_weighted=False)
assert np.isclose(np.sum(a), 1.)
a = gc1.get_p_tx(density=True, light_weighted=False)
assert np.isclose(np.sum(a*ssps.delta_t[:,na,na]*cube.dx*cube.dy), 1)
a = gc1.get_p_tx(density=False, light_weighted=True)
assert np.isclose(np.sum(a), 1.)
a = gc1.get_p_tx(density=True, light_weighted=True)
assert np.isclose(np.sum(a*ssps.delta_t[:,na,na]*cube.dx*cube.dy), 1)
# check p_z_tx
a = gc1.get_p_z_tx(density=False, light_weighted=False)
assert np.allclose(np.sum(a, 0), 1.)
a = gc1.get_p_z_tx(density=True, light_weighted=False)
assert np.allclose(np.sum(a*ssps.delta_z[:,na,na,na], 0), 1.)
a = gc1.get_p_z_tx(density=False, light_weighted=True)
assert np.allclose(np.sum(a, 0), 1.)
a = gc1.get_p_z_tx(density=True, light_weighted=True)
assert np.allclose(np.sum(a*ssps.delta_z[:,na,na,na], 0), 1.)
# check p_txz
a = gc1.get_p_txz(density=False, light_weighted=False)
assert np.isclose(np.sum(a), 1)
a = gc1.get_p_txz(density=True, light_weighted=False)
vol_elmt = ssps.delta_t[:,na,na,na]*cube.dx*cube.dy*ssps.delta_z[na,na,na,:]
assert np.isclose(np.sum(a * vol_elmt), 1)
a = gc1.get_p_txz(density=False, light_weighted=True)
assert np.isclose(np.sum(a), 1)
a = gc1.get_p_txz(density=True, light_weighted=True)
vol_elmt = ssps.delta_t[:,na,na,na]*cube.dx*cube.dy*ssps.delta_z[na,na,na,:]
assert np.isclose(np.sum(a * vol_elmt), 1)
# check get_p_x
a = gc1.get_p_x(density=False, light_weighted=False)
assert np.isclose(np.sum(a), 1)
a = gc1.get_p_x(density=True, light_weighted=False)
assert np.isclose(np.sum(a*cube.dx*cube.dy), 1)
a = gc1.get_p_x(density=False, light_weighted=True)
assert np.isclose(np.sum(a), 1)
a = gc1.get_p_x(density=True, light_weighted=True)
assert np.isclose(np.sum(a*cube.dx*cube.dy), 1)
# check get_p_z
a = gc1.get_p_z(density=False, light_weighted=False)
assert np.isclose(np.sum(a), 1)
a = gc1.get_p_z(density=True, light_weighted=False)
assert np.isclose(np.sum(a*ssps.delta_z), 1)
a = gc1.get_p_z(density=False, light_weighted=True)
assert np.isclose(np.sum(a), 1)
a = gc1.get_p_z(density=True, light_weighted=True)
assert np.isclose(np.sum(a*ssps.delta_z), 1)
# check p_tz_x
a = gc1.get_p_tz_x(density=False, light_weighted=False)
assert np.allclose(np.sum(a, (0,1)), 1.)
a = gc1.get_p_tz_x(density=True, light_weighted=False)
vol_elmt = ssps.delta_t[:,na,na,na]*ssps.delta_z[na,:,na,na]
assert np.allclose(np.sum(a*vol_elmt, (0,1)), 1.)
a = gc1.get_p_tz_x(density=False, light_weighted=True)
assert np.allclose(np.sum(a, (0,1)), 1.)
a = gc1.get_p_tz_x(density=True, light_weighted=True)
vol_elmt = ssps.delta_t[:,na,na,na]*ssps.delta_z[na,:,na,na]
assert np.allclose(np.sum(a*vol_elmt, (0,1)), 1.)
# check p_tz
a = gc1.get_p_tz(density=False, light_weighted=False)
assert np.isclose(np.sum(a), 1.)
a = gc1.get_p_tz(density=True, light_weighted=False)
assert np.isclose(np.sum(a*ssps.delta_t[:,na]*ssps.delta_z[na,:]), 1.)
    a = gc1.get_p_tz(density=False, light_weighted=True)
    assert np.isclose(np.sum(a), 1.)
    a = gc1.get_p_tz(density=True, light_weighted=True)
    assert np.isclose(np.sum(a*ssps.delta_t[:,na]*ssps.delta_z[na,:]), 1.)
# File: tencentcloud/eiam/v20210420/eiam_client.py
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.eiam.v20210420 import models
class EiamClient(AbstractClient):
_apiVersion = '2021-04-20'
_endpoint = 'eiam.tencentcloudapi.com'
_service = 'eiam'
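# Each API method below follows the same pattern: serialize the request model,
# send it through AbstractClient.call, then either deserialize the "Response"
# payload into the matching models.*Response class or raise
# TencentCloudSDKException with the error code, message and request id.
# Minimal usage sketch (assumes the standard Tencent Cloud SDK constructor
# arguments of a Credential object and a region string):
#
#   from tencentcloud.common import credential
#   cred = credential.Credential("SECRET_ID", "SECRET_KEY")
#   client = EiamClient(cred, "ap-guangzhou")
#   resp = client.CreateUser(models.CreateUserRequest())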
def AddAccountToAccountGroup(self, request):
"""账号组添加账号
:param request: Request instance for AddAccountToAccountGroup.
:type request: :class:`tencentcloud.eiam.v20210420.models.AddAccountToAccountGroupRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.AddAccountToAccountGroupResponse`
"""
try:
params = request._serialize()
body = self.call("AddAccountToAccountGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.AddAccountToAccountGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def AddUserToUserGroup(self, request):
"""加入用户到用户组
:param request: Request instance for AddUserToUserGroup.
:type request: :class:`tencentcloud.eiam.v20210420.models.AddUserToUserGroupRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.AddUserToUserGroupResponse`
"""
try:
params = request._serialize()
body = self.call("AddUserToUserGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.AddUserToUserGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreateAccountGroup(self, request):
"""创建账号组
:param request: Request instance for CreateAccountGroup.
:type request: :class:`tencentcloud.eiam.v20210420.models.CreateAccountGroupRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.CreateAccountGroupResponse`
"""
try:
params = request._serialize()
body = self.call("CreateAccountGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateAccountGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreateAppAccount(self, request):
"""创建应用账号
:param request: Request instance for CreateAppAccount.
:type request: :class:`tencentcloud.eiam.v20210420.models.CreateAppAccountRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.CreateAppAccountResponse`
"""
try:
params = request._serialize()
body = self.call("CreateAppAccount", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateAppAccountResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreateOrgNode(self, request):
"""新建一个机构节点
:param request: Request instance for CreateOrgNode.
:type request: :class:`tencentcloud.eiam.v20210420.models.CreateOrgNodeRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.CreateOrgNodeResponse`
"""
try:
params = request._serialize()
body = self.call("CreateOrgNode", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateOrgNodeResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreateUser(self, request):
"""新建一个用户
:param request: Request instance for CreateUser.
:type request: :class:`tencentcloud.eiam.v20210420.models.CreateUserRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.CreateUserResponse`
"""
try:
params = request._serialize()
body = self.call("CreateUser", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateUserResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreateUserGroup(self, request):
"""新建用户组
:param request: Request instance for CreateUserGroup.
:type request: :class:`tencentcloud.eiam.v20210420.models.CreateUserGroupRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.CreateUserGroupResponse`
"""
try:
params = request._serialize()
body = self.call("CreateUserGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateUserGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteAccountGroup(self, request):
"""删除账号组
:param request: Request instance for DeleteAccountGroup.
:type request: :class:`tencentcloud.eiam.v20210420.models.DeleteAccountGroupRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.DeleteAccountGroupResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteAccountGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteAccountGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteAppAccount(self, request):
"""删除应用账号
:param request: Request instance for DeleteAppAccount.
:type request: :class:`tencentcloud.eiam.v20210420.models.DeleteAppAccountRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.DeleteAppAccountResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteAppAccount", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteAppAccountResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteOrgNode(self, request):
"""删除一个机构节点
:param request: Request instance for DeleteOrgNode.
:type request: :class:`tencentcloud.eiam.v20210420.models.DeleteOrgNodeRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.DeleteOrgNodeResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteOrgNode", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteOrgNodeResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteUser(self, request):
"""通过用户名或用户 id 删除用户。
:param request: Request instance for DeleteUser.
:type request: :class:`tencentcloud.eiam.v20210420.models.DeleteUserRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.DeleteUserResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteUser", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteUserResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteUserGroup(self, request):
"""删除一个用户组
:param request: Request instance for DeleteUserGroup.
:type request: :class:`tencentcloud.eiam.v20210420.models.DeleteUserGroupRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.DeleteUserGroupResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteUserGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteUserGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DeleteUsers(self, request):
"""批量删除当前节点下的用户。如果出现个别用户删除错误,将不影响其余被勾选用户被删除的操作,同时提示未被删除的用户名称/用户ID。
:param request: Request instance for DeleteUsers.
:type request: :class:`tencentcloud.eiam.v20210420.models.DeleteUsersRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.DeleteUsersResponse`
"""
try:
params = request._serialize()
body = self.call("DeleteUsers", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DeleteUsersResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeAccountGroup(self, request):
"""查询账号组列表
:param request: Request instance for DescribeAccountGroup.
:type request: :class:`tencentcloud.eiam.v20210420.models.DescribeAccountGroupRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeAccountGroupResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeAccountGroup", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeAccountGroupResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeAppAccount(self, request):
"""查询应用账号列表
:param request: Request instance for DescribeAppAccount.
:type request: :class:`tencentcloud.eiam.v20210420.models.DescribeAppAccountRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeAppAccountResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeAppAccount", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeAppAccountResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeApplication(self, request):
"""获取一个应用的信息。
:param request: Request instance for DescribeApplication.
:type request: :class:`tencentcloud.eiam.v20210420.models.DescribeApplicationRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeApplicationResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeApplication", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeApplicationResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeOrgNode(self, request):
"""根据机构节点ID读取机构节点信息
:param request: Request instance for DescribeOrgNode.
:type request: :class:`tencentcloud.eiam.v20210420.models.DescribeOrgNodeRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeOrgNodeResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeOrgNode", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeOrgNodeResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribeOrgResourcesAuthorization(self, request):
"""查询指定机构下的资源授权列表
:param request: Request instance for DescribeOrgResourcesAuthorization.
:type request: :class:`tencentcloud.eiam.v20210420.models.DescribeOrgResourcesAuthorizationRequest`
:rtype: :class:`tencentcloud.eiam.v20210420.models.DescribeOrgResourcesAuthorizationResponse`
"""
try:
params = request._serialize()
body = self.call("DescribeOrgResourcesAuthorization", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeOrgResourcesAuthorizationResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def DescribePublicKey(self, request):
"""获取JWT公钥信息。
:param request: Request instance for DescribePublicKey.
:type | |
str) == True :
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
# it's a variable length opcode
else :
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_VARIABLE( i, op_value, self.__raw_buff[ i : ] )
len_format = calcsize(r_format)
raw_buff = self.__raw_buff[ i : i + 1 + len_format ]
jbc = JBC( class_manager, JAVA_OPCODES[ op_value ][0], raw_buff, ( r_function, v_function, r_buff, r_format, f_function ) )
self.__bytecodes.append( jbc )
i += len_format
else :
self.__bytecodes.append( JBC( class_manager, JAVA_OPCODES[ op_value ][0], self.__raw_buff[ i ] ) )
else :
bytecode.Exit( "op_value 0x%x is unknown" % op_value )
i += 1
# Create branch bytecodes list
idx = 0
nb = 0
for i in self.__bytecodes :
self.__maps.append( idx )
if i.get_name() in BRANCH_JVM_OPCODES :
self.__branches.append( nb )
idx += i.get_length()
nb += 1
def _patch_bytecodes(self) :
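# Re-resolve constant-pool dependent instructions (invoke*, anewarray,
# getstatic, ldc, new): each operand is re-created through the ClassManager
# (class / name-and-type / method or field refs / constants), the resulting
# index is packed with the opcode's format string, and the instruction is
# reloaded with the new raw bytes.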
methods = []
for i in self.__bytecodes :
if "invoke" in i.get_name() :
operands = i.get_operands()
methods.append( operands )
op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
raw_buff = pack( '>B', op_value )
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
new_class_index = self.__CM.create_class( operands[0] )
new_name_and_type_index = self.__CM.create_name_and_type( operands[1], operands[2] )
self.__CM.create_method_ref( new_class_index, new_name_and_type_index )
value = getattr( self.__CM, JAVA_OPCODES[ op_value ][5] )( *operands[0:] )
if value == -1 :
bytecode.Exit( "Unable to found method " + str(operands) )
raw_buff += pack(r_format, *v_function( value ) )
i.reload( raw_buff )
elif "anewarray" in i.get_name() :
operands = i.get_operands()
op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
raw_buff = pack( '>B', op_value )
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
new_class_index = self.__CM.create_class( operands )
raw_buff += pack(r_format, *v_function( new_class_index ) )
i.reload( raw_buff )
elif "getstatic" == i.get_name() :
operands = i.get_operands()
op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
raw_buff = pack( '>B', op_value )
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
new_class_index = self.__CM.create_class( operands[0] )
new_name_and_type_index = self.__CM.create_name_and_type( operands[1], operands[2] )
self.__CM.create_field_ref( new_class_index, new_name_and_type_index )
value = getattr( self.__CM, JAVA_OPCODES[ op_value ][5] )( *operands[1:] )
if value == -1 :
bytecode.Exit( "Unable to found method " + str(operands) )
raw_buff += pack(r_format, *v_function( value ) )
i.reload( raw_buff )
elif "ldc" == i.get_name() :
operands = i.get_operands()
op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
raw_buff = pack( '>B', op_value )
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
if operands[0] != "CONSTANT_Integer" and operands[0] != "CONSTANT_String" :
bytecode.Exit( "...." )
if operands[0] == "CONSTANT_Integer" :
new_int_index = self.__CM.create_integer( operands[1] )
raw_buff += pack(r_format, *v_function( new_int_index ) )
elif operands[0] == "CONSTANT_String" :
new_string_index = self.__CM.create_string( operands[1] )
raw_buff += pack(r_format, *v_function( new_string_index ) )
i.reload( raw_buff )
elif "new" == i.get_name() :
operands = i.get_operands()
op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
raw_buff = pack( '>B', op_value )
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
new_class_index = self.__CM.create_class( operands )
raw_buff += pack(r_format, *v_function( new_class_index ) )
i.reload( raw_buff )
return methods
def get(self) :
"""
Return all bytecodes
@rtype : L{list}
"""
return self.__bytecodes
def get_raw(self) :
return ''.join(x.get_raw() for x in self.__bytecodes)
def show(self) :
"""
Display the code like a disassembler
"""
nb = 0
for i in self.__bytecodes :
print nb, self.__maps[nb],
i.show( self.__maps[nb] )
print
nb += 1
def pretty_show(self, m_a) :
"""
Display the code like a disassembler but with instructions' links
"""
bytecode.PrettyShow( m_a.basic_blocks.gets() )
bytecode.PrettyShowEx( m_a.exceptions.gets() )
def get_relative_idx(self, idx) :
"""
Return the relative idx by given an offset in the code
@param idx : an offset in the code
@rtype : the relative index in the code, it's the position in the list of a bytecode
"""
n = 0
x = 0
for i in self.__bytecodes :
#print n, idx
if n == idx :
return x
n += i.get_length()
x += 1
return -1
def get_at(self, idx) :
"""
Return a specific bytecode at an index
@param : the index of a bytecode
@rtype : L{JBC}
"""
return self.__bytecodes[ idx ]
def remove_at(self, idx) :
"""
Remove bytecode at a specific index
@param idx : the index to remove the bytecode
@rtype : the length of the removed bytecode
"""
val = self.__bytecodes[idx]
val_m = self.__maps[idx]
# Remove the index if it's in our branch list
if idx in self.__branches :
self.__branches.remove( idx )
# Adjust each branch
for i in self.__branches :
self.__bytecodes[i].adjust_r( self.__maps[i], val_m, val.get_length() )
# Remove it !
self.__maps.pop(idx)
self.__bytecodes.pop(idx)
# Adjust branch and map list
self._adjust_maps( val_m, val.get_length() * -1 )
self._adjust_branches( idx, -1 )
return val.get_length()
def _adjust_maps(self, val, size) :
nb = 0
for i in self.__maps :
if i > val :
self.__maps[ nb ] = i + size
nb = nb + 1
def _adjust_maps_i(self, val, size) :
nb = 0
x = 0
for i in self.__maps :
if i == val :
x+=1
if x == 2 :
self.__maps[ nb ] = i + size
if i > val :
self.__maps[ nb ] = i + size
nb = nb + 1
def _adjust_branches(self, val, size) :
nb = 0
for i in self.__branches :
if i > val :
self.__branches[ nb ] = i + size
nb += 1
def insert_at(self, idx, byte_code) :
"""
Insert bytecode at a specific index
@param idx : the index to insert the bytecode
@param bytecode : a list which represent the bytecode
@rtype : the length of the inserted bytecode
"""
# Get the op_value and add it to the raw_buff
op_name = byte_code[0]
op_value = INVERT_JAVA_OPCODES[ op_name ]
raw_buff = pack( '>B', op_value )
new_jbc = None
# If it's an op_value with args, we must handle that !
if len( JAVA_OPCODES[ op_value ] ) > 1 :
# Find information about the op_value
r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
# Special values for this op_value (advanced bytecode)
if len( JAVA_OPCODES[ op_value ] ) == 6 :
value = getattr( self.__CM, JAVA_OPCODES[ op_value ][5] )( *byte_code[1:] )
if value == -1 :
bytecode.Exit( "Unable to found " + str(byte_code[1:]) )
raw_buff += pack(r_format, *v_function( value ) )
else :
raw_buff += pack(r_format, *v_function( *byte_code[1:] ) )
new_jbc = JBC(self.__CM, op_name, raw_buff, ( r_function, v_function, r_buff, r_format, f_function ) )
else :
new_jbc = JBC(self.__CM, op_name, raw_buff)
# Adjust each branch with the new insertion
val_m = self.__maps[ idx ]
for i in self.__branches :
self.__bytecodes[i].adjust_i( self.__maps[i], val_m, new_jbc.get_length() )
# Insert the new bytecode at the correct index
# Adjust maps + branches
self.__bytecodes.insert( idx, new_jbc )
self.__maps.insert( idx, val_m )
self._adjust_maps_i( val_m, new_jbc.get_length() )
self._adjust_branches( idx, 1 )
# Add it to the branches if it's a correct op_value
if new_jbc.get_name() in BRANCH_JVM_OPCODES :
self.__branches.append( idx )
# FIXME
# modify the exception table
# modify tableswitch and lookupswitch instructions
# return the length of the raw_buff
return len(raw_buff)
def remplace_at(self, idx, bytecode) :
"""
Replace bytecode at a specific index with another bytecode (replace = remove + insert)
@param idx : the index to insert the bytecode
@param bytecode : a list which represent the bytecode
@rtype : the length of the inserted bytecode
"""
self.remove_at(idx)
size = self.insert_at(idx, bytecode)
return size
def set_cm(self, cm) :
self.__CM = cm
for i in self.__bytecodes :
i.set_cm( cm )
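# Usage sketch (illustrative only): given a JavaCode object "code" built from
# a parsed method, insert_at/remove_at keep the byte-offset map (__maps) and
# the branch-instruction list (__branches) consistent, so relative jump
# targets stay valid after editing, e.g.:
#
#   length = code.insert_at(0, ["nop"])   # prepend a one-byte nop
#   code.remove_at(0)                     # and take it out again
#
# Both calls return the length in bytes of the affected instruction.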
class BasicAttribute(object) :
def __init__(self) :
self.__attributes = []
def get_attributes(self) :
return self.__attributes
def set_cm(self, cm) :
self.__CM = cm
class CodeAttribute(BasicAttribute) :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
super(CodeAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 max_stack;
# u2 max_locals;
# u4 code_length;
# u1 code[code_length];
self.low_struct = SVs( CODE_LOW_STRUCT[0], CODE_LOW_STRUCT[1], buff.read( calcsize(CODE_LOW_STRUCT[0]) ) )
self.__code = JavaCode( class_manager, buff.read( self.low_struct.get_value().code_length ) )
# u2 exception_table_length;
self.exception_table_length = SV( '>H', buff.read(2) )
# { u2 start_pc;
# u2 end_pc;
# u2 handler_pc;
# u2 catch_type;
# } exception_table[exception_table_length];
self.__exception_table = []
for i in range(0, self.exception_table_length.get_value()) :
et
f')
plt.title('Electric field (Intensity/Mean)')
plt.xlabel('$x/r_f$')
if not subplot:
plt.show()
return
def plot_delay(self, subplot=False):
# get frequency to set the scale, enter in GHz
Freq = self.freq/1000
plt.subplot(2, 1, 1)
plt.plot(np.linspace(0, self.dx*self.nx, self.nx),
-self.dm/(2*self.dlam*Freq))
plt.ylabel('Group delay (ns)')
plt.xlabel('$x/r_f$')
plt.subplot(2, 1, 2)
plt.plot(np.mean(self.pulsewin, axis=1))
plt.ylabel('Intensity (arb)')
plt.xlabel('Delay (arb)')
plt.show()
return
def plot_pulse(self, subplot=False):
# get frequency to set the scale, enter in GHz
Freq = self.freq/1000
lpw = np.log10(self.pulsewin)
vmax = np.max(lpw)
vmin = np.median(lpw) - 3
plt.pcolormesh(np.linspace(0, self.dx*self.nx, self.nx),
(np.arange(0, 3*self.nf/2, 1) - self.nf/2) /
(2*self.dlam*Freq),
lpw[int(self.nf/2):, :], vmin=vmin, vmax=vmax)
plt.colorbar()
plt.ylabel('Delay (ns)')
plt.xlabel('$x/r_f$')
plt.plot(np.linspace(0, self.dx*self.nx, self.nx),
-self.dm/(2*self.dlam*Freq), 'k') # group delay=-phase delay
plt.show()
def plot_all(self):
plt.figure(2)
plt.subplot(2, 2, 1)
self.plot_screen(subplot=True)
plt.subplot(2, 2, 2)
self.plot_intensity(subplot=True)
plt.subplot(2, 1, 2)
self.plot_dynspec(subplot=True)
plt.show()
"""
The code below is unfinished, but will eventually allow one to compute the ACF
analytically, including a phase gradient. A dynamic spectrum with phase
gradients (beyond those that arise naturally) can be simulated from this.
"""
class ACF():
def __init__(self, s_max=5, dnu_max=5, ns=256, nf=256, ar=1, alpha=5/3,
phasegrad_x=0, phasegrad_y=0, V_x=1, V_y=0, psi=0, amp=1,
use_t=True, plot=False, display=True):
"""
Generate an ACF from the theoretical function in:
Rickett et al. (2014)
s_max - number of coherence spatial scales to calculate over
dnu_max - number of decorrelation bandwidths to calculate over
ns - number of spatial steps
nf - number of decorrelation bandwidth steps
alpha - exponent for interstellar turbulence
ar - axial ratio of diffractive scintillation. Major axis defines x
phasegrad_x - phase gradient in x direction
phasegrad_y - phase gradient in y direction
Vx - Effective velocity in x direction
Vy - Effective velocity in y direction
If ISS spectrum is a Kolmogorov power-law with no inner or outer scale,
alpha=5/3
"""
self.s_max = s_max
self.dnu_max = dnu_max
self.ns = ns
self.nf = nf
self.ar = ar
self.alpha = alpha
self.phasegrad_x = phasegrad_x
self.phasegrad_y = phasegrad_y
self.V_x = V_x
self.V_y = V_y
self.use_t = use_t
# self.psi = psi
self.amp = amp
return
def calc_acf(self, plot=False):
"""
computes 2-D ACF of intensity vs t and v where optimal sampling of t
and v is provided with the output ACF
assume ISS spectrum is a Kolmogorov power-law with no inner or outer
scale
requires velocity and angular displacement due to phase gradient
(vectors) vectors are x, y where x = major axis of spatial structure,
i.e. density variations are elongated by "ar" in the x direction. y is
90deg CCW.
implement the integrals in Appendix A of Rickett, Coles et al ApJ 2014
on the analysis of the double pulsar scintillation equations A1 and A2.
A2 has an error. It would be correct if nu were replaced by omega,
i.e. had an extra 2*pi
coordinates are with respect to ar major axis so we don't have to
rotate the structure, we put V and sig vectors in the structure
coordinates.
The distance sn is normalized by So and the frequency dnun by \nu_{0.5}
the spatial scale and the frequency scale respectively.
the phase gradient is normalized by the 1/s0, i.e. sigxn = gradphix*s0
if there is no phase gradient then the acf is symmetric and only one
quadrant needs to be calculated. Otherwise two quadrants are necessary.
new algorithm to compute same integral. Normalized integral is
game(sn, dnun) = -j/(2pi)^2 (1/dnun) sum sum (dsn)^2
game(snp, 0) exp((j/4pi)(1/dnun) |sn - snp|^2)
the worst case sampling is when dnun is very small. Then the argument
of the complex exponential becomes large and aliasing will occur. If
dnun=0.01 and dsp=0.1 the alias will peak at snx = 5. Reducing the
sampling dsp to 0.05 will push that alias out to snx = 8. However
halving dsp will increase the time by a factor of 4.
The frequency decorrelation is quite linear near the origin and looks
quasi-exponential, the 0.5 width is dnun = 0.15. Sampling of 0.05 is
more than adequate in frequency. Sampling of 0.1 in sn is adequate
dnun = 0.0 is divergent with this integral but can be obtained
trivially from the ACF of the electric field directly
Use formula vec{S} = vec{V} t - 2 vec{sigma_p} delta nu/nu
equation A6 to get equal t sampling. dt = ds / |V| and tmax= Smax + 2
|sigma_p| dnu/nu
"""
alph2 = self.alpha/2
nf = self.nf
ns = self.ns
spmax = self.s_max
dnumax = self.dnu_max
sigxn = self.phasegrad_x
sigyn = self.phasegrad_y
V_x = self.V_x
V_y = self.V_y
amp = self.amp
Vmag = np.sqrt(self.V_x**2 + self.V_y**2)
dsp = 2 * spmax / (ns)
# ddnun = 2 * dnumax / nf
sqrtar = np.sqrt(self.ar)
# equally spaced dnu array dnu = dnun * nuhalf
dnun = np.linspace(0, dnumax, int(np.ceil(nf/2)))
ndnun = len(dnun)
if sigxn == 0 and sigyn == 0:
# calculate only one quadrant tn >= 0
gammitv = np.zeros((int(ns/2), int(nf/2)))
# equally spaced t array t= tn*S0
tn = np.arange(0.0, spmax/Vmag, dsp/Vmag)
snx = V_x*tn
sny = V_y*tn
snp = np.arange(-2*spmax, 2*spmax, dsp)
SNPX, SNPY = np.meshgrid(snp, snp)
gammes = np.exp(-0.5*((SNPX/sqrtar)**2 +
(SNPY*sqrtar)**2)**alph2) # ACF of e-field
# compute dnun = 0 first
gammitv[:, 0] = np.exp(-0.5*((snx/sqrtar)**2 +
(sny*sqrtar)**2)**alph2)
# now do first dnu step with double spatial resolution
snp2 = np.arange(-2*spmax, 2*spmax, dsp/2)
SNPX2, SNPY2 = np.meshgrid(snp2, snp2)
gammes2 = np.exp(-0.5*((SNPX2/sqrtar)**2 +
(SNPY2*sqrtar)**2)**alph2) # ACF of e-field
for isn in range(0, len(snx)):
ARG = ((SNPX2-snx[isn])**2 + (SNPY2-sny[isn])**2)/(2*dnun[1])
temp = gammes2 * np.exp(1j*ARG)
gammitv[isn, 1] = -1j*(dsp/2)**2 * \
np.sum(temp)/((2*np.pi)*dnun[1])
# now do remainder of dnu array
for idn in range(2, ndnun):
for isn in range(0, len(snx)):
ARG = ((SNPX-snx[isn])**2 +
(SNPY-sny[isn])**2)/(2*dnun[idn])
temp = gammes*np.exp(1j * ARG)
gammitv[isn, idn] = -1j*dsp**2 * \
np.sum(temp)/((2*np.pi)*dnun[idn])
# equation A1 convert ACF of E to ACF of I
gammitv = np.real(gammitv * np.conj(gammitv)).squeeze()
nr, nc = np.shape(gammitv)
gam2 = np.zeros((nr, nc*2))
gam2[:, 1:nc] = np.fliplr(gammitv[:, 1:])
gam2[:, nc:] = gammitv
gam3 = np.zeros((nr*2, nc*2))
gam3[1:nr, :] = np.flipud(gam2[1:, :])
gam3[nr:, :] = gam2
gam3 = np.transpose(gam3)
nf, nt = np.shape(gam3)
t2 = np.linspace(-spmax/Vmag, spmax/Vmag, nt)
f2 = np.linspace(-dnumax, dnumax, nf)
s2 = t2*Vmag
else:
# calculate two quadrants -tmax t < tmax
if self.use_t:
# equally spaced t array t = tn*S0
tn = np.linspace(-spmax, spmax, ns)
snp = np.arange(-spmax*Vmag, spmax*Vmag, dsp)
else:
tn = np.linspace(-spmax/Vmag, spmax/Vmag, ns)
snp = np.arange(-spmax, spmax, dsp)
snx, sny = V_x * tn, V_y * tn
SNPX, SNPY = np.meshgrid(snp, snp)
gammes = np.exp(-0.5 * ((SNPX / sqrtar)**2 +
(SNPY * sqrtar)**2)**alph2) # ACF of E-field
# compute dnun = 0 first
gammitv = np.zeros((int(ns), int(np.ceil(nf / 2))))
gammitv[:, 0] = np.exp(-0.5 * ((snx / sqrtar)**2 +
(sny * sqrtar)**2)**alph2)
for idn in range(1, int(np.ceil(nf/2))):
snxt = snx - 2 * sigxn * dnun[idn]
snyt = sny - 2 * sigyn * dnun[idn]
for isn in range(ns):
temp = gammes * np.exp(1j * ((SNPX - snxt[isn])**2 +
(SNPY - snyt[isn])**2) /
(2 * dnun[idn]))
gammitv[isn, idn] = -1j * dsp**2 * np.sum(temp[:]) /\
((2 * np.pi) * dnun[idn])
# equation A1 convert ACF of E to ACF of I
gammitv = np.real(gammitv * np.conj(gammitv))
gam3 = amp * np.transpose(np.conj(np.hstack((np.fliplr(np.flipud(
gammitv[:, 1:])), gammitv))))
# scale by amplitude and crop to match data
f2 = np.hstack((np.flip(-dnun[1:]), dnun))
t2 = tn
s2 = t2 * Vmag
self.fn = f2
self.tn = t2
self.sn = s2
self.acf = gam3
if plot:
self.plot_acf()
return
def calc_sspec(self):
arr = np.fft.fftshift(self.acf)
arr = np.fft.fft2(arr)
arr = np.fft.fftshift(arr)
arr = np.real(arr)
self.sspec = 10*np.log10(arr)
def plot_acf(self, display=True):
"""
Plots the simulated ACF
"""
plt.pcolormesh(self.tn, self.fn, self.acf)
plt.xlabel(r'Time lag ($s/s_d$)')
plt.ylabel(r'Frequency lag ($\nu/\nu_d$)')
if display:
plt.show()
def plot_sspec(self, display=True):
"""
Plots the simulated secondary spectrum
"""
plt.pcolormesh(self.tn, self.fn, self.sspec)
plt.xlabel(r'Delay')
plt.ylabel(r'Doppler')
if display:
plt.show()
return
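# Minimal usage sketch of the ACF class above (parameter values are
# illustrative; assumes numpy/matplotlib are imported at module level as in
# the rest of this file):
def _example_acf_usage():
    # anisotropic screen (ar=2) with a small phase gradient along x
    acf = ACF(s_max=4, dnu_max=4, ns=64, nf=64, ar=2,
              phasegrad_x=0.5, V_x=1, V_y=0, plot=False)
    acf.calc_acf(plot=False)   # fills acf.acf, acf.tn, acf.fn
    acf.calc_sspec()           # secondary spectrum via 2-D FFT of the ACF
    return acf.acf, acf.sspec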
class Brightness():
def __init__(self, ar=1.0, exponent=1.67, thetagx=0, thetagy=0.0, psi=90,
thetarx=0, thetary=0.0, df=0.04, dt=0.08, dx=0.1,
nf=10, nt=80, nx=30, ncuts=5, plot=True, contour=True,
figsize=(10, 8), smooth_jacobian=False):
"""
Simulate Delay-Doppler Spectrum from Scattered angular spectrum from
Yao et al.
<filename>tests/test_urls.py
# -*- coding: utf-8 -*-
"""
Tests for the basecampy3.urls package.
"""
from __future__ import unicode_literals
import logging
import os
import re
import time
import unittest
import uuid
from datetime import date, datetime, timedelta
import dateutil
import pytz
from tzlocal import get_localzone
from basecampy3 import Basecamp3, exc
logger = logging.getLogger("basecampy3")
logger.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logging.getLogger("basecampy3.transport_adapter").setLevel(logging.INFO)
try:
PRE_MADE_PROJECT_ID = os.environ["BC3_TEST_PROJECT_ID"]
"""
REQUIRED: the ID of a Project you create in advance. The project must have
a Question (Check-In), an Answer to that Question, and Email Forwards
enabled.
"""
except Exception:
raise EnvironmentError("You must define environment variable "
"'BC3_TEST_PROJECT_ID' so that Questions and "
"Forwards can be tested.")
class APITest(unittest.TestCase):
PROJECT_TEST_NAME_PREFIX = "_DELETE_pytest__basecampy3_"
PROJECT_TEST_DESCRIPTION = "Trash me I am a test project."
UPLOAD_TEST_FILE_NAME = "testfile.png"
api = None
def __init__(self, methodName='runTest'):
super(APITest, self).__init__(methodName=methodName)
if APITest.api is None:
APITest.api = Basecamp3()
def setUp(self):
proj = self._create_test_project(middle="URLs")
self.project_id = proj["id"]
dock = proj["dock"]
dock_by_name = {i["name"]: i["id"] for i in dock}
self.campfire_id = dock_by_name["chat"]
self.message_board_id = dock_by_name["message_board"]
self.todoset_id = dock_by_name["todoset"]
self.schedule_id = dock_by_name["schedule"]
self.questionnaire_id = dock_by_name["questionnaire"]
self.vault_id = dock_by_name["vault"]
self.inbox_id = dock_by_name["inbox"]
# premade project that will be used for
# a subset of the tests (because they do not support
# creation via the API, the data must already exist)
url = self.api.urls.projects.get(project=PRE_MADE_PROJECT_ID)
data = self._get_data(url)
self.premade_project = data["id"]
dock = data["dock"]
dock_by_name = {i["name"]: i["id"] for i in dock}
self.premade_questionnaire = dock_by_name["questionnaire"]
self.premade_inbox = dock_by_name["inbox"]
def tearDown(self):
# trash any projects we missed
trashed = 0
url = self.api.urls.projects.list()
response = url.request(self.api.session)
if not response.ok:
logger.error("Unable to list projects to delete test projects.")
raise exc.Basecamp3Error(response=response)
data = response.json()
projects_to_delete = [pd for pd in data if pd["name"].startswith(self.PROJECT_TEST_NAME_PREFIX)]
errors = []
for project in projects_to_delete:
del_url = self.api.urls.projects.trash(project["id"])
response = del_url.request(self.api.session)
if not response.ok:
ex = exc.Basecamp3Error(response=response)
errors.append(ex)
else:
trashed += 1
if errors:
for error in errors:
logger.error(error)
logger.error("Not all test projects got deleted.")
logger.info("Test(s) complete. Deleted %s out of %s test project(s).",
trashed, len(projects_to_delete))
time.sleep(1)
def _create_test_project(self, middle="", suffix=None):
if suffix is None:
suffix = uuid.uuid4()
name = "%s%s%s" % (self.PROJECT_TEST_NAME_PREFIX, middle, suffix)
url = self.api.urls.projects.create(name=name, description=self.PROJECT_TEST_DESCRIPTION)
response = url.request(self.api.session)
if not response.ok:
logger.error("Unable to create a test project.")
raise exc.Basecamp3Error(response=response)
project_data = response.json()
return project_data
def test_campfire_lines(self):
test_text = "Good morning!"
# Create Campfire Line
data = self._create_campfire_line(test_text)
line_id = data["id"]
assert data["content"] == test_text
# Get Campfire Line
data = self._get_campfire_line(line_id)
assert data["id"] == line_id
assert data["content"] == test_text
# Get Campfire Line (test caching)
data = self._get_campfire_line(line_id)
assert data["id"] == line_id
assert data["content"] == test_text
# List Campfire Lines
data = self._list_campfire_lines()
assert len(data) == 1
# Delete the Campfire Line
url = self.api.urls.campfire_lines.delete(project=self.project_id,
campfire=self.campfire_id,
line=line_id)
self._get_no_content(url)
logger.info("Successfully tested Campfire Lines!")
def test_campfires(self):
# Get Campfire
url = self.api.urls.campfires.get(project=self.project_id, campfire=self.campfire_id)
data = self._get_data(url)
assert data["id"] == self.campfire_id
assert data["type"] == "Chat::Transcript"
assert "lines_url" in data
# List Campfires visible to user account-wide
url = self.api.urls.campfires.list()
data = self._get_data(url)
assert len(data) > 0
assert data[0]["type"] == "Chat::Transcript"
assert "lines_url" in data[0]
logger.info("Successfully tested Campfires!")
def test_chatbots(self):
test_service_name = "TestBot%s" % int(time.time())
test_command_url = "https://example.com"
test_content = "<strong>Howdy</strong> there."
# Create Chatbot
url = self.api.urls.chatbots.create(project=self.project_id,
campfire=self.campfire_id,
service_name=test_service_name,
command_url=test_command_url)
data = self._get_data(url)
chatbot_id = data["id"]
# Get Chatbot
url = self.api.urls.chatbots.get(project=self.project_id, campfire=self.campfire_id, chatbot=chatbot_id)
data = self._get_data(url)
assert data["id"] == chatbot_id
assert data["service_name"] == test_service_name
assert data["command_url"] == test_command_url
# Create Line as Chatbot
match = re.search(r"integrations/([^/]+)/buckets", data["lines_url"])
chatbot_key = match.group(1)
url = self.api.urls.chatbots.create_line(project=self.project_id,
campfire=self.campfire_id,
chatbot_key=chatbot_key,
content=test_content)
self._get_no_content(url)
# List Chatbots
url = self.api.urls.chatbots.list(project=self.project_id, campfire=self.campfire_id)
data = self._get_data(url)
assert len(data) > 0
test_updated_service_name = "two%s" % test_service_name
test_updated_command_url = "%s/updated" % test_command_url
# Update Chatbot
url = self.api.urls.chatbots.update(project=self.project_id,
campfire=self.campfire_id,
chatbot=chatbot_id,
service_name=test_updated_service_name,
command_url=test_updated_command_url)
data = self._get_data(url)
assert data["id"] == chatbot_id
assert data["service_name"] == test_updated_service_name
assert data["command_url"] == test_updated_command_url
# Delete Chatbot
url = self.api.urls.chatbots.delete(project=self.project_id,
campfire=self.campfire_id,
chatbot=chatbot_id)
self._get_no_content(url)
logger.info("Successfully tested chatbots!")
def test_comments(self):
comment_content = "Just a test comment here."
# Create Document to put Comments on
url = self.api.urls.documents.create(project=self.project_id, vault=self.vault_id,
title="Test Comment Document",
content="Does not really matter what is in here...",
status="active")
data = self._get_data(url)
document_id = data["id"]
url = self.api.urls.comments.create(project=self.project_id,
recording=document_id,
content=comment_content)
data = self._get_data(url)
comment_id = data["id"]
assert data["content"] == comment_content
# Get Comment
url = self.api.urls.comments.get(project=self.project_id, comment=comment_id)
data = self._get_data(url)
assert data["id"] == comment_id
assert data["content"] == comment_content
# List Comments by Recording
url = self.api.urls.comments.list_by_recording(project=self.project_id,
recording=document_id)
data = self._get_data(url)
assert len(data) > 0
new_comment_content = "I changed my mind."
# Update Comment
url = self.api.urls.comments.update(project=self.project_id,
comment=comment_id,
content=new_comment_content)
data = self._get_data(url)
assert data["id"] == comment_id
assert data["content"] == new_comment_content
self._recording_tests(comment_id, self.api.urls.comments, test_visibility=False)
# Delete our test document
self.api.urls.documents.trash(project=self.project_id, recording=document_id)
logger.info("Successfully tested Comments!")
def test_documents(self):
test_title = "Test Document"
test_content = "I am an awesome <strong>Document!</strong>"
# Create Document
url = self.api.urls.documents.create(project=self.project_id, vault=self.vault_id,
title=test_title,
content=test_content,
status="active")
data = self._get_data(url)
document_id = data["id"]
assert data["status"] == "active"
assert data["title"] == test_title
assert data["content"] == test_content
# Get Document
url = self.api.urls.documents.get(project=self.project_id, document=document_id)
data = self._get_data(url)
assert data["id"] == document_id
print(data)
# List Documents By Vault
url = self.api.urls.documents.list_by_vault(project=self.project_id, vault=self.vault_id)
data = self._get_data(url)
assert len(data) > 0
new_document_title = "Oh that is new"
new_content = "Wowee!<br><br>I am a document!"
# Update Document
url = self.api.urls.documents.update(project=self.project_id, document=document_id,
title=new_document_title, content=new_content)
data = self._get_data(url)
assert data["id"] == document_id
assert data["title"] == new_document_title
assert data["content"] == new_content
print(data)
self._recording_tests(document_id, self.api.urls.documents)
logger.info("Successfully tested Documents!")
def test_message_boards(self):
url = self.api.urls.message_boards.get(project=self.project_id,
board=self.message_board_id)
data = self._get_data(url)
assert data["id"] == self.message_board_id
logger.info("Successfully tested Message Boards!")
def test_message_types(self):
message_type_name = "Neigh"
message_type_icon = "🐴"
# Create new Message Type
url = self.api.urls.message_types.create(project=self.project_id,
name=message_type_name,
icon=message_type_icon)
data = self._get_data(url)
message_type_id = data["id"]
assert message_type_name == data["name"]
assert message_type_icon == data["icon"]
# List Message Types
url = self.api.urls.message_types.list(project=self.project_id)
data = self._get_data(url)
assert len(data) > 0
# Get Message Type
url = self.api.urls.message_types.get(project=self.project_id,
message_type=message_type_id)
data = self._get_data(url)
assert data["id"] == message_type_id
assert message_type_name == data["name"]
assert message_type_icon == data["icon"]
new_name = "Moo"
new_icon = "🐄"
# Update Message Type
url = self.api.urls.message_types.update(project=self.project_id,
message_type=message_type_id,
name=new_name,
icon=new_icon)
data = self._get_data(url)
assert data["name"] == new_name
assert data["icon"] == new_icon
url = self.api.urls.message_types.delete(project=self.project_id,
message_type=message_type_id)
self._get_no_content(url)
logger.info("Successfully tested Message Types!")
def test_messages(self):
subject = "BasecamPY Test Subject"
content = "<strong>Welcome to BasecamPY!</strong><br>Enjoy!"
# List Message Types
url = self.api.urls.message_types.list(project=self.project_id)
data = self._get_data(url)
# just get the first category to start with
category_id = data[0]["id"]
# we'll use this one to test updating later
new_category_id = data[1]["id"]
# Create new Message
url = self.api.urls.messages.create(project=self.project_id,
message_board=self.message_board_id,
subject=subject, content=content,
category_id=category_id)
data = self._get_data(url)
message_id = data["id"]
assert data["subject"] == subject
assert data["content"] == content
# List by Message Board
url = self.api.urls.messages.list_by_message_board(project=self.project_id,
message_board=self.message_board_id)
data = self._get_data(url)
assert len(data) > 0
# Get a Message
url = self.api.urls.messages.get(project=self.project_id, message=message_id)
data = self._get_data(url)
assert data["id"] == message_id
assert data["category"]["id"] == category_id
assert data["subject"] == subject
assert data["content"] == content
new_subject = "Basecampy3 is neat"
new_content = "I think so anyway"
# Update a Message
url = self.api.urls.messages.update(project=self.project_id,
message=message_id,
subject=new_subject,
content=new_content,
category_id=new_category_id)
data = self._get_data(url)
assert data["id"] == message_id
assert data["category"]["id"] == new_category_id
assert data["subject"] == new_subject
assert data["content"] == new_content
self._recording_tests(message_id, self.api.urls.messages)
logger.info("Successfully tested Message objects!")
def test_people(self):
# Get Current User
person_id = self._get_current_user()
# Get User
url = self.api.urls.people.get(person=person_id)
data = self._get_data(url)
assert data["id"] == person_id
# List Pingable
url = self.api.urls.people.list_pingable()
_ = self._get_data(url)
# List by Project
url = self.api.urls.people.list_by_project(self.project_id)
data = self._get_data(url)
assert len(data) > 0
# List all People
url = self.api.urls.people.list()
data = self._get_data(url)
assert len(data) > 0
logger.info("Successfully tested People objects!")
def test_projects(self):
# List Projects
url = self.api.urls.projects.list()
data = self._get_data(url)
assert len(data) > 1
# Get Project
url = self.api.urls.projects.get(project=self.project_id)
data = self._get_data(url)
assert data["id"] == self.project_id
old_name = data["name"]
old_desc = data["description"]
# we will skip Create projects because it gets tested by every other test!
# Update Project
new_name = "%s Renamed" % old_name
new_desc = "%s Modified" % old_desc
url = self.api.urls.projects.update(project=self.project_id,
name=new_name,
description=new_desc)
data = self._get_data(url)
assert data["id"] == self.project_id
assert data["name"] == new_name
assert data["description"] == new_desc
# We will skip trashing a project | |
<gh_stars>0
import pytest
from anchore_engine.common.models.policy_engine import NVDReference
from anchore_engine.services.policy_engine.engine.vulns.mappers import (
ENGINE_DISTRO_MAPPERS,
ENGINE_PACKAGE_MAPPERS,
GRYPE_PACKAGE_MAPPERS,
EngineGrypeDBMapper,
JavaMapper,
VulnerabilityMapper,
)
@pytest.mark.parametrize(
"test_distro, expected_os, expected_like_os",
[
pytest.param("rhel", "redhat", "fedora", id="rhel"),
pytest.param("amzn", "amazonlinux", "fedora", id="amazonlinux"),
pytest.param("ol", "oraclelinux", "fedora", id="oraclelinux"),
pytest.param("centos", "centos", "fedora", id="centos"),
pytest.param("debian", "debian", "debian", id="debian"),
pytest.param("ubuntu", "ubuntu", "debian", id="ubuntu"),
pytest.param("alpine", "alpine", "alpine", id="alpine"),
pytest.param("sles", "sles", "sles", id="sles"),
pytest.param("windows", "windows", "", id="windows"),
pytest.param("rocky", "rockylinux", "fedora", id="rocky"),
],
)
def test_engine_distro_mappers(test_distro, expected_os, expected_like_os):
mapper = ENGINE_DISTRO_MAPPERS.get(test_distro)
assert mapper.grype_os == expected_os
assert mapper.grype_like_os == expected_like_os
assert mapper.to_grype_distro("0") == {
"name": expected_os,
"version": "0",
"idLike": expected_like_os,
}
@pytest.mark.parametrize(
"test_type, expected_type",
[
pytest.param("java", "java-archive", id="java"),
pytest.param("APKG", "apk", id="apkg"),
pytest.param("dpkg", "deb", id="dpkg"),
pytest.param("kb", "msrc-kb", id="msrc-kb"),
],
)
def test_engine_package_mappers(test_type, expected_type):
mapper = ENGINE_PACKAGE_MAPPERS.get(test_type)
assert mapper.grype_type == expected_type
@pytest.mark.parametrize(
"test_type, expected_type",
[
pytest.param("jenkins-plugin", "java", id="jenkins"),
pytest.param("java-archive", "java", id="java"),
pytest.param("deb", "dpkg", id="dpkg"),
pytest.param("apk", "APKG", id="apkg"),
pytest.param("msrc-kb", "kb", id="msrc-kb"),
],
)
def test_grype_package_mappers(test_type, expected_type):
mapper = GRYPE_PACKAGE_MAPPERS.get(test_type)
assert mapper.engine_type == expected_type
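# The two tables above map engine package types to grype types and back; a
# quick round-trip sanity check using the dpkg/deb pair exercised in the
# parametrized cases above (an illustrative check; the attribute names are
# the same ones asserted in the tests):
def test_package_mapper_round_trip():
    engine_mapper = ENGINE_PACKAGE_MAPPERS.get("dpkg")
    grype_mapper = GRYPE_PACKAGE_MAPPERS.get(engine_mapper.grype_type)
    assert grype_mapper.engine_type == "dpkg"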
class TestJavaMapper:
@pytest.mark.parametrize(
"input_metadata, expected_output",
[
(
{
"pom.properties": "\ngroupId=org.yaml\nartifactId=snakeyaml\nversion=\n"
},
{
"pomProperties": {
"artifactId": "snakeyaml",
"groupId": "org.yaml",
"version": "",
}
},
),
(
{
"pom.properties": "groupId=org.yaml\nartifactId=snakeyaml\nversion=1.18"
},
{
"pomProperties": {
"artifactId": "snakeyaml",
"groupId": "org.yaml",
"version": "1.18",
}
},
),
(
{
"pom.properties": {
"artifactId": "snakeyaml",
"groupId": "org.yaml",
"version": "1.18",
}
},
{
"pomProperties": {
"artifactId": "snakeyaml",
"groupId": "org.yaml",
"version": "1.18",
}
},
),
(
{
"pom.properties": "\ngroupId=org.yaml\nartifactId=snakeyaml\nversion=1.18\n",
"someProperty": "someValue",
},
{
"pomProperties": {
"artifactId": "snakeyaml",
"groupId": "org.yaml",
"version": "1.18",
},
},
),
(
{"pom.properties": "\ngroupId\nartifactId=snakeyaml\nversion=1.18\n"},
{
"pomProperties": {
"artifactId": "snakeyaml",
"groupId": "",
"version": "1.18",
}
},
),
(
{"pom.properties": "\norg.yaml\nartifactId=snakeyaml\nversion=1.18\n"},
{
"pomProperties": {
"artifactId": "snakeyaml",
"org.yaml": "",
"version": "1.18",
}
},
),
],
)
def test_image_content_to_grype_metadata(self, input_metadata, expected_output):
# Function under test
result = JavaMapper._image_content_to_grype_metadata(input_metadata)
# Validate result
assert result == expected_output
class TestImageContentAPIToGrypeSbom:
@pytest.mark.parametrize(
"mapper, test_input, expected",
[
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("npm"),
{
"cpes": [
"cpe:2.3:a:lodash:lodash:4.17.4:*:*:*:*:*:*:*",
"cpe:2.3:a:*:lodash:4.17.4:*:*:*:*:*:*:*",
],
"license": "MIT",
"licenses": ["MIT"],
"location": "/node_modules/lodash/package.json",
"origin": "<NAME> <<EMAIL>> (http://allyoucanleet.com/)",
"package": "lodash",
"type": "NPM",
"version": "4.17.4",
},
{
"name": "lodash",
"type": "npm",
"language": "javascript",
"locations": [{"path": "/node_modules/lodash/package.json"}],
"cpes": [
"cpe:2.3:a:*:lodash:4.17.4:*:*:*:*:*:*:*",
"cpe:2.3:a:lodash:lodash:4.17.4:*:*:*:*:*:*:*",
],
"version": "4.17.4",
},
id="npm",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("gem"),
{
"cpes": [
"cpe:2.3:a:jessica-lynn-suttles:bundler:2.1.4:*:*:*:*:*:*:*",
"cpe:2.3:a:jessica_lynn_suttles:bundler:2.1.4:*:*:*:*:*:*:*",
],
"license": "MIT",
"licenses": ["MIT"],
"location": "/usr/lib/ruby/gems/2.7.0/specifications/bundler-2.1.4.gemspec",
"origin": "...",
"package": "bundler",
"type": "GEM",
"version": "2.1.4",
},
{
"name": "bundler",
"type": "gem",
"language": "ruby",
"locations": [
{
"path": "/usr/lib/ruby/gems/2.7.0/specifications/bundler-2.1.4.gemspec"
}
],
"cpes": [
"cpe:2.3:a:jessica_lynn_suttles:bundler:2.1.4:*:*:*:*:*:*:*",
"cpe:2.3:a:jessica-lynn-suttles:bundler:2.1.4:*:*:*:*:*:*:*",
],
"version": "2.1.4",
},
id="gem",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("python"),
{
"cpes": [
"cpe:2.3:a:python-pip:pip:21.2.2:*:*:*:*:*:*:*",
"cpe:2.3:a:python:pip:21.2.2:*:*:*:*:*:*:*",
"cpe:2.3:a:pip:pip:21.2.2:*:*:*:*:*:*:*",
],
"license": "MIT",
"licenses": ["MIT"],
"location": "/usr/local/lib/python3.9/site-packages/pip",
"origin": "The pip developers <<EMAIL>>",
"package": "pip",
"type": "PYTHON",
"version": "21.2.2",
},
{
"name": "pip",
"version": "21.2.2",
"type": "python",
"cpes": [
"cpe:2.3:a:python-pip:pip:21.2.2:*:*:*:*:*:*:*",
"cpe:2.3:a:python:pip:21.2.2:*:*:*:*:*:*:*",
"cpe:2.3:a:pip:pip:21.2.2:*:*:*:*:*:*:*",
],
"language": "python",
"locations": [
{"path": "/usr/local/lib/python3.9/site-packages/pip"}
],
},
id="python",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("java"),
{
"cpes": [
"cpe:2.3:a:amqp-client:amqp_client:5.9.0:*:*:*:*:*:*:*",
"cpe:2.3:a:amqp_client:amqp_client:5.9.0:*:*:*:*:*:*:*",
],
"implementation-version": "N/A",
"location": "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/march_hare-4.3.0-java/lib/ext/rabbitmq-client.jar:amqp-client",
"maven-version": "5.9.0",
"metadata": {
"pom.properties": "\ngroupId=com.rabbitmq\nartifactId=amqp-client\nversion=5.9.0\n"
},
"origin": "com.rabbitmq",
"package": "amqp-client",
"specification-version": "N/A",
"type": "JAVA-JAR",
"version": "5.9.0",
},
{
"cpes": [
"cpe:2.3:a:amqp-client:amqp_client:5.9.0:*:*:*:*:*:*:*",
"cpe:2.3:a:amqp_client:amqp_client:5.9.0:*:*:*:*:*:*:*",
],
"language": "java",
"locations": [
{
"path": "/usr/share/logstash/vendor/bundle/jruby/2.5.0/gems/march_hare-4.3.0-java/lib/ext/rabbitmq-client.jar:amqp-client"
}
],
"metadata": {
"pomProperties": {
"artifactId": "amqp-client",
"groupId": "com.rabbitmq",
"version": "5.9.0",
}
},
"metadataType": "JavaMetadata",
"name": "amqp-client",
"type": "java-archive",
"version": "5.9.0",
},
id="java",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("dpkg"),
{
"cpes": ["cpe:2.3:a:bsdutils:bsdutils:1:2.33.1-0.1:*:*:*:*:*:*:*"],
"license": "BSD-2-clause BSD-3-clause BSD-4-clause GPL-2 GPL-2+ GPL-3 GPL-3+ LGPL LGPL-2 LGPL-2+ LGPL-2.1 LGPL-2.1+ LGPL-3 LGPL-3+ MIT public-domain",
"licenses": [
"BSD-2-clause",
],
"origin": "<NAME> <<EMAIL>> (maintainer)",
"package": "bsdutils",
"size": "293000",
"sourcepkg": "util-linux",
"type": "dpkg",
"version": "1:2.33.1-0.1",
},
{
"name": "bsdutils",
"version": "1:2.33.1-0.1",
"type": "deb",
"cpes": ["cpe:2.3:a:bsdutils:bsdutils:1:2.33.1-0.1:*:*:*:*:*:*:*"],
"locations": [{"path": "pkgdb"}],
"metadataType": "DpkgMetadata",
"metadata": {"source": "util-linux"},
},
id="dpkg-with-source",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("APKG"),
{
"cpes": [
"cpe:2.3:a:ssl-client:ssl_client:1.32.1-r5:*:*:*:*:*:*:*",
"cpe:2.3:a:ssl_client:ssl_client:1.32.1-r5:*:*:*:*:*:*:*",
"cpe:2.3:a:ssl-client:ssl-client:1.32.1-r5:*:*:*:*:*:*:*",
"cpe:2.3:a:ssl_client:ssl-client:1.32.1-r5:*:*:*:*:*:*:*",
"cpe:2.3:a:ssl:ssl_client:1.32.1-r5:*:*:*:*:*:*:*",
"cpe:2.3:a:ssl:ssl-client:1.32.1-r5:*:*:*:*:*:*:*",
],
"license": "GPL-2.0-only",
"licenses": ["GPL-2.0-only"],
"origin": "<NAME> <<EMAIL>>",
"package": "ssl_client",
"size": "28672",
"sourcepkg": "busybox",
"type": "APKG",
"version": "1.32.1-r5",
},
{
"name": "ssl_client",
"version": "1.32.1-r5",
"type": "apk",
"cpes": [
"cpe:2.3:a:ssl-client:ssl_client:1.32.1-r5:*:*:*:*:*:*:*",
"cpe:2.3:a:ssl_client:ssl_client:1.32.1-r5:*:*:*:*:*:*:*",
"cpe:2.3:a:ssl-client:ssl-client:1.32.1-r5:*:*:*:*:*:*:*",
"cpe:2.3:a:ssl_client:ssl-client:1.32.1-r5:*:*:*:*:*:*:*",
"cpe:2.3:a:ssl:ssl_client:1.32.1-r5:*:*:*:*:*:*:*",
"cpe:2.3:a:ssl:ssl-client:1.32.1-r5:*:*:*:*:*:*:*",
],
"locations": [{"path": "pkgdb"}],
"metadataType": "ApkMetadata",
"metadata": {"originPackage": "busybox"},
},
id="apkg-with-source",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("kb"),
{
"cpes": None,
"license": "Unknown",
"licenses": ["Unknown"],
"origin": "Microsoft",
"package": "935509",
"size": "0",
"sourcepkg": "10855",
"type": "kb",
"version": "935509",
},
{
"name": "10855",
"version": "935509",
"type": "msrc-kb",
"locations": [{"path": "registry"}],
},
id="microsoft-kb",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("rpm"),
{
"cpes": [
"cpe:2.3:a:openssl-libs:openssl-libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl-libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl-libs:openssl_libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl_libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl-libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl_libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl-libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl_libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
],
"license": "OpenSSL and ASL 2.0",
"licenses": ["OpenSSL", "and", "ASL", "2.0"],
"origin": "Red Hat, Inc.",
"package": "openssl-libs",
"size": "3757208",
"sourcepkg": "openssl-1.1.1g-15.el8_3.src.rpm",
"type": "rpm",
"version": "1:1.1.1g-15.el8_3",
},
{
"cpes": [
"cpe:2.3:a:openssl-libs:openssl-libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl-libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl-libs:openssl_libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl_libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl-libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl_libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl-libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl_libs:1:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
],
"name": "openssl-libs",
"type": "rpm",
"locations": [{"path": "pkgdb"}],
"version": "1:1.1.1g-15.el8_3",
"metadataType": "RpmdbMetadata",
"metadata": {
"sourceRpm": "openssl-1.1.1g-15.el8_3.src.rpm",
"epoch": 1,
},
},
id="rpm-with-epoch",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("rpm"),
{
"cpes": [
"cpe:2.3:a:openssl-libs:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl-libs:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
],
"license": "OpenSSL and ASL 2.0",
"licenses": ["OpenSSL", "and", "ASL", "2.0"],
"origin": "Red Hat, Inc.",
"package": "openssl-libs",
"size": "3757208",
"sourcepkg": "openssl-1.1.1g-15.el8_3.src.rpm",
"type": "rpm",
"version": "1.1.1g-15.el8_3",
},
{
"cpes": [
"cpe:2.3:a:openssl-libs:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl-libs:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
],
"name": "openssl-libs",
"type": "rpm",
"locations": [{"path": "pkgdb"}],
"version": "1.1.1g-15.el8_3",
"metadataType": "RpmdbMetadata",
"metadata": {
"sourceRpm": "openssl-1.1.1g-15.el8_3.src.rpm",
"epoch": None,
},
},
id="rpm-no-epoch",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("rpm"),
{
"cpes": [
"cpe:2.3:a:openssl-libs:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl-libs:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
],
"license": "OpenSSL and ASL 2.0",
"licenses": ["OpenSSL", "and", "ASL", "2.0"],
"origin": "Red Hat, Inc.",
"package": "openssl-libs",
"size": "3757208",
"sourcepkg": None,
"type": "rpm",
"version": "1.1.1g-15.el8_3",
},
{
"cpes": [
"cpe:2.3:a:openssl-libs:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl-libs:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
],
"name": "openssl-libs",
"type": "rpm",
"locations": [{"path": "pkgdb"}],
"version": "1.1.1g-15.el8_3",
"metadataType": "RpmdbMetadata",
"metadata": {
"sourceRpm": None,
"epoch": None,
},
},
id="rpm-no-src",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("rpm"),
{
"cpes": [
"cpe:2.3:a:openssl-libs:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl-libs:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
],
"license": "OpenSSL and ASL 2.0",
"licenses": ["OpenSSL", "and", "ASL", "2.0"],
"origin": "Red Hat, Inc.",
"package": "openssl-libs",
"size": "3757208",
"sourcepkg": None,
"type": "rpm",
"version": "1:1.1.1g-15.el8_3",
},
{
"cpes": [
"cpe:2.3:a:openssl-libs:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl-libs:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl_libs:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:openssl:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl-libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
"cpe:2.3:a:redhat:openssl_libs:1.1.1g-15.el8_3:*:*:*:*:*:*:*",
],
"name": "openssl-libs",
"type": "rpm",
"locations": [{"path": "pkgdb"}],
"version": "1:1.1.1g-15.el8_3",
"metadataType": "RpmdbMetadata",
"metadata": {
"sourceRpm": None,
"epoch": 1,
},
},
id="rpm-no-src-but-epoch",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("go"),
{
"cpes": [],
"package": "openssl-libs",
"type": "go",
"version": "1.1.1",
},
{
"cpes": [
"cpe:2.3:a:*:openssl-libs:1.1.1:*:*:*:*:*:*:*",
],
"name": "openssl-libs",
"type": "go-module",
"language": "go",
"version": "1.1.1",
},
id="go-mod-no-cpes",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("go"),
{
"cpes": [
"cpe:2.3:a:myvendor:openssl-libs:1.1.1-a:*:*:*:*:*:*:*",
],
"package": "openssl-libs",
"type": "go",
"version": "1.1.1",
},
{
"cpes": [
"cpe:2.3:a:myvendor:openssl-libs:1.1.1-a:*:*:*:*:*:*:*",
],
"name": "openssl-libs",
"type": "go-module",
"language": "go",
"version": "1.1.1",
},
id="go-mod-with-cpes",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("binary"),
{
"cpes": [],
"package": "openssl-libs",
"type": "go",
"version": "1.1.1",
},
{
"cpes": [
"cpe:2.3:a:*:openssl-libs:1.1.1:*:*:*:*:*:*:*",
],
"name": "openssl-libs",
"type": "binary",
"language": "",
"version": "1.1.1",
},
id="binary-no-cpes",
),
pytest.param(
ENGINE_PACKAGE_MAPPERS.get("binary"),
{
"cpes": [
"cpe:2.3:a:myvendor:openssl-libs:1.1.1-a:*:*:*:*:*:*:*",
],
"package": "openssl-libs",
"type": "go",
"version": "1.1.1",
},
{
"cpes": [
"cpe:2.3:a:myvendor:openssl-libs:1.1.1-a:*:*:*:*:*:*:*",
],
"name": "openssl-libs",
"type": "binary",
"language": "",
"version": "1.1.1",
},
id="binary-with-cpes",
),
],
)
def test_mappers(self, mapper, test_input, expected):
actual = mapper.image_content_to_grype_sbom(test_input)
# sort the list attributes before comparing
actual = {
key: sorted(value) if isinstance(value, list) else value
for key, value in actual.items()
}
expected = {
key: sorted(value) if isinstance(value, list) else value
for key, value in expected.items()
}
assert actual.pop("id")
assert actual == expected
class TestVulnerabilityMapper:
@pytest.mark.parametrize(
"vuln_id,feed_group,nvd_refs,expected",
[
pytest.param("foobar", "vulndb:xyz", None, "foobar", id="none-nvd-refs"),
pytest.param(None, None, None, None, id="all-none"),
pytest.param("foobar", "vulndb:xyz", [], "foobar", id="no-nvd-refs"),
pytest.param(
"foobar", "vulndb:xyz", ["1"], "foobar", id="invalid-nvd-refs"
),
pytest.param("foobar", None, [], "foobar", id="none-feed-group"),
pytest.param("foobar", 1, [], "foobar", id="invalid-feed-group-int"),
pytest.param(
"foobar", ["x", "y"], None, "foobar", id="invalid-feed-group-list"
),
pytest.param(
"foobar",
"abc:xyz",
[NVDReference(vulnerability_id="CVE-xyz")],
"foobar",
id="valid-dont-transform",
),
pytest.param(
"foobar",
"vulndb",
[NVDReference(vulnerability_id="CVE-xyz")],
"CVE-xyz",
id="valid-transform",
),
pytest.param(
"foobar",
"vulndb",
[
NVDReference(vulnerability_id="CVE-xyz"),
NVDReference(vulnerability_id="CVE-pqr"),
],
"foobar",
id="valid-multiple-nvd-refs",
),
],
)
def test_get_normalized_vulnerability_id(
self, vuln_id, feed_group, nvd_refs, expected
):
assert (
VulnerabilityMapper._try_get_normalized_vulnerability_id(
vuln_id, feed_group, nvd_refs
)
== expected
)
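    # The cases above imply a normalization rule roughly like the sketch below.
    # This is an inference from the expected values, not the actual implementation:
    # only a feed group starting with "vulndb", combined with exactly one valid
    # NVDReference, rewrites the record id to that reference's CVE id.
    #
    #   def _normalize(vuln_id, feed_group, nvd_refs):
    #       if (
    #           isinstance(feed_group, str)
    #           and feed_group.startswith("vulndb")
    #           and isinstance(nvd_refs, list)
    #           and len(nvd_refs) == 1
    #           and isinstance(nvd_refs[0], NVDReference)
    #       ):
    #           return nvd_refs[0].vulnerability_id
    #       return vuln_id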
@pytest.mark.parametrize(
"vuln_id,url,expected",
[
pytest.param(
"foobar",
None,
"http://<valid endpoint not found>/query/vulnerabilities?id=foobar",
id="none-url",
),
pytest.param(
"foobar",
"",
"http://<valid endpoint not found>/query/vulnerabilities?id=foobar",
id="blank-url",
),
],
)
def test_try_make_link(self, vuln_id, url, expected):
assert VulnerabilityMapper._try_make_link(vuln_id, url) == expected
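    # The expected strings above suggest a link builder along these lines
    # (an assumption based on the test data, not the real method body):
    #
    #   base_url = url or "http://<valid endpoint not found>"
    #   link = f"{base_url}/query/vulnerabilities?id={vuln_id}"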
class TestEngineGrypeDBMapper:
@pytest.mark.parametrize(
"cvss_dict, expected",
[
pytest.param(
{
"VendorMetadata": None,
"Metrics": {
"BaseScore": 7.8,
"ExploitabilityScore": 10,
"ImpactScore": 6.9,
},
"Vector": "AV:N/AC:L/Au:N/C:N/I:N/A:C",
"Version": "2.0",
},
{
"version": "2.0",
"vector_string": "AV:N/AC:L/Au:N/C:N/I:N/A:C",
"severity": "High",
"base_metrics": {
"base_score": 7.8,
"expolitability_score": 10,
"impact_score": 6.9,
},
},
id="nvd-cvss-2",
),
pytest.param(
{
"VendorMetadata": None,
"Metrics": {
"BaseScore": 7.5,
"ExploitabilityScore": 3.9,
"ImpactScore": 3.6,
},
"Vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H",
"Version": "3.1",
},
{
"version": | |
# banyan/controllers/cloud_resource.py
import logging
from typing import List
from uuid import UUID
import copy
from time import sleep
from cement import Controller, ex
from banyan.controllers.base import Base
from banyan.api import BanyanApiClient
from banyan.model.cloud_resource import CloudResource, CloudResourceInfo
from banyan.ext.iaas.base import IaasResource
class CloudResourceController(Controller):
class Meta:
label = 'cloud_resource'
stacked_type = 'nested'
stacked_on = 'base'
help = 'manage cloud resources discovered from IaaS'
@property
def _client(self) -> BanyanApiClient:
return self.app.client
    def trunc(self, value, num_chars) -> str:
        # Shorten long values to their last num_chars characters, prefixed with '...'.
        # Values that are empty or already short enough are returned unchanged.
        if not value:
            return ''
        value = str(value)
        if len(value) < num_chars + 3:
            return value
        else:
            return '...' + value[-num_chars:]
@ex(help='list cloud_resources',
arguments=[
(['--cloud'],
{
'help': 'filter by provider - AWS | AZURE | GCP | OCI | ...'
}),
(['--account'],
{
'help': 'filter by account'
}),
(['--region'],
{
'help': 'filter by region/location/zone'
}),
(['--resource_type'],
{
'help': 'filter by type - ec2 | vm | rds | ...'
})
])
def list(self):
params={'cloud_provider': self.app.pargs.cloud, 'account': self.app.pargs.account, 'region': self.app.pargs.region, 'resource_type': self.app.pargs.resource_type}
synced_resources: List[CloudResourceInfo] = self._client.cloud_resources.list(params=params)
results = list()
headers = ['Name', 'ID', 'Cloud', 'Account', 'Region', 'Type', 'Private Address', 'Public Address', '# Tags', 'Status']
for res in synced_resources:
new_res = [res.name[:20], res.resource_udid, res.cloud_provider, self.trunc(res.account,6),
res.region, res.resource_type,
self.trunc(res.private_ip or res.private_dns_name, 24),
self.trunc(res.public_ip or res.public_dns_name, 24),
len(res.tags or []), res.status]
results.append(new_res)
self.app.render(results, handler='tabulate', headers=headers, tablefmt='simple')
@ex(help='show details & tags of a cloud_resource',
arguments=[
(['resource_uuid'],
{
'help': 'get discovered resource by Banyan UUID.'
}),
])
def get(self):
id: UUID = self.app.pargs.resource_uuid
d_resource: CloudResourceInfo = self._client.cloud_resources.get(id)
dr_json = CloudResourceInfo.Schema().dump(d_resource)
self.app.render(dr_json, handler='json', indent=2, sort_keys=True)
@ex(help='create a new cloud_resource',
arguments=[
(['resources_json'],
{
'help': 'JSON blob describing the new discovered resource(s) to be created, or a filename '
'containing JSON prefixed by "@" (example: @res.json).'
}),
])
def create(self):
d_resource = Base.get_json_input(self.app.pargs.resources_json)
d_resource: CloudResourceInfo = self._client.cloud_resources.create(d_resource)
dr_json = CloudResource.Schema().dump(d_resource)
self.app.render(dr_json, handler='json', indent=2, sort_keys=True)
@ex(hide=True, help='update status for a given cloud_resource record',
arguments=[
(['resource_uuid'],
{
'help': 'Banyan UUID of discovered resource to update.'
}),
(['status'],
{
'help': 'Status - Discovered | Ignored | Published'
}),
])
def update_status(self):
id: UUID = self.app.pargs.resource_uuid
status: str = self.app.pargs.status
d_resource: CloudResourceInfo = self._client.cloud_resources.update_status(id, status)
dr_json = CloudResource.Schema().dump(d_resource)
self.app.render(dr_json, handler='json', indent=2, sort_keys=True)
@ex(help='delete a given cloud_resource record',
arguments=[
(['resource_uuid'],
{
'help': 'Banyan UUID of discovered resource to delete.'
}),
])
def delete(self):
id: UUID = self.app.pargs.resource_uuid
info = self._client.cloud_resources.delete(id)
self.app.render(info, handler='json')
@ex(hide=True, help='delete all cloud_resource records')
def delete_all(self):
synced_resources: List[CloudResourceInfo] = self._client.cloud_resources.list()
for d_resource in synced_resources:
info = self._client.cloud_resources.delete(d_resource.id)
self.app.render(info, handler='json')
sleep(0.05)
@ex(help='show cloud_resources associated with services',
arguments=[
(['--resource_uuid'],
{
'help': 'Banyan UUID of discovered resource to list associations for.'
}),
])
def list_associations(self):
assocs = self._client.cloud_resources.associations(self.app.pargs.resource_uuid)
results = list()
headers = ['ID', 'Resource ID', 'Resource Name', 'Resource Type', 'Service ID', 'Service Name', 'Resource Status']
for res in assocs:
new_res = [res.id, self.trunc(res.resource_udid,9), res.resource_name, res.resource_type,
res.service_id, res.service_name, res.resource_status]
results.append(new_res)
self.app.render(results, handler='tabulate', headers=headers, tablefmt='simple')
@ex(hide=True, help='associate cloud_resource with service',
arguments=[
(['resource_uuid'],
{
'help': 'Banyan UUID of discovered resource to associate.'
}),
(['service_name'],
{
'metavar': 'service_name_or_id',
'help': 'Name or ID of the service to associate a discovered resource with.',
}),
])
def associate_with_service(self):
info = self._client.cloud_resources.associate(self.app.pargs.resource_uuid, self.app.pargs.service_name)
self.app.render(info, handler='json')
@ex(hide=True, help='dissociate cloud_resource from service',
arguments=[
(['association_uuid'],
{
'help': 'Association UUID of discovered resource and service.'
}),
])
def dissociate_from_service(self):
info = self._client.cloud_resources.dissociate(self.app.pargs.association_uuid)
self.app.render(info, handler='json')
@ex(help='test AWS configuration by getting EC2 resources',
arguments=[
(['region'],
{
'help': 'region where AWS EC2 resources exist - us-east-1, us-west-2, etc.'
})
])
def test_aws(self):
try:
from banyan.ext.iaas.aws import AwsController
except Exception as ex:
raise NotImplementedError("AWS SDK not configured correctly > %s" % ex.args[0])
aws = AwsController(self.app.pargs.region)
instances: List[IaasResource] = aws.list_ec2()
if len(instances):
print('--> AWS configuration test passed. Found %d resources.' % len(instances))
else:
print('--> AWS configuration test failed. Check your AWS credentials and SDK configuration.')
@ex(help='sync cloud_resources with AWS IaaS',
arguments=[
(['resource_type'],
{
'help': 'type of AWS resource - ec2 | rds | elb. You can say "all" but be careful!'
}),
(['region'],
{
'help': 'region where AWS resources run - us-east-1, us-west-2, etc. You can say "all" but be careful!'
}),
(['--tag_name'],
{
'help': 'only sync resources with specific tag name. If not specified, sync all resources.'
}),
(['--wait_for_input', '-w'],
{
'action': 'store_true',
'help': 'wait for user input before proceeding to next step'
})
])
def sync_aws(self):
try:
from banyan.ext.iaas.aws import AwsController
except Exception as ex:
raise NotImplementedError("AWS SDK not configured correctly > %s" % ex.args[0])
rt = self.app.pargs.resource_type.lower()
wait = self.app.pargs.wait_for_input
instances: List[IaasResource] = list()
aws = AwsController(self.app.pargs.region, self.app.pargs.tag_name)
if rt == 'ec2' or rt == 'all':
Base.wait_for_input(wait, 'Getting list of AWS EC2 resources')
instances += aws.list_ec2()
if rt == 'rds' or rt == 'all':
Base.wait_for_input(wait, 'Getting list of AWS RDS resources')
instances += aws.list_rds()
if rt == 'elb' or rt == 'all':
Base.wait_for_input(wait, 'Getting list of AWS ELB resources')
instances += aws.list_elb()
results = Base.tabulate_iaas_resources(instances)
self.app.render(results, handler='tabulate', headers='keys', tablefmt='simple')
if len(results) == 0:
print('--> No AWS resources to sync')
return
Base.wait_for_input(wait, 'Filtering for new AWS resources')
params={'cloud_provider': aws.provider, 'resource_type': rt, 'region': self.app.pargs.region}
synced_resources: List[CloudResourceInfo] = self._client.cloud_resources.list(params=Base.sanitize_alls(params))
added_instances = Base.added_iaas_resources(instances, synced_resources)
new_results = Base.tabulate_iaas_resources(added_instances)
self.app.render(new_results, handler='tabulate', headers='keys', tablefmt='simple')
if len(new_results) == 0:
print('--> No new AWS resources to sync')
return
Base.wait_for_input(wait, 'Syncing into Banyan Cloud Resource inventory')
for instance in added_instances:
res = Base.convert_iaas_resource(instance)
self._client.cloud_resources.create(res)
print('\n--> Added AWS resource id(name): %s(%s)' % (res.resource_id, res.resource_name))
sleep(0.05)
print('\n--> Sync with AWS successful.')
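    # Illustrative CLI invocation (the command and flag names are assumed from the
    # Cement labels and argument definitions above, not verified against the shipped CLI):
    #
    #   banyan cloud_resource sync_aws ec2 us-east-1 --tag_name backend -w
    #
    # This lists EC2 instances in us-east-1 tagged "backend", filters out resources
    # already present in the inventory, and creates a CloudResource record for each
    # remaining instance.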
@ex(help='test Azure configuration',
arguments=[
(['resource_group'],
{
'help': 'Azure Resource Group where some VMs run. You can say "all" but be careful!'
})
])
def test_azure(self):
try:
from banyan.ext.iaas.azure_cloud import AzureController
except Exception as ex:
raise NotImplementedError("Azure SDK not configured correctly > %s" % ex.args[0])
azr = AzureController(self.app.pargs.resource_group)
instances: List[IaasResource] = azr.list_vm()
if len(instances):
print('--> Azure configuration test passed. Found %d resources.' % len(instances))
else:
print('--> Azure configuration test failed. Check your Azure credentials and SDK configuration.')
@ex(help='sync cloud_resources with Azure IaaS',
arguments=[
(['resource_type'],
{
            'help': 'type of Azure resource - vm | lb. You can say "all" but be careful!'
}),
(['resource_group'],
{
'help': 'Azure Resource Group where resources run. You can say "all" but be careful!'
}),
(['--location'],
{
'help': 'location where Azure resources run - centralus, eastus, etc. If not specified, all locations are used.'
}),
(['--tag_name'],
{
'help': 'only sync resources with specific tag name. If not specified, sync all resources.'
}),
(['--wait_for_input', '-w'],
{
'action': 'store_true',
'help': 'wait for user input before proceeding to next step'
})
])
def sync_azure(self):
try:
from banyan.ext.iaas.azure_cloud import AzureController
except Exception as ex:
raise NotImplementedError("Azure SDK not configured correctly > %s" % ex.args[0])
rt = self.app.pargs.resource_type.lower()
wait = self.app.pargs.wait_for_input
instances: List[IaasResource] = list()
azr = AzureController(self.app.pargs.resource_group, self.app.pargs.location, self.app.pargs.tag_name)
if rt == 'vm' or rt == 'all':
Base.wait_for_input(wait, 'Getting list of Azure VM resources')
instances += azr.list_vm()
if rt == 'lb' or rt == 'all':
Base.wait_for_input(wait, 'Getting list of Azure LB resources')
instances += azr.list_lb()
results = Base.tabulate_iaas_resources(instances, ['id'])
self.app.render(results, handler='tabulate', headers='keys', tablefmt='simple')
if len(results) == 0:
print('--> No Azure resources to sync')
return
Base.wait_for_input(wait, 'Filtering for new Azure resources')
params={'cloud_provider': azr.provider, 'resource_type': rt, 'account': self.app.pargs.resource_group, 'region': self.app.pargs.location}
synced_resources: List[CloudResourceInfo] = self._client.cloud_resources.list(params=Base.sanitize_alls(params))
added_instances = Base.added_iaas_resources(instances, synced_resources)
new_results = Base.tabulate_iaas_resources(added_instances, ['id'])
self.app.render(new_results, handler='tabulate', headers='keys', tablefmt='simple')
if len(new_results) == 0:
print('--> No new Azure resources to sync')
return
Base.wait_for_input(wait, 'Syncing into Banyan Cloud Resource inventory')
for instance in added_instances:
res = Base.convert_iaas_resource(instance)
self._client.cloud_resources.create(res)
print('\n--> Added Azure resource id(name): %s(%s)' % (res.resource_id, res.resource_name))
sleep(0.05)
print('\n--> Sync with Azure successful.')
@ex(help='test Google Cloud configuration',
arguments=[
(['project'],
{
'help': 'Project ID where GCP resources run. You can say "all" but be careful!'
})
])
def test_gcp(self):
try:
from banyan.ext.iaas.gcp import GcpController
except Exception as ex:
raise NotImplementedError("GCP Client Libraries for Python not configured correctly > %s" % ex.args[0])
gcp = GcpController(self.app.pargs.project)
instances: List[IaasResource] = gcp.list_vm()
if len(instances):
print('--> Google Cloud configuration test passed. Found %d resources.' % len(instances))
else:
            print('--> Google Cloud configuration test failed. Check your GCP credentials and SDK configuration.')
import os
import subprocess
from unittest import mock
from . import *
from .... import *
from mopack.builders.bfg9000 import Bfg9000Builder
from mopack.config import Config
from mopack.path import Path
from mopack.sources import Package
from mopack.sources.apt import AptPackage
from mopack.sources.sdist import TarballPackage
from mopack.types import ConfigurationError
def mock_exists(p):
return os.path.basename(p) == 'mopack.yml'
class TestTarball(SDistTestCase):
pkg_type = TarballPackage
srcurl = 'http://example.invalid/hello-bfg.tar.gz'
srcpath = os.path.join(test_data_dir, 'hello-bfg.tar.gz')
def setUp(self):
self.config = Config([])
def mock_urlopen(self, url):
return open(self.srcpath, 'rb')
def check_fetch(self, pkg):
srcdir = os.path.join(self.pkgdir, 'src', 'foo')
with mock.patch('mopack.sources.sdist.urlopen', self.mock_urlopen), \
mock.patch('tarfile.TarFile.extractall') as mtar, \
mock.patch('os.path.isdir', return_value=True), \
mock.patch('os.path.exists', return_value=False): # noqa
pkg.fetch(self.config, self.pkgdir)
mtar.assert_called_once_with(srcdir, None)
def test_url(self):
pkg = self.make_package('foo', url=self.srcurl, build='bfg9000')
self.assertEqual(pkg.url, self.srcurl)
self.assertEqual(pkg.path, None)
self.assertEqual(pkg.patch, None)
self.assertEqual(pkg.builder, self.make_builder(Bfg9000Builder, 'foo'))
self.assertEqual(pkg.needs_dependencies, True)
self.assertEqual(pkg.should_deploy, True)
self.check_fetch(pkg)
self.check_resolve(pkg)
def test_path(self):
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000')
self.assertEqual(pkg.url, None)
self.assertEqual(pkg.path, Path(self.srcpath))
self.assertEqual(pkg.patch, None)
self.assertEqual(pkg.builder, self.make_builder(Bfg9000Builder, 'foo'))
self.assertEqual(pkg.needs_dependencies, True)
self.assertEqual(pkg.should_deploy, True)
self.check_fetch(pkg)
self.check_resolve(pkg)
def test_zip_path(self):
srcpath = os.path.join(test_data_dir, 'hello-bfg.zip')
pkg = self.make_package('foo', build='bfg9000', path=srcpath)
self.assertEqual(pkg.url, None)
self.assertEqual(pkg.path, Path(srcpath))
self.assertEqual(pkg.builder, self.make_builder(Bfg9000Builder, 'foo'))
self.assertEqual(pkg.needs_dependencies, True)
self.assertEqual(pkg.should_deploy, True)
srcdir = os.path.join(self.pkgdir, 'src', 'foo')
with mock.patch('mopack.sources.sdist.urlopen', self.mock_urlopen), \
mock.patch('zipfile.ZipFile.extractall') as mtar, \
mock.patch('os.path.isdir', return_value=True), \
mock.patch('os.path.exists', return_value=False): # noqa
pkg.fetch(self.config, self.pkgdir)
mtar.assert_called_once_with(srcdir, None)
self.check_resolve(pkg)
def test_invalid_url_path(self):
with self.assertRaises(TypeError):
self.make_package('foo', build='bfg9000')
with self.assertRaises(TypeError):
self.make_package('foo', url=self.srcurl, path=self.srcpath,
build='bfg9000')
def test_files(self):
pkg = self.make_package('foo', path=self.srcpath,
files='/hello-bfg/include/', build='bfg9000')
self.assertEqual(pkg.files, ['/hello-bfg/include/'])
srcdir = os.path.join(self.pkgdir, 'src', 'foo')
with mock.patch('mopack.sources.sdist.urlopen', self.mock_urlopen), \
mock.patch('tarfile.TarFile.extract') as mtar, \
mock.patch('os.path.isdir', return_value=True), \
mock.patch('os.path.exists', return_value=False): # noqa
pkg.fetch(self.config, self.pkgdir)
self.assertEqual(mtar.mock_calls, [
mock.call('hello-bfg/include', srcdir),
mock.call('hello-bfg/include/hello.hpp', srcdir),
])
self.check_resolve(pkg)
def test_patch(self):
patch = os.path.join(test_data_dir, 'hello-bfg.patch')
pkg = self.make_package('foo', path=self.srcpath, patch=patch,
build='bfg9000')
self.assertEqual(pkg.patch, Path(patch))
srcdir = os.path.join(self.pkgdir, 'src', 'foo')
with mock.patch('mopack.sources.sdist.urlopen', self.mock_urlopen), \
mock.patch('mopack.sources.sdist.pushd'), \
mock.patch('tarfile.TarFile.extractall') as mtar, \
mock.patch('os.path.isdir', return_value=True), \
mock.patch('os.path.exists', return_value=False), \
mock.patch('builtins.open', mock_open_after_first()) as mopen, \
mock.patch('os.makedirs'), \
mock.patch('subprocess.run') as mrun: # noqa
pkg.fetch(self.config, self.pkgdir)
mtar.assert_called_once_with(srcdir, None)
mrun.assert_called_once_with(
['patch', '-p1'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, stdin=mopen(),
universal_newlines=True, check=True
)
self.check_resolve(pkg)
def test_build(self):
build = {'type': 'bfg9000', 'extra_args': '--extra'}
pkg = self.make_package('foo', path=self.srcpath, build=build,
usage='pkg_config')
self.assertEqual(pkg.path, Path(self.srcpath))
self.assertEqual(pkg.builder, self.make_builder(
Bfg9000Builder, 'foo', extra_args='--extra'
))
self.check_fetch(pkg)
self.check_resolve(pkg)
def test_infer_build(self):
# Basic inference
pkg = self.make_package('foo', path=self.srcpath)
self.assertEqual(pkg.builder, None)
with mock.patch('os.path.exists', mock_exists), \
mock.patch('builtins.open', mock_open_after_first(
read_data='export:\n build: bfg9000'
)), \
mock.patch('tarfile.TarFile.extractall'): # noqa
config = pkg.fetch(self.config, self.pkgdir)
self.assertEqual(config.export.build, 'bfg9000')
self.assertEqual(pkg, self.make_package(
'foo', path=self.srcpath, build='bfg9000'
))
self.check_resolve(pkg)
        # Infer the build, but override usage
pkg = self.make_package('foo', path=self.srcpath,
usage={'type': 'system'})
self.assertEqual(pkg.builder, None)
with mock.patch('os.path.exists', mock_exists), \
mock.patch('builtins.open', mock_open_after_first(
read_data='export:\n build: bfg9000'
)), \
mock.patch('tarfile.TarFile.extractall'): # noqa
config = pkg.fetch(self.config, self.pkgdir)
self.assertEqual(config.export.build, 'bfg9000')
self.assertEqual(pkg, self.make_package(
'foo', path=self.srcpath, build='bfg9000',
usage={'type': 'system'}
))
with mock.patch('subprocess.run', side_effect=OSError()), \
mock.patch('mopack.usage.path_system.PathUsage._filter_path',
lambda *args: []), \
mock.patch('mopack.usage.path_system.file_outdated',
return_value=True), \
mock.patch('os.makedirs'), \
mock.patch('builtins.open'): # noqa
self.check_resolve(pkg, usage={
'name': 'foo', 'type': 'system',
'path': [self.pkgconfdir(None)], 'pcfiles': ['foo'],
'generated': True, 'auto_link': False,
})
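    # The inference in test_infer_build reads an embedded mopack.yml whose export
    # section matches the mocked read_data above, i.e. (illustrative):
    #
    #   export:
    #     build: bfg9000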
def test_infer_build_override(self):
pkg = self.make_package('foo', path=self.srcpath, build='cmake',
usage='pkg_config')
with mock.patch('os.path.exists', mock_exists), \
mock.patch('builtins.open', mock_open_after_first(
read_data='export:\n build: bfg9000'
)), \
mock.patch('tarfile.TarFile.extractall'): # noqa
config = pkg.fetch(self.config, self.pkgdir)
self.assertEqual(config.export.build, 'bfg9000')
self.assertEqual(pkg, self.make_package(
'foo', path=self.srcpath, build='cmake', usage='pkg_config'
))
with mock.patch('mopack.builders.cmake.pushd'):
self.check_resolve(pkg)
def test_usage(self):
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
usage='pkg_config')
self.assertEqual(pkg.path, Path(self.srcpath))
self.assertEqual(pkg.builder, self.make_builder(
Bfg9000Builder, 'foo', usage='pkg_config'
))
self.check_fetch(pkg)
self.check_resolve(pkg)
with mock.patch('subprocess.run') as mrun:
pkg.version(self.pkgdir)
mrun.assert_called_once_with(
['pkg-config', 'foo', '--modversion'],
check=True, env={'PKG_CONFIG_PATH': self.pkgconfdir('foo')},
stdout=subprocess.PIPE, universal_newlines=True
)
usage = {'type': 'pkg_config', 'path': 'pkgconf'}
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
usage=usage)
self.assertEqual(pkg.path, Path(self.srcpath))
self.assertEqual(pkg.builder, self.make_builder(
Bfg9000Builder, 'foo', usage=usage
))
self.check_fetch(pkg)
self.check_resolve(pkg, usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo', 'pkgconf')], 'pcfiles': ['foo'],
'extra_args': [],
})
usage = {'type': 'path', 'libraries': []}
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
usage=usage)
self.assertEqual(pkg.path, Path(self.srcpath))
self.assertEqual(pkg.builder, self.make_builder(
Bfg9000Builder, 'foo', usage=usage
))
self.check_fetch(pkg)
self.check_resolve(pkg, usage={
'name': 'foo', 'type': 'path', 'path': [self.pkgconfdir(None)],
'pcfiles': ['foo'], 'generated': True, 'auto_link': False,
})
with mock.patch('subprocess.run') as mrun:
self.assertEqual(pkg.version(self.pkgdir), None)
mrun.assert_not_called()
def test_submodules(self):
submodules_required = {'names': '*', 'required': True}
submodules_optional = {'names': '*', 'required': False}
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
submodules=submodules_required)
self.check_fetch(pkg)
self.check_resolve(pkg, submodules=['sub'])
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
usage={'type': 'pkg_config', 'pcfile': 'bar'},
submodules=submodules_required)
self.check_fetch(pkg)
self.check_resolve(pkg, submodules=['sub'], usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo')], 'pcfiles': ['bar', 'foo_sub'],
'extra_args': [],
})
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
submodules=submodules_optional)
self.check_fetch(pkg)
self.check_resolve(pkg, submodules=['sub'])
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000',
usage={'type': 'pkg_config', 'pcfile': 'bar'},
submodules=submodules_optional)
self.check_fetch(pkg)
self.check_resolve(pkg, submodules=['sub'], usage={
'name': 'foo', 'type': 'pkg_config',
'path': [self.pkgconfdir('foo')], 'pcfiles': ['bar', 'foo_sub'],
'extra_args': [],
})
def test_invalid_submodule(self):
pkg = self.make_package(
'foo', path=self.srcpath, build='bfg9000',
submodules={'names': ['sub'], 'required': True}
)
with self.assertRaises(ValueError):
pkg.get_usage(self.pkgdir, ['invalid'])
def test_already_fetched(self):
def mock_exists(p):
return os.path.basename(p) == 'foo'
build = {'type': 'bfg9000', 'extra_args': '--extra'}
pkg = self.make_package('foo', path=self.srcpath, srcdir='srcdir',
build=build, usage='pkg_config')
with mock.patch('os.path.exists', mock_exists), \
mock.patch('tarfile.TarFile.extractall') as mtar, \
mock.patch('os.path.isdir', return_value=True): # noqa
pkg.fetch(self.config, self.pkgdir)
mtar.assert_not_called()
self.check_resolve(pkg)
def test_deploy(self):
deploy_paths = {'prefix': '/usr/local'}
pkg = self.make_package('foo', url=self.srcurl, build='bfg9000',
deploy_paths=deploy_paths)
self.assertEqual(pkg.should_deploy, True)
self.check_fetch(pkg)
with mock_open_log() as mopen, \
mock.patch('mopack.builders.bfg9000.pushd'), \
mock.patch('subprocess.run') as mrun: # noqa
pkg.resolve(self.pkgdir)
mopen.assert_called_with(os.path.join(
self.pkgdir, 'logs', 'foo.log'
), 'a')
builddir = os.path.join(self.pkgdir, 'build', 'foo')
mrun.assert_any_call(
['bfg9000', 'configure', builddir, '--prefix', '/usr/local'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True, check=True
)
with mock_open_log() as mopen, \
mock.patch('mopack.builders.bfg9000.pushd'), \
mock.patch('subprocess.run'): # noqa
pkg.deploy(self.pkgdir)
mopen.assert_called_with(os.path.join(
self.pkgdir, 'logs', 'deploy', 'foo.log'
), 'a')
pkg = self.make_package('foo', url='http://example.com',
build='bfg9000', deploy=False)
self.assertEqual(pkg.should_deploy, False)
with mock_open_log() as mopen:
pkg.deploy(self.pkgdir)
mopen.assert_not_called()
def test_clean_pre(self):
otherpath = os.path.join(test_data_dir, 'other_project.tar.gz')
oldpkg = self.make_package('foo', path=self.srcpath,
srcdir='bfg_project', build='bfg9000')
newpkg1 = self.make_package('foo', path=otherpath, build='bfg9000')
newpkg2 = self.make_package(AptPackage, 'foo')
srcdir = os.path.join(self.pkgdir, 'src', 'foo')
# Tarball -> Tarball (same)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_pre(oldpkg, self.pkgdir), False)
mlog.assert_not_called()
mrmtree.assert_not_called()
# Tarball -> Tarball (different)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_pre(newpkg1, self.pkgdir), True)
mlog.assert_called_once()
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
# Tarball -> Apt
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_pre(newpkg2, self.pkgdir), True)
mlog.assert_called_once()
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
# Tarball -> nothing
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_pre(None, self.pkgdir), True)
mlog.assert_called_once()
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
# Tarball -> nothing (quiet)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_pre(None, self.pkgdir, True), True)
mlog.assert_not_called()
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
def test_clean_post(self):
otherpath = os.path.join(test_data_dir, 'other_project.tar.gz')
oldpkg = self.make_package('foo', path=self.srcpath,
srcdir='bfg_project', build='bfg9000')
newpkg1 = self.make_package('foo', path=otherpath, build='bfg9000')
newpkg2 = self.make_package(AptPackage, 'foo')
# Tarball -> Tarball (same)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean: # noqa
self.assertEqual(oldpkg.clean_post(oldpkg, self.pkgdir), False)
mlog.assert_not_called()
mclean.assert_not_called()
# Tarball -> Tarball (different)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean: # noqa
self.assertEqual(oldpkg.clean_post(newpkg1, self.pkgdir), True)
mlog.assert_called_once()
mclean.assert_called_once_with(self.pkgdir)
# Tarball -> Apt
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean: # noqa
self.assertEqual(oldpkg.clean_post(newpkg2, self.pkgdir), True)
mlog.assert_called_once()
mclean.assert_called_once_with(self.pkgdir)
# Tarball -> nothing
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean: # noqa
self.assertEqual(oldpkg.clean_post(None, self.pkgdir), True)
mlog.assert_called_once()
mclean.assert_called_once_with(self.pkgdir)
# Tarball -> nothing (quiet)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean: # noqa
self.assertEqual(oldpkg.clean_post(None, self.pkgdir, True), True)
mlog.assert_not_called()
mclean.assert_called_once_with(self.pkgdir)
def test_clean_all(self):
otherpath = os.path.join(test_data_dir, 'other_project.tar.gz')
oldpkg = self.make_package('foo', path=self.srcpath,
srcdir='bfg_project', build='bfg9000')
newpkg1 = self.make_package('foo', path=otherpath, build='bfg9000')
newpkg2 = self.make_package(AptPackage, 'foo')
srcdir = os.path.join(self.pkgdir, 'src', 'foo')
# Tarball -> Tarball (same)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_all(oldpkg, self.pkgdir),
(False, False))
mlog.assert_not_called()
mclean.assert_not_called()
mrmtree.assert_not_called()
# Tarball -> Tarball (different)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_all(newpkg1, self.pkgdir),
(True, True))
self.assertEqual(mlog.call_count, 2)
mclean.assert_called_once_with(self.pkgdir)
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
# Tarball -> Apt
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_all(newpkg2, self.pkgdir),
(True, True))
self.assertEqual(mlog.call_count, 2)
mclean.assert_called_once_with(self.pkgdir)
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
# Tarball -> nothing
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_all(None, self.pkgdir),
(True, True))
self.assertEqual(mlog.call_count, 2)
mclean.assert_called_once_with(self.pkgdir)
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
# Tarball -> nothing (quiet)
with mock.patch('mopack.log.pkg_clean') as mlog, \
mock.patch(mock_bfgclean) as mclean, \
mock.patch('shutil.rmtree') as mrmtree: # noqa
self.assertEqual(oldpkg.clean_all(None, self.pkgdir, True),
(True, True))
mlog.assert_not_called()
mclean.assert_called_once_with(self.pkgdir)
mrmtree.assert_called_once_with(srcdir, ignore_errors=True)
def test_equality(self):
otherpath = os.path.join(test_data_dir, 'other_project.tar.gz')
pkg = self.make_package('foo', path=self.srcpath, build='bfg9000')
self.assertEqual(pkg, self.make_package(
'foo', path=self.srcpath, build='bfg9000'
))
self.assertEqual(pkg, self.make_package(
'foo', path=self.srcpath, build='bfg9000',
config_file='/path/to/mopack2.yml'
))
self.assertNotEqual(pkg, self.make_package(
'bar', path=self.srcpath, build='bfg9000'
))
self.assertNotEqual(pkg, self.make_package(
'foo', url=self.srcurl, build='bfg9000'
))
self.assertNotEqual(pkg, self.make_package(
'foo', path=otherpath, build='bfg9000'
))
def test_rehydrate(self):
opts = self.make_options()
pkg = TarballPackage('foo', path=self.srcpath, build='bfg9000',
_options=opts, config_file=self.config_file)
data = through_json(pkg.dehydrate())
self.assertEqual(pkg, Package.rehydrate(data, _options=opts))
pkg = TarballPackage('foo', url=self.srcurl, build='bfg9000',
_options=opts, config_file=self.config_file)
data = through_json(pkg.dehydrate())
self.assertEqual(pkg, Package.rehydrate(data, | |
#!/usr/bin/env python
# ads/feature_engineering/feature_type/handler/feature_validator.py
# -*- coding: utf-8 -*--
# Copyright (c) 2021, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
"""
The module that helps to register custom validators for the feature types and
to extend registered validators with dispatching based on the specific arguments.
Classes
-------
FeatureValidator
The Feature Validator class to manage custom validators.
FeatureValidatorMethod
    The Feature Validator Method class. Extends methods which require
    dispatching based on the specific arguments.
"""
import inspect
from typing import Any, Callable, Dict, Tuple, Union
import pandas as pd
class WrongHandlerMethodSignature(ValueError):
def __init__(self, handler_name: str, condition: str, handler_signature: str):
super().__init__(
f"The registered condition {condition} is not compatible "
f"with the provided {handler_name} method. "
f"Expected parameters: {handler_signature}"
)
class ValidatorNotFound(ValueError):
def __init__(self, name: str):
super().__init__(f"Validator {name} not found.")
class ValidatorWithConditionNotFound(ValueError):
def __init__(self, name: str):
super().__init__(f"Validator {name} with provided condition not found.")
class ValidatorAlreadyExists(ValueError):
def __init__(self, name: str):
super().__init__(f"Validator {name} already exists.")
class ValidatorWithConditionAlreadyExists(ValueError):
def __init__(self, name: str):
super().__init__(f"Validator {name} with provided condition already exists.")
def _prepare_condition(params: Union[Tuple, Dict[str, Any]]) -> Tuple:
"""Converts provided parameters to Tuple.
Parameters
-----------
params: (Union[Tuple, Dict[str, Any]])
The condition which will be used to register a new validator.
Returns
-------
Tuple
Prepared condition.
Raises
------
ValueError
If condition not provided or provided in the wrong format.
"""
if not params:
raise ValueError("Condition not provided.")
if not isinstance(params, (dict, tuple)):
raise ValueError(
"Wrong format for the condition. Condition should be dict or list."
)
if not isinstance(params, tuple):
return tuple((key, params[key]) for key in params)
return params
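# Illustrative behaviour of _prepare_condition (values are made up for the example):
#   _prepare_condition({'country_code': '+1'})  ->  (('country_code', '+1'),)
#   _prepare_condition(('country_code',))       ->  ('country_code',)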
class FeatureValidator:
"""The Feature Validator class to manage custom validators.
Methods
-------
register(self, name: str, handler: Callable, condition: Union[Tuple, Dict[str, Any]] = None, replace: bool = False) -> None
Registers new validator.
unregister(self, name: str, condition: Union[Tuple, Dict[str, Any]] = None) -> None
Unregisters validator.
registered(self) -> pd.DataFrame
Gets the list of registered validators.
Examples
--------
>>> series = pd.Series(['+1-202-555-0141', '+1-202-555-0142'], name='Phone Number')
>>> def phone_number_validator(data: pd.Series) -> pd.Series:
... print("phone_number_validator")
... return data
>>> def universal_phone_number_validator(data: pd.Series, country_code) -> pd.Series:
... print("universal_phone_number_validator")
... return data
>>> def us_phone_number_validator(data: pd.Series, country_code) -> pd.Series:
... print("us_phone_number_validator")
... return data
>>> PhoneNumber.validator.register(name="is_phone_number", handler=phone_number_validator, replace=True)
>>> PhoneNumber.validator.register(name="is_phone_number", handler=universal_phone_number_validator, condition = ('country_code',))
>>> PhoneNumber.validator.register(name="is_phone_number", handler=us_phone_number_validator, condition = {'country_code':'+1'})
>>> PhoneNumber.validator.is_phone_number(series)
phone_number_validator
0 +1-202-555-0141
1 +1-202-555-0142
>>> PhoneNumber.validator.is_phone_number(series, country_code = '+7')
universal_phone_number_validator
0 +1-202-555-0141
1 +1-202-555-0142
>>> PhoneNumber.validator.is_phone_number(series, country_code = '+1')
us_phone_number_validator
0 +1-202-555-0141
1 +1-202-555-0142
>>> PhoneNumber.validator.registered()
Validator Condition Handler
---------------------------------------------------------------------------------
0 is_phone_number () phone_number_validator
1 is_phone_number ('country_code') universal_phone_number_validator
2 is_phone_number {'country_code': '+1'} us_phone_number_validator
>>> series.ads.validator.is_phone_number()
phone_number_validator
0 +1-202-555-0141
1 +1-202-555-0142
>>> series.ads.validator.is_phone_number(country_code = '+7')
universal_phone_number_validator
0 +1-202-555-0141
1 +1-202-555-0142
>>> series.ads.validator.is_phone_number(country_code = '+1')
us_phone_number_validator
0 +1-202-555-0141
1 +1-202-555-0142
"""
def __init__(self):
"""Initializes the FeatureValidator."""
self._validators = {}
def register(
self,
name: str,
handler: Callable,
condition: Union[Tuple, Dict[str, Any]] = None,
replace: bool = False,
) -> None:
"""Registers new validator.
Parameters
----------
name : str
The validator name.
handler: callable
The handler.
condition: Union[Tuple, Dict[str, Any]]
The condition for the validator.
replace: bool
The flag indicating if the registered validator should be replaced with the new one.
Returns
-------
None
Nothing.
Raises
------
ValueError
The name is empty or handler is not provided.
TypeError
The handler is not callable.
The name of the validator is not a string.
ValidatorAlreadyExists
The validator is already registered.
"""
if not name:
raise ValueError("Validator name is not provided.")
if not isinstance(name, str):
raise TypeError("Validator name should be a string.")
if not replace and name in self._validators:
if not condition:
raise ValidatorAlreadyExists(name)
if self._validators[name]._has_condition(condition):
raise ValidatorWithConditionAlreadyExists(name)
if not handler:
raise ValueError("Handler is not provided.")
if not callable(handler):
raise TypeError("Handler should be a function.")
if condition:
self._validators[name].register(condition=condition, handler=handler)
else:
self._validators[name] = FeatureValidatorMethod(handler)
def unregister(
self, name: str, condition: Union[Tuple, Dict[str, Any]] = None
) -> None:
"""Unregisters validator.
Parameters
----------
name: str
The name of the validator to be unregistered.
condition: Union[Tuple, Dict[str, Any]]
The condition for the validator to be unregistered.
Returns
-------
None
Nothing.
Raises
------
TypeError
The name of the validator is not a string.
ValidatorNotFound
The validator not found.
        ValidatorWithConditionNotFound
The validator with provided condition not found.
"""
if not name:
raise ValueError("Validator name is not provided.")
if not isinstance(name, str):
raise TypeError("Validator name should be a string.")
if name not in self._validators:
raise ValidatorNotFound(name)
if condition and not self._validators[name]._has_condition(condition):
raise ValidatorWithConditionNotFound(name)
if condition:
self._validators[name].unregister(condition)
else:
del self._validators[name]
def registered(self) -> pd.DataFrame:
"""Gets the list of registered validators.
Returns
-------
pd.DataFrame
The list of registerd validators.
"""
result_df = pd.DataFrame((), columns=["Validator", "Condition", "Handler"])
for key, feature_validator in self._validators.items():
feature_validators_df = feature_validator.registered()
feature_validators_df.insert(0, "Validator", key)
result_df = result_df.append(feature_validators_df)
result_df.reset_index(drop=True, inplace=True)
return result_df
def _bind_data(self, data: pd.Series) -> None:
"""Binds the data to the all registered validators.
Parameters
----------
data: pd.Series
The data to be processed.
"""
for validator in self._validators.values():
validator._bind_data(data)
def __getattr__(self, attr):
"""Makes it possible to invoke registered validators as a regular method."""
if attr in self._validators:
return self._validators[attr]
raise AttributeError(attr)
class FeatureValidatorMethod:
"""The Feature Validator Method class.
    Extends methods which require dispatching based on the specific arguments.
Methods
-------
register(self, condition: Union[Tuple, Dict[str, Any]], handler: Callable) -> None
Registers new handler.
unregister(self, condition: Union[Tuple, Dict[str, Any]]) -> None
Unregisters existing handler.
registered(self) -> pd.DataFrame
Gets the list of registered handlers.
"""
def __init__(self, handler: Callable):
"""Initializes the Feature Validator Method.
Parameters
----------
handler: Callable
The handler that will be called by default if suitable one not found.
"""
if not handler:
raise ValueError("Default handler is not specified.")
self._default_handler = handler
self._handlers = {}
self._data = None
def register(
self, condition: Union[Tuple, Dict[str, Any]], handler: Callable
) -> None:
"""Registers new handler.
Parameters
-----------
condition: Union[Tuple, Dict[str, Any]]
The condition which will be used to register a new handler.
handler: Callable
The handler to be registered.
Returns
-------
None
Nothing.
Raises
------
ValueError
If condition not provided or provided in the wrong format.
If handler not provided or has wrong format.
"""
if not condition:
raise ValueError("Condition not provided.")
if not isinstance(condition, (dict, tuple)):
raise ValueError(
"Wrong format for the condition. Condition should be dict or list."
)
if not handler or not callable(handler):
raise ValueError("Handler not provided. Handler should be a function.")
prepared_condition = _prepare_condition(condition)
        # self._validate_handler_signature(condition, handler)
self._handlers[prepared_condition] = handler
self._data = None
def unregister(self, condition: Union[Tuple, Dict[str, Any]]) -> None:
"""Unregisters existing handler.
Parameters
-----------
condition: Union[Tuple, Dict[str, Any]]
The condition which will be used to unregister a handler.
Returns
-------
None
Nothing.
Raises
------
ValueError
If condition not provided or provided in the wrong format.
If condition not registered.
"""
if not condition:
raise ValueError("Condition not provided.")
if not isinstance(condition, (dict, tuple)):
raise ValueError(
"Wrong format for the condition. Condition should be dict or list."
)
prepared_condition = _prepare_condition(condition)
if prepared_condition not in self._handlers:
raise ValueError("Condition not registered.")
del self._handlers[prepared_condition]
def registered(self) -> pd.DataFrame:
"""Gets the list of registered handlers.
Returns
-------
pd.DataFrame
            The list of registered handlers.
"""
result = [("()", self._default_handler.__name__)]
for key, value in self._handlers.items():
try:
str_key = str(dict(key))
except ValueError:
str_key = str(key)
result.append((str_key, value.__name__))
return pd.DataFrame(result, columns=["Condition", "Handler"])
def _process(self, *args, **kwargs) -> pd.Series:
"""Finds and invokes a suitable handler for the provided condition.
Parameters
----------
*args
Variable length argument list.
**kwargs
Arbitrary keyword arguments. Parameters to search suitable handler.
Returns
-------
pd.Series
The result of invoked handler.
"""
if kwargs:
for key in (
tuple((key, kwargs[key]) for key in kwargs),
tuple(kwargs.keys()),
):
if key in self._handlers:
return self._handlers[key](self._data, *args, **kwargs)
return self._default_handler(self._data, *args, **kwargs)
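    # Dispatch order illustrated (handlers and values here are hypothetical; the
    # public call path is presumably a __call__ wrapper around _process defined
    # elsewhere in the class):
    #   validator(data, country_code='+1')  # exact (key, value) match wins
    #   validator(data, country_code='+7')  # falls back to a key-name-only match
    #   validator(data)                     # no kwargs -> default handler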
def _bind_data(self, data: pd.Series) -> None:
"""Binds the data to the validator.
Parameters
----------
data: pd.Series
The data to be processed.
"""
self._data = data
def _validate_handler_signature(
self, condition: Union[Tuple, Dict[str, Any]], handler: Callable
) -> bool:
"""Validates handler signature.
Parameters
----------
condition: Union[Tuple, Dict[str, Any]]
The condition to validate.
        handler: Callable
            The handler to validate.
Returns
-------
bool
True if provided condition and handler arguments compatible.
Raises
-------
WrongHandlerMethodSignature
            If the provided condition and handler arguments are not compatible.
        """
# Repository: renato2099/weld
import pandas as pd
import grizzly_impl
from lazy_op import LazyOpResult, to_weld_type
from weld.weldobject import *
import utils
class SeriesWeld(LazyOpResult):
"""Summary
Attributes:
column_name (TYPE): Description
df (TYPE): Description
dim (int): Description
expr (TYPE): Description
weld_type (TYPE): Description
"""
def __init__(self, expr, weld_type, df=None, column_name=None, index_type=None, index_name=None):
"""Summary
TODO: Implement an actual Index Object like how Pandas does
Args:
expr (TYPE): Description
weld_type (TYPE): Description
df (None, optional): Description
column_name (None, optional): Description
"""
self.expr = expr
self.weld_type = weld_type
self.dim = 1
self.df = df
self.column_name = column_name
self.index_type = index_type
self.index_name = index_name
def __getitem__(self, key):
"""Summary
Args:
predicates (TYPE): Description
new_value (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(key, slice):
start = key.start
# TODO : We currently do nothing with step
step = key.step
stop = key.stop
if self.index_type is not None:
index_expr = grizzly_impl.get_field(self.expr, 0)
column_expr = grizzly_impl.get_field(self.expr, 1)
zip_expr = grizzly_impl.zip_columns([index_expr, column_expr])
sliced_expr = grizzly_impl.slice_vec(zip_expr, start, stop)
unzip_expr = grizzly_impl.unzip_columns(
sliced_expr,
[self.index_type, self.weld_type]
)
return SeriesWeld(
unzip_expr,
self.weld_type,
self.df,
self.column_name,
self.index_type,
self.index_name
)
else:
return SeriesWeld(
grizzly_impl.slice_vec(
self.expr,
start,
stop
)
)
else:
# By default we return as if the key were predicates to filter by
return self.filter(key)
def __setitem__(self, predicates, new_value):
"""Summary
Args:
predicates (TYPE): Description
new_value (TYPE): Description
Returns:
TYPE: Description
"""
if self.df is not None and self.column_name is not None:
self.df[self.column_name] = self.mask(predicates, new_value)
@property
def loc(self):
return WeldLocIndexer(
self
)
def __getattr__(self, key):
"""Summary
Args:
key (TYPE): Description
Returns:
TYPE: Description
Raises:
Exception: Description
"""
if key == 'str' and self.weld_type == WeldVec(WeldChar()):
return StringSeriesWeld(
self.expr,
self.weld_type,
self.df,
self.column_name
)
raise AttributeError("Attr %s does not exist" % key)
@property
def index(self):
if self.index_type is not None:
return SeriesWeld(
grizzly_impl.get_field(
self.expr,
0
),
self.index_type,
self.df,
self.index_name
)
        # TODO : Make all series have an index attribute
raise Exception("No index present")
def evaluate(self, verbose=False, passes=None):
if self.index_type is not None:
index, column = LazyOpResult(
self.expr,
WeldStruct([WeldVec(self.index_type), WeldVec(self.weld_type)]),
0
).evaluate(verbose=verbose, passes=passes)
series = pd.Series(column, index)
series.index.rename(self.index_name, True)
return series
else:
column = LazyOpResult.evaluate(self, verbose=verbose, passes=passes)
return pd.Series(column)
def sort_values(self, ascending=False):
""" Sorts the values of this series
"""
if self.index_type is not None:
index_expr = grizzly_impl.get_field(self.expr, 0)
column_expr = grizzly_impl.get_field(self.expr, 1)
zip_expr = grizzly_impl.zip_columns([index_expr, column_expr])
result_expr = grizzly_impl.sort(zip_expr, 1, self.weld_type, ascending)
unzip_expr = grizzly_impl.unzip_columns(
result_expr,
[self.index_type, self.weld_type]
)
return SeriesWeld(
unzip_expr,
self.weld_type,
self.df,
self.column_name,
self.index_type,
self.index_name
)
else:
result_expr = grizzly_impl.sort(self.expr)
# TODO need to finish this
def unique(self):
"""Summary
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.unique(
self.expr,
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def lower(self):
"""Summary
Returns:
TYPE: Description
"""
# TODO : Bug in nested map operating on strings
# TODO : Check that self.weld_type is a string type
vectype = self.weld_type
if isinstance(vectype, WeldVec):
elem_type = vectype.elemType
if isinstance(elem_type, WeldChar):
return SeriesWeld(
grizzly_impl.to_lower(
self.expr,
elem_type
),
self.weld_type,
self.df,
self.column_name
)
raise Exception("Cannot call to_lower on non string type")
def contains(self, string):
"""Summary
Returns:
TYPE: Description
"""
# Check that self.weld_type is a string type
vectype = self.weld_type
if isinstance(vectype, WeldVec):
elem_type = vectype.elemType
if isinstance(elem_type, WeldChar):
return SeriesWeld(
grizzly_impl.contains(
self.expr,
elem_type,
string
),
WeldBit(),
self.df,
self.column_name
)
raise Exception("Cannot call to_lower on non string type")
def isin(self, ls):
if isinstance(ls, SeriesWeld):
if self.weld_type == ls.weld_type:
return SeriesWeld(
grizzly_impl.isin(self.expr,
ls.expr,
self.weld_type),
WeldBit(),
self.df,
self.column_name
)
raise Exception("Cannot call isin on different typed list")
def prod(self):
"""Summary
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.aggr(
self.expr,
"*",
1,
self.weld_type
),
self.weld_type,
0
)
def sum(self):
"""Summary
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.aggr(
self.expr,
"+",
0,
self.weld_type
),
self.weld_type,
0
)
def max(self):
"""Summary
Returns:
TYPE: Description
"""
pass
def min(self):
"""Summary
Returns:
TYPE: Description
"""
pass
def count(self):
"""Summary
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.count(
self.expr,
self.weld_type
),
WeldInt(),
0
)
def mask(self, predicates, new_value):
"""Summary
Args:
predicates (TYPE): Description
new_value (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(predicates, SeriesWeld):
predicates = predicates.expr
return SeriesWeld(
grizzly_impl.mask(
self.expr,
predicates,
new_value,
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def filter(self, predicates):
if isinstance(predicates, SeriesWeld):
predicates = predicates.expr
return SeriesWeld(
grizzly_impl.filter(
self.expr,
predicates,
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def add(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"+",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def __sub__(self, other):
# TODO: subtraction without index variables
if self.index_type is not None:
index = grizzly_impl.get_field(self.expr, 0)
expr1 = grizzly_impl.get_field(self.expr, 1)
else:
expr1 = self.expr
if other.index_type is not None:
index2 = grizzly_impl.get_field(other.expr, 0)
expr2 = grizzly_impl.get_field(other.expr, 1)
else:
expr2 = other.expr
index_expr = LazyOpResult(index, self.index_type, 0)
sub_expr = SeriesWeld(
grizzly_impl.element_wise_op(
expr1,
expr2,
"-",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
index_sub_expr = utils.group([index_expr, sub_expr])
return SeriesWeld(
index_sub_expr.expr,
self.weld_type,
self.df,
self.column_name,
self.index_type,
self.index_name
)
# We also need to ensure that both indexes of the subtracted
# columns are compatible
def sub(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"-",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def mul(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"*",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def div(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"/",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def per_element_and(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"&&",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def mod(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if isinstance(other, SeriesWeld):
other = other.expr
return SeriesWeld(
grizzly_impl.element_wise_op(
self.expr,
other,
"%",
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
def __eq__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
"==",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __ne__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
"!=",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __gt__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
">",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __ge__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
if self.index_type is not None:
expr = grizzly_impl.get_field(self.expr, 1)
else:
expr = self.expr
return SeriesWeld(
grizzly_impl.compare(
expr,
other,
">=",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __lt__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
"<",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
def __le__(self, other):
"""Summary
Args:
other (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.compare(
self.expr,
other,
"<=",
self.weld_type
),
WeldBit(),
self.df,
self.column_name
)
class StringSeriesWeld:
"""Summary
Attributes:
column_name (TYPE): Description
df (TYPE): Description
dim (int): Description
expr (TYPE): Description
weld_type (TYPE): Description
"""
def __init__(self, expr, weld_type, df=None, column_name=None):
"""Summary
Args:
expr (TYPE): Description
weld_type (TYPE): Description
df (None, optional): Description
column_name (None, optional): Description
"""
self.expr = expr
self.weld_type = weld_type
self.dim = 1
self.df = df
self.column_name = column_name
def slice(self, start, size):
"""Summary
Args:
start (TYPE): Description
size (TYPE): Description
Returns:
TYPE: Description
"""
return SeriesWeld(
grizzly_impl.slice(
self.expr,
start,
size,
self.weld_type
),
self.weld_type,
self.df,
self.column_name
)
class WeldLocIndexer:
"""
Label location based indexer for selection by label for Series objects.
Attributes:
grizzly_obj (TYPE): The Series being indexed into.
"""
def __init__(self, grizzly_obj):
# If index_type field of grizzly_obj is None
# then we assume normal 0 - 1 indexing
self.grizzly_obj = grizzly_obj
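# Illustrative (hypothetical) use through SeriesWeld.loc:
#   subset = series.loc[label_series]   # keeps rows whose index labels appear in label_series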
def __getitem__(self, key):
if isinstance(self.grizzly_obj, SeriesWeld):
series = self.grizzly_obj
if isinstance(key, SeriesWeld):
if series.index_type is not None:
index_expr = grizzly_impl.get_field(series.expr, 0)
column_expr = grizzly_impl.get_field(series.expr, 1)
zip_expr = grizzly_impl.zip_columns([index_expr, column_expr])
predicate_expr = grizzly_impl.isin(index_expr, key.expr, series.index_type)
filtered_expr = grizzly_impl.filter(
zip_expr,
predicate_expr
)
unzip_expr = grizzly_impl.unzip_columns(
filtered_expr,
[series.index_type, series.weld_type]
)
return SeriesWeld(
unzip_expr,
series.weld_type,
series.df,
series.column_name,
series.index_type,
series.index_name
)
# TODO : Need to implement for | |
(0,5) : self.tech_level+1
elif self.government == 7 : self.tech_level+=2
elif self.government == 13 : self.tech_level-=2
elif self.government == 14 : self.tech_level-=2
else:
self.tech_level =0
self.tech_level=min(self.tech_level,max_tl)
# CTM-sequence (specialized technology levels)
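# Each specialized tech level below follows the same pattern: start from a parent TL,
# subtract a fixed offset, add a normalized dice roll, then clamp the result to [0, max_tl].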
if self.population > 0:
# tECCME (civilian: Energy, Computing, Communication, Medicine, Environment)
self.tech_level_civilian = min(max(self.tech_level-7+roll(6,6)//3,0),max_tl)
self.tech_level_civilian_energy = min(max(self.tech_level_civilian-3+roll(4,6)//2//2,0),max_tl)
self.tech_level_civilian_computing = min(max(self.tech_level_civilian-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_civilian_communication = min(max(self.tech_level_civilian-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_civilian_medicine = min(max(self.tech_level_civilian-3+roll(2,6)//1//2,0),max_tl)
self.tech_level_civilian_environment = min(max(self.tech_level_civilian-3+roll(4,6)//2//2,0),max_tl)
# tLWAS (transportation: Land, Water, Air, Space)
self.tech_level_transportation = min(max(self.tech_level-7+roll(6,6)//3,0),max_tl)
self.tech_level_transportation_land = min(max(self.tech_level_transportation-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_transportation_water = min(max(self.tech_level_transportation-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_transportation_air = min(max(self.tech_level_transportation-3+roll(4,6)//2//2,0),max_tl)
self.tech_level_transportation_space = min(max(self.tech_level_transportation-3+roll(4,6)//2//2,0),max_tl)
# tPPHH (military: Personal weapons, Personal armour, Heavy weapons, Heavy armour)
self.tech_level_military = min(max(self.tech_level-7+roll(6,6)//3,0),max_tl)
self.tech_level_military_personalweapons = min(max(self.tech_level_military-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_military_personalarmour = min(max(self.tech_level_military-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_military_heavyweapons = min(max(self.tech_level_military-3+roll(6,6)//3//2,0),max_tl)
self.tech_level_military_heavyarmour = min(max(self.tech_level_military-3+roll(6,6)//3//2,0),max_tl)
else:
# tECCME
self.tech_level_civilian = 0
self.tech_level_civilian_energy = 0
self.tech_level_civilian_computing = 0
self.tech_level_civilian_communication = 0
self.tech_level_civilian_medicine = 0
self.tech_level_civilian_environment = 0
# tLWAS
self.tech_level_transportation = 0
self.tech_level_transportation_land = 0
self.tech_level_transportation_water = 0
self.tech_level_transportation_air = 0
self.tech_level_transportation_space = 0
# tPPHH
self.tech_level_military = 0
self.tech_level_military_personalweapons = 0
self.tech_level_military_personalarmour = 0
self.tech_level_military_heavyweapons = 0
self.tech_level_military_heavyarmour = 0
def newPBJ(self):
# PBJ-sequence (resources)
if self.population==0: self.population_mod=0
self.asteroid_belts =max(roll(2,6)//2-3,0)
if self.size==0:
self.asteroid_belts+=1
self.jovian_planets =max(roll(2,6)-5,0)
def newWDITTP(self):
# WDITTP-sequence (specialized law levels)
if self.population > 0:
self.law_level_weapons = self.law_level-2+roll(1,6)//2
self.law_level_drugs = self.law_level-2+roll(1,6)//2
self.law_level_information = self.law_level-2+roll(1,6)//2
self.law_level_technology = self.law_level-2+roll(1,6)//2
self.law_level_travellers = self.law_level
self.law_level_powers = self.law_level-2+roll(1,6)//2
# Government-ammended WDITTP-sequence
if self.government in [0,10]:
self.law_level -= 1
if self.government in [1,3,4,5,6,8,9,11,12]:
self.law_level_weapons += roll(1,6)
if self.government in [1,2,4,8,9]:
self.law_level_drugs += roll(1,6)
if self.government in [5,9,11]:
self.law_level_information+= roll(1,6)
if self.government in [1,3,5,6,9,11]:
self.law_level_technology += roll(1,6)
if self.government in [1,3,6,9]:
self.law_level_travellers += roll(1,6)
if self.government in [1,3,4,9]:
self.law_level_powers += roll(1,6)
if self.government>=13 or self.government==7:
for i in range(roll(1,6)-1):
# pick a specialized law level at random and bump it (setattr/getattr instead of exec on strings)
es=random.choice(("law_level_weapons",
                  "law_level_drugs",
                  "law_level_information",
                  "law_level_technology",
                  "law_level_travellers",
                  "law_level_powers"))
setattr(self, es, getattr(self, es)+(roll(2,6)+1)//2)
self.law_level = self.law_level_travellers
else:
self.law_level_weapons = 0
self.law_level_drugs = 0
self.law_level_information = 0
self.law_level_technology = 0
self.law_level_travellers = 0
self.law_level_powers = 0
# clamp the specialized law levels to a minimum of zero
self.law_level_weapons = max(self.law_level_weapons ,0)
self.law_level_drugs = max(self.law_level_drugs ,0)
self.law_level_information= max(self.law_level_information,0)
self.law_level_technology = max(self.law_level_technology ,0)
self.law_level_travellers = max(self.law_level_travellers ,0)
self.law_level_powers = max(self.law_level_powers ,0)
pass
def newInfo(self,quirk_chance):
# travel code
self.travel_code=" "
if self.population > 0 \
and self.populated \
and (self.government == 0 \
or self.government == 7 \
or self.government == 10 \
or self.law_level == 0 \
or self.law_level_weapons == 0 \
or self.law_level_information >= 9 \
or self.law_level_technology >= 9 \
or self.law_level_travellers >= 9 \
or self.atmosphere >= 10 ):
self.travel_code="a"
self.trade_codes = self.getTradeCodes()
if self.population>0 \
and quirk_chance<random.randrange(100):
self.quirk = self.getQuirk()
else:
self.quirk = ""
pass
def getTradeCodes(self):
rc = ''
from traceback import print_exc
from sys import argv
path = argv[0][:argv[0].rfind("\\")+1]
NAME = 'name'
TAG = 'tag'
SIZE = 'size'
ATMOSPHERE = 'atmosphere'
HYDROGRAPHICS = 'hydrographics'
POPULATION = 'population'
GOVERNMENT = 'government'
LAWLEVEL = 'lawlevel'
TECHLEVEL = 'techlevel'
for tradecode in self.TRADECODES['tradecode']:
try:
tcode = True
for requirement in tradecode['requirements'].keys():
if requirement.endswith('__info') \
or requirement == XML_Parse.CDATA \
or requirement == XML_Parse.ATTR_TAG\
or tradecode['requirements'][requirement][XML_Parse.CDATA] == None \
or tradecode['requirements'][requirement][XML_Parse.CDATA].strip() == '' :
continue
else:
req_info = tradecode['requirements'][requirement][XML_Parse.CDATA]
if requirement == SIZE:
req = splitup( req_info )
if self.size in req: pass
else: tcode = False
elif requirement == ATMOSPHERE:
req = splitup( req_info )
if self.atmosphere in req: pass
else: tcode = False
elif requirement == HYDROGRAPHICS:
req = splitup( req_info )
if self.hydrographics in req: pass
else: tcode = False
elif requirement == POPULATION:
req = splitup( req_info )
if self.population in req: pass
else: tcode = False
elif requirement == GOVERNMENT:
req = splitup( req_info )
if self.government in req: pass
else: tcode = False
elif requirement == LAWLEVEL:
req = splitup( req_info )
if self.law_level in req: pass
else: tcode = False
elif requirement == TECHLEVEL:
req = splitup( req_info )
if self.tech_level in req: pass
else: tcode = False
# print( ' ' + tradecode['name'] + ' is ' + str(tcode) )
if tradecode[XML_Parse.ATTR_TAG][TAG]=="As" and self.type=="satellite": tcode=False
if tradecode[XML_Parse.ATTR_TAG][TAG]=="Ga" and abs(self.GoldieDist)>10: tcode=False
if tradecode[XML_Parse.ATTR_TAG][TAG]=="Ic" and self.GoldieDist>12 and self.hydrographics>0: tcode=True
if tcode:
rc += " " + tradecode[XML_Parse.ATTR_TAG][TAG]
except Exception as e:
print(tradecode[XML_Parse.ATTR_TAG][TAG])
print_exc()#print(e)
return rc
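# For reference, getTradeCodes() expects each entry in self.TRADECODES['tradecode'] to carry
# a 'requirements' mapping plus tag/name attributes. An illustrative sketch of the source XML
# (element names and value ranges are assumptions, not taken from the real data file):
#   <tradecode name="Agricultural" tag="Ag">
#     <requirements>
#       <atmosphere>4,5,6,7,8,9</atmosphere>
#       <hydrographics>4,5,6,7,8</hydrographics>
#       <population>5,6,7</population>
#     </requirements>
#   </tradecode>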
def getTag(self):
r = roll(1,100)
rc = ''
tags = [ 'Abandoned Colony', \
'Alien Ruins', \
'Altered Humanity', \
'Anthromorphs', \
'Battleground', \
'Bubble Cities', \
'Cheap Life', \
'Civil War', \
'Cold War', \
'Colony', \
'Cyclical Doom', \
'Doomed World', \
'Dying Race', \
'Eugenics Cult', \
'Feral World', \
'Flying Cities', \
'Forbidden Tech', \
'Freak Geology', \
'Freak Weather', \
'Friendly Foe', \
'Gold Rush', \
'Great Work', \
'Hatred', \
'Hivemind', \
'Holy War', \
'Hostile Biosphere', \
'Hostile Space', \
'Immortals', \
'Local Specialty', \
'Local Tech', \
'Major Spaceyard', \
'Megacorps', \
'Mercenaries', \
'Minimal Contact', \
'Misandry/Misogyny', \
'Night World', \
'Nomads', \
'Out of Contact', \
'Outpost World', \
'Pilgrimage Site', \
'Pleasure World', \
'Police State', \
'Post-Scarcity', \
'Tech Cultists', \
'Primitive Aliens', \
'Quarantined World', \
'Radioactive World', \
'Refugees', \
'Regional Hegemon', \
'Restrictive Laws', \
'Revanchists', \
'Revolutionaries', \
'Rigid Culture', \
'Rising Hegemon', \
'Ritual Combat', \
'Robots', \
'Seagoing Cities', \
'Sealed Menace', \
'Secret Masters', \
'Sectarians', \
'Seismic Instability', \
'Shackled World', \
'Societal Despair', \
'Sole Supplier', \
'Taboo Treasure', \
'Terraform Failure', \
'Tomb World', \
'Unshackled AI', \
'Urbanized Surface', \
'Utopia', \
'Xenophiles', \
'Xenophobes', \
'Zombies' ]
rc = random.choice( tags )
return rc
def getQuirk(self):
rc = ''
from traceback import print_exc
from sys import argv
dom_quirk=random.choice(self.CULTURE['quirk'])
rc+=dom_quirk[XML_Parse.ATTR_TAG]['index']
return rc
def newOrbitInfo(self):
year=365.25
week=7
day =1
self.isTideLocked=False
if self.type=="satellite":
# position in orbit
if self.isMainworld:
self.parent.pos=0
self.pos=0
else:
self.pos=random.randrange(73)*5
# orbital period
if self.band.startswith("near" ):
self.orbital_period=1.0
elif self.band.startswith("mid" ):
if roll(1,6)<=1: retrograde= 1.00
else: retrograde=-1.25
self.orbital_period=year*max(self.parent.gravity,1)/10*(3+roll(2,6))/10*retrograde
elif self.band.startswith("far" ):
if roll(1,6)<=3: retrograde= 1.00
else: retrograde=-1.25
self.orbital_period=year*max(self.parent.gravity*1.1,1)/10*(roll(1,2)*0.5+0.15)*retrograde
elif self.band.startswith("rogue"):
if roll(1,6)<=5: retrograde= 1.00
else: retrograde=-1.25
self.orbital_period=year*max(self.parent.gravity*1.5,1)/10*(roll(1,2)*0.15+0.15)*retrograde
else:
self.orbital_period=1.0
# length of day
if self.band.startswith("near" ):
self.isTideLocked =True
self.rotations_per_orbit=1.0
elif self.band.startswith("mid" ):
if self.parent.gravity+roll(2,6) > 12:
self.isTideLocked=True
self.rotations_per_orbit=1.0
else:
self.rotations_per_orbit=day*32*(3+roll(2,6))/10
elif self.band.startswith("far" ):
if self.parent.gravity+roll(2,6) > 16:
self.isTideLocked=True
self.rotations_per_orbit=1.0
else:
self.rotations_per_orbit=day*32*(3+roll(2,6))/7
elif self.band.startswith("rogue"):
if self.parent.gravity+roll(2,6) > 20:
self.isTideLocked=True
self.rotations_per_orbit=1.0
else:
self.rotations_per_orbit=day*32*(3+roll(2,6))/5
# traversal
self.weekly_traversal=360*(7/self.orbital_period)
else:
# re-get orbit
self.orbit=int(self.AUtoStar*100)
# position in orbit
if self.isMainworld:
self.pos=0
else:
self.pos=random.randrange(73)*5
# orbital period
if self.band.startswith("near" ): self.orbital_period=year*self.AUtoStar*(2+roll(2,4))/10
elif self.band.startswith("mid" ): self.orbital_period=year*self.AUtoStar*(7+roll(1,6))/10
elif self.band.startswith("far" ):
if roll(1,6)<=1: retrograde= 1.00
else: retrograde=-1.25
self.orbital_period=year*self.AUtoStar*(2+roll(2,6))*retrograde
elif self.band.startswith("rogue"):
if roll(1,6)<=2: retrograde= 1.00
else: retrograde=-1.25
self.orbital_period=year*self.AUtoStar*(4+4*roll(4,6))*retrograde
else:
self.orbital_period=1.0
# length of day
if self.band.startswith("near" ):
if roll(2,6) > 5:
self.isTideLocked=True
self.rotations_per_orbit=1.0
else:
self.rotations_per_orbit=day*self.AUtoStar*roll( 1,6)
elif self.band.startswith("mid" ):
if not self.isGasGiant and roll(2,6) > 9:
self.isTideLocked=True
self.rotations_per_orbit=1.0
else:
self.rotations_per_orbit=day*self.AUtoStar*roll(10,6)*10
elif self.band.startswith("far" ):
if not self.isGasGiant and roll(2,6) > 11:
self.isTideLocked=True
self.rotations_per_orbit=1.0
else:
self.rotations_per_orbit=day*self.AUtoStar*roll( 3,6)*100
elif self.band.startswith("rogue"):
self.rotations_per_orbit=day*self.AUtoStar*roll( 3,6)*100
# traversal
self.weekly_traversal=360*(7/self.orbital_period)
if self.atmosphere==15:
if self.hydrographics==10:
self.comment +="Atmo:\"Panthalassic world (>85% water by volume)\","
elif not self.isTideLocked and roll(1,6)==2:
self.comment +="Atmo:\"Constant Storms (pressure changes wildly)\","
elif not self.isTideLocked and self.size>2 and self.size<7:
self.comment +="Atmo:\"Ellipsoidal Atmosphere (viable density only at equator)\","
pass
def load(self,uwppp):
# #
## ref: M 49 - X420000-0 KN3 00000 0-00000 0-0000 0-0000 00000 |n|noname I|n| |a| Ba De Po |o|143.18|1.00|0.00|o| |c|---|c|
# create pointer
p=0
# Mainworld Status
self.isMainworld=False
p+=0
length=1
marker=uwppp[p:p+length]
if marker in ("M","m"): self.isMainworld=True
if marker in ("•","M"): self.type="planet"
else: self.type="satellite"
# AUtoStar
p+=length
length=4
if self.type!="satellite":
self.AUtoStar = float( uwppp[p:p+length] ) / 100
self.orbit = int( float( uwppp[p:p+length] ) )
else:
self.AUtoStar = -1
self.orbit = int( float( uwppp[p:p+length] ) )
# UWP
p+=length+3
length=9
uwp=uwppp[p:p+length]
if uwp.lower().startswith("sgg"):
self.isGasGiant=True
self.isGasGiantSmall=True
self.size =int( self.config.get("GAS GIANTS","size (sgg)") )
self.gravity = self.size
self.atmosphere =int( self.config.get("GAS GIANTS","atmosphere") )
self.hydrographics =int( self.config.get("GAS GIANTS","hydrographics") )
self.population =0
self.government =0
self.law_level =0
self.tech_level =0
elif uwp.lower().startswith("lgg"):
self.size =int( self.config.get("GAS GIANTS","size (lgg)") )
self.gravity =self.size
self.atmosphere =int( self.config.get("GAS GIANTS","atmosphere") )
self.hydrographics =int( self.config.get("GAS GIANTS","hydrographics") )
self.population =0
self.government =0
self.law_level =0
self.tech_level =0
else:
self.starport = uwp[ 0: 1].lower()
self.size = uwp[ 1: 2].lower()
self.atmosphere =findPosInList(self.HEX_EXPANDED,uwp[ 2: 3].lower())[0]
self.hydrographics =findPosInList(self.HEX_EXPANDED,uwp[ 3: 4].lower())[0]
self.population =findPosInList(self.HEX_EXPANDED,uwp[ | |
<filename>vst_sim/src/vstsim/grasping/grasp_sampler.py
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import copy
import logging
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
# import os, IPython, sys
import math
import random
import time
import scipy.stats as stats
from vstsim.visualization.GL_visualizer3d import GL_Visualizer
try:
import pcl
except ImportError:
logging.warning('Failed to import pcl!')
import vstsim
import itertools as it
import multiprocessing
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import datetime
import pickle
from vstsim.grasping import Grasp, Contact3D, ParallelJawPtGrasp3D, PointGraspMetrics3D, \
VacuumPoint, GraspQuality_Vacuum, DexterousVacuumPoint, DexterousQuality_Vacuum, \
ChameleonTongueContact, ChameleonTongue_Quality
from vstsim.grasping import GraspInfo, GraspInfo_TongueGrasp
from vstsim.grasping import math_robot
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
from autolab_core import RigidTransform
import scipy
# create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
USE_OPENRAVE = True
try:
import openravepy as rave
except ImportError:
logger.warning('Failed to import OpenRAVE')
USE_OPENRAVE = False
try:
import rospy
import moveit_commander
except ImportError:
logger.warning("Failed to import rospy, you can't grasp now.")
try:
from mayavi import mlab
except ImportError:
mlab = []
logger.warning('Mayavi is not installed, please set vis to False')
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, <EMAIL>, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
"""
Classes for sampling grasps.
Author: <NAME>
"""
"""
Modified by: <NAME>
Email : <EMAIL>
Date : 23/02/2020 09:53
"""
color_ls = np.ones((14, 3), dtype=np.int32)
color_ls[0, :] = np.array([255, 0, 0]) # Red
color_ls[1, :] = np.array([60, 180, 75]) # Green
color_ls[2, :] = np.array([255, 225, 25]) # Yellow
color_ls[3, :] = np.array([0, 130, 200]) # Blue
color_ls[4, :] = np.array([245, 130, 48]) # Orange
color_ls[5, :] = np.array([145, 30, 180]) # Purple
color_ls[6, :] = np.array([70, 240, 240]) # Cyan
color_ls[7, :] = np.array([240, 50, 230]) # Magenta
color_ls[8, :] = np.array([210, 245, 60]) # Lime
color_ls[9, :] = np.array([250, 190, 190]) # Pink
color_ls[10, :] = np.array([0, 128, 128]) # Teal
color_ls[11, :] = np.array([128, 0, 0]) # Maroon
color_ls[12, :] = np.array([128, 128, 0]) # Olive
color_ls[13, :] = np.array([0, 0, 128]) # Navy
# class GraspSampler(metaclass=ABCMeta):
class GraspSampler:
""" Base class for various methods to sample a number of grasps on an object.
Should not be instantiated directly.
Attributes
----------
gripper : :obj:`RobotGripper`
the gripper to compute grasps for
config : :obj:`YamlConfig`
configuration for the grasp sampler
"""
__metaclass__ = ABCMeta
def __init__(self, gripper, config):
self.gripper = gripper
self._configure(config)
def _configure(self, config):
""" Configures the grasp generator."""
#########################################
if 'sampling_friction_coef' in list(config.keys()):
self.friction_coef = config['sampling_friction_coef']
else:
self.friction_coef = 2.0
#########################################
if 'num_cone_faces' in list(config.keys()):
self.num_cone_faces = config['num_cone_faces']
else:
self.num_cone_faces = 8
#########################################
if 'grasp_samples_per_surface_point' in list(config.keys()):
self.num_samples = config['grasp_samples_per_surface_point']
else:
self.num_samples = 1
#########################################
if 'target_num_grasps' in list(config.keys()):
self.target_num_grasps = config['target_num_grasps']
else:
self.target_num_grasps = 1
#########################################
if self.target_num_grasps is None:
self.target_num_grasps = 1
#########################################
if 'min_contact_dist' in list(config.keys()):
self.min_contact_dist = config['min_contact_dist']
else:
self.min_contact_dist = 0.0
#########################################
if 'num_grasp_rots' in list(config.keys()):
self.num_grasp_rots = config['num_grasp_rots']
else:
self.num_grasp_rots = 0.0
###########################################
###########################################
# parameters for virtual camera
#########################################
if 'back_up_dis' in list(config.keys()):
self.back_up_dis = config['back_up_dis']
else:
self.back_up_dis = 1
#########################################
if 'max_projection_dis' in list(config.keys()):
self.max_projection_dis = config['max_projection_dis']
else:
self.max_projection_dis = 1
#########################################
if 'num_projection_steps' in list(config.keys()):
self.num_projection_steps = config['num_projection_steps']
else:
self.num_projection_steps = 20
#########################################
if 'resolution_pc' in list(config.keys()):
self.resolution_pc = config['resolution_pc']
else:
self.resolution_pc = 24
#########################################
if 'angle_range_max' in list(config.keys()):
self.angle_range = config['angle_range_max']
else:
self.angle_range = 30.0
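# note: this block also keys on 'angle_range_max'; a separate 'angle_range' config option,
# if one was intended, is never read here.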
#########################################
if 'angle_range_max' in list(config.keys()):
self.angle_range_max = config['angle_range_max']
else:
self.angle_range_max = 5.0
#########################################
if 'angle_range_min' in list(config.keys()):
self.angle_range_min = config['angle_range_min']
else:
self.angle_range_min = 1.0
#########################################
if 'num_angle_steps' in list(config.keys()):
self.num_angle_steps = config['num_angle_steps']
else:
self.num_angle_steps = 5
#########################################
if 'scale_obj' in list(config.keys()):
self.scale_obj = config['scale_obj']
else:
self.scale_obj = 1.0
#########################################
if 'dim_grasp_matrix' in list(config.keys()):
self.dim_grasp_matrix = config['dim_grasp_matrix']
else:
self.dim_grasp_matrix = 100
#########################################
if 'max_num_surface_points' in list(config.keys()):
self.max_num_surface_points_ = config['max_num_surface_points']
else:
self.max_num_surface_points_ = 100
#########################################
if 'grasp_dist_thresh' in list(config.keys()):
self.grasp_dist_thresh_ = config['grasp_dist_thresh']
else:
self.grasp_dist_thresh_ = 0
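# Design note: each block above mirrors dict-style defaulting. Assuming the config object
# behaves like a mapping, an equivalent one-liner would be, for example:
#   self.grasp_dist_thresh_ = config.get('grasp_dist_thresh', 0)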
@abstractmethod
def sample_grasps(self, graspable, num_grasps_generate, vis, **kwargs):
"""
Create a list of candidate grasps for a given object.
Must be implemented for all grasp sampler classes.
Parameters
---------
graspable : :obj:`GraspableObject3D`
object to sample grasps on
num_grasps_generate : int
vis : bool
"""
grasp = []
return grasp
# pass
def generate_grasps_stable_poses(self, graspable, stable_poses, target_num_grasps=None, grasp_gen_mult=5,
max_iter=3, sample_approach_angles=False, vis=False, **kwargs):
"""Samples a set of grasps for an object, aligning the approach angles to the object stable poses.
Parameters
----------
graspable : :obj:`GraspableObject3D`
the object to grasp
stable_poses : :obj:`list` of :obj:`meshpy.StablePose`
list of stable poses for the object with ids read from the database
target_num_grasps : int
number of grasps to return, defaults to self.target_num_grasps
grasp_gen_mult : int
number of additional grasps to generate
max_iter : int
number of attempts to return an exact number of grasps before giving up
sample_approach_angles : bool
whether or not to sample approach angles
vis : bool
Return
------
:obj:`list` of :obj:`ParallelJawPtGrasp3D`
list of generated grasps
"""
# sample dense grasps
unaligned_grasps = self.generate_grasps(graspable, target_num_grasps=target_num_grasps,
grasp_gen_mult=grasp_gen_mult,
max_iter=max_iter, vis=vis)
# align for each stable pose
grasps = {}
print(sample_approach_angles) # add by Liang
for stable_pose in stable_poses:
grasps[stable_pose.id] = []
for grasp in unaligned_grasps:
aligned_grasp = grasp.perpendicular_table(grasp)
grasps[stable_pose.id].append(copy.deepcopy(aligned_grasp))
return grasps
def generate_grasps(self, graspable, target_num_grasps=None, grasp_gen_mult=5, max_iter=3,
sample_approach_angles=False, vis=False, **kwargs):
"""Samples a set of grasps for an object.
Parameters
----------
graspable : :obj:`GraspableObject3D`
the object to grasp
target_num_grasps : int
number of grasps to return, defaults to self.target_num_grasps
grasp_gen_mult : int
number of additional grasps to generate
max_iter : int
number of attempts to return an exact number of grasps before giving up
sample_approach_angles : bool
whether or not to sample approach angles
vis : bool
whether to show the sampled grasps in a plot
Return
------
:obj:`list` of :obj:`ParallelJawPtGrasp3D`
list of generated grasps
"""
# get num grasps
if target_num_grasps is None:
target_num_grasps = self.target_num_grasps
num_grasps_remaining = target_num_grasps
grasps = []
k = 1
while num_grasps_remaining > 0 and k <= max_iter:
# SAMPLING: generate more than we need
num_grasps_generate = grasp_gen_mult * num_grasps_remaining
new_grasps = self.sample_grasps(graspable, num_grasps_generate, vis, **kwargs)
# COVERAGE REJECTION: prune grasps by distance
pruned_grasps = []
for grasp in new_grasps:
min_dist = np.inf
for cur_grasp in grasps:
dist = ParallelJawPtGrasp3D.distance(cur_grasp, grasp)
if dist < min_dist:
min_dist = dist
for cur_grasp in pruned_grasps:
dist = ParallelJawPtGrasp3D.distance(cur_grasp, grasp)
if dist < min_dist:
min_dist = dist
if min_dist >= self.grasp_dist_thresh_:
pruned_grasps.append(grasp)
# ANGLE EXPANSION sample grasp rotations around the axis
candidate_grasps = []
if sample_approach_angles:
for grasp in pruned_grasps:
# construct a set of rotated grasps
for i in range(self.num_grasp_rots):
rotated_grasp = copy.copy(grasp)
delta_theta = 0 # add by <NAME>
print("This function cannot be used yet, as delta_theta is not set. --<NAME>")
rotated_grasp.set_approach_angle(i * delta_theta)
candidate_grasps.append(rotated_grasp)
else:
candidate_grasps = pruned_grasps
# add to the current grasp set
grasps += candidate_grasps
logger.info('%d/%d grasps found after iteration %d.',
len(grasps), target_num_grasps, k)
grasp_gen_mult *= 2
num_grasps_remaining = target_num_grasps - len(grasps)
k += 1
# shuffle computed grasps
random.shuffle(grasps)
if len(grasps) > target_num_grasps:
logger.info('Truncating %d grasps to %d.',
len(grasps), target_num_grasps)
grasps = grasps[:target_num_grasps]
logger.info('Found %d grasps.', len(grasps))
return grasps
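# Hypothetical usage sketch (names are illustrative; a concrete subclass must implement
# sample_grasps):
#   sampler = SomeConcreteGraspSampler(gripper, config)
#   grasps = sampler.generate_grasps(graspable_obj, target_num_grasps=20, vis=False)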
def show_points(self, point, color='lb', scale_factor=.0005):
if color == 'b':
color_f = (0, 0, 1)
elif color == 'r':
color_f = (1, 0, 0)
elif color == 'g':
color_f = (0, 1, 0)
elif color == 'lb': # light blue
color_f = (0.22, 1, 1)
else:
color_f = (1, 1, 1)
if point.size == 3:  # vis for a single point; shape must be (3,), since shape (1, 3) does not work
point = point.reshape(3, )
mlab.points3d(point[0], point[1], point[2], color=color_f, scale_factor=scale_factor)
else: # vis | |
<filename>Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/courseware/tests/test_tabs.py
"""
Test cases for tabs.
"""
from unittest.mock import MagicMock, Mock, patch
import pytest
from crum import set_current_request
from django.contrib.auth.models import AnonymousUser
from django.http import Http404
from django.urls import reverse
from milestones.tests.utils import MilestonesTestCaseMixin
from edx_toggles.toggles.testutils import override_waffle_flag
from lms.djangoapps.courseware.tabs import (
CourseInfoTab,
CoursewareTab,
DatesTab,
ExternalDiscussionCourseTab,
ExternalLinkCourseTab,
ProgressTab,
get_course_tab_list
)
from lms.djangoapps.courseware.tests.helpers import LoginEnrollmentTestCase
from lms.djangoapps.courseware.views.views import StaticCourseTabView, get_static_tab_fragment
from openedx.core.djangolib.testing.utils import get_mock_request
from openedx.core.lib.courses import get_course_by_id
from openedx.features.course_experience import DISABLE_UNIFIED_COURSE_TAB_FLAG
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.tests.factories import InstructorFactory
from common.djangoapps.student.tests.factories import StaffFactory
from common.djangoapps.student.tests.factories import UserFactory
from common.djangoapps.util.milestones_helpers import (
add_course_content_milestone,
add_course_milestone,
add_milestone,
get_milestone_relationship_types
)
from xmodule import tabs as xmodule_tabs
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MIXED_MODULESTORE,
ModuleStoreTestCase,
SharedModuleStoreTestCase
)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.utils import TEST_DATA_DIR
from xmodule.modulestore.xml_importer import import_course_from_xml
class TabTestCase(SharedModuleStoreTestCase):
"""Base class for Tab-related test cases."""
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create(org='edX', course='toy', run='2012_Fall')
cls.fake_dict_tab = {'fake_key': 'fake_value'}
cls.books = None
def setUp(self):
super().setUp()
self.reverse = lambda name, args: "name/{}/args/{}".format(name, ",".join(str(a) for a in args))
def create_mock_user(self, is_staff=True, is_enrolled=True):
"""
Creates a mock user with the specified properties.
"""
user = UserFactory(is_staff=is_staff)
user.is_enrolled = is_enrolled
return user
def is_tab_enabled(self, tab, course, user):
"""
Returns true if the specified tab is enabled.
"""
return tab.is_enabled(course, user=user)
def set_up_books(self, num_books):
"""Initializes the textbooks in the course and adds the given number of books to each textbook"""
self.books = [MagicMock() for _ in range(num_books)]
for book_index, book in enumerate(self.books):
book.title = f'Book{book_index}'
self.course.textbooks = self.books
self.course.pdf_textbooks = self.books
self.course.html_textbooks = self.books
def check_tab(
self,
tab_class,
dict_tab,
expected_link,
expected_tab_id,
expected_name='same',
invalid_dict_tab=None,
):
"""
Helper method to verify a tab class.
'tab_class' is the class of the tab that is being tested
'dict_tab' is the raw dictionary value of the tab
'expected_link' is the expected value for the hyperlink of the tab
'expected_tab_id' is the expected value for the unique id of the tab
'expected_name' is the expected value for the name of the tab
'invalid_dict_tab' is an invalid dictionary value for the tab.
Can be 'None' if the given tab class does not have any keys to validate.
"""
# create tab
tab = tab_class(tab_dict=dict_tab)
# name is as expected
assert tab.name == expected_name
# link is as expected
assert tab.link_func(self.course, self.reverse) == expected_link
# verify active page name
assert tab.tab_id == expected_tab_id
# validate tab
assert tab.validate(dict_tab)
if invalid_dict_tab:
with pytest.raises(xmodule_tabs.InvalidTabsException):
tab.validate(invalid_dict_tab)
# check get and set methods
self.check_get_and_set_methods(tab)
# check to_json and from_json methods
self.check_tab_json_methods(tab)
# check equality methods
self.check_tab_equality(tab, dict_tab)
# return tab for any additional tests
return tab
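# Illustrative (hypothetical) call from a subclass test; the dict contents and link below
# are assumptions, not taken from a real tab test:
#   self.check_tab(CourseInfoTab, {'type': 'course_info', 'name': 'Home'},
#                  expected_link=self.reverse('info', args=[str(self.course.id)]),
#                  expected_tab_id='course_info', expected_name='Home')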
def check_tab_equality(self, tab, dict_tab):
"""Tests the equality methods on the given tab"""
assert tab == dict_tab  # test __eq__
ne_dict_tab = dict_tab
ne_dict_tab['type'] = 'fake_type'
assert tab != ne_dict_tab  # test __ne__: incorrect type
assert tab != {'fake_key': 'fake_value'}  # test __ne__: missing type
def check_tab_json_methods(self, tab):
"""Tests the json from and to methods on the given tab"""
serialized_tab = tab.to_json()
deserialized_tab = tab.from_json(serialized_tab)
assert serialized_tab == deserialized_tab
def check_can_display_results(
self,
tab,
expected_value=True,
for_authenticated_users_only=False,
for_staff_only=False,
for_enrolled_users_only=False
):
"""Checks can display results for various users"""
if for_staff_only:
user = self.create_mock_user(is_staff=True, is_enrolled=True)
assert expected_value == self.is_tab_enabled(tab, self.course, user)
if for_authenticated_users_only:
user = self.create_mock_user(is_staff=False, is_enrolled=False)
assert expected_value == self.is_tab_enabled(tab, self.course, user)
if not for_staff_only and not for_authenticated_users_only and not for_enrolled_users_only:
user = AnonymousUser()
assert expected_value == self.is_tab_enabled(tab, self.course, user)
if for_enrolled_users_only:
user = self.create_mock_user(is_staff=False, is_enrolled=True)
assert expected_value == self.is_tab_enabled(tab, self.course, user)
def check_get_and_set_methods(self, tab):
"""Test __getitem__ and __setitem__ calls"""
assert tab['type'] == tab.type
assert tab['tab_id'] == tab.tab_id
with pytest.raises(KeyError):
_ = tab['invalid_key']
self.check_get_and_set_method_for_key(tab, 'name')
self.check_get_and_set_method_for_key(tab, 'tab_id')
with pytest.raises(KeyError):
tab['invalid_key'] = 'New Value'
def check_get_and_set_method_for_key(self, tab, key):
"""Test __getitem__ and __setitem__ for the given key"""
old_value = tab[key]
new_value = 'New Value'
tab[key] = new_value
assert tab[key] == new_value
tab[key] = old_value
assert tab[key] == old_value
class TextbooksTestCase(TabTestCase):
"""Test cases for Textbook Tab."""
def setUp(self):
super().setUp()
self.set_up_books(2)
self.dict_tab = MagicMock()
self.course.tabs = [
xmodule_tabs.CourseTab.load('textbooks'),
xmodule_tabs.CourseTab.load('pdf_textbooks'),
xmodule_tabs.CourseTab.load('html_textbooks'),
]
self.num_textbook_tabs = sum(1 for tab in self.course.tabs if tab.type in [
'textbooks', 'pdf_textbooks', 'html_textbooks'
])
self.num_textbooks = self.num_textbook_tabs * len(self.books)
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_TEXTBOOK": True})
def test_textbooks_enabled(self):
type_to_reverse_name = {'textbook': 'book', 'pdftextbook': 'pdf_book', 'htmltextbook': 'html_book'}
num_textbooks_found = 0
user = self.create_mock_user(is_staff=False, is_enrolled=True)
for tab in xmodule_tabs.CourseTabList.iterate_displayable(self.course, user=user):
# verify all textbook type tabs
if tab.type == 'single_textbook':
book_type, book_index = tab.tab_id.split("/", 1)
expected_link = self.reverse(
type_to_reverse_name[book_type],
args=[str(self.course.id), book_index]
)
assert tab.link_func(self.course, self.reverse) == expected_link
assert tab.name.startswith(f'Book{book_index}')
num_textbooks_found = num_textbooks_found + 1
assert num_textbooks_found == self.num_textbooks
class StaticTabDateTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""Test cases for Static Tab Dates."""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
cls.page = ItemFactory.create(
category="static_tab", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="new_tab"
)
cls.course.tabs.append(xmodule_tabs.CourseTab.load('static_tab', name='New Tab', url_slug='new_tab'))
cls.course.save()
def test_logged_in(self):
self.setup_user()
url = reverse('static_tab', args=[str(self.course.id), 'new_tab'])
resp = self.client.get(url)
self.assertContains(resp, "OOGIE BLOOGIE")
def test_anonymous_user(self):
url = reverse('static_tab', args=[str(self.course.id), 'new_tab'])
resp = self.client.get(url)
self.assertContains(resp, "OOGIE BLOOGIE")
def test_invalid_course_key(self):
self.setup_user()
self.addCleanup(set_current_request, None)
request = get_mock_request(self.user)
with pytest.raises(Http404):
StaticCourseTabView().get(request, course_id='edX/toy', tab_slug='new_tab')
def test_get_static_tab_fragment(self):
self.setup_user()
course = get_course_by_id(self.course.id)
self.addCleanup(set_current_request, None)
request = get_mock_request(self.user)
tab = xmodule_tabs.CourseTabList.get_tab_by_slug(course.tabs, 'new_tab')
# Test render works okay
tab_content = get_static_tab_fragment(request, course, tab).content
assert str(self.course.id) in tab_content
assert 'static_tab' in tab_content
# Test when render raises an exception
with patch('lms.djangoapps.courseware.views.views.get_module') as mock_module_render:
mock_module_render.return_value = MagicMock(
render=Mock(side_effect=Exception('Render failed!'))
)
static_tab_content = get_static_tab_fragment(request, course, tab).content
assert 'this module is temporarily unavailable' in static_tab_content
class StaticTabDateTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests for the static tab dates of an XML course
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def setUp(self):
"""
Set up the tests
"""
super().setUp()
# The following XML test course (which lives at common/test/data/2014)
# is closed; we're testing that tabs still appear when
# the course is already closed
self.xml_course_key = self.store.make_course_key('edX', 'detached_pages', '2014')
import_course_from_xml(
self.store,
'test_user',
TEST_DATA_DIR,
source_dirs=['2014'],
static_content_store=None,
target_id=self.xml_course_key,
raise_on_failure=True,
create_if_not_present=True,
)
# this text appears in the test course's tab
# common/test/data/2014/tabs/8e4cce2b4aaf4ba28b1220804619e41f.html
self.xml_data = "static 463139"
self.xml_url = "8e4cce2b4aaf4ba28b1220804619e41f"
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_logged_in_xml(self):
self.setup_user()
url = reverse('static_tab', args=[str(self.xml_course_key), self.xml_url])
resp = self.client.get(url)
self.assertContains(resp, self.xml_data)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_anonymous_user_xml(self):
url = reverse('static_tab', args=[str(self.xml_course_key), self.xml_url])
resp = self.client.get(url)
self.assertContains(resp, self.xml_data)
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True})
class EntranceExamsTabsTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase, MilestonesTestCaseMixin):
"""
Validate tab behavior when dealing with Entrance Exams
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True})
def setUp(self):
"""
Test case scaffolding
"""
super().setUp()
self.course = CourseFactory.create()
self.instructor_tab = ItemFactory.create(
category="instructor", parent_location=self.course.location,
data="Instructor Tab", display_name="Instructor"
)
self.extra_tab_2 = ItemFactory.create(
category="static_tab", parent_location=self.course.location,
data="Extra Tab", display_name="Extra Tab 2"
)
self.extra_tab_3 = ItemFactory.create(
category="static_tab", parent_location=self.course.location,
data="Extra Tab", display_name="Extra Tab 3"
)
self.setup_user()
self.enroll(self.course)
self.user.is_staff = True
self.relationship_types = get_milestone_relationship_types()
self.addCleanup(set_current_request, None)
def test_get_course_tabs_list_entrance_exam_enabled(self):
"""
Unit Test: test_get_course_tabs_list_entrance_exam_enabled
"""
entrance_exam = ItemFactory.create(
category="chapter",
parent_location=self.course.location,
data="Exam Data",
display_name="Entrance Exam",
is_entrance_exam=True
)
milestone = {
'name': 'Test Milestone',
'namespace': f'{str(self.course.id)}.entrance_exams',
'description': 'Testing Courseware Tabs'
}
self.user.is_staff = False
self.course.entrance_exam_enabled = True
self.course.entrance_exam_id = str(entrance_exam.location)
milestone = add_milestone(milestone)
add_course_milestone(
str(self.course.id),
self.relationship_types['REQUIRES'],
milestone
)
add_course_content_milestone(
str(self.course.id),
str(entrance_exam.location),
self.relationship_types['FULFILLS'],
milestone
)
course_tab_list = get_course_tab_list(self.user, self.course)
assert len(course_tab_list) == 2
assert course_tab_list[0]['tab_id'] == 'courseware'
assert course_tab_list[0]['name'] == 'Entrance Exam'
def test_get_course_tabs_list_skipped_entrance_exam(self):
"""
Tests tab list is not limited if user is allowed to skip entrance exam.
"""
# create a user
student = UserFactory()
# log in as instructor and hit the skip-entrance-exam API in the instructor app
instructor = InstructorFactory(course_key=self.course.id)
self.client.logout()
self.client.login(username=instructor.username, password='<PASSWORD>')
url = reverse('mark_student_can_skip_entrance_exam', kwargs={'course_id': str(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': student.email,
})
assert response.status_code == 200
# log in again as student
self.client.logout()
self.login(self.email, self.password)
course_tab_list = get_course_tab_list(self.user, self.course)
assert len(course_tab_list) == 5
def test_course_tabs_list_for_staff_members(self):
"""
Tests tab list is not limited if user is member of staff
and has not passed entrance exam.
"""
# Login as member of staff
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
self.client.login(username=staff_user.username, password='<PASSWORD>')
course_tab_list = get_course_tab_list(staff_user, self.course)
assert len(course_tab_list) == 5
class TextBookCourseViewsTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""
Validate tab behavior when dealing with textbooks.
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super().setUp()
self.set_up_books(2)
self.setup_user()
self.enroll(self.course)
self.num_textbook_tabs = sum(1 for tab in self.course.tabs if tab.type in [
'textbooks', 'pdf_textbooks', 'html_textbooks'
])
self.num_textbooks = self.num_textbook_tabs * len(self.books)
def set_up_books(self, num_books):
"""Initializes the textbooks in the course and adds the given number of books to each textbook"""
self.books = [MagicMock() for _ in range(num_books)]
for book_index, book in enumerate(self.books):
book.title | |
assert sech(5*pi*I/4) == -sqrt(2)
assert sech(-5*pi*I/4) == -sqrt(2)
assert sech(pi*I/6) == 2/sqrt(3)
assert sech(-pi*I/6) == 2/sqrt(3)
assert sech(7*pi*I/6) == -2/sqrt(3)
assert sech(-5*pi*I/6) == -2/sqrt(3)
assert sech(pi*I/105) == 1/cos(pi/105)
assert sech(-pi*I/105) == 1/cos(pi/105)
assert sech(x*I) == 1/cos(x)
assert sech(k*pi*I) == 1/cos(k*pi)
assert sech(17*k*pi*I) == 1/cos(17*k*pi)
assert sech(n).is_real is True
def test_sech_series():
x = Symbol('x')
assert sech(x).series(x, 0, 10) == \
1 - x**2/2 + 5*x**4/24 - 61*x**6/720 + 277*x**8/8064 + O(x**10)
def test_sech_fdiff():
x = Symbol('x')
raises(ArgumentIndexError, lambda: sech(x).fdiff(2))
def test_asinh():
x, y = symbols('x,y')
assert asinh(x) == asinh(x)
assert asinh(-x) == -asinh(x)
#at specific points
assert asinh(nan) == nan
assert asinh( 0) == 0
assert asinh(+1) == log(sqrt(2) + 1)
assert asinh(-1) == log(sqrt(2) - 1)
assert asinh(I) == pi*I/2
assert asinh(-I) == -pi*I/2
assert asinh(I/2) == pi*I/6
assert asinh(-I/2) == -pi*I/6
# at infinites
assert asinh(oo) == oo
assert asinh(-oo) == -oo
assert asinh(I*oo) == oo
assert asinh(-I *oo) == -oo
assert asinh(zoo) == zoo
#properties
assert asinh(I *(sqrt(3) - 1)/(2**(S(3)/2))) == pi*I/12
assert asinh(-I *(sqrt(3) - 1)/(2**(S(3)/2))) == -pi*I/12
assert asinh(I*(sqrt(5) - 1)/4) == pi*I/10
assert asinh(-I*(sqrt(5) - 1)/4) == -pi*I/10
assert asinh(I*(sqrt(5) + 1)/4) == 3*pi*I/10
assert asinh(-I*(sqrt(5) + 1)/4) == -3*pi*I/10
# Symmetry
assert asinh(-S.Half) == -asinh(S.Half)
def test_asinh_rewrite():
x = Symbol('x')
assert asinh(x).rewrite(log) == log(x + sqrt(x**2 + 1))
def test_asinh_series():
x = Symbol('x')
assert asinh(x).series(x, 0, 8) == \
x - x**3/6 + 3*x**5/40 - 5*x**7/112 + O(x**8)
t5 = asinh(x).taylor_term(5, x)
assert t5 == 3*x**5/40
assert asinh(x).taylor_term(7, x, t5, 0) == -5*x**7/112
def test_asinh_fdiff():
x = Symbol('x')
raises(ArgumentIndexError, lambda: asinh(x).fdiff(2))
def test_acosh():
x = Symbol('x')
assert acosh(-x) == acosh(-x)
#at specific points
assert acosh(1) == 0
assert acosh(-1) == pi*I
assert acosh(0) == I*pi/2
assert acosh(Rational(1, 2)) == I*pi/3
assert acosh(Rational(-1, 2)) == 2*pi*I/3
assert acosh(nan) == nan
# at infinites
assert acosh(oo) == oo
assert acosh(-oo) == oo
assert acosh(I*oo) == oo + I*pi/2
assert acosh(-I*oo) == oo - I*pi/2
assert acosh(zoo) == zoo
assert acosh(I) == log(I*(1 + sqrt(2)))
assert acosh(-I) == log(-I*(1 + sqrt(2)))
assert acosh((sqrt(3) - 1)/(2*sqrt(2))) == 5*pi*I/12
assert acosh(-(sqrt(3) - 1)/(2*sqrt(2))) == 7*pi*I/12
assert acosh(sqrt(2)/2) == I*pi/4
assert acosh(-sqrt(2)/2) == 3*I*pi/4
assert acosh(sqrt(3)/2) == I*pi/6
assert acosh(-sqrt(3)/2) == 5*I*pi/6
assert acosh(sqrt(2 + sqrt(2))/2) == I*pi/8
assert acosh(-sqrt(2 + sqrt(2))/2) == 7*I*pi/8
assert acosh(sqrt(2 - sqrt(2))/2) == 3*I*pi/8
assert acosh(-sqrt(2 - sqrt(2))/2) == 5*I*pi/8
assert acosh((1 + sqrt(3))/(2*sqrt(2))) == I*pi/12
assert acosh(-(1 + sqrt(3))/(2*sqrt(2))) == 11*I*pi/12
assert acosh((sqrt(5) + 1)/4) == I*pi/5
assert acosh(-(sqrt(5) + 1)/4) == 4*I*pi/5
assert str(acosh(5*I).n(6)) == '2.31244 + 1.5708*I'
assert str(acosh(-5*I).n(6)) == '2.31244 - 1.5708*I'
def test_acosh_rewrite():
x = Symbol('x')
assert acosh(x).rewrite(log) == log(x + sqrt(x - 1)*sqrt(x + 1))
def test_acosh_series():
x = Symbol('x')
assert acosh(x).series(x, 0, 8) == \
-I*x + pi*I/2 - I*x**3/6 - 3*I*x**5/40 - 5*I*x**7/112 + O(x**8)
t5 = acosh(x).taylor_term(5, x)
assert t5 == - 3*I*x**5/40
assert acosh(x).taylor_term(7, x, t5, 0) == - 5*I*x**7/112
def test_acosh_fdiff():
x = Symbol('x')
raises(ArgumentIndexError, lambda: acosh(x).fdiff(2))
def test_asech():
x = Symbol('x')
assert asech(-x) == asech(-x)
# values at fixed points
assert asech(1) == 0
assert asech(-1) == pi*I
assert asech(0) == oo
assert asech(2) == I*pi/3
assert asech(-2) == 2*I*pi / 3
assert asech(nan) == nan
# at infinites
assert asech(oo) == I*pi/2
assert asech(-oo) == I*pi/2
assert asech(zoo) == I*AccumBounds(-pi/2, pi/2)
assert asech(I) == log(1 + sqrt(2)) - I*pi/2
assert asech(-I) == log(1 + sqrt(2)) + I*pi/2
assert asech(sqrt(2) - sqrt(6)) == 11*I*pi / 12
assert asech(sqrt(2 - 2/sqrt(5))) == I*pi / 10
assert asech(-sqrt(2 - 2/sqrt(5))) == 9*I*pi / 10
assert asech(2 / sqrt(2 + sqrt(2))) == I*pi / 8
assert asech(-2 / sqrt(2 + sqrt(2))) == 7*I*pi / 8
assert asech(sqrt(5) - 1) == I*pi / 5
assert asech(1 - sqrt(5)) == 4*I*pi / 5
assert asech(-sqrt(2*(2 + sqrt(2)))) == 5*I*pi / 8
# properties
# asech(x) == acosh(1/x)
assert asech(sqrt(2)) == acosh(1/sqrt(2))
assert asech(2/sqrt(3)) == acosh(sqrt(3)/2)
assert asech(2/sqrt(2 + sqrt(2))) == acosh(sqrt(2 + sqrt(2))/2)
assert asech(S(2)) == acosh(1/S(2))
# asech(x) == I*acos(1/x)
# (Note: the exact formula is asech(x) == +/- I*acos(1/x))
assert asech(-sqrt(2)) == I*acos(-1/sqrt(2))
assert asech(-2/sqrt(3)) == I*acos(-sqrt(3)/2)
assert asech(-S(2)) == I*acos(-S.Half)
assert asech(-2/sqrt(2)) == I*acos(-sqrt(2)/2)
# sech(asech(x)) / x == 1
assert expand_mul(sech(asech(sqrt(6) - sqrt(2))) / (sqrt(6) - sqrt(2))) == 1
assert expand_mul(sech(asech(sqrt(6) + sqrt(2))) / (sqrt(6) + sqrt(2))) == 1
assert (sech(asech(sqrt(2 + 2/sqrt(5)))) / (sqrt(2 + 2/sqrt(5)))).simplify() == 1
assert (sech(asech(-sqrt(2 + 2/sqrt(5)))) / (-sqrt(2 + 2/sqrt(5)))).simplify() == 1
assert (sech(asech(sqrt(2*(2 + sqrt(2))))) / (sqrt(2*(2 + sqrt(2))))).simplify() == 1
assert expand_mul(sech(asech((1 + sqrt(5)))) / ((1 + sqrt(5)))) == 1
assert expand_mul(sech(asech((-1 - sqrt(5)))) / ((-1 - sqrt(5)))) == 1
assert expand_mul(sech(asech((-sqrt(6) - sqrt(2)))) / ((-sqrt(6) - sqrt(2)))) == 1
# numerical evaluation
assert str(asech(5*I).n(6)) == '0.19869 - 1.5708*I'
assert str(asech(-5*I).n(6)) == '0.19869 + 1.5708*I'
def test_asech_series():
x = Symbol('x')
t6 = asech(x).taylor_term(6, x)
assert t6 == -5*x**6/96
assert asech(x).taylor_term(8, x, t6, 0) == -35*x**8/1024
def test_asech_rewrite():
x = Symbol('x')
assert asech(x).rewrite(log) == log(1/x + sqrt(1/x - 1) * sqrt(1/x + 1))
def test_asech_fdiff():
x = Symbol('x')
raises(ArgumentIndexError, lambda: asech(x).fdiff(2))
def test_acsch():
x = Symbol('x')
assert acsch(-x) == acsch(-x)
assert acsch(x) == -acsch(-x)
# values at fixed points
assert acsch(1) == log(1 + sqrt(2))
assert acsch(-1) == - log(1 + sqrt(2))
assert acsch(0) == zoo
assert acsch(2) == log((1+sqrt(5))/2)
assert acsch(-2) == - log((1+sqrt(5))/2)
assert acsch(I) == - I*pi/2
assert acsch(-I) == I*pi/2
assert acsch(-I*(sqrt(6) + sqrt(2))) == I*pi / 12
assert acsch(I*(sqrt(2) + sqrt(6))) == -I*pi / 12
assert acsch(-I*(1 + sqrt(5))) == I*pi / 10
assert acsch(I*(1 + sqrt(5))) == -I*pi / 10
assert acsch(-I*2 / sqrt(2 - sqrt(2))) == I*pi / 8
assert acsch(I*2 / sqrt(2 - sqrt(2))) == -I*pi / 8
assert acsch(-I*2) == I*pi / 6
assert acsch(I*2) == -I*pi / 6
assert acsch(-I*sqrt(2 + 2/sqrt(5))) == I*pi / 5
assert acsch(I*sqrt(2 + 2/sqrt(5))) == -I*pi / 5
assert acsch(-I*sqrt(2)) == I*pi / 4
assert acsch(I*sqrt(2)) == -I*pi / 4
assert acsch(-I*(sqrt(5)-1)) == 3*I*pi / 10
assert acsch(I*(sqrt(5)-1)) == -3*I*pi / 10
assert acsch(-I*2 / sqrt(3)) == I*pi / 3
assert acsch(I*2 / sqrt(3)) == -I*pi / 3
assert acsch(-I*2 / sqrt(2 + sqrt(2))) == 3*I*pi / 8
assert acsch(I*2 / sqrt(2 + sqrt(2))) == -3*I*pi / 8
assert acsch(-I*sqrt(2 - 2/sqrt(5))) == 2*I*pi / 5
assert acsch(I*sqrt(2 - 2/sqrt(5))) == -2*I*pi / 5
assert acsch(-I*(sqrt(6) - sqrt(2))) == 5*I*pi / 12
assert acsch(I*(sqrt(6) - sqrt(2))) == -5*I*pi / 12
assert acsch(nan) == nan
# properties
# acsch(x) == asinh(1/x)
assert acsch(-I*sqrt(2)) == asinh(I/sqrt(2))
assert acsch(-I*2 / sqrt(3)) == asinh(I*sqrt(3) / 2)
# acsch(x) == -I*asin(I/x)
assert acsch(-I*sqrt(2)) == -I*asin(-1/sqrt(2))
assert acsch(-I*2 / sqrt(3)) == -I*asin(-sqrt(3)/2)
# csch(acsch(x)) / x == 1
assert expand_mul(csch(acsch(-I*(sqrt(6) + sqrt(2)))) / (-I*(sqrt(6) + sqrt(2)))) == 1
assert expand_mul(csch(acsch(I*(1 + sqrt(5)))) / ((I*(1 + sqrt(5))))) == 1
assert (csch(acsch(I*sqrt(2 - 2/sqrt(5)))) / (I*sqrt(2 - 2/sqrt(5)))).simplify() == 1
assert (csch(acsch(-I*sqrt(2 - 2/sqrt(5)))) / (-I*sqrt(2 - 2/sqrt(5)))).simplify() == 1
# numerical evaluation
assert str(acsch(5*I+1).n(6)) == '0.0391819 - 0.193363*I'
assert str(acsch(-5*I+1).n(6)) == '0.0391819 + 0.193363*I'
def test_acsch_infinities():
assert acsch(oo) == 0
assert acsch(-oo) == 0
assert acsch(zoo) == 0
def test_acsch_rewrite():
x = Symbol('x')
assert acsch(x).rewrite(log) == log(1/x + sqrt(1/x**2 + 1))
def test_acsch_fdiff():
x = Symbol('x')
raises(ArgumentIndexError, lambda: acsch(x).fdiff(2))
def test_atanh():
x = Symbol('x')
#at specific points
assert atanh(0) == 0
assert atanh(I) == I*pi/4
assert atanh(-I) == -I*pi/4
assert atanh(1) == oo
assert atanh(-1) == -oo
assert atanh(nan) == nan
# at infinites
assert atanh(oo) == -I*pi/2
assert atanh(-oo) == I*pi/2
assert atanh(I*oo) == I*pi/2
assert atanh(-I*oo) == -I*pi/2
assert atanh(zoo) == I*AccumBounds(-pi/2, pi/2)
#properties
assert atanh(-x) == -atanh(x)
assert atanh(I/sqrt(3)) == I*pi/6
assert atanh(-I/sqrt(3)) == -I*pi/6
assert atanh(I*sqrt(3)) == I*pi/3
assert atanh(-I*sqrt(3)) == -I*pi/3
assert atanh(I*(1 + sqrt(2))) == 3*pi*I/8
assert atanh(I*(sqrt(2) - 1)) == pi*I/8
assert atanh(I*(1 - sqrt(2))) == -pi*I/8
assert atanh(-I*(1 + sqrt(2))) == -3*pi*I/8
assert atanh(I*sqrt(5 + 2*sqrt(5))) == 2*I*pi/5
assert atanh(-I*sqrt(5 + 2*sqrt(5))) == -2*I*pi/5
assert atanh(I*(2 - | |
DA: " + str(da))
tree = self.process_das([da])[0]
log_debug("RESULT: %s" % str(tree))
# append the tree to a t-tree document, if requested
if gen_doc:
zone = self.get_target_zone(gen_doc)
zone.ttree = tree.create_ttree()
zone.sentence = str(da)
# return the result
return tree
def init_slot_err_stats(self):
"""Initialize slot error statistics accumulator."""
self.slot_err_stats = SlotErrAnalyzer(self.validation_delex_slots)
def get_slot_err_stats(self):
"""Return current slot error statistics, as a string."""
return ("Slot error: %.6f (M: %d, S: %d, T: %d)" %
(self.slot_err_stats.slot_error(), self.slot_err_stats.missing,
self.slot_err_stats.superfluous, self.slot_err_stats.total))
@staticmethod
def load_from_file(model_fname):
"""Detect correct model type (plain/ensemble) and start loading."""
model_type = Seq2SeqGen # default to plain generator
with file_stream(model_fname, 'rb', encoding=None) as fh:
data = pickle.load(fh)
if isinstance(data, type):
model_type = data
return model_type.load_from_file(model_fname)
class Seq2SeqGen(Seq2SeqBase, TFModel):
"""A plain sequence-to-sequence generator (using encoder-decoder architecture
from TensorFlow)."""
def __init__(self, cfg):
"""Initialize the generator, fill in the configuration."""
Seq2SeqBase.__init__(self, cfg)
TFModel.__init__(self, scope_name='seq2seq_gen-' + cfg.get('scope_suffix', ''))
# extract the individual elements out of the configuration dict
self.emb_size = cfg.get('emb_size', 50)
self.batch_size = cfg.get('batch_size', 10)
self.dropout_keep_prob = cfg.get('dropout_keep_prob', 1)
self.optimizer_type = cfg.get('optimizer_type', 'adam')
self.passes = cfg.get('passes', 5)
self.min_passes = cfg.get('min_passes', 1)
self.improve_interval = cfg.get('improve_interval', 10)
self.top_k = cfg.get('top_k', 5)
# self.checkpoint_dir = cfg.get('checkpoint_dir', '/tmp/') # TODO fix (not used now)
self.use_dec_cost = cfg.get('use_dec_cost', False)
self.alpha = cfg.get('alpha', 1e-3)
self.alpha_decay = cfg.get('alpha_decay', 0.0)
self.validation_size = cfg.get('validation_size', 0)
self.validation_freq = cfg.get('validation_freq', 10)
self.validation_no_overlap = cfg.get('validation_no_overlap', False)
self.validation_use_all_refs = cfg.get('validation_use_all_refs', False)
self.validation_delex_slots = cfg.get('validation_delex_slots', set())
self.validation_use_train_refs = cfg.get('validation_use_train_refs', False)
if self.validation_delex_slots:
self.validation_delex_slots = set(self.validation_delex_slots.split(','))
self.multiple_refs = cfg.get('multiple_refs', False) # multiple references for validation
self.ref_selectors = cfg.get('ref_selectors', None) # selectors of validation trees (if in separate file)
self.max_cores = cfg.get('max_cores')
self.mode = cfg.get('mode', 'tokens' if cfg.get('use_tokens') else 'trees')
self.nn_type = cfg.get('nn_type', 'emb_seq2seq')
self.randomize = cfg.get('randomize', True)
self.cell_type = cfg.get('cell_type', 'lstm')
self.bleu_validation_weight = cfg.get('bleu_validation_weight', 0.0)
self.use_context = cfg.get('use_context', False)
# Train Summaries
self.train_summary_dir = cfg.get('tb_summary_dir', None)
if self.train_summary_dir:
self.loss_summary_seq2seq = None
self.train_summary_op = None
self.train_summary_writer = None
def _init_training(self, das_file, ttree_file, data_portion,
context_file, validation_files, lexic_files):
"""Load training data, prepare batches, build the NN.
@param das_file: training DAs (file path)
@param ttree_file: training t-trees (file path)
@param data_portion: portion of the data to be actually used for training
@param context_file: training contexts (file path)
@param validation_files: validation file paths (or None)
@param lexic_files: paths to lexicalization data (or None)
"""
# read training data
log_info('Reading DAs from ' + das_file + '...')
das = read_das(das_file)
log_info('Reading t-trees/sentences from ' + ttree_file + '...')
trees = read_trees_or_tokens(ttree_file, self.mode, self.language, self.selector)
if self.use_context:
das = self._load_contexts(das, context_file)
# make training data smaller if necessary
train_size = int(round(data_portion * len(trees)))
self.train_trees = trees[:train_size]
self.train_das = das[:train_size]
# get validation set (default to empty)
self.valid_trees = []
self.valid_das = []
# load separate validation data files...
if validation_files:
valid_trees_for_lexic = self._load_valid_data(validation_files)
valid_idxs = None # not needed, abstraction loads from separate file
# ... or save part of the training data for validation:
elif self.validation_size > 0:
valid_trees_for_lexic, valid_idxs = self._cut_valid_data() # will set train_trees, valid_trees, train_das, valid_das
else:
valid_trees_for_lexic, valid_idxs = None, None
if self.validation_use_all_refs: # try to use multiple references (not in lexicalizer)
self._regroup_valid_refs()
log_info('Using %d training, %d validation instances (with %d references).' %
(len(self.train_das), len(self.valid_das),
len([ref for inst in self.valid_trees for ref in inst])))
# initialize embeddings
if self.use_context:
self.da_embs = ContextDAEmbeddingSeq2SeqExtract(cfg=self.cfg)
else:
self.da_embs = DAEmbeddingSeq2SeqExtract(cfg=self.cfg)
if self.mode == 'tokens':
self.tree_embs = TokenEmbeddingSeq2SeqExtract(cfg=self.cfg)
elif self.mode == 'tagged_lemmas':
self.tree_embs = TaggedLemmasEmbeddingSeq2SeqExtract(cfg=self.cfg)
else:
self.tree_embs = TreeEmbeddingSeq2SeqExtract(cfg=self.cfg)
self.da_dict_size = self.da_embs.init_dict(self.train_das)
self.tree_dict_size = self.tree_embs.init_dict(self.train_trees)
self.max_tree_len = self.tree_embs.get_embeddings_shape()[0]
self.max_da_len = self.da_embs.get_embeddings_shape()[0]
# prepare training batches
self.train_enc = [cut_batch_into_steps(b)
for b in grouper([self.da_embs.get_embeddings(da)
for da in self.train_das],
self.batch_size, None)]
self.train_dec = [cut_batch_into_steps(b)
for b in grouper([self.tree_embs.get_embeddings(tree)
for tree in self.train_trees],
self.batch_size, None)]
# train lexicalizer (store surface forms, possibly train LM)
if self.lexicalizer:
self.lexicalizer.train(lexic_files, self.train_trees, valid_trees_for_lexic, valid_idxs)
# train the classifier for filtering n-best lists
if self.classif_filter:
self.classif_filter.train(self.train_das, self.train_trees,
valid_das=self.valid_das,
valid_trees=self.valid_trees)
# convert validation data to flat trees to enable F1 measuring
if self.validation_size > 0 and self.mode in ['tokens', 'tagged_lemmas']:
self.valid_trees = self._valid_data_to_flat_trees(self.valid_trees)
# initialize top costs
self.top_k_costs = [float('nan')] * self.top_k
self.checkpoint_path = None
# build the NN
self._init_neural_network()
# initialize the NN variables
self.session.run(tf.compat.v1.global_variables_initializer())
def _load_contexts(self, das, context_file):
"""Load input context utterances from a .yaml.gz/.pickle.gz/.txt file and add them to the
given DAs (each returned item is then a tuple of context + DA)."""
# read contexts, combine them with corresponding DAs for easier handling
if context_file is None:
raise ValueError('Expected context utterances file name!')
log_info('Reading context utterances from %s...' % context_file)
contexts = read_trees_or_tokens(context_file, 'tokens', self.language, self.selector)
return [(context, da) for context, da in zip(contexts, das)]
def _load_valid_data(self, valid_data_paths):
"""Load validation data from separate files (comma-separated list of files with DAs, trees,
and optionally contexts is expected)."""
# parse validation data file specification
valid_data_paths = valid_data_paths.split(',')
if len(valid_data_paths) == 3: # with contexts (this does not determine if they're used)
valid_das_file, valid_trees_file, valid_context_file = valid_data_paths
else:
valid_das_file, valid_trees_file = valid_data_paths
# load the validation data
log_info('Reading DAs from ' + valid_das_file + '...')
self.valid_das = read_das(valid_das_file)
log_info('Reading t-trees/sentences from ' + valid_trees_file + '...')
valid_trees = read_trees_or_tokens(valid_trees_file, self.mode, self.language, self.ref_selectors, ref_mode=True)
if self.use_context:
self.valid_das = self._load_contexts(self.valid_das, valid_context_file)
# reorder validation data for multiple references (see also _cut_valid_data)
        # if we have an empty-lines-separated reference text file, use that organization
if self.mode == 'tokens' and any([len(inst) > 1 for inst in valid_trees]):
self.valid_trees = valid_trees
valid_trees = [tree for inst in valid_trees for tree in inst] # no grouping for lexicalizer
# other type of validation data grouping
else:
if self.mode == 'tokens': # roll back the grouping when it's not used in our text file
valid_trees = [inst[0] for inst in valid_trees]
if self.multiple_refs:
valid_size = len(valid_trees)
num_refs, refs_stored = self._check_multiple_ref_type(valid_size)
# serial: different instances next to each other, then synonymous in the same order
if refs_stored == 'serial':
valid_tree_chunks = [chunk for chunk in
chunk_list(valid_trees, old_div(valid_size, num_refs))]
self.valid_trees = [[chunk[i] for chunk in valid_tree_chunks]
for i in range(old_div(valid_size, num_refs))]
if len(self.valid_das) > len(self.valid_trees):
self.valid_das = self.valid_das[0:old_div(valid_size, num_refs)]
# parallel: synonymous instances next to each other
elif refs_stored == 'parallel':
self.valid_trees = [chunk for chunk in chunk_list(valid_trees, num_refs)]
if len(self.valid_das) > len(self.valid_trees):
self.valid_das = self.valid_das[::num_refs]
# no multiple references; make lists of size 1 to simplify working with the data
else:
self.valid_trees = [[tree] for tree in valid_trees]
return valid_trees # return for lexicalizer, which doesn't use any grouping
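    # Added illustration: with multiple_refs = '2,serial' and six validation trees
    # [t1, t2, t3, p1, p2, p3] (p_i paraphrases t_i), the trees are split into the
    # chunks [t1, t2, t3] and [p1, p2, p3] and regrouped as [[t1, p1], [t2, p2], [t3, p3]],
    # keeping only the first three DAs. With '2,parallel', paraphrases sit next to each
    # other in the file ([t1, p1, t2, p2, t3, p3]) and are grouped pairwise instead,
    # with every second DA dropped via slicing.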
def _tokens_to_flat_trees(self, sents):
"""Use sentences (pairs token-tag) read from Treex files and convert them into flat
trees (each token has a node right under the root, lemma is the token, formeme is 'x').
Uses TokenEmbeddingSeq2SeqExtract conversion there and back.
@param sents: sentences to be converted
@return: a list of flat trees
"""
return [self.tree_embs.ids_to_tree(self.tree_embs.get_embeddings(sent)) for sent in sents]
def _check_multiple_ref_type(self, data_size):
"""Parse multiple references setting from the configuration file and check if the data size
is compatible with it."""
num_refs, refs_stored = self.multiple_refs.split(',')
num_refs = int(num_refs)
if data_size % num_refs != 0:
raise Exception('Data length must be divisible by the number of references!')
return num_refs, refs_stored
def _regroup_valid_refs(self):
"""Group all validation trees/sentences according to the same DA (sorted and
possibly delexicalized).
"""
if self.use_context: # if context is used, then train_das are da[1]
normalized_das = [da[1].get_delexicalized(self.validation_delex_slots)
for da in self.valid_das]
else:
normalized_das = [da.get_delexicalized(self.validation_delex_slots)
for da in self.valid_das]
da_groups = {}
for trees, da in zip(self.valid_trees, normalized_das):
da.sort()
da_groups[da] = da_groups.get(da, [])
da_groups[da].extend(trees)
# use training trees as additional references if needed
if self.validation_use_train_refs:
if self.use_context: # if context is used, then train_das are da[1]
normalized_train_das = [da[1].get_delexicalized(self.validation_delex_slots)
for da in self.train_das]
else:
normalized_train_das = [da.get_delexicalized(self.validation_delex_slots)
for da in self.train_das]
for tree, da in zip(self.train_trees, normalized_train_das):
da.sort()
if da in da_groups:
da_groups[da].append(tree)
# deduplicate the references
for da_group in da_groups.values():
da_group.sort()
da_groups = {da: [sent for sent, _ in groupby(da_group)]
for da, da_group in da_groups.items()}
# store the references in correct order
self.valid_trees = [da_groups[da] for da in normalized_das]
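    # Added illustration: with validation_delex_slots = {'name'}, two references whose
    # DAs differ only in the value of the 'name' slot (e.g. inform(name=A) and
    # inform(name=B)) normalize to the same delexicalized DA, so their sentences are
    # merged into one reference group (sorted, with duplicates dropped) and that group
    # is then attached to every occurrence of the DA.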
def _cut_valid_data(self):
"""Put | |
#!/usr/bin/python
# General imports
from __future__ import absolute_import, division, print_function
from sshutil.cmd import SSHCommand
import logging
import telnetlib
import socket
import json
import time
import random
from socket import AF_INET
from socket import AF_INET6
# ipaddress dependencies
from ipaddress import IPv4Interface, IPv6Interface
from ipaddress import IPv4Network, IPv6Network
from ipaddress import AddressValueError
# NetworkX dependencies
from networkx.readwrite import json_graph
# Main Routing Table
MAIN_ROUTING_TABLE = 254
ZEBRA_PORT = 2601
SSH_PORT = 22
MIN_TABLE_ID = 2
# Older Linux kernels support up to 255 routing tables; newer ones accept 32-bit table IDs
MAX_TABLE_ID = 2**32 - 1  # = 4294967295
# Table where we store our seg6local routes
LOCAL_SID_TABLE = MAIN_ROUTING_TABLE
# Reserved table IDs
RESERVED_TABLEIDS = [0, 253, 254, 255]
RESERVED_TABLEIDS.append(LOCAL_SID_TABLE)
RESERVED_TENANTIDS = [0]
WAIT_TOPOLOGY_INTERVAL = 1
# Logger reference
logger = logging.getLogger(__name__)
# Initialize random seed
random.seed(time.time())
class InterfaceType:
UNKNOWN = 'unknown'
WAN = 'wan'
LAN = 'lan'
'''
class DeviceStatus:
NOT_CONNECTED = 'Not Connected'
CONNECTED = 'Connected'
RUNNING = 'Running'
'''
supported_interface_types = [
InterfaceType.WAN,
InterfaceType.LAN
]
# Generate a random token used to authenticate the tenant
def generate_token():
# Example of token: J4Ie2QKOHz3IVSQs8yA1ahAKfl1ySrtVxGVuT6NkuElGfC8cm55rFhyzkc79pjSLOsr7zKOu7rkMgNMyEHlze4iXVNoX1AtifuieNrrW4rrCroScpGdQqHMETJU46okS # noqa: E501
seq = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'
token = ''
for _ in range(0, 128):
token += random.choice(seq)
# Return the token
return token
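# Added note: the token above is drawn from the `random` module, which is not a
# cryptographically secure generator. A minimal alternative sketch using the
# standard-library `secrets` module (Python 3.6+) is shown below;
# `generate_secure_token` is a hypothetical helper, not used by this controller.
def generate_secure_token(length=128):
    import secrets
    import string
    # secrets.choice draws from an OS-provided CSPRNG (os.urandom)
    alphabet = string.ascii_letters + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(length))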
# Return true if the IP address belongs to the network
def IPv6AddrInNet(ipaddr, net):
# return IPv6Interface(unicode(ipaddr)) in IPv6Network(unicode(net))
return IPv6Interface(ipaddr) in IPv6Network(net)
# Find a IPv6 address contained in the net
def findIPv6AddrInNet(ipaddrs, net):
for ipaddr in ipaddrs:
if IPv6AddrInNet(ipaddr, net):
return ipaddr
return None
def merge_two_dicts(x, y):
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
def print_and_die(msg, code=-2):
print(msg)
exit(code)
class TenantIDAllocator:
def __init__(self):
# Set of reusable tenant ID
self.reusable_tenantids = set()
# Last used tenant ID
self.last_allocated_tenantid = -1
# Mapping token to tenant ID
self.token_to_tenantid = dict()
# Allocate and return a new tenant ID for a token
def get_new_tenantid(self, token):
if self.token_to_tenantid.get(token):
# The token already has a tenant ID
return -1
else:
# Check if a reusable tenant ID is available
if self.reusable_tenantids:
tenantid = self.reusable_tenantids.pop()
else:
# Get new tenant ID
self.last_allocated_tenantid += 1
while self.last_allocated_tenantid in RESERVED_TENANTIDS:
# Skip reserved tenant ID
self.last_allocated_tenantid += 1
tenantid = self.last_allocated_tenantid
# If tenant ID is valid
if validate_tenantid(tenantid) is True:
                # Assign the tenant ID to the token
self.token_to_tenantid[token] = str(tenantid)
return str(tenantid)
# Return -1 if tenant IDs are finished
else:
return -1
# Return tenant ID, if no tenant ID assigned to the token return -1
def get_tenantid(self, token):
return str(self.token_to_tenantid.get(token, -1))
# Release tenant ID and mark it as reusable
def release_tenantid(self, token):
# Check if the token has an associated tenantid
if token in self.token_to_tenantid:
tenantid = self.token_to_tenantid[token]
            # Unassign the tenant ID
del self.token_to_tenantid[token]
# Mark the tenant ID as reusable
self.reusable_tenantids.add(tenantid)
return str(tenantid)
else:
            # The token does not have an associated tenant ID
return -1
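# Usage illustration (added; hypothetical values, not part of the controller flow):
#   allocator = TenantIDAllocator()
#   allocator.get_new_tenantid('token-a')   # -> '1' (0 is reserved and skipped)
#   allocator.get_tenantid('token-a')       # -> '1'
#   allocator.release_tenantid('token-a')   # -> '1', the ID returns to the reusable pool
#   allocator.get_new_tenantid('token-b')   # -> '1' again, taken from the pool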
# Table ID Allocator
class TableIDAllocator:
def __init__(self):
# Mapping VPN name to table ID, indexed by tenant ID
self.vpn_to_tableid = dict()
# Set of reusable table IDs, indexed by tenant ID
self.reusable_tableids = dict()
# Last used table ID, indexed by tenant ID
self.last_allocated_tableid = dict()
# Allocate and return a new table ID for a VPN
def get_new_tableid(self, vpn_name, tenantid):
if tenantid not in self.vpn_to_tableid:
# Initialize data structures
self.vpn_to_tableid[tenantid] = dict()
self.reusable_tableids[tenantid] = set()
self.last_allocated_tableid[tenantid] = -1
# Get the new table ID
if self.vpn_to_tableid[tenantid].get(vpn_name):
# The VPN already has an associated table ID
return -1
else:
# Check if a reusable table ID is available
if self.reusable_tableids[tenantid]:
tableid = self.reusable_tableids[tenantid].pop()
else:
# If not, get a new table ID
self.last_allocated_tableid[tenantid] += 1
while self.last_allocated_tableid[
tenantid
] in RESERVED_TABLEIDS:
# Skip reserved table IDs
self.last_allocated_tableid[tenantid] += 1
tableid = self.last_allocated_tableid[tenantid]
# Assign the table ID to the VPN name
self.vpn_to_tableid[tenantid][vpn_name] = tableid
# And return
return tableid
# Return the table ID assigned to the VPN
# If the VPN has no assigned table IDs, return -1
def get_tableid(self, vpn_name, tenantid):
if tenantid not in self.vpn_to_tableid:
return -1
return self.vpn_to_tableid[tenantid].get(vpn_name, -1)
# Release a table ID and mark it as reusable
def release_tableid(self, vpn_name, tenantid):
# Check if the VPN has an associated table ID
if self.vpn_to_tableid[tenantid].get(vpn_name):
# The VPN has an associated table ID
tableid = self.vpn_to_tableid[tenantid][vpn_name]
# Unassign the table ID
del self.vpn_to_tableid[tenantid][vpn_name]
# Mark the table ID as reusable
self.reusable_tableids[tenantid].add(tableid)
# If the tenant has no VPNs,
            # destroy the data structures
if len(self.vpn_to_tableid[tenantid]) == 0:
del self.vpn_to_tableid[tenantid]
del self.reusable_tableids[tenantid]
del self.last_allocated_tableid[tenantid]
# Return the table ID
return tableid
else:
            # The VPN does not have an associated table ID
return -1
class VTEPIPv6NetAllocator:
bit = 16
net = u"fcfb::/%d" % bit
prefix = 64
def __init__(self):
print("*** Calculating Available Mgmt Addresses")
self.hosts = (IPv6Network(self.net)).hosts()
def nextVTEPAddress(self):
n_host = next(self.hosts)
return n_host.__str__()
class VTEPIPv4NetAllocator:
bit = 8
net = u"10.0.0.0/%d" % bit
prefix = 16
def __init__(self):
print("*** Calculating Available Mgmt Addresses")
self.vtepnet = (IPv4Network(self.net)).hosts()
def nextVTEPAddress(self):
n_host = next(self.vtepnet)
return n_host.__str__()
# Utility function to check if the provided table ID is valid
def validate_table_id(tableid):
return tableid >= MIN_TABLE_ID and tableid <= MAX_TABLE_ID
def validate_deviceid(deviceid):
return deviceid is not None and deviceid != ''
def validate_overlayid(overlayid):
return overlayid is not None and overlayid != ''
def validate_tenantid(tenantid):
return tenantid is not None and tenantid != ''
def validate_overlay_type(overlay_type):
return overlay_type in supported_overlay_types
def validate_overlay_name(overlay_name):
return overlay_name is not None and overlay_name != ''
def validate_tunnel_mode(tunnel_mode, supported_tunnel_modes):
return tunnel_mode in supported_tunnel_modes
def validate_port(port):
return port >= 0 and port <= 65535
def validate_interface_type(interface_type):
return interface_type in supported_interface_types
# Utility function to check if the IP
# is a valid IPv6 address
def validate_ipv6_address(ip):
if ip is None:
return False
try:
IPv6Interface(ip)
return True
except AddressValueError:
return False
# Utility function to check if the IP
# is a valid IPv4 address
def validate_ipv4_address(ip):
if ip is None:
return False
try:
IPv4Interface(ip)
return True
except AddressValueError:
return False
# Utility function to check if the IP
# is a valid address
def validate_ip_address(ip):
return validate_ipv4_address(ip) or validate_ipv6_address(ip)
# Utility function to get the IP address family
def getAddressFamily(ip):
if validate_ipv6_address(ip):
# IPv6 address
return AF_INET6
elif validate_ipv4_address(ip):
# IPv4 address
return AF_INET
else:
# Invalid address
return None
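# Quick examples (added for clarity; results follow from the validators above):
#   validate_ip_address('192.168.1.1')   -> True   (valid IPv4)
#   validate_ip_address('fcfb::1/64')    -> True   (IPv*Interface also accepts a prefix)
#   validate_ip_address('not-an-ip')     -> False
#   getAddressFamily('fcfb::1')          -> AF_INET6
#   getAddressFamily('192.168.1.1')      -> AF_INET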
class OverlayType:
IPv6Overlay = 'IPv6Overlay'
IPv4Overlay = 'IPv4Overlay'
supported_overlay_types = [OverlayType.IPv4Overlay, OverlayType.IPv6Overlay]
'''
class VPN:
# tableid=-1):
def __init__(self, tunnel_id, vpn_name, vpn_type, interfaces, tenantid,
tunnel_mode):
# Tunnel ID
self.id = tunnel_id
# VPN name
self.vpn_name = vpn_name
# VPN type
self.vpn_type = vpn_type
# Interfaces belonging to the VPN
self.interfaces = set(interfaces)
#self.interfaces = dict()
# for interface in interfaces:
# routerid = interface.routerid
# interface_name = interface.interface_name
# if self.interfaces.get(routerid) is None:
# self.interfaces[routerid] = dict()
# self.interfaces[routerid][interface_name] = interface
# Tenant ID
self.tenantid = tenantid
# Table ID
#self.tableid = tableid
#self.tunnel_specific_data = dict()
self.tunnel_mode = tunnel_mode
def removeInterface(self, routerid, interface_name):
for interface in self.interfaces.copy():
if interface.routerid == routerid and \
interface.interface_name == interface_name:
self.interfaces.remove(interface)
return True
return False
def numberOfInterfaces(self, routerid):
num = 0
for interface in self.interfaces:
if interface.routerid == routerid:
num += 1
return num
def getInterface(self, routerid, interface_name):
for interface in self.interfaces:
if interface.routerid == routerid and \
interface.interface_name == interface_name:
return interface
return None
class Interface:
def __init__(self, routerid, interface_name):
# Router ID
self.routerid = routerid
# Interface name
self.interface_name = interface_name
'''
# IPv6 utility functions
def del_ipv6_nd_prefix_quagga(router, intf, prefix):
# Establish a telnet connection with the zebra daemon
# and try to reconfigure the addressing plan of the router
router = str(router)
port = ZEBRA_PORT
intf = str(intf)
prefix = str(prefix)
try:
print('%s - Trying to reconfigure addressing plan' % router)
password = '<PASSWORD>'
# Init telnet
tn = telnetlib.Telnet(router, port)
# Password
tn.read_until(b'Password: ')
tn.write(b'%s\r\n' % password.encode())
# Terminal length set to 0 to not have interruptions
tn.write(b'terminal length 0\r\n')
# Enable
tn.write(b'enable\r\n')
# Password
tn.read_until(b'Password: ')
tn.write(b'%s\r\n' % password.encode())
# Configure terminal
tn.write(b'configure terminal\r\n')
# Interface configuration
tn.write(b'interface %s\r\n' % intf.encode())
# Remove old IPv6 prefix
        tn.write(b'no ipv6 nd prefix %s\r\n' % prefix.encode())
# Close interface configuration
tn.write(b'q\r\n')
# Close configuration mode
tn.write(b'q\r\n')
# Close privileged mode
tn.write(b'q\r\n')
# Close telnet
tn.close()
except socket.error:
print('Error: cannot establish a connection '
'to %s on port %s' | |
# Source file: generators/simple/templates/src/platform/extensionRunner/cdf/document_definition.py
"""
- THIS FILE IS GENERATED -
CoveoInterfaces/DocumentDefinition/CoveoDocumentDefinition.jid
"""
from attr import attrib, attrs
from enum import auto
from typing import Any, Dict, List, Optional as Opt, Union
from .root import CASING, JidEnumFlag, JidType
class CompressionType(JidEnumFlag):
"""
Attributes:
Uncompressed: Document is uncompressed
ZLib: Data is compressed with zlib
GZip: Data is compressed with GZip
LZMA: Data is compressed with LZMA (e.g. 7-zip)
Deflate: Data is compressed with Zlib (No Header, e.g. DeflateStream from .Net)
"""
Uncompressed: int = auto()
ZLib: int = auto()
GZip: int = auto()
LZMA: int = auto()
Deflate: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class BlobEntry(JidType, hint="Coveo.BlobEntry"):
"""A structure that represents a blob entry from a store.
Attributes:
id_: The Id of the blob.
inline_blob: The blob content when inline.
compression: The compression method on the blob
"""
id_: Opt[str] = attrib(default=None, metadata={CASING: "Id"})
inline_blob: Opt[Union[str, bytes]] = None
compression: Opt[CompressionType] = None
def __init__(
self,
*,
id_: Opt[str] = attrib(default=None, metadata={CASING: "Id"}),
inline_blob: Opt[Union[str, bytes]] = None,
compression: Opt[CompressionType] = None,
) -> None:
"""
Parameters:
id_: The Id of the blob.
inline_blob: The blob content when inline.
compression: The compression method on the blob
"""
@attrs(kw_only=True, auto_attribs=True)
class LocalBlobEntry(BlobEntry, hint="Coveo.LocalBlobEntry"):
"""Blob entry that is stored locally
Attributes:
file_name: the local filename to access the blob from
"""
file_name: Opt[str] = None
def __init__(self, *, file_name: Opt[str] = None) -> None:
"""
Parameters:
file_name: the local filename to access the blob from
"""
class PermissionIdentityType(JidEnumFlag):
"""Defines permission identity types.
Attributes:
Unknown: Represents a standard, or undefined identity.
User: Represents a 'User' identity.
Group: Represents a 'Group' identity.
VirtualGroup: Represents a 'VirtualGroup' identity.
"""
Unknown: int = auto()
User: int = auto()
Group: int = auto()
VirtualGroup: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class Permission(JidType, hint="Coveo.Permission"):
"""A structure that represents a single permission.
Attributes:
identity_type: The type of identity.
security_provider: The name of the security provider.
identity: The identity, as defined by the specified security provider.
additional_info: The additional information
"""
identity_type: Opt[PermissionIdentityType] = None
security_provider: Opt[str] = None
identity: Opt[str] = None
additional_info: Opt[Dict[str, str]] = None
def __init__(
self,
*,
identity_type: Opt[PermissionIdentityType] = None,
security_provider: Opt[str] = None,
identity: Opt[str] = None,
additional_info: Opt[Dict[str, str]] = None,
) -> None:
"""
Parameters:
identity_type: The type of identity.
security_provider: The name of the security provider.
identity: The identity, as defined by the specified security provider.
additional_info: The additional information
"""
@attrs(kw_only=True, auto_attribs=True)
class SecurityIdentity(JidType, hint="Coveo.SecurityIdentity"):
"""A structure that represents a single security identity. Also known as a declarator.
Attributes:
identity_type: The type of security identity
provider: Security provider associated with the identity.
name: Name of the security identity.
additional_info: Additional information associated with the security identity as key-value pairs.
"""
identity_type: Opt[PermissionIdentityType] = None
provider: Opt[str] = None
name: Opt[str] = None
additional_info: Opt[Dict[str, str]] = None
def __init__(
self,
*,
identity_type: Opt[PermissionIdentityType] = None,
provider: Opt[str] = None,
name: Opt[str] = None,
additional_info: Opt[Dict[str, str]] = None,
) -> None:
"""
Parameters:
identity_type: The type of security identity
provider: Security provider associated with the identity.
name: Name of the security identity.
additional_info: Additional information associated with the security identity as key-value pairs.
"""
@attrs(kw_only=True, auto_attribs=True)
class PermissionSet(JidType, hint="Coveo.PermissionSet"):
"""A structure that represents a collection of allowed and denied permissions.
Attributes:
allow_anonymous: Indicates if anonymous users (i.e.: everyone) are allowed.
allowed_permissions: The list of allowed permissions.
denied_permissions: The list of denied permissions.
name: An optional permission set name.
"""
allow_anonymous: Opt[bool] = None
allowed_permissions: Opt[List[Permission]] = None
denied_permissions: Opt[List[Permission]] = None
name: Opt[str] = None
def __init__(
self,
*,
allow_anonymous: Opt[bool] = None,
allowed_permissions: Opt[List[Permission]] = None,
denied_permissions: Opt[List[Permission]] = None,
name: Opt[str] = None,
) -> None:
"""
Parameters:
allow_anonymous: Indicates if anonymous users (i.e.: everyone) are allowed.
allowed_permissions: The list of allowed permissions.
denied_permissions: The list of denied permissions.
name: An optional permission set name.
"""
@attrs(kw_only=True, auto_attribs=True)
class PermissionLevel(JidType, hint="Coveo.PermissionLevel"):
"""A structure that represents a level of permission where multiple permission sets can be specified.
Attributes:
name: An optional permission level name.
"""
permission_sets: Opt[List[PermissionSet]] = None
name: Opt[str] = None
def __init__(self, *, permission_sets: Opt[List[PermissionSet]] = None, name: Opt[str] = None) -> None:
"""
Parameters:
name: An optional permission level name.
"""
@attrs(kw_only=True, auto_attribs=True)
class PermissionModel(JidType, hint="Coveo.PermissionModel"):
"""A structure that represent a permissions model that contains one or many permission levels."""
permission_levels: Opt[List[PermissionLevel]] = None
def __init__(self, *, permission_levels: Opt[List[PermissionLevel]] = None) -> None:
...
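# Illustration (added; not part of the generated interface): the permission types
# above nest as PermissionModel -> PermissionLevel -> PermissionSet -> Permission.
# A document allowing a single user and denying anonymous access could be built
# roughly as follows (identity and provider values are placeholders):
#
#   perm = Permission(identity_type=PermissionIdentityType.User,
#                     security_provider='MySecurityProvider',
#                     identity='jsmith')
#   pset = PermissionSet(allow_anonymous=False,
#                        allowed_permissions=[perm],
#                        denied_permissions=[])
#   model = PermissionModel(permission_levels=[PermissionLevel(permission_sets=[pset])])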
@attrs(kw_only=True, auto_attribs=True)
class SummarySentence(JidType, hint="Coveo.SummarySentence"):
"""
Attributes:
text: The summary sentence text.
word_count: The number of words in the summary sentence.
score: The score of the sentence.
"""
text: Opt[str] = None
word_count: Opt[int] = None
score: Opt[int] = None
def __init__(self, *, text: Opt[str] = None, word_count: Opt[int] = None, score: Opt[int] = None) -> None:
"""
Parameters:
text: The summary sentence text.
word_count: The number of words in the summary sentence.
score: The score of the sentence.
"""
@attrs(kw_only=True, auto_attribs=True)
class DataStreamValue(JidType, hint="Coveo.DataStreamValue"):
"""A structure that represents a data stream.
Attributes:
value: The blob entry containing the data.
origin: The name of the component that created this data.
"""
value: Opt[BlobEntry] = None
origin: Opt[str] = None
def __init__(self, *, value: Opt[BlobEntry] = None, origin: Opt[str] = None) -> None:
"""
Parameters:
value: The blob entry containing the data.
origin: The name of the component that created this data.
"""
@attrs(kw_only=True, auto_attribs=True)
class MetaDataValue(JidType, hint="Coveo.MetaDataValue"):
"""A structure that represents a collection of meta data from the same origin
Attributes:
values: The map of meta data.
origin: The origin of the meta data.
"""
values: Opt[Dict[str, List[Any]]] = None
origin: Opt[str] = None
def __init__(self, *, values: Opt[Dict[str, List[Any]]] = None, origin: Opt[str] = None) -> None:
"""
Parameters:
values: The map of meta data.
origin: The origin of the meta data.
"""
class OperationType(JidEnumFlag):
"""Defines document operation types.
Attributes:
Add: Add the document.
Delete: Delete a specific document.
DeleteOlderThan: Delete documents that are older than /OperationId/.
DeleteAndChildren: Delete the document and its children.
"""
Add: int = auto()
Delete: int = auto()
DeleteOlderThan: int = auto()
DeleteAndChildren: int = auto()
@attrs(kw_only=True, auto_attribs=True)
class Document(JidType, hint="Coveo.Document"):
"""A structure that represents a document.
Attributes:
operation_id: An Id used to order operations.
index_identifier: An identifier used to identify the index this document should go to.
source_operation_id: The Id of the source operation that last processed this document.
type_: The operation to perform on this document.
source_key: The Id of the source that contains this document.
source_id: The unique Id of the source that contains this document.
organization_id: The Id of the organization to which this document belong.
id_: The Id of this document.
parent_id: The Id of the parent document.
top_parent_id: The Id of the top parent document.
permissions: The permissions of this document.
meta_data: The meta data values pertaining to this document.
data_streams: The data streams (blobs) associated with this document.
attachments: The collection of children documents.
attachment_ids: The collection of children ids.
"""
operation_id: Opt[int] = None
index_identifier: Opt[str] = None
source_operation_id: Opt[str] = None
type_: Opt[OperationType] = attrib(default=None, metadata={CASING: "Type"})
source_key: Opt[str] = None
source_id: Opt[str] = None
organization_id: Opt[str] = None
id_: Opt[str] = attrib(default=None, metadata={CASING: "Id"})
parent_id: Opt[str] = None
top_parent_id: Opt[str] = None
permissions: Opt[List[PermissionLevel]] = None
meta_data: Opt[List[MetaDataValue]] = None
data_streams: Opt[Dict[str, List[DataStreamValue]]] = None
attachments: "Opt[List[Document]]" = None
attachment_ids: Opt[List[str]] = None
def __init__(
self,
*,
operation_id: Opt[int] = None,
index_identifier: Opt[str] = None,
source_operation_id: Opt[str] = None,
type_: Opt[OperationType] = attrib(default=None, metadata={CASING: "Type"}),
source_key: Opt[str] = None,
source_id: Opt[str] = None,
organization_id: Opt[str] = None,
id_: Opt[str] = attrib(default=None, metadata={CASING: "Id"}),
parent_id: Opt[str] = None,
top_parent_id: Opt[str] = None,
permissions: Opt[List[PermissionLevel]] = None,
meta_data: Opt[List[MetaDataValue]] = None,
data_streams: Opt[Dict[str, List[DataStreamValue]]] = None,
attachments: "Opt[List[Document]]" = None,
attachment_ids: Opt[List[str]] = None,
) -> None:
"""
Parameters:
operation_id: An Id used to order operations.
index_identifier: An identifier used to identify the index this document should go to.
source_operation_id: The Id of the source operation that last processed this document.
type_: The operation to | |
DPR 633/72"
riferimento_ordine=""
quantita="1"
prezzo="2,00"
sconti=""
codice_iva="53"
u_m="Nr"
importo="2,00"
fattura.add_row(codice_articolo,descrizione,riferimento_ordine,u_m,quantita,prezzo,sconti,importo,codice_iva)
if not codice_iva in lista_codici_iva:
lista_codici_iva[codice_iva] = 2
else:
lista_codici_iva[codice_iva] +=2
if scritta_esenzione:
fattura.add_row("","","","","","","","","")
fattura.add_row("","","","","","","","","")
scritte = scritta_esenzione_cliente.split(",")
for scritta in scritte:
fattura.add_row("",scritta,"","","","","","","")
bollo_presente = False
for k,v in lista_codici_iva.iteritems():
codice_iva = k
importo_netto = v
# print "LISTA CODICI : ",codice_iva,importo_netto
dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
percentuale_iva = dettaglio_iva.percentuale_iva
descrizione_iva = dettaglio_iva.descrizione_codice_iva
imposta_iva = return_imposta(v,percentuale_iva)
if dettaglio_iva.bollo_su_importi_esenti is True:
if not bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
bollo_presente = True
fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),"")
if bollo:
_bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
importo_totale += float(_bollo)
importo_totale_da_salvare = importo_totale +imposta_iva
# print "Imposta iva {0}".format(imposta_iva)
# print "Importo calcolato {0}".format(importo_totale_da_salvare)
importo_totale = Money(str(importo_totale),"EUR")
importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
fattura.totale(str(ritorna_prezzo_europeo(importo_totale_da_salvare)))
# db.fatture_salvate.insert(scadenza=scadenza_salvata,nome_cliente=nome_cliente,data_fattura = datetime.datetime.now().strftime("%d/%m/%Y"),numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = importo_totale_da_salvare)
# print "SCADENZA {0}".format(scadenza)
"""
    fattura.footer("Totale merce","Sconto","Netto merce","spese varie","spese_trasporto","totale_imponibile","Totale imposta")
fattura.footer_2("CodIva","Spese accessorie","Imponibile","Iva","Imposta","Bolli")
fattura.footer_2("CodIva2","Spese accessorie2","Imponibile2","Iva2","Imposta2","Bolli2")
fattura.totale("14567645")
"""
fattura.add_row("","","","","","","","","")
fattura.add_row("",annotazioni,"","","","","","","")
fattura.insert_rows()
fattura.create_pdf()
# db(db.fattura).delete()
# db.fattura.insert(numero_fattura = numero_fattura_da_salvare)
@service.jsonrpc
@service.jsonrpc2
def crea_fattura_preview_istantanea(args):
id_cliente=args['0']
# print "ID CLIENTE : ",id_cliente
numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
numero = int(numero_corrente_fattura.split("/")[0])
anno = int(numero_corrente_fattura.split("/")[1])
numero +=1
numero_fattura_da_salvare = str(numero)+"/"+str(anno)
"""
    Customer data
"""
dati_cliente = db(db.clienti.id == id_cliente).select().first()
nome_cliente=dati_cliente.nome
citta_cliente = dati_cliente.citta
indirizzo_cliente = dati_cliente.indirizzo
cap_cliente = dati_cliente.cap
provincia_cliente = dati_cliente.provincia
cf_cliente = dati_cliente.codice_fiscale
pi_cliente = dati_cliente.partita_iva
nazione_cliente = dati_cliente.nazione
codice_banca = dati_cliente.codice_banca
dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
annotazioni=dati_cliente.annotazioni
scritta_esenzione_cliente = dati_cliente.descrizione_esenzione_iva
bollo= dati_cliente.bollo
if bollo:
db(db.righe_in_fattura_istantanea.codice_articolo=="BOLLO").delete()
db.righe_in_fattura_istantanea.insert(
codice_articolo="BOLLO",
descrizione="art. 15 DPR 633/72",
riferimento_ordine="",
qta="1",
prezzo="2",
sconti="",
codice_iva="Esenzione Iva",
commento=""
)
scritta_esenzione = False
# print "1"
# print dettagli_banca
# print "2"
start_date = datetime.datetime.now()
fattura = FATTURA("FATTURA IMMEDIATA",datetime.datetime.now().date().strftime("%d/%m/%Y"),numero_fattura_da_salvare,anteprima=True)
fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
try:
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),"PAGAMENTO","SCADENZA")
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica cliente"
return locals()
fattura.rows=[]
lista_codici_iva = {}
importo_totale = 0
imposta_totale = 0
imposta_iva = 0
lista_ddt = []
scritta_esenzione = False
if True:
rows = db(db.righe_in_fattura_istantanea).select()
for row in rows:
try:
pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
if "F.M." in pagamento:
fine_mese = True
else:
fine_mese = False
if not fine_mese:
try:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
if start_date.date().month==12 or start_date.date().month==1:
if int(giorni_da_aggiungere)==60:
giorni_da_aggiungere="56"
scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
scadenza_salvata = scadenza
scadenza = scadenza.strftime("%d/%m/%Y")
except:
response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
return locals()
else:
if ("M.S." or "ms") in pagamento:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
scadenza = scadenza.strftime("%d/%m/%Y")
else:
                    # End of month ("F.M.") terms without the next-month ("M.S.") shift
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
if start_date.date().month==12 or start_date.date().month==1:
if int(giorni_da_aggiungere)==60:
giorni_da_aggiungere="56"
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
pass
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),pagamento,str(scadenza))
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica"
return locals()
sconti = row.sconti
if row.sconti is None:
sconti=""
if len(row.codice_articolo) > 0 and not 'commento' in row.codice_articolo:
try:
if row.prezzo == "0":
row.prezzo = ""
f = float(row.prezzo)
# print "SONO QUI : PREZZO = ".format(f)
except:
msg = "Prezzo non presente Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
try:
f=float(row.qta)
except:
msg = "Quantità non valida Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
pass
importo = saved_importo = float(row.qta) * float(row.prezzo)
importo = Money(str(importo),"EUR")
importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
prezzo = str(row.prezzo).replace(".",",")
codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
descrizione_codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["descrizione_codice_iva"]
if "Esenzione" in descrizione_codice_iva:
scritta_esenzione = True
importo_totale +=saved_importo
imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
if not codice_iva in lista_codici_iva:
lista_codici_iva[codice_iva] = saved_importo
else:
lista_codici_iva[codice_iva] += saved_importo
else:
row.u_m,row.codice_articolo,prezzo,sconti,importo,codice_iva,row.riferimento_ordine,row.qta = "","","","","","","",""
row.codice_articolo,prezzo,sconti,importo,codice_iva,row.riferimento_ordine,row.qta = "","","","","","",""
row.descrizione=row.commento
row.u_m=""
fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.qta,prezzo,sconti,importo,codice_iva)
# print lista_codici_iva
if scritta_esenzione:
fattura.add_row("","","","","","","","","")
fattura.add_row("","","","","","","","","")
scritte = scritta_esenzione_cliente.split(",")
for scritta in scritte:
fattura.add_row("",scritta,"","","","","","","")
scadenza=""
bollo_presente = False
bollo = 0
for k,v in lista_codici_iva.iteritems():
codice_iva = k
importo_netto = v
# print "LISTA CODICI : ",codice_iva,importo_netto
dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
percentuale_iva = dettaglio_iva.percentuale_iva
descrizione_iva = dettaglio_iva.descrizione_codice_iva
imposta_iva = return_imposta(v,percentuale_iva)
if dettaglio_iva.bollo_su_importi_esenti is True:
if not bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
bollo_presente = True
fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),"")
bollo = 0
"""
if bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
importo_totale += float(bollo)
"""
importo_totale_da_salvare = importo_totale +imposta_iva
importo_totale = Money(str(importo_totale),"EUR")
importo_totale = importo_totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
fattura.footer(str(importo_totale)," "," "," "," ",str(importo_totale),str(return_currency(imposta_totale)))
fattura.totale(str(importo_totale_da_salvare))
# db.fatture_salvate.insert(scadenza=scadenza_salvata,nome_cliente=nome_cliente,data_fattura = datetime.datetime.now().strftime("%d/%m/%Y"),numero_fattura = numero_fattura_da_salvare,id_cliente=id_cliente,id_ddt = lista_ddt,totale = importo_totale_da_salvare)
# print "SCADENZA {0}".format(scadenza)
"""
    fattura.footer("Totale merce","Sconto","Netto merce","spese varie","spese_trasporto","totale_imponibile","Totale imposta")
fattura.footer_2("CodIva","Spese accessorie","Imponibile","Iva","Imposta","Bolli")
fattura.footer_2("CodIva2","Spese accessorie2","Imponibile2","Iva2","Imposta2","Bolli2")
fattura.totale("14567645")
"""
fattura.add_row("","","","","","","","","")
fattura.add_row("",annotazioni,"","","","","","","")
fattura.insert_rows()
fattura.create_pdf()
# db(db.fattura).delete()
# db.fattura.insert(numero_fattura = numero_fattura_da_salvare)
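# Added illustration: the "scadenza" (due date) logic used inside the functions above,
# rewritten as a standalone sketch. `calcola_scadenza_esempio` is a hypothetical helper
# and is never called by this controller; it only mirrors the plain and "F.M."
# (end-of-month) branches, with an optional extra shift for "M.S." terms.
def calcola_scadenza_esempio(data_fattura, giorni, fine_mese=False, giorni_mese_successivo=0):
    import datetime
    from calendar import monthrange
    # plain terms: invoice date + N days
    scadenza = data_fattura + datetime.timedelta(days=int(giorni))
    if fine_mese:
        # "F.M.": move the due date to the last day of that month...
        ultimo_giorno = monthrange(scadenza.year, scadenza.month)[1]
        scadenza = scadenza.replace(day=ultimo_giorno)
        # ...then optionally push it into the following month ("M.S.")
        scadenza = scadenza + datetime.timedelta(days=int(giorni_mese_successivo))
    return scadenza.strftime("%d/%m/%Y")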
@service.jsonrpc
@service.jsonrpc2
def crea_fattura_preview_istantanea_accredito(args):
# print "In preview instantanea accredito"
id_cliente=args['0']
# print "ID CLIENTE : ",id_cliente
numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
numero = int(numero_corrente_fattura.split("/")[0])
anno = int(numero_corrente_fattura.split("/")[1])
numero +=1
numero_fattura_da_salvare = str(numero)+"/"+str(anno)
"""
    Customer data
"""
dati_cliente = db(db.clienti.id == id_cliente).select().first()
nome_cliente=dati_cliente.nome
citta_cliente = dati_cliente.citta
indirizzo_cliente = dati_cliente.indirizzo
cap_cliente = dati_cliente.cap
provincia_cliente = dati_cliente.provincia
cf_cliente = dati_cliente.codice_fiscale
pi_cliente = dati_cliente.partita_iva
nazione_cliente = dati_cliente.nazione
codice_banca = dati_cliente.codice_banca
dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
annotazioni=dati_cliente.annotazioni
# print "1"
# print dettagli_banca
# print "2"
start_date = datetime.datetime.now()
fattura = FATTURA("NOTA DI ACCREDITO",datetime.datetime.now().date().strftime("%d/%m/%Y"),numero_fattura_da_salvare,anteprima=True)
fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
try:
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),"PAGAMENTO","SCADENZA")
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica cliente"
return locals()
fattura.rows=[]
lista_codici_iva = {}
importo_totale = 0
imposta_totale = 0
imposta_iva = 0
lista_ddt = []
if True:
rows = db(db.righe_in_fattura_istantanea).select()
for row in rows:
try:
pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
if "F.M." in pagamento:
fine_mese = True
else:
fine_mese = False
if not fine_mese:
try:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
scadenza_salvata = scadenza
scadenza = scadenza.strftime("%d/%m/%Y")
except:
response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
return locals()
else:
if ("M.S." or "ms") in pagamento:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
scadenza = scadenza.strftime("%d/%m/%Y")
else:
                    # End of month ("F.M.") terms without the next-month ("M.S.") shift
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
pass
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),pagamento,str(scadenza))
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica"
return locals()
sconti = row.sconti
if row.sconti is None:
sconti=""
if len(row.codice_articolo) > 0 and not 'commento' in row.codice_articolo:
try:
if row.prezzo == "0":
row.prezzo = ""
f = float(row.prezzo)
# print "SONO QUI : PREZZO = ".format(f)
except:
msg = "Prezzo non presente Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
try:
f=float(row.qta)
except:
msg = "Quantità non valida Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
pass
importo = saved_importo = float(row.qta) * float(row.prezzo)
importo = Money(str(importo),"EUR")
importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
prezzo = str(row.prezzo).replace(".",",")
codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
importo_totale +=saved_importo
imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
if not codice_iva in lista_codici_iva:
lista_codici_iva[codice_iva] = saved_importo
else:
lista_codici_iva[codice_iva] += saved_importo
else:
row.codice_articolo,prezzo,sconti,importo,codice_iva,row.riferimento_ordine,row.qta = "","","","","","",""
row.descrizione=row.commento
fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.qta,prezzo,sconti,importo,codice_iva)
# print lista_codici_iva
scadenza=""
| |
(a_name, i, run))
clr_redraw()
if self.verbose and not self.ipy:
print('iter %d took %.3f seconds' % (i, time.time() - iter_time))
elif self.ipy:
iter_timebox.value = time.time() - iter_time
iter_progress.value += 1
iter_progress.description = 'Iter [%d/%d]' % (iter_progress.value, n_iter)
# run finished; store final acquisitions
for acquisition in acquisitions:
self.final_acquisitions[acquisition.acq_name].append(acquisition)
self.save(temp=True) # save in case of error later
if self.verbose and not self.ipy:
runtime = time.time() - begin_time
print('Run %d took %.3f seconds' % (run+1, runtime))
elif self.ipy:
run_progress.value += 1
run_progress.description = 'Run [%d/%d]' % (run_progress.value, n_runs)
run_timebox.value = time.time() - begin_time
iter_progress.value = 1
iter_progress.description = 'Iter [%d/%d]' % (iter_progress.value, n_iter)
if self.ipy:
run_timebox.close()
iter_timebox.close()
run_progress.close()
iter_progress.close()
sim_name.close()
@property
def observed_data(self):
obs_data = set() # set for removing duplicate data points
for acq_type, acqs in self.final_acquisitions.items():
for acq in acqs:
for theta, discr in zip(acq.model.thetas, acq.model.discrs):
data = np.zeros(self.input_dim + 1)
data[:-1] = theta
data[-1] = discr
obs_data.add(tuple(data))
return np.array(list(obs_data))
def get_observed_data_for_acq(self, acq_name):
f_acqs = self.final_acquisitions[acq_name]
obs_data = set()
for acq in f_acqs:
for theta, discr in zip(acq.model.thetas, acq.model.discrs):
datum = np.zeros(self.input_dim + 1)
datum[:-1] = theta
datum[-1] = discr
obs_data.add(tuple(datum))
return np.array(list(obs_data))
def _get_ground_truth(self, h, n_grid_pts, n_local_pts, c_spr):
"""
Loads/calculates ground truth, depending on the simulator.
"""
out = dict()
# Bacterial infections simulator (true, not interpolated)
if isinstance(self.sim, BacterialInfectionsSimulator):
bac_res = pkl.load(open('BAC_DATA/BACTERIAL_RESULTS.p', 'rb')).reshape(-1, 4)
if isinstance(self.sim, BacterialInfections2D):
k_dim = self.sim.known_dim
k_prm = self.sim.known_param
full_data = bac_res[np.where(abs(bac_res[:, k_dim] - k_prm) < 0.0001), :].squeeze()
full_data = np.delete(full_data, obj=k_dim, axis=1).reshape(-1, 3)
else:
full_data = bac_res
if len(full_data) < n_grid_pts**2:
warnings.warn('Only %d BactInf data points available...' % len(full_data))
grid_thetas = full_data[:n_grid_pts**2, :-1]
grid_pdf_true = kde.calculate_bact_inf_pdf(self.sim, full_data[:n_grid_pts**2, :], h)
local_thetas = full_data[:n_local_pts**2, :-1]
local_pdf_true = kde.calculate_bact_inf_pdf(self.sim, full_data[:n_local_pts**2, :], h)
multiple_minima = False
out['grid_thetas'] = grid_thetas
out['grid_pdf_true'] = grid_pdf_true
out['local_thetas'] = local_thetas
out['local_pdf_true'] = local_pdf_true
out['multiple_minima'] = multiple_minima
return out
# =======================================================
# 1 or 2 dimensions => create grid
if self.input_dim in [1,2]:
grid_thetas = kde.create_grid(self.sim, n_grid_pts)
grid_pdf_true = kde.calculate_true_pdf(self.sim, h, grid_thetas)
# Gaussian => sample from it
elif self.sim.name[:20] == 'MultivariateGaussian':
n_grid_pts = max(n_grid_pts, 10000)
print('Sampling %d points for MC estimate of TV/KL' % n_grid_pts)
grid_thetas = self.sim.sample(n_samples=n_grid_pts, cov_spread=c_spr)
grid_pdf_true = self.sim.pdf(grid_thetas)
# Other 3D+ => use MCMC to sample points for ground truth
else:
n_samps = int(n_grid_pts / 50)
grid_thetas = np.zeros((0, self.input_dim))
for i in range(50):
print('MCMC iter %d' % i)
samples = sample_mcmc(self.sim, h, burnin=200, n_samples=n_samps, progress_bar=True)
grid_thetas = np.vstack([grid_thetas, samples])
if self.ipy:
clear_output()
grid_pdf_true = kde.calculate_true_pdf(self.sim, h, grid_thetas)
# Create local_thetas as grid around minima
local_thetas = kde.create_grid(self.sim, n_local_pts, local=True)
if type(local_thetas) == list: # <- if simulator has multiple global minima
multiple_minima = True
local_pdf_true = []
for local_t in local_thetas:
local_pdf_true__ = kde.calculate_true_pdf(self.sim, h, local_t)
local_pdf_true.append(local_pdf_true__)
else:
multiple_minima = False
local_pdf_true = kde.calculate_true_pdf(self.sim, h, local_thetas)
out['grid_thetas'] = grid_thetas
out['grid_pdf_true'] = grid_pdf_true
out['local_thetas'] = local_thetas
out['local_pdf_true'] = local_pdf_true
out['multiple_minima'] = multiple_minima
return out
def calculate_distances(self, metrics='all', n_grid_pts=None, n_local_pts=20, c_spr=2.5):
"""
Returns:
-TV distances,
-KL divergences (true | approx),
-"Local" TV distances,
-Euclid. dist. between true minimizer t and estimated minimizer t'
under GP model.
metrics: The metrics to calculate. Pass 'all' to calculate all, else
pass a list containing anything in ['tv', 'kl', 'local', 'argmin'].
n_grid_pts: Number of grid (or sampled) points to calculate full TV/KL at.
If simulator is MVGaussian w/ dim. > 2, it is number of sampled
points for evaluating full TV/KL.
n_local_pts: Number of grid points for 'local' TV (only around minima).
c_spr: Factor to multiply Gaussian covariances by for Monte Carlo TV/KL calculations.
"""
if metrics != 'all':
chs = ['kl', 'tv', 'local', 'argmin']
assert isinstance(metrics, list), "pass metrics='all' or as a list of strings."
assert all([m in chs for m in metrics]), "choose from: %s" % chs
if n_grid_pts is None:
if self.input_dim <= 2:
n_grid_pts = 100
else:
n_grid_pts = 10000
use_metric = lambda m: (metrics == 'all') or (m in metrics)
# Find suitable value for the bandwidth, h ===============================
observed_data = self.observed_data
mx = observed_data[:, -1].max()
mn = observed_data[:, -1].min()
h = mn + (mx - mn) * self.h_mult
print('Using h=%.4f' % h)
# Calculate ground truth =================================================
ground_truth = self._get_ground_truth(h, n_grid_pts, n_local_pts, c_spr)
grid_thetas, local_thetas = ground_truth['grid_thetas'], ground_truth['local_thetas']
grid_pdf_true = ground_truth['grid_pdf_true']
local_pdf_true = ground_truth['local_pdf_true']
multiple_minima = ground_truth['multiple_minima']
        n_runs = len(list(self.final_acquisitions.values())[0])
        n_iter = len(list(self.final_acquisitions.values())[0][0].model.discrs) - self.num_init_pts
n_acqs = len(self.final_acquisitions.values())
# Progress bars in Jupyter ===============================================
if self.ipy:
sim_name = Text(value=self.sim.name+' metric calculation.')
run_progress = IntProgress(min=1, max=n_runs, value=1, description='Run [1/%d]' % n_runs)
run_timebox = FloatText(value='0.00', description='last run time:')
iter_progress = IntProgress(min=1, max=n_iter, value=1, description='Iter [1/%d]' % n_iter)
display(sim_name)
display(run_progress)
display(run_timebox)
display(iter_progress)
if len(self.final_acquisitions.keys()) > 1:
acq_timebox = FloatText(value='0.00', description='last acq. time:')
acq_progress = IntProgress(min=1, max=n_acqs, value=1, description='Acq. [1/%d]' % n_acqs)
display(acq_timebox)
display(acq_progress)
# acq_name: [avg distances, standard deviations] (over n_runs)
tv_dists = {a: None for a in self.final_acquisitions.keys()}
kl_divs = {a: None for a in self.final_acquisitions.keys()}
local_tvs = {a: None for a in self.final_acquisitions.keys()}
min_dists = {a: None for a in self.final_acquisitions.keys()}
# Do the actual metric calculations ======================================
for acq_num, (name, acquisitions) in enumerate(self.final_acquisitions.items()):
start_time = time.time()
if self.verbose and not self.ipy:
print('Calculating metrics for {}'.format(name))
T = n_iter
all_dists = np.zeros((n_runs, n_iter))
all_kls = np.zeros((n_runs, n_iter))
all_locals = np.zeros((n_runs, n_iter))
all_argmins = np.zeros((n_runs, n_iter))
err_msg = 'n_runs: %d; n_acqs: %d; acq: %s' % (n_runs, len(acquisitions), acquisitions[0].acq_name)
assert n_runs == len(acquisitions), err_msg
for run, acquisition in enumerate(acquisitions):
run_time = time.time()
for i in range(n_iter):
iter_time = time.time()
model = acquisition.model.get_state_at_earlier_iter(
T-i+self.num_init_pts
)
# GRID METRICS (KL/FULL TV) ==================================
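                    # Both pdfs are evaluated point-wise on the (possibly sampled) grid;
                    # the discretised TV is 0.5 * sum(|p_true - p_approx|) and the KL is
                    # entropy(p_true, p_approx), i.e. KL(true || approx).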
if use_metric('tv') or use_metric('kl'):
grid_pdf_m = kde.calculate_approx_pdf(
model, h, thetas=grid_thetas
)
if use_metric('tv'):
# TV distance, KL divergence
all_dists[run][n_iter-i-1] = 0.5 * np.sum(
np.abs(grid_pdf_true - grid_pdf_m)
)
if use_metric('kl'):
all_kls[run][n_iter-i-1] = entropy(
grid_pdf_true, grid_pdf_m
)
# ============================================================
# LOCAL TV ===================================================
if use_metric('local'):
if multiple_minima: # <- multiple global minima
tv = 0.
for j in range(len(local_thetas)):
local_grid = local_thetas[j]
lcl_pdf_true = local_pdf_true[j]
lcl_pdf_m = kde.calculate_approx_pdf(
model, h, local_grid
)
tv += 0.5 * np.sum(
np.abs(lcl_pdf_true - lcl_pdf_m)
)
tv /= len(local_thetas)
else: # <- only one global minimum
local_pdf_m = kde.calculate_approx_pdf(
model, h, local_thetas
)
tv = 0.5 * np.sum(
np.abs(local_pdf_true - local_pdf_m)
)
all_locals[run][n_iter-i-1] = tv
# ============================================================
# ARGMIN_DISTANCES ===========================================
if use_metric('argmin'):
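                        # Distance from the model's estimated minimizer to the nearest
                        # true minimizer; taking the min over sim.argmin handles
                        # simulators with several global minima.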
argmin_m = model.find_minimizer()
all_argmins[run][n_iter-i-1] = min(
[np.sqrt(np.sum((argmin_m - argmin)**2)) for argmin in self.sim.argmin.reshape(-1, self.input_dim)]
)
if self.verbose and not self.ipy:
t_time = time.time() - iter_time
acq_msg = 'Acq: %s [%d/%d] - ' % (name, acq_num+1, n_acqs)
run_msg = 'Run [%d/%d] - ' % (run+1, n_runs)
ite_msg = 'Iter [%d/%d] - ' % (i+1, n_iter)
msg = '%s%s%sTime: %.3f' % (acq_msg, run_msg, ite_msg, t_time)
print(msg)
elif self.ipy:
iter_progress.value += 1
iter_progress.description = 'Iter [%d/%d]' % (iter_progress.value, n_iter)
if self.ipy:
run_timebox.value = time.time() - run_time
run_progress.value += 1
run_progress.description = 'Run [%d/%d]' % (run_progress.value, n_runs)
iter_progress.value = 1
iter_progress.description = 'Iter [1/%d]' % n_iter
# Now calculate means and std. devs across the runs
avg_dists = np.mean(all_dists, axis=0) # <- full TV dists
std_dists = np.std(all_dists, axis=0)
tv_dists[name] = (avg_dists, std_dists)
avg_kls = np.mean(all_kls, axis=0) # <- full KL divs.
std_kls = np.std(all_kls, axis=0)
kl_divs[name] = (avg_kls, std_kls)
avg_locals = np.mean(all_locals, axis=0) # <- local TV dists
std_locals = np.std(all_locals, axis=0)
local_tvs[name] = (avg_locals, std_locals)
avg_argmins = np.mean(all_argmins, axis=0)
std_argmins = np.std(all_argmins, axis=0)
min_dists[name] = (avg_argmins, std_argmins)
elapsed = time.time() - start_time
if self.verbose and not self.ipy:
print('Acq. took %.3f seconds' % elapsed)
elif self.ipy:
if len(self.final_acquisitions.keys()) > 1:
acq_timebox.value = time.time() - start_time
acq_progress.value += 1
acq_progress.description = 'Acq. [%d/%d]' % (acq_progress.value, n_acqs)
run_progress.value = 1
run_progress.description = 'Run [%d/%d]' % (run_progress.value, n_runs)
# Cache results ==========================================================
cache_results = True
if cache_results:
if use_metric('tv'):
self.results_cache['TV'].append(tv_dists)
# ---------------------------------------------------------------------------
# seraphsix/models/destiny.py
# ---------------------------------------------------------------------------
from datetime import datetime, timezone
from dataclasses import dataclass, field
from dataclasses_json import dataclass_json, config, LetterCase
from marshmallow import fields
from typing import Optional, List, Dict, Any
from seraphsix import constants
from seraphsix.tasks.parsing import member_hash, member_hash_db
__all__ = [
"DestinyActivity",
"DestinyActivityDetails",
"DestinyActivityResponse",
"DestinyActivityResults",
"DestinyActivityStat",
"DestinyActivityStatValue",
"DestinyBungieNetUser",
"DestinyBungieNetUserInfo",
"DestinyCharacter",
"DestinyCharacterData",
"DestinyCharacterResponse",
"DestinyCharacterResults",
"DestinyGroup",
"DestinyGroupClanBannerData",
"DestinyGroupClanInfo",
"DestinyGroupD2ClanProgression",
"DestinyGroupDetail",
"DestinyGroupFeatures",
"DestinyGroupMember",
"DestinyGroupMemberKick",
"DestinyGroupMemberKickResponse",
"DestinyGroupMemberResults",
"DestinyGroupMembersResponse",
"DestinyGroupPendingMember",
"DestinyGroupPendingMemberResults",
"DestinyGroupPendingMembersResponse",
"DestinyGroupResponse",
"DestinyMemberGroup",
"DestinyMemberGroupResponse",
"DestinyMemberGroupResults",
"DestinyMembership",
"DestinyMembershipResponse",
"DestinyMembershipResults",
"DestinyPGCR",
"DestinyPGCREntry",
"DestinyPGCRExtended",
"DestinyPGCRResponse",
"DestinyPGCRWeapon",
"DestinyPlayer",
"DestinyProfile",
"DestinyProfileData",
"DestinyProfileResponse",
"DestinyProfileResults",
"DestinyResponse",
"DestinyResults",
"DestinySearchPlayerResponse",
"DestinyTokenErrorResponse",
"DestinyTokenResponse",
"DestinyUserInfo",
]
def encode_datetime(obj):
return obj.strftime(constants.DESTINY_DATE_FORMAT_API)
def decode_datetime(obj):
try:
return datetime.strptime(obj, constants.DESTINY_DATE_FORMAT)
except ValueError:
return datetime.strptime(obj, constants.DESTINY_DATE_FORMAT_MS)
def encode_datetime_timestamp(obj):
return str(int(datetime.timestamp(obj)))
def decode_datetime_timestamp(obj):
return datetime.fromtimestamp(float(obj)).astimezone(tz=timezone.utc)
def encode_id_string(obj):
return str(obj)
def decode_id_string(obj):
if obj:
return int(obj)
else:
return None
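# Sketch of how the helpers above are used: the encoder/decoder pairs let
# dataclasses_json round-trip Bungie's string-encoded IDs and timestamps, e.g.
#   info = DestinyUserInfo.from_dict(payload)   # e.g. "membershipId": "4611686018467238913" -> int
#   payload = info.to_dict()                    # and back to a string on serialisation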
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyUserInfo:
cross_save_override: int
is_public: bool
membership_type: int
membership_id: int = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
)
)
applicable_membership_types: Optional[List[int]] = None
last_seen_display_name: Optional[str] = field(
metadata=config(field_name="LastSeenDisplayName"), default=None
)
last_seen_display_name_type: Optional[int] = field(
metadata=config(field_name="LastSeenDisplayNameType"), default=None
)
display_name: Optional[str] = None
icon_path: Optional[str] = None
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyBungieNetUserInfo:
supplemental_display_name: str
icon_path: str
cross_save_override: int
is_public: bool
membership_type: int
membership_id: int = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
)
)
display_name: str
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyProfileData:
user_info: DestinyUserInfo
date_last_played: datetime = field(
metadata=config(
encoder=encode_datetime,
decoder=decode_datetime,
mm_field=fields.DateTime(format=constants.DESTINY_DATE_FORMAT),
)
)
versions_owned: int
character_ids: List[int]
season_hashes: List[int]
current_season_hash: int
current_season_reward_power_cap: int
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyProfile:
data: DestinyProfileData
privacy: int
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyProfileResults:
profile: DestinyProfile
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyCharacterData:
membership_id: int = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
)
)
membership_type: int
character_id: int = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
)
)
date_last_played: datetime = field(
metadata=config(
encoder=encode_datetime,
decoder=decode_datetime,
mm_field=fields.DateTime(format=constants.DESTINY_DATE_FORMAT),
)
)
minutes_played_this_session: str
minutes_played_total: str
light: int
stats: Dict[str, int]
race_hash: int
gender_hash: int
class_hash: int
race_type: int
class_type: int
gender_type: int
emblem_path: str
emblem_background_path: str
emblem_hash: int
emblem_color: Dict[str, int]
level_progression: Dict[str, int]
base_character_level: int
percent_to_next_level: int
title_record_hash: int
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyCharacter:
data: Dict[int, DestinyCharacterData]
privacy: int
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyActivityStatValue:
value: float
display_value: str
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyActivityStat:
basic: DestinyActivityStatValue
stat_id: Optional[str] = None
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyActivityDetails:
reference_id: int
director_activity_hash: int
instance_id: int = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
)
)
mode: int
modes: List[int]
is_private: bool
membership_type: int
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyActivity:
period: datetime = field(
metadata=config(
encoder=encode_datetime,
decoder=decode_datetime,
mm_field=fields.DateTime(format=constants.DESTINY_DATE_FORMAT),
)
)
activity_details: DestinyActivityDetails
values: Dict[str, DestinyActivityStat]
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyActivityResults:
activities: Optional[List[DestinyActivity]] = None
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyPlayer:
destiny_user_info: DestinyUserInfo
class_hash: int
race_hash: int
gender_hash: int
character_level: int
light_level: int
emblem_hash: int
character_class: Optional[str] = None
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyPGCRWeapon:
reference_id: int
values: Dict[str, DestinyActivityStat]
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyPGCRExtended:
values: Dict[str, DestinyActivityStat]
weapons: Optional[List[DestinyPGCRWeapon]] = None
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyPGCREntry:
standing: int
player: DestinyPlayer
score: DestinyActivityStat
character_id: int = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
)
)
values: Dict[str, DestinyActivityStat]
extended: DestinyPGCRExtended
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyPGCR:
period: datetime = field(
metadata=config(
encoder=encode_datetime,
decoder=decode_datetime,
mm_field=fields.DateTime(format=constants.DESTINY_DATE_FORMAT),
)
)
activity_details: DestinyActivityDetails
starting_phase_index: int
entries: List[DestinyPGCREntry]
teams: List[Any]
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyMembership:
# Basically the same as DestinyUserInfo with the addition of the "LastSeen" fields
# TODO Consolidate these two?
last_seen_display_name: str = field(
metadata=config(field_name="LastSeenDisplayName")
)
last_seen_display_name_type: int = field(
metadata=config(field_name="LastSeenDisplayNameType")
)
icon_path: str
cross_save_override: int
applicable_membership_types: List[int]
is_public: bool
membership_type: int
membership_id: int = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
)
)
display_name: str
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyBungieNetUser:
membership_id: int = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
)
)
unique_name: str
display_name: str
profile_picture: int
profile_theme: int
user_title: int
success_message_flags: str
is_deleted: bool
about: str
first_access: datetime = field(
metadata=config(
encoder=encode_datetime,
decoder=decode_datetime,
mm_field=fields.DateTime(format=constants.DESTINY_DATE_FORMAT),
)
)
last_update: datetime = field(
metadata=config(
encoder=encode_datetime,
decoder=decode_datetime,
mm_field=fields.DateTime(format=constants.DESTINY_DATE_FORMAT),
)
)
show_activity: bool
locale: str
locale_inherit_default: bool
show_group_messaging: bool
profile_picture_path: str
profile_theme_name: str
user_title_display: str
status_text: str
status_date: datetime = field(
metadata=config(
encoder=encode_datetime,
decoder=decode_datetime,
mm_field=fields.DateTime(format=constants.DESTINY_DATE_FORMAT),
)
)
psn_display_name: Optional[str] = None
xbox_display_name: Optional[str] = None
steam_display_name: Optional[str] = None
stadia_display_name: Optional[str] = None
twitch_display_name: Optional[str] = None
blizzard_display_name: Optional[str] = None
fb_display_name: Optional[str] = None
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyMembershipResults:
destiny_memberships: List[DestinyMembership]
bungie_net_user: Optional[DestinyBungieNetUser] = None
primary_membership_id: Optional[int] = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
),
default=None,
)
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupMember:
member_type: int
is_online: bool
last_online_status_change: datetime = field(
metadata=config(
encoder=encode_datetime_timestamp,
decoder=decode_datetime_timestamp,
mm_field=fields.DateTime(format=constants.DESTINY_DATE_FORMAT),
)
)
group_id: int = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
)
)
destiny_user_info: DestinyUserInfo
join_date: datetime = field(
metadata=config(
encoder=encode_datetime,
decoder=decode_datetime,
mm_field=fields.DateTime(format=constants.DESTINY_DATE_FORMAT),
)
)
bungie_net_user_info: Optional[DestinyBungieNetUserInfo] = None
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupFeatures:
maximum_members: int
maximum_memberships_of_group_type: int
capabilities: int
membership_types: List[int]
invite_permission_override: bool
update_culture_permission_override: bool
host_guidedGame_permission_override: int
update_banner_permission_override: bool
join_level: int
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupD2ClanProgression:
progression_hash: int
daily_progress: int
daily_limit: int
weekly_progress: int
weekly_limit: int
current_progress: int
level: int
level_cap: int
step_index: int
progress_to_next_level: int
next_level_at: int
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupClanBannerData:
decal_id: int
decal_color_id: int
decal_background_color_id: int
gonfalon_id: int
gonfalon_color_id: int
gonfalon_detail_id: int
gonfalon_detail_color_id: int
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupClanInfo:
d2_clan_progressions: Dict[int, DestinyGroupD2ClanProgression]
clan_callsign: str
clan_banner_data: DestinyGroupClanBannerData
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupDetail:
group_id: int = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
)
)
name: str
group_type: int
membership_id_created: int = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
)
)
creation_date: datetime = field(
metadata=config(
encoder=encode_datetime,
decoder=decode_datetime,
mm_field=fields.DateTime(format=constants.DESTINY_DATE_FORMAT),
)
)
modification_date: datetime = field(
metadata=config(
encoder=encode_datetime,
decoder=decode_datetime,
mm_field=fields.DateTime(format=constants.DESTINY_DATE_FORMAT),
)
)
about: str
tags: List[Any]
member_count: int
is_public: bool
is_public_topic_admin_only: bool
motto: str
allow_chat: bool
is_default_post_public: bool
chat_security: int
locale: str
avatar_image_index: int
homepage: int
membership_option: int
default_publicity: int
theme: str
banner_path: str
avatar_path: str
conversation_id: int = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
)
)
enable_invitation_messaging_for_admins: bool
ban_expire_date: datetime = field(
metadata=config(
encoder=encode_datetime,
decoder=decode_datetime,
mm_field=fields.DateTime(format=constants.DESTINY_DATE_FORMAT),
)
)
features: DestinyGroupFeatures
clan_info: DestinyGroupClanInfo
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroup:
detail: DestinyGroupDetail
founder: DestinyGroupMember
allied_ids: List[int]
alliance_status: int
group_join_invite_count: int
current_user_memberships_inactive_for_destiny: bool
current_user_member_map: Dict[Any, Any]
current_user_potential_member_map: Dict[Any, Any]
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyMemberGroup:
member: DestinyGroupMember
group: DestinyGroupDetail
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupPendingMember:
group_id: int = field(
metadata=config(
encoder=encode_id_string,
decoder=decode_id_string,
mm_field=fields.Integer(),
)
)
creation_date: datetime = field(
metadata=config(
encoder=encode_datetime,
decoder=decode_datetime,
mm_field=fields.DateTime(format=constants.DESTINY_DATE_FORMAT),
)
)
resolve_state: int
destiny_user_info: DestinyUserInfo
bungie_net_user_info: Optional[DestinyBungieNetUserInfo] = None
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupMemberKick:
group: DestinyGroupDetail
group_deleted: bool
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyResponse:
error_code: int = field(metadata=config(field_name="ErrorCode"))
error_status: str = field(metadata=config(field_name="ErrorStatus"))
message: str = field(metadata=config(field_name="Message"))
message_data: object = field(metadata=config(field_name="MessageData"))
throttle_seconds: int = field(metadata=config(field_name="ThrottleSeconds"))
response: Optional[object] = field(
metadata=config(field_name="Response"), default=None
)
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyResults:
results: List[object]
total_results: int
has_more: bool
query: Dict[str, int]
use_total_results: bool
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyMemberGroupResults(DestinyResults):
results: List[DestinyMemberGroup]
are_all_memberships_inactive: Dict[str, bool]
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyMemberGroupResponse(DestinyResponse):
response: Optional[DestinyMemberGroupResults] = field(
metadata=config(field_name="Response"), default=None
)
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupResponse(DestinyResponse):
response: Optional[DestinyGroup] = field(
metadata=config(field_name="Response"), default=None
)
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupMemberResults(DestinyResults):
results: List[DestinyGroupMember]
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupMembersResponse(DestinyResponse):
response: Optional[DestinyGroupMemberResults] = field(
metadata=config(field_name="Response"), default=None
)
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupPendingMemberResults(DestinyResults):
results: List[DestinyGroupPendingMember]
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupPendingMembersResponse(DestinyResponse):
response: Optional[DestinyGroupPendingMemberResults] = field(
metadata=config(field_name="Response"), default=None
)
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyGroupMemberKickResponse(DestinyResponse):
response: Optional[DestinyGroupMemberKick] = field(
metadata=config(field_name="Response"), default=None
)
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinySearchPlayerResponse(DestinyResponse):
response: Optional[List[DestinyUserInfo]] = field(
metadata=config(field_name="Response"), default=None
)
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyProfileResponse(DestinyResponse):
response: Optional[DestinyProfileResults] = field(
metadata=config(field_name="Response"), default=None
)
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyCharacterResults:
characters: DestinyCharacter
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyCharacterResponse(DestinyResponse):
response: Optional[DestinyCharacterResults] = field(
metadata=config(field_name="Response"), default=None
)
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyActivityResponse(DestinyResponse):
response: Optional[DestinyActivityResults] = field(
metadata=config(field_name="Response"), default=None
)
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyMembershipResponse(DestinyResponse):
response: Optional[DestinyMembershipResults] = field(
metadata=config(field_name="Response"), default=None
)
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DestinyPGCRResponse(DestinyResponse):
response: Optional[DestinyPGCR] = field(
metadata=config(field_name="Response"), default=None
)
@dataclass_json
@dataclass
class DestinyTokenResponse:
access_token: str
expires_in: int
membership_id: int
refresh_token: str
refresh_expires_in: int
token_type: str
error: Optional[str] = None
@dataclass_json
@dataclass
class DestinyTokenErrorResponse:
error: str
error_description: str
class UserMembership(object):
def __init__(self):
self.id = None
self.username = None
def __call__(self, details):
self.id = details.membership_id
self.username = details.display_name
def __repr__(self):
return f"<{type(self).__name__}: {self.username}-{self.id}>"
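# The plain classes below wrap the dataclass responses above: User collects a
# player's per-platform memberships into one object, Member adds clan membership
# details, and Player (and the truncated Game class) summarise post-game carnage
# report data.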
class User(object):
class Memberships(object):
def __init__(self):
self.bungie = UserMembership()
self.psn = UserMembership()
self.xbox = UserMembership()
self.blizzard = UserMembership()
self.steam = UserMembership()
self.stadia = UserMembership()
def __init__(self, details):
self.memberships = self.Memberships()
self.primary_membership_id = details.primary_membership_id
self.is_cross_save = self.primary_membership_id is not None
if hasattr(details, "destiny_user_info"):
self._process_membership(details.destiny_user_info)
elif hasattr(details, "destiny_memberships"):
for entry in details.destiny_memberships:
self._process_membership(entry)
if hasattr(details, "bungie_net_user_info"):
self._process_membership(details.bungie_net_user_info)
if hasattr(details, "bungie_net_user"):
self._process_membership(details.bungie_net_user)
def _process_membership(self, entry):
if not hasattr(entry, "membership_id"):
return
if not hasattr(entry, "membership_type"):
self.memberships.bungie(entry)
else:
if entry.membership_type == constants.PLATFORM_XBOX:
self.memberships.xbox(entry)
elif entry.membership_type == constants.PLATFORM_PSN:
self.memberships.psn(entry)
elif entry.membership_type == constants.PLATFORM_BLIZZARD:
self.memberships.blizzard(entry)
elif entry.membership_type == constants.PLATFORM_STEAM:
self.memberships.steam(entry)
elif entry.membership_type == constants.PLATFORM_STADIA:
self.memberships.stadia(entry)
elif entry.membership_type == constants.PLATFORM_BUNGIE:
self.memberships.bungie(entry)
def to_dict(self):
return dict(
bungie_id=self.memberships.bungie.id,
bungie_username=self.memberships.bungie.username,
xbox_id=self.memberships.xbox.id,
xbox_username=self.memberships.xbox.username,
psn_id=self.memberships.psn.id,
psn_username=self.memberships.psn.username,
blizzard_id=self.memberships.blizzard.id,
blizzard_username=self.memberships.blizzard.username,
steam_id=self.memberships.steam.id,
steam_username=self.memberships.steam.username,
stadia_id=self.memberships.stadia.id,
stadia_username=self.memberships.stadia.username,
primary_membership_id=self.primary_membership_id,
is_cross_save=self.is_cross_save,
)
class Member(User):
def __init__(self, details, user_details):
super().__init__(user_details)
self.join_date = details.join_date
self.is_online = details.is_online
self.last_online_status_change = details.last_online_status_change
self.group_id = details.group_id
self.member_type = details.member_type
if self.memberships.xbox.id:
self.platform_id = constants.PLATFORM_XBOX
self.member_id = self.memberships.xbox.id
elif self.memberships.psn.id:
self.platform_id = constants.PLATFORM_PSN
self.member_id = self.memberships.psn.id
elif self.memberships.blizzard.id:
self.platform_id = constants.PLATFORM_BLIZZARD
self.member_id = self.memberships.blizzard.id
elif self.memberships.steam.id:
self.platform_id = constants.PLATFORM_STEAM
self.member_id = self.memberships.steam.id
elif self.memberships.stadia.id:
self.platform_id = constants.PLATFORM_STADIA
self.member_id = self.memberships.stadia.id
def __repr__(self):
return f"<{type(self).__name__}: {self.platform_id}-{self.member_id}>"
def __str__(self):
return f"{self.platform_id}-{self.member_id}"
class Player(object):
def __init__(self, details):
self.membership_id = details.player.destiny_user_info.membership_id
self.membership_type = details.player.destiny_user_info.membership_type
self.name = details.player.destiny_user_info.display_name
self.completed = False
if details.values["completed"].basic.display_value == "Yes":
self.completed = True
try:
self.time_played = details.values["timePlayedSeconds"].basic.value
except KeyError:
self.time_played = 0.0
def __str__(self):
return f"<{type(self).__name__}: {self.membership_type}-{self.membership_id}>"
def __repr__(self):
return str(self.__dict__)
class Game(object):
    def __init__(self,
# ---------------------------------------------------------------------------
# Anikbh11/trident
# ---------------------------------------------------------------------------
"""
Ion fraction fields using Cloudy data.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2016, Trident Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#-----------------------------------------------------------------------------
from yt.fields.field_detector import \
FieldDetector
from yt.utilities.linear_interpolators import \
TrilinearFieldInterpolator, \
UnilinearFieldInterpolator
from yt.utilities.physical_constants import mh
from yt.funcs import mylog
import numpy as np
import h5py
import copy
import os
from trident.config import \
ion_table_filepath
from trident.line_database import \
LineDatabase, \
uniquify
from trident.roman import \
from_roman
H_mass_fraction = 0.76
to_nH = H_mass_fraction / mh
# set fractions to 0 for values lower than 1e-9,
# which is what is used in Sutherland & Dopita (1993).
fraction_zero_point = 1.e-9
zero_out_value = -30.
table_store = {}
class IonBalanceTable(object):
def __init__(self, filename=None, atom=None):
"""
Base class for building additional ion fields
Used to load in an HDF5 file that contains the values for the
elemental ionization state of the gas as a function of density,
        temperature, metallicity, and redshift (for metagalactic photoionizing
radiation field).
**Parameters**
:filename: string, optional
Name of the HDF5 file that contains the ionization table data.
Default: it uses the table specified in ~/.trident/config
:atom: string, optional
The atomic species for which you want to create an IonBalanceTable
Default: None
"""
if filename is None:
filename = ion_table_filepath
self.filename = filename
self.parameters = []
self.ion_fraction = []
self._load_hdf5_table(atom)
def _load_hdf5_table(self, atom):
"""
Read in the HDF5 ion balance table
"""
input = h5py.File(self.filename, 'r')
self.ion_fraction = input[atom][()]
self.ion_fraction[self.ion_fraction < np.log10(fraction_zero_point)] = zero_out_value
for par in range(1, len(self.ion_fraction.shape) - 1):
name = "Parameter%d" % par
self.parameters.append(input[atom].attrs[name])
self.parameters.append(input[atom].attrs['Temperature'])
input.close()
def _log_nH(field, data):
"""
One index of ion balance table is in log of density, so this translates
dataset's density values into the same format for indexing the table
N.B. All datasets *should* have an H_nuclei_density field defined if
created in the standard way in yt. Ray objects will also include
H_nuclei_density from the parent dataset to assure identical behavior
when ions are added to a ray as when they are added to an original dataset
before then being included in a ray.
"""
if ("gas", "H_nuclei_density") in data.ds.derived_field_list:
log_nH_field = np.log10(data["gas", "H_nuclei_density"])
else:
log_nH_field = np.log10(data["gas", "density"] * to_nH)
return log_nH_field
def _redshift(field, data):
"""
One index of ion balance table is in redshift, so this translates
dataset's redshift values into the same format for indexing the table
Note that if there already exists a "redshift" field on the dataset (e.g.,
on a LightRay dataset), that redshift field will be used instead. This can
lead to slight differences (1 part in 1e8) in the calculation of ion fields
when added to a LightRay than when added to a dataset because redshift
is continually varying (slightly) along the ray, whereas it is fixed for
a standard dataset.
"""
# Assure that redshift is defined for dataset--if not, assume z=0
try:
current_redshift = data.ds.current_redshift
except AttributeError:
current_redshift = 0.
return current_redshift * \
np.ones(data["gas", "density"].shape, dtype=data["gas", "density"].dtype)
def _log_T(field, data):
"""
One index of ion balance table is in log of temperature, so this translates
dataset's temperature values into the same format for indexing the table
"""
return np.log10(data["gas", "temperature"])
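# Usage note (a sketch, not the full registration code): the helpers above are
# meant to be registered as yt derived fields, e.g.
#   ds.add_field(("gas", "log_nH"), function=_log_nH, units="", sampling_type="local")
# so that the TrilinearFieldInterpolator built from the ion balance table can look
# up log density, redshift and log temperature for every cell; the actual
# registration happens inside the add_ion_*_field functions of this module.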
def add_ion_fields(ds, ions, ftype='gas',
ionization_table=None,
field_suffix=False,
line_database=None,
sampling_type='local',
particle_type=None):
"""
Preferred method for adding ion fields to a yt dataset.
Select ions based on the selection indexing set up in
:class:`~trident.LineDatabase.parse_subset_to_ions` function, that is,
by specifying a list of strings where each string represents an ion or
line. Strings are of one of three forms:
* <element>
* <element> <ion state>
* <element> <ion state> <line_wavelength>
If a line_database is selected, then the ions chosen will be a subset
of the ions present in the equivalent :class:`~trident.LineDatabase`,
nominally located in ``trident.__path__/data/line_lists``.
For each ion species selected, four fields will be added (example for
Mg II):
* Ion fraction field. e.g. ("gas", 'Mg_p1_ion_fraction')
* Number density field. e.g. ("gas", 'Mg_p1_number_density')
* Density field. e.g. ("gas", 'Mg_p1_density')
* Mass field. e.g. ("gas", 'Mg_p1_mass')
This function is the preferred method for adding ion fields to one's
dataset, but for more fine-grained control, one can also employ the
:class:`~trident.add_ion_fraction_field`,
:class:`~trident.add_ion_number_density_field`,
:class:`~trident.add_ion_density_field`,
:class:`~trident.add_ion_mass_field` functions individually.
Fields are added assuming collisional ionization equilibrium and
photoionization in the optically thin limit from a redshift-dependent
metagalactic ionizing background using the ionization_table specified.
**Parameters**
:ds: yt dataset object
This is the dataset to which the ion fraction field will be added.
:ions: list of strings
List of strings matching possible lines. Strings can be of the
form:
* Atom - Examples: "H", "C", "Mg"
* Ion - Examples: "H I", "H II", "C IV", "Mg II"
* Line - Examples: "H I 1216", "C II 1336", "Mg II 1240"
If set to 'all', creates **all** ions for the first 30 elements:
        (i.e. hydrogen to zinc). If set to 'all' with ``line_database``
keyword set, then creates **all** ions associated with the lines
specified in the equivalent :class:`~trident.LineDatabase`.
:ionization_table: string, optional
Path to an appropriately formatted HDF5 table that can be used to
compute the ion fraction as a function of density, temperature,
metallicity, and redshift. When set to None, it uses the table
specified in ~/.trident/config
Default: None
:field_suffix: boolean, optional
Determines whether or not to append a suffix to the field name that
        indicates what ionization table was used. Useful when generating
        ion_fields that already exist in a dataset.
:line_database: string, optional
Ions are selected out of the set of ions present in the line_database
constructed from the line list filename specified here. See
:class:`~trident.LineDatabase` for more information.
:ftype: string, optional
This is deprecated and no longer necessary since all relevant
fields are aliased to the 'gas' ftype.
Default: 'gas'
:sampling_type: string, optional
This is deprecated and no longer necessary.
Default: 'local'
:particle_type: boolean, optional
This is deprecated and no longer necessary.
Default: 'auto'
**Example**
To add ionized hydrogen, doubly-ionized Carbon, and all of the Magnesium
species fields to a dataset, you would run:
>>> import yt
>>> import trident
>>> ds = yt.load('path/to/file')
>>> trident.add_ion_fields(ds, ions=['H II', 'C III', 'Mg'])
"""
ion_list = []
if ionization_table is None:
ionization_table = ion_table_filepath
# Parse the ions given following the LineDatabase syntax
# If line_database is set, then use the underlying file as the line list
# to select ions from.
if line_database is not None:
line_database = LineDatabase(line_database)
ion_list = line_database.parse_subset_to_ions(ions)
# Otherwise, any ion can be selected (not just ones in the line list).
else:
if ions == 'all' or ions == ['all']:
for k, v in atomic_number.items():
for j in range(v+1):
ion_list.append((k, j+1))
else:
for ion in ions:
ionn = ion.split()
if len(ionn) >= 2:
ion_list.append((ionn[0], from_roman(ionn[1])))
elif len(ionn) == 1:
num_states = atomic_number[ionn[0]]
for j in range(num_states+1):
ion_list.append((ionn[0], j+1))
else:
raise RuntimeError("Cannot add a blank ion.")
# make sure ion list is unique
ion_list = uniquify(ion_list)
# adding X_p#_ion_mass field triggers the addition of:
# - X_P#_ion_fraction
# - X_P#_number_density
# - X_P#_density
for (atom, ion) in ion_list:
add_ion_mass_field(atom, ion, ds, ftype, ionization_table,
field_suffix=field_suffix, sampling_type=sampling_type)
def add_ion_fraction_field(atom, ion, ds, ftype="gas",
ionization_table=None,
field_suffix=False,
sampling_type='local',
particle_type=None):
"""
Add ion fraction field to a yt dataset for the desired ion.
.. note::
The preferred method for adding ion fields to a dataset is using
:class:`~trident.add_ion_fields`,
For example, add_ion_fraction_field('O', 6, ds) creates a field
called O_p5_ion_fraction for dataset ds, which represents 5-ionized
oxygen (O plus 5 = O VI = 'O', 6).
Fields are added assuming collisional ionization equilibrium and
photoionization in the optically thin limit from a redshift-dependent
metagalactic ionizing background using the ionization_table specified.
**Parameters**
:atom: string
Atomic species for desired ion fraction (e.g. 'H', 'C', 'Mg')
:ion: integer
Ion number for desired species (e.g. 1 = neutral, 2 = singly ionized,
3 = doubly ionized, etc.)
:ds: yt dataset object
This is the dataset to which the ion fraction field will be added.
:ftype: string, optional
This is deprecated and no longer necessary since all relevant
fields are aliased to | |
was not 'High'. ######################
if not test2_ok:
deficiency_type = ''
status = ''
global_EQR = np.nan
comment = 'not enough data for test2 (jan-may)'
elif test2_result > self.deficiency_limit or np.isnan(test2_result):
#### METHOD 1 ####
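                # Method 1 classifies seasonal oxygen deficiency from the O2 concentration
                # statistic (test1_result); method 2 further below classifies long-term
                # deficiency from the mean fraction of the area below the critical
                # O2 concentration (mean_affected_area_fraction).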
deficiency_type = 'seasonal'
global_EQR, status = self._calculate_global_EQR_from_indicator_value(water_body=water_body,
value=test1_result,
max_value=100)
elif test2_result <= self.deficiency_limit:
# TODO: only use station with wadep >= maxD-10
# mean_affected_area_fraction = by_date_deep.AREA_PERC_BELOW_CRITICAL_DEPH.mean()
# mean_critical_depth = by_date_deep.CRITICAL_DEPH.mean()
deficiency_type = 'longterm'
if self.ref_settings.get_value(variable=self.wb_id_header, water_body=water_body) == water_body:
#### METHOD 2 ####
global_EQR, status = self._calculate_global_EQR_from_indicator_value(water_body=water_body,
value=mean_affected_area_fraction,
max_value=100)
else:
#### METHOD 1 #####
                comment = 'no class boundaries defined for long-term deficiency in this water body, using definition of seasonal deficiency'
global_EQR, status = self._calculate_global_EQR_from_indicator_value(value=test1_result,
water_body=water_body)
else:
by_period = False
if by_period:
by_period = pd.DataFrame(
{self.wb_id_header: [water_body], 'WATER_BODY_NAME': [wb_name], 'WATER_TYPE_AREA': [type_area],
'GLOBAL_EQR': [global_EQR], 'STATUS': [status], 'DEEPEST_STATNS': deepest_statns_str,
'MAX_WADEP': wb_max_wadep,
'O2 conc test1': [test1_result], 'O2 conc test2': [test2_result],
'% Area below conc limit': [mean_affected_area_fraction], 'Depth of conc limit': [mean_critical_depth],
'max depth': [self.maxD[water_body]],
'test1_ok': [test1_ok], 'test1_month_list': [test1_month_list], 'test1_no_yr': [test1_no_yr],
'test2_ok': [test2_ok], 'test2_month_list': [test2_month_list], 'test2_no_yr': [test2_no_yr],
'DEFICIENCY_TYPE': [deficiency_type], 'CONC_LIMIT': [self.deficiency_limit],
'COMMENT': [comment], 'STATNS_below_limit': stations_below_limit_str})
by_period['variance'] = np.nan
by_period['p_ges'] = np.nan
return by_date_deep, by_date, False, by_period
###############################################################################
###############################################################################
class IndicatorPhytoplankton(IndicatorBase):
"""
    Class with methods in common for Phytoplankton indicators.
"""
def __init__(self, subset_uuid, parent_workspace_object, indicator):
super().__init__(subset_uuid, parent_workspace_object, indicator)
# [self.column_list.append(c) for c in ['MNDEP', 'MXDEP', 'DEPH', 'RLABO'] if c not in self.column_list]
[self.column_list.append(c) for c in ['MNDEP', 'MXDEP', 'DEPH'] if c not in self.column_list]
if self.name == 'indicator_chl':
if all(x in self.get_filtered_data(subset=self.subset, step='step_2').columns for x in
self.parameter_list[0:-1]):
# if data is available for all parameters, use all except SALT
self.indicator_parameter = self.parameter_list[0:-1]
elif all(x in self.get_filtered_data(subset=self.subset, step='step_2').columns for x in
self.parameter_list[1:-1]):
# if data is available for all parameters but the first (CPHL_INTEG), use the remaining two (CPHL_INTEG_CALC and CPHL_BTL) except SALT
self.indicator_parameter = self.parameter_list[1:-1]
self.column_list.remove(self.parameter_list[0])
elif self.parameter_list[0] in self.get_filtered_data(subset=self.subset, step='step_2').columns:
# if data is available only for the first parameter (CPHL_INTEG), use only CPHL_INTEG
self.indicator_parameter = [self.parameter_list[0]]
self.column_list = [c for c in self.column_list if c not in self.parameter_list[1:-1]]
self.salt_parameter = self.parameter_list[-1]
        self.notintegrate_typeareas = ['8', '9', '10', '11', '12-s', '12-n', '13', '14', '15', '24']
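        # Type areas where the indicator is evaluated from discrete surface samples
        # rather than from integrated (hose) samples.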
self.start_deph_max = 2
self.end_deph_max = 11
self.end_deph_min = 9
# Set dataframe to use
self._set_water_body_indicator_df(water_body=None)
# ==================================================================================================================
def _get_surface_df(self, df, type_area):
# TODO: This is now very slow since it has to look at each measurement. Can be written much faster.
# For a start you can turn of all the if-statements and just take discrete sample or integrated.
"""
First step before calculating EQR values and status. Collect the surface data that should be used.
:param df:
:param type_area:
:return: surface dataframe, list with indeces used, comment
"""
# --------------------------------------------------------------------------------------------------------------
def get_surface_sample(df):
"""
# Get a surface (<2 m) sample from available data
# 1. Bottle sample <2 m
# 2. Hose sample from <2 m
# 3. Shallowest sample from bottle or hose
:param df
:return add_df
"""
# ----------------------------------------------------------------------------------------------------------
def get_hose_surface_data(df):
"""
gets best available hose data for surface
:param df:
:return: add_df
"""
comment = ''
MXD = df.MXDEP.values[0]
MND = df.MNDEP.values[0]
if MXD <= max_surf_deph:
# max depth for hose data is from shallow depths, use this
value = df.loc[df.MXDEP == MXD, param]
add_df = df.loc[df.MXDEP == MXD].copy()
value_found = True
                elif np.isnan(MXD) and MND <= max_surf_deph:
# no max depth given, min depth for hose data is from shallow depths, use this
value = df.loc[df.MNDEP == MND, param]
add_df = df.loc[df.MNDEP == MND].copy()
value_found = True
elif MXD <= 11:
# use out of bounds hose data
value = df.loc[df.MXDEP == MXD, param]
add_df = df.loc[df.MXDEP == MXD].copy()
value_found = True
comment = 'Expert judgement. This is not true surface sample'
else:
return False
add_df['comment'] = comment
add_df['VALUE'] = value
if 'VALUE' not in add_df.columns:
raise Exception(message='no VALUE key')
return add_df
# ----------------------------------------------------------------------------------------------------------
comment = ''
max_surf_deph = self.start_deph_max
indicator_cols = ~df[indicator_list].isnull().all()
indicator_cols = indicator_cols[np.where(indicator_cols)[0]].index[:].tolist()
param = 'CPHL_BTL'
value_found = False
if param in indicator_cols:
# There is chlorophyll bottle data
idxmin = df.dropna(subset=[param])['DEPH'].idxmin(skipna=True)
minD_old = df.dropna(subset=[param]).DEPH.min()
minD = df.loc[idxmin, 'DEPH']
if minD <= max_surf_deph:
# There is chlorophyll bottle data from an accepted depth
value_old = df.loc[df.DEPH == minD, param]
add_df_old = df.loc[df.DEPH == minD, ].copy()
value = df.loc[idxmin, param]
add_df = df.loc[[idxmin], :].copy()
add_df['comment'] = comment
add_df['VALUE'] = value
value_found = True
else:
# no true surface sample from bottle, proceed to check hose data.
value_found = False
if not value_found:
# check hose data
param_list = [p for p in ['CPHL_INTEG', 'BIOV_CONC_ALL'] if p in indicator_list]
if len(param_list) == 1:
param = param_list[0]
if not df.dropna(subset=[param]).empty:
# There is hose data
if len(df) > 1:
# print('length of df > 1', df.STATN, df[param])
# df can be >1 if there are duplicates, then they have different sample_ID
add_df = False
for name, hose_group in df.groupby(['SAMPLE_ID']):
add_df_hose = get_hose_surface_data(hose_group)
if isinstance(add_df_hose, bool):
# no hose data in the bounds given, use bottle out of depth bounds?
param = 'CPHL_BTL'
if param in indicator_cols:
minD = df.DEPH.min()
value = df.loc[df.DEPH == minD, param]
add_df = df.loc[df.DEPH == minD,].copy()
value_found = True
comment = 'Expert judgement. This is not true surface sample'
add_df['comment'] = comment
add_df['VALUE'] = value
else:
                                        pass  # no bottle fallback available; add_df stays False
elif isinstance(add_df, pd.DataFrame):
add_df_next = add_df_hose
add_df = pd.concat([add_df, add_df_next])
if 'VALUE' not in add_df.columns:
                                        raise Exception('no VALUE key')
else:
add_df = add_df_hose
else:
add_df = get_hose_surface_data(df)
if 'VALUE' not in add_df.columns:
                            raise Exception('no VALUE key')
if isinstance(add_df, bool):
# no hose data in the bounds given, use bottle out of depth bounds?
value_found = False
else:
value_found = True
if not value_found:
# no hose data use bottle data from shallowest available depth
param = 'CPHL_BTL'
if param in indicator_cols:
minD = df.DEPH.min()
value = df.loc[df.DEPH == minD, param]
add_df = df.loc[df.DEPH == minD, ].copy()
value_found = True
comment = 'Expert judgement. This is not true surface sample'
add_df['comment'] = comment
add_df['VALUE'] = value
else:
return False
# TODO: add value to column VALUE above and change names here
if 'VALUE' not in add_df.columns:
                raise KeyError('key VALUE missing in add_df')
if self.name == 'indicator_chl':
add_df['CPHL_SOURCE'] = param
add_df.rename(columns={'comment': 'CPHL_comment', 'VALUE': 'CPHL'}, inplace=True)
else:
add_df.rename(columns={'comment': 'BIOV_comment', 'VALUE': self.indicator_parameter[0]}, inplace=True)
return add_df
# --------------------------------------------------------------------------------------------------------------
def get_integ_sample(df):
"""
:param df
:return add_df
"""
# ----------------------------------------------------------------------------------------------------------
def get_integrated(df):
MXD = df.MXDEP.values
MND = df.MNDEP.values
try:
MXD[0]
except IndexError:
print(MXD)
if len(MND) > 1:
print(df.WATER_BODY_NAME, MND, MXD)
pass
else:
MXD = MXD[0]
MND = MND[0]
                if np.isnan(MND):
MND = 0
if MND <= start_deph_max and MXD <= end_deph_max and MXD >= end_deph_min:
# integrated data is within depth bounds
value = df.loc[df.MXDEP == MXD, param]
if len(df[param]) != len(value.values):
print('df and value does not match', df[param], value.values)
add_df = df.loc[df[param] == value.values, ].copy()
add_df['comment'] = ''
add_df['VALUE'] = value
return add_df
elif MND <= end_deph_max and MXD <= end_deph_max:
# elif MND <= start_deph_max and MXD <= end_deph_max:
# check for smaller range integrated data,
# for delivery created on 20190307 the commented elif was used
value = df.loc[df.MXDEP == MXD, param]
add_df = df.loc[df[param] == value.values, ].copy()
add_df['comment'] = 'Expert judgement. Integrated data to shallow'
add_df['VALUE'] = value
return add_df
# ----------------------------------------------------------------------------------------------------------
start_deph_max = self.start_deph_max
end_deph_max = self.end_deph_max
end_deph_min = self.end_deph_min
indicator_cols = ~df[indicator_list].isnull().all()
indicator_cols = indicator_cols[np.where(indicator_cols)[0]].index[:].tolist()
comment = ''
add_df = False
param_list = [p for p in ['CPHL_INTEG', 'CPHL_INTEG_CALC', 'BIOV_CONC_ALL'] if p in indicator_cols]
if len(param_list) == 0:
# no hose data
if 'CPHL_BTL' in df.columns:
if not df.dropna(subset=['CPHL_BTL']).empty:
param = 'CPHL_BTL'
add_df = df.dropna(subset=[param])
add_df['VALUE'] = add_df[param]
add_df['comment'] = 'Expert judgement. Not integrated sample'
else:
return False
else:
# check hose data
param = param_list[0]
df_filtered = df.dropna(subset=[param]).copy()
df_filtered = df_filtered.loc[(df_filtered.MXDEP <= end_deph_max) & (df_filtered.MNDEP <= start_deph_max)].dropna(subset=[param])
if df_filtered is None or df_filtered.empty:
                    # max depth of integrated data is too large or no hose data at all
| |
import numpy as np
import torch
import itertools
from torchvision import datasets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from skimage.measure import compare_psnr, compare_ssim
from skimage.restoration import denoise_nl_means, estimate_sigma
import skimage.io as sio
from glow.glow import Glow
from dcgan.dcgan import Generator
import json
import os
import warnings
warnings.filterwarnings("ignore")
def solveInpainting(args):
if args.prior == 'glow':
GlowInpaint(args)
elif args.prior == 'dcgan':
GANInpaint(args)
elif args.prior == 'glowred':
GlowREDInpaint(args)
else:
raise "prior not defined correctly"
def np_to_torch(img_np):
"""Converts image in numpy.array to torch.Tensor.
From C x W x H [0..1] to C x W x H [0..1]
"""
# return torch.from_numpy(img_np)[None, :].float().cuda()
return torch.from_numpy(img_np).float().cuda()
def torch_to_np(img_torch):
"""Converts an image in torch.Tensor format to np.array.
From 1 x C x W x H [0..1] to C x W x H [0..1]
"""
return img_torch.detach().cpu().numpy() # add [0] later
def Denoiser(d_name, sigma_f, x_f):
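    # Denoises the batch with non-local means (the code below assumes a batch of
    # exactly two images), estimating each image's noise level with skimage's
    # estimate_sigma; the sigma_f argument is not used by the NLM branch.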
x = torch_to_np(x_f)
if d_name == 'nlm':
patch_kw = dict(patch_size=5, # 5x5 patches
patch_distance=6, # 13x13 search area
multichannel=True)
s0 = np.mean(estimate_sigma(x[0], multichannel=True))
s1 = np.mean(estimate_sigma(x[1], multichannel=True))
x0 = denoise_nl_means(x[0], h=s0, sigma=s0, fast_mode=False, **patch_kw)
x1 = denoise_nl_means(x[1], h=s1, sigma=s1, fast_mode=False, **patch_kw)
x = np.stack([x0, x1])
else:
raise "other denoisers not implemented"
x_f = np_to_torch(x)
return x_f
# Note: hyperparameter settings are enumerated with itertools.product; for example,
# list(itertools.product(['a', 'b', 'c'], ['d'], ['e', 'f'])) yields every combination
# of one element from each list, which is how the (gamma, alpha, beta) grid below is built.
def GlowREDInpaint(args):
# loopOver = zip(args.gamma)
hyperparams = [args.gamma, args.alpha, args.beta]
loopOver = list(itertools.product(*hyperparams))
for gamma, alpha, beta in loopOver:
skip_to_next = False # flag to skip to next loop if recovery is fails due to instability
n = args.size * args.size * 3
modeldir = "./trained_models/%s/glow" % args.model
test_folder = "./test_images/%s" % args.dataset
save_path = "./results/%s/%s" % (args.dataset, args.experiment)
# loading dataset
trans = transforms.Compose([transforms.Resize((args.size, args.size)), transforms.ToTensor()])
test_dataset = datasets.ImageFolder(test_folder, transform=trans)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batchsize, drop_last=False,
shuffle=False)
# loading glow configurations
config_path = modeldir + "/configs.json"
with open(config_path, 'r') as f:
configs = json.load(f)
# regularizor
gamma = torch.tensor(gamma, requires_grad=True, dtype=torch.float, device=args.device)
# alpha = args.alpha
# beta = args.beta
# getting test images
Original = []
Recovered = []
Masked = []
Mask = []
Residual_Curve = []
for i, data in enumerate(test_dataloader):
# getting batch of data
x_test = data[0]
x_test = x_test.clone().to(device=args.device)
n_test = x_test.size()[0]
assert n_test == args.batchsize, "please make sure that no. of images are evenly divided by batchsize"
# generate mask
mask = gen_mask(args.inpaint_method, args.size, args.mask_size)
mask = np.array([mask for i in range(n_test)])
mask = mask.reshape([n_test, 1, args.size, args.size])
mask = torch.tensor(mask, dtype=torch.float, requires_grad=False, device=args.device)
# loading glow model
glow = Glow((3, args.size, args.size),
K=configs["K"], L=configs["L"],
coupling=configs["coupling"],
n_bits_x=configs["n_bits_x"],
nn_init_last_zeros=configs["last_zeros"],
device=args.device)
glow.load_state_dict(torch.load(modeldir + "/glowmodel.pt"))
glow.eval()
# making a forward to record shapes of z's for reverse pass
_ = glow(glow.preprocess(torch.zeros_like(x_test)))
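            # z can be initialised in several ways: "random" draws z from a Gaussian,
            # while the *_filled / "noisy" strategies build a candidate image from the
            # masked input and encode it with a forward Glow pass to get the starting z.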
# initializing z from Gaussian
if args.init_strategy == "random":
z_sampled = np.random.normal(0, args.init_std, [n_test, n])
z_sampled = torch.tensor(z_sampled, requires_grad=True, dtype=torch.float, device=args.device)
# initializing z from image with noise filled only in masked region
elif args.init_strategy == "noisy_filled":
x_noisy_filled = x_test.clone().detach()
noise = np.random.normal(0, 0.2, x_noisy_filled.size())
noise = torch.tensor(noise, dtype=torch.float, device=args.device)
noise = noise * (1 - mask)
x_noisy_filled = x_noisy_filled + noise
x_noisy_filled = torch.clamp(x_noisy_filled, 0, 1)
z, _, _ = glow(x_noisy_filled - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
# initializing z from image with masked region inverted
elif args.init_strategy == "inverted_filled":
x_inverted_filled = x_test.clone().detach()
missing_x = x_inverted_filled.clone()
missing_x = missing_x.data.cpu().numpy()
missing_x = missing_x[:, :, ::-1, ::-1]
missing_x = torch.tensor(missing_x.copy(), dtype=torch.float, device=args.device)
missing_x = (1 - mask) * missing_x
x_inverted_filled = x_inverted_filled * mask
x_inverted_filled = x_inverted_filled + missing_x
z, _, _ = glow(x_inverted_filled - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
# initializing z from masked image ( masked region as zeros )
elif args.init_strategy == "black_filled":
x_black_filled = x_test.clone().detach()
x_black_filled = mask * x_black_filled
x_black_filled = x_black_filled * mask
z, _, _ = glow(x_black_filled - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
# initializing z from noisy complete image
elif args.init_strategy == "noisy":
x_noisy = x_test.clone().detach()
noise = np.random.normal(0, 0.05, x_noisy.size())
noise = torch.tensor(noise, dtype=torch.float, device=args.device)
x_noisy = x_noisy + noise
x_noisy = torch.clamp(x_noisy, 0, 1)
z, _, _ = glow(x_noisy - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
# initializing z from image with only noise in masked region
elif args.init_strategy == "only_noise_filled":
x_noisy_filled = x_test.clone().detach()
noise = np.random.normal(0, 0.2, x_noisy_filled.size())
noise = torch.tensor(noise, dtype=torch.float, device=args.device)
noise = noise * (1 - mask)
x_noisy_filled = mask * x_noisy_filled + noise
x_noisy_filled = torch.clamp(x_noisy_filled, 0, 1)
z, _, _ = glow(x_noisy_filled - 0.5)
z = glow.flatten_z(z).clone().detach()
z_sampled = z.clone().detach().requires_grad_(True)
else:
raise "Initialization strategy not defined"
# selecting optimizer
if args.optim == "adam":
optimizer = torch.optim.Adam([z_sampled], lr=args.lr, )
elif args.optim == "lbfgs":
optimizer = torch.optim.LBFGS([z_sampled], lr=args.lr, )
# metrics to record over training
psnr_t = torch.nn.MSELoss().to(device=args.device)
residual = []
x_f = (x_test * mask).clone()
u = torch.zeros_like(x_test)
# running optimizer steps
for t in range(args.steps):
def closure():
optimizer.zero_grad()
z_unflat = glow.unflatten_z(z_sampled, clone=False)
x_gen = glow(z_unflat, reverse=True, reverse_clone=False)
x_gen = glow.postprocess(x_gen, floor_clamp=False)
x_masked_test = x_test * mask
x_masked_gen = x_gen * mask
global residual_t
residual_t = ((x_masked_gen - x_masked_test) ** 2).view(len(x_masked_test), -1).sum(dim=1).mean()
z_reg_loss_t = gamma * z_sampled.norm(dim=1).mean()
residual_x = beta * ((x_gen - (x_f - u)) ** 2).view(len(x_gen), -1).sum(dim=1).mean()
loss_t = residual_t + z_reg_loss_t + residual_x
psnr = psnr_t(x_test, x_gen)
psnr = 10 * np.log10(1 / psnr.item())
print("\rAt step=%0.3d|loss=%0.4f|residual_t=%0.4f|residual_x=%0.4f|z_reg=%0.5f|psnr=%0.3f" % (
t, loss_t.item(), residual_t.item(), residual_x.item(), z_reg_loss_t.item(), psnr), end="\r")
loss_t.backward()
return loss_t
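                # Sketch of the intent of denoiser_step below (RED-style splitting):
                # x_f becomes a weighted average of the denoised x_f and the current
                # Glow output shifted by the dual variable u, and u accumulates the
                # residual x_gen - x_f, pulling the latent optimisation toward
                # denoiser-consistent images.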
def denoiser_step(x_f, u):
z_unflat = glow.unflatten_z(z_sampled, clone=False)
x_gen = glow(z_unflat, reverse=True, reverse_clone=False).detach()
x_gen = glow.postprocess(x_gen, floor_clamp=False)
x_f = 1 / (beta + alpha) * (beta * Denoiser(args.denoiser, args.sigma_f, x_f) + alpha * (x_gen + u))
u = u + x_gen - x_f
return x_f, u
optimizer.step(closure)
residual.append(residual_t.item())
if t % args.update_iter == args.update_iter - 1:
x_f, u = denoiser_step(x_f, u)
# try:
# optimizer.step(closure)
# residual.append(residual_t.item())
# if t % args.update_iter == 0:
# x_f, u = denoiser_step(x_f, u)
#
# except:
# skip_to_next = True
# break
if skip_to_next:
break
# getting recovered and true images
x_test_np = x_test.data.cpu().numpy().transpose(0, 2, 3, 1)
z_unflat = glow.unflatten_z(z_sampled, clone=False)
x_gen = glow(z_unflat, reverse=True, reverse_clone=False)
x_gen = glow.postprocess(x_gen, floor_clamp=False)
x_gen_np = x_gen.data.cpu().numpy().transpose(0, 2, 3, 1)
x_gen_np = np.clip(x_gen_np, 0, 1)
mask_np = mask.data.cpu().numpy()
x_masked_test = x_test * mask
x_masked_test_np = x_masked_test.data.cpu().numpy().transpose(0, 2, 3, 1)
x_masked_test_np = np.clip(x_masked_test_np, 0, 1)
Original.append(x_test_np)
Recovered.append(x_gen_np)
Masked.append(x_masked_test_np)
Residual_Curve.append(residual)
Mask.append(mask_np)
# freeing up memory for second loop
glow.zero_grad()
optimizer.zero_grad()
del x_test, x_gen, optimizer, psnr_t, z_sampled, glow, mask,
torch.cuda.empty_cache()
print("\nbatch completed")
if skip_to_next:
print("\nskipping current loop due to instability or user triggered quit")
continue
# metric evaluations
Original = np.vstack(Original)
Recovered = np.vstack(Recovered)
Masked = np.vstack(Masked)
Mask = np.vstack(Mask)
psnr = [compare_psnr(x, y) for x, y in zip(Original, Recovered)]
# print performance analysis
printout = "+-" * 10 + "%s" % args.dataset + "-+" * 10 + "\n"
printout = printout + "\t n_test = %d\n" % len(Recovered)
printout = printout + "\t inpaint_method = %s\n" % args.inpaint_method
printout = printout + "\t mask_size = %0.3f\n" % args.mask_size
printout = printout + "\t update_iter = %0.4f\n" % args.update_iter
printout = printout + "\t gamma = %0.6f\n" % gamma
printout = printout + "\t alpha = %0.6f\n" % alpha
printout = printout + "\t beta = %0.6f\n" % beta
printout = printout + "\t PSNR = %0.3f\n" % np.mean(psnr)
print(printout)
if args.save_metrics_text:
with open("%s_inpaint_glow_results.txt" % args.dataset, "a") as f:
f.write('\n' + printout)
# saving images
if args.save_results:
gamma = gamma.item()
file_names = [name[0].split("/")[-1].split(".")[0] for name in test_dataset.samples]
if args.init_strategy == 'random':
save_path = save_path + "/inpaint_%s_masksize_%0.4f_updateiter_%0.4f_gamma_%0.6f_alpha_%0.6f_beta_%0.6f_steps_%d_lr_%0.3f_init_std_%0.2f_optim_%s"
save_path = save_path % (
args.inpaint_method, args.mask_size, args.update_iter, gamma, alpha, beta, args.steps, args.lr, args.init_std, args.optim)
else:
save_path = save_path + "/inpaint_%s_masksize_%0.4f_updateiter_%0.4f_gamma_%0.6f_alpha_%0.6f_beta_%0.6f_steps_%d_lr_%0.3f_init_std_%0.2f_optim_%s"
save_path = save_path % (
args.inpaint_method, args.mask_size, args.update_iter, gamma, alpha, beta, args.steps, args.lr, args.init_strategy, args.optim)
if not os.path.exists(save_path):
os.makedirs(save_path)
else:
save_path_1 = save_path + "_1"
if not os.path.exists(save_path_1):
os.makedirs(save_path_1)
save_path = save_path_1
else:
save_path_2 = save_path + "_2"
if not os.path.exists(save_path_2):
os.makedirs(save_path_2)
save_path = save_path_2
_ = [sio.imsave(save_path + "/" + name + "_recov.jpg", x) for x, name in zip(Recovered, file_names)]
_ = [sio.imsave(save_path + "/" + | |
# coding=utf-8
__source__ = 'https://leetcode.com/problems/super-egg-drop/'
# Time: O(KlogN)
# Space: O(NK)
# DP
# dp(K, N) = 1 + min over 1 <= X <= N of max(dp(K-1, X-1), dp(K, N-X))
#
# Description: Leetcode # 887. Super Egg Drop
#
# You are given K eggs, and you have access to a building with N floors from 1 to N.
#
# Each egg is identical in function, and if an egg breaks, you cannot drop it again.
#
# You know that there exists a floor F with 0 <= F <= N such that any egg dropped at a floor higher than F will break,
# and any egg dropped at or below floor F will not break.
#
# Each move, you may take an egg (if you have an unbroken one) and drop it from any floor X (with 1 <= X <= N).
#
# Your goal is to know with certainty what the value of F is.
#
# What is the minimum number of moves that you need to know with certainty what F is,
# regardless of the initial value of F?
#
#
# Example 1:
#
# Input: K = 1, N = 2
# Output: 2
# Explanation:
# Drop the egg from floor 1. If it breaks, we know with certainty that F = 0.
# Otherwise, drop the egg from floor 2. If it breaks, we know with certainty that F = 1.
# If it didn't break, then we know with certainty F = 2.
# Hence, we needed 2 moves in the worst case to know what F is with certainty.
# Example 2:
#
# Input: K = 2, N = 6
# Output: 3
# Example 3:
#
# Input: K = 3, N = 14
# Output: 4
#
#
# Note:
#
# 1 <= K <= 100
# 1 <= N <= 10000
#
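# Worked check of Example 2 with the bottom-up view used below, where dp[k] is
# the maximum number of floors distinguishable with the current move count and
# k eggs (K = 2, N = 6):
#   moves=1 -> dp = [0, 1, 1]
#   moves=2 -> dp = [0, 2, 3]
#   moves=3 -> dp = [0, 3, 6] >= N, so the answer is 3 moves.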
import unittest
# 20ms 100%
class Solution(object):
def superEggDrop(self, K, N):
"""
:type K: int
:type N: int
:rtype: int
"""
# O(K) memory, O(KlogN) computation
dp = [1] * (K+1)
dp[0] = 0
steps = 1
while dp[K] < N:
for j in range(K, 0, -1):
dp[j] += dp[j-1] + 1
steps += 1
return steps
# dp = {(1, i) : 1 for i in range(1, K+1)}
# dp[(1, 0)] = 0
# j = 1
# while dp[(j, K)] < N:
# j += 1
# dp[(j, 0)] = 0
# for i in range(1, K+1):
# dp[(j, i)] = dp[(j-1, i-1)] + dp[(j-1, i)] + 1
# print dp
# return j
# 20ms 100%
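# Binomial view: f(x) = C(x,1) + C(x,2) + ... + C(x,K) is the maximum number of
# floors distinguishable with x moves and K eggs, so the answer is the smallest
# x with f(x) >= N, found below by binary search on x.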
class SolutionBinominal(object):
def superEggDrop(self, K, N):
def f(x):
ans = 0
r = 1
for i in range(1, K+1):
r *= x-i+1
r //= i
ans += r
if ans >= N: break
return ans
lo, hi = 1, N
while lo < hi:
mi = (lo + hi) // 2
if f(mi) < N:
lo = mi + 1
else:
hi = mi
return lo
# 2200 ms 11.11%
class SolutionBottomUpDP(object):
def superEggDrop(self, K, N):
# Right now, dp[i] represents dp(1, i)
dp = range(N+1)
for k in xrange(2, K+1):
# Now, we will develop dp2[i] = dp(k, i)
dp2 = [0]
x = 1
for n in xrange(1, N+1):
# Let's find dp2[n] = dp(k, n)
# Increase our optimal x while we can make our answer better.
# Notice max(dp[x-1], dp2[n-x]) > max(dp[x], dp2[n-x-1])
# is simply max(T1(x-1), T2(x-1)) > max(T1(x), T2(x)).
while x < n and max(dp[x-1], dp2[n-x]) > \
max(dp[x], dp2[n-x-1]):
x += 1
# The final answer happens at this x.
dp2.append(1 + max(dp[x-1], dp2[n-x]))
dp = dp2
return dp[-1]
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/super-egg-drop/solution/
# Approach 1: Dynamic Programming with Binary Search
Complexity Analysis
Time Complexity: O(K * N * log N)
Space Complexity: O(K * N)
# 152ms 12.44%
class Solution { //K eggs, N floors
Map<Integer, Integer> memo = new HashMap();
public int superEggDrop(int K, int N) {
return dp(K, N);
}
private int dp(int K, int N) {
if (!memo.containsKey(N * 100 + K)) {
int ans;
if (N == 0) ans = 0;
else if (K == 1) ans = N;
else {
int lo = 1, hi = N;
while (lo + 1 < hi) {
int x = (lo + hi) / 2;
int t1 = dp(K - 1, x - 1);
int t2 = dp(K, N - x);
if (t1 < t2) lo = x;
else if (t1 > t2) hi = x;
else lo = hi = x;
}
ans = 1 + Math.min(Math.max(dp(K - 1, lo - 1), dp(K, N-lo)),
Math.max(dp(K - 1, hi - 1), dp(K, N - hi)));
}
memo.put(N * 100 + K, ans);
}
return memo.get(N * 100 + K);
}
}
# Approach 2: Dynamic Programming with Optimality Criterion (bottom-up)
#
# Complexity Analysis
# Time Complexity: O(K * N).
# Space Complexity: O(N).
# 26ms 34.45%
class Solution {
public int superEggDrop(int K, int N) {
// Right now, dp[i] represents dp(1, i)
int[] dp = new int[N+1];
for (int i = 0; i <= N; ++i) dp[i] = i;
for (int k = 2; k <= K; ++k) {
// Now, we will develop dp2[i] = dp(k, i)
int[] dp2 = new int[N + 1];
int x = 1;
for (int n = 1; n <= N; ++n) {
// Let's find dp2[n] = dp(k, n)
// Increase our optimal x while we can make our answer better.
// Notice max(dp[x-1], dp2[n-x]) > max(dp[x], dp2[n-x-1])
// is simply max(T1(x-1), T2(x-1)) > max(T1(x), T2(x)).
while (x < n && Math.max(dp[x - 1], dp2[n - x]) > Math.max(dp[x], dp2[n-x-1])) x++;
// The final answer happens at this x.
dp2[n] = 1 + Math.max(dp[x-1], dp2[n-x]);
}
dp = dp2;
}
return dp[N];
}
}
Approach 3: Mathematical
# 4ms 100%
class Solution {
public int superEggDrop(int K, int N) {
int lo = 1, hi = N;
while (lo < hi) {
int mid = lo + (hi - lo) / 2;
if (f(mid, K, N) < N) lo = mid + 1;
else hi = mid;
}
return lo;
}
private int f(int x, int K, int N) {
int ans = 0, r = 1;
for (int i = 1; i <= K; i++) {
r *= x - i + 1;
r /= i;
ans += r;
if (ans > N) break;
}
return ans;
}
}
# DP:
https://leetcode.com/problems/super-egg-drop/discuss/158974/C%2B%2BJavaPython-2D-and-1D-DP-O(KlogN)
# Drop eggs is a very classical problem.
# Some people may come up with an O(KN^2) idea,
# where dp[K][N] = 1 + min over i in 1...N of max(dp[K - 1][i - 1], dp[K][N - i]).
# However this idea is brute force, because it checks every possibility.
#
# So I consider this problem in a different way:
# dp[M][K]means that, given K eggs and M moves,
# what is the maximum number of floor that we can check.
#
# The dp equation is:
# dp[m][k] = dp[m - 1][k - 1] + dp[m - 1][k] + 1,
# which means we use 1 move to drop at a floor:
# if the egg breaks, we can still check dp[m - 1][k - 1] floors below it;
# if the egg doesn't break, we can still check dp[m - 1][k] floors above it.
#
# dp[m][k] is similar to a sum of binomial coefficients, so it grows exponentially towards N.
#
# Time Complexity:
# O(KlogN) Time, O(NK) Space
# Think of Example 3: with binary search, split at the 7th floor;
# it then becomes 2 sub-questions, and the lower-level one is Example 2.
# Example 2 splits at its mid (3rd floor) and becomes part of Example 1.
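# Worked trace for Example 3 (K = 3, N = 14) using dp[m][k]:
#   dp[m][1] = m;  dp[m][2] = 1, 3, 6, 10, ...;  dp[m][3] = 1, 3, 7, 14, ...
#   dp[4][3] = dp[3][2] + dp[3][3] + 1 = 6 + 7 + 1 = 14 >= 14, so 4 moves suffice.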
class Solution {
// 2-d DP
# 8ms 75.45%
public int superEggDrop(int K, int N) {
int[][] floors = new int[K + 1][N + 1];
for (int j = 1; j < N + 1; j++) {
for (int i = 1; i < K + 1; i++) {
floors[i][j] = floors[i-1][j-1] + floors[i][j-1] + 1;
if (floors[i][j] >= N) return j;
}
}
return N;
}
//100% - 1d dp: Dynamic Program
# 5ms 86.63%
public int superEggDrop(int K, int N) {
int[] floors = new int[K+1];
int moves = 0;
while ( | |
#!/usr/bin/env python
"""
Main process for updating audio file metadata from parsed configuration files.
All options defining specific metadata fields (``--artist``, ``--year``, etc.) override any
corresponding information fields found in configuration files from options ``--info`` or ``--all``.
Applied changes are listed in the ``--output`` file.
"""
import argparse
import logging
import os
import sys
from typing import List, Optional, Union, Tuple
from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL, NOTSET
import aiu
from aiu import DEFAULT_EXCEPTIONS_CONFIG, DEFAULT_STOPWORDS_CONFIG, LOGGER, TRACE, __meta__, tags as t
from aiu.parser import (
ALL_IMAGE_EXTENSIONS,
ALL_PARSER_EXTENSIONS,
FORMAT_MODE_ANY,
FORMAT_MODE_YAML,
FORMAT_MODES,
PARSER_MODES,
get_audio_files,
load_config,
parse_audio_config,
save_audio_config
)
from aiu.updater import merge_audio_configs, apply_audio_config, update_file_names
from aiu.utils import backup_files, look_for_default_file, save_cover_file, validate_output_file, log_exception
from aiu.typedefs import AudioConfig, Duration
from aiu.youtube import fetch_files, get_metadata
def cli():
_PROG = "aiu"
_NAME = "Audio Info Updater ({})".format(_PROG)
_DESC = "{}. {}".format(_NAME, __doc__)
_HELP_FORMAT = """{} Format Modes
Below are the applicable format modes for format/parser options.
Note that not all modes necessarily apply to both.
Refer to their option of applicable values.
``any``
Attempts to automatically determine which of the formats to apply based
on the contents of the provided information file. If this is causing
problems, switch to explicit specification of the provided format.
``csv``
Expects a header row indicating the fields provided on the following lines.
Then, each line provides an entry to attempt matching against an audio file.
``tab``
Takes a plain list of (any number of) tab-delimited rows where each one
represents a potential audio file to find. Rows are expected to have the
following format (bracketed fields are optional):
[track] title duration
``json`` / ``yaml``
Standard representation of corresponding formats of a list of objects.
Each object provides fields and values to attempt match against audio files.
Field names correspond to the lower case values.
``list``
Parses a plain list with each field placed on a separate row. Rows are
expected to form contiguous groups of corresponding fields, as presented
below, one group per audio file to attempt to match. Corresponding fields
must be provided for each entry. Either one or both of the TRACK/DURATION
fields are mandatory.
[track-1]
title-1
[duration-1]
[track-2]
title-2
[duration-2]
...
""".format(_NAME)
try:
ap = argparse.ArgumentParser(prog=_PROG, description=_DESC, add_help=False,
formatter_class=lambda prog: argparse.HelpFormatter(prog, width=120))
gen_args = ap.add_argument_group(title="General Arguments",
description="Arguments that provides information about the application "
"or usage related details.")
gen_args.add_argument("--help", "-h", action="help", help="Display this help message.")
gen_args.add_argument("--help-format", action="store_true",
help="Display additional help details about formatter/parser modes.")
gen_args.add_argument("--version", action="version", version=__meta__.__version__,
help="Display the program's version.")
parser_args = ap.add_argument_group(title="Parsing Arguments",
description="Arguments that control parsing methodologies and "
"configurations to update matched audio files metadata.")
parser_args.add_argument("-l", "--link", "--youtube", dest="link",
help="YouTube music link from where to retrieve songs and album metadata. "
"When provided, other options will override whichever tag information was "
"automatically obtained from the URL reference.")
parser_args.add_argument("-p", "--path", "-f", "--file", default=".", dest="search_path",
help="Path where to search for audio and metadata info files to process. "
"Can either be a directory path where all containing audio files will be "
"processed or a single audio file path to process by itself "
"(default: %(default)s).")
parser_args.add_argument("-i", "--info", dest="info_file",
help="Path to audio metadata information file to be applied to format matched with "
"audio files. (default: looks for text file compatible format named `info`, "
"`config` or `meta` under `path`, uses the first match with ``any`` format).")
parser_args.add_argument("-a", "--all", dest="all_info_file",
help="Path to audio info file of metadata to apply to every matched audio files. "
"This is mainly to apply shared tags across a list of matched audio files such "
"as the same ARTIST, ALBUM or YEAR values for a set of grouped tracks. "
"(default: looks for text file compatible format named `all`, `any` or "
"`every` under `path`, uses the first match with ``any`` format).")
parser_args.add_argument("-P", "--parser", dest="parser_mode",
default="any", choices=[p.name for p in PARSER_MODES],
help="Parsing mode to enforce. See also ``--help-format`` for details. "
"(default: %(default)s)")
parser_args.add_argument("-o", "--output", "--output-file", dest="output_file",
help="Location where to save applied output configurations (file or directory). "
"(default: ``output.yml`` located under ``--outdir``, ``--path`` directory "
" or parent directory of ``--file``, whichever comes first).")
parser_args.add_argument("-O", "--outdir", "--output-dir", dest="output_dir",
help="Output directory of applied configuration if not defined by ``--output`` "
"and download location of files referenced by ``--link``.")
parser_args.add_argument("-F", "--format, --output-format", dest="output_mode",
default=FORMAT_MODE_YAML, choices=[f.name for f in FORMAT_MODES],
help="Output format of applied metadata details. "
"See also ``--help-format`` for details. (default: %(default)s)")
parser_args.add_argument("-E", "--exceptions", default=DEFAULT_EXCEPTIONS_CONFIG, dest="exceptions_config",
help="Path to custom exceptions configuration file "
"(default: ``config/exceptions.cfg``). "
"During formatting of fields, words matched against keys in the file will be "
"replaced by the specified value instead of default word capitalization.")
parser_args.add_argument("-S", "--stopwords", default=DEFAULT_STOPWORDS_CONFIG, dest="stopwords_config",
help="Path to custom stopwords configuration file "
"(default: ``config/stopwords.cfg``). "
"When formatting fields of ID3 tags and file names, the resulting words "
"matched against listed words from that file will be converted to lowercase "
"instead of the default word capitalization.")
op_args = ap.add_argument_group(title="Operation Arguments",
description="Arguments to control which subset of operations to apply on "
"matched audio files and parsed metadata.")
op_args.add_argument("--dry", action="store_true",
help="Do not execute any modification, just pretend. "
"(note: works best when combined with outputs of ``--verbose`` or ``--debug``)")
op_args.add_argument("-b", "--backup", action="store_true",
help="Create a backup of files to be modified. Files are saved in directory named "
"``backup`` under the ``--path`` or parent directory of ``--file``. "
"No backup is accomplished otherwise.")
op_args.add_argument("--rename-title", "--RT", action="store_true",
help="Specifies whether to rename matched audio files with their corresponding ``TITLE``. "
"This is equivalent to ``--rename-format '%%(TITLE)s'``.")
op_args.add_argument("--prefix-track", "--PT", action="store_true",
help="Specifies whether to prefix the file name with ``TRACK`` when combined with "
"``--rename-title`` option. "
"This is equivalent to ``--rename-format '%%(TRACK)s %%(TITLE)s'``.")
op_args.add_argument("--rename-format", "--RF",
help="Specify the specific ``FORMAT`` to employ for renaming files. "
"Formatting template follows the ``%%(<TAG>)`` syntax. "
"Supported ``<TAG>`` fields are listed in ID3 TAG names except image-related items.")
op_args.add_argument("--no-fetch", "--nF", action="store_true",
help="Must be combined with ``--link`` option. Enforces parser mode ``youtube``. "
"When provided, instead of downloading music files, only metadata information will "
"be retrieved from the link in order to obtain ID3 audio tag metadata and apply them "
"to referenced pre-existing audio files in the search path. The metadata retrieved "
"this way replaces corresponding ID3 tag details otherwise provided by ``--info``.")
op_args.add_argument("--no-info", "--nI", action="store_true",
help="Disable auto-detection of 'info' common audio metadata information file names. "
"Useful when detection of an existing file on search path should be avoided. "
"Ignored if ``--info`` is explicitly specified.")
op_args.add_argument("--no-all", "--nA", action="store_true",
help="Disable auto-detection of 'all' common audio metadata information file names. "
"Useful when detection of an existing file on search path should be avoided. "
"Ignored if ``--all`` is explicitly specified.")
op_args.add_argument("--no-cover", "--nC", action="store_true",
help="Disable auto-detection of common cover image file names. "
"Useful when detection of an existing file on search path should be avoided. "
"Ignored if ``--cover`` is explicitly specified.")
op_args.add_argument("--no-rename", "--nR", action="store_true",
help="Do not apply any file rename operation. (note: implied when ``--dry`` is provided)")
op_args.add_argument("--no-update", "--nU", action="store_true",
help="Do not apply any ID3-Tags updates. (note: implied when ``--dry`` is provided)")
op_args.add_argument("--no-output", "--nO", action="store_true",
help="Do not save results to output configurations file. (see: ``--output``)")
op_args.add_argument("--no-result", "--nP", action="store_true",
help="Do not print results to console output. "
"Be aware that result will be reported only if logging level is ``--verbose`` "
"or ``--debug``. This flag is redundant for more restrictive logging levels.")
id3_args = ap.add_argument_group(title="ID3 Tags Arguments",
description="Options to directly provide specific ID3 tag values to one or "
"many audio files matched instead of through ``--info`` "
"and ``--all`` configuration files.")
id3_args.add_argument("-c", "--cover", "-I", "--image", dest="cover_file",
help="Path where to find image file to use as audio file album cover. "
"(default: looks for image of compatible format named "
"`cover`, `artwork`, `art` or `image` under ``--path`` or parent directory "
"of ``--file``, using the first match).")
id3_args.add_argument("-T", "--title", dest=t.TAG_TITLE,
help="Name to apply as ``TAG_TITLE`` metadata attribute to file(s).")
id3_track = id3_args.add_mutually_exclusive_group()
id3_track.add_argument("-N", "--track", "--track-number", | |
self),
alignment=core.Qt.AlignHCenter | core.Qt.AlignVCenter)
hsizer.addSpacing(102) # 82)
hsizer.addWidget(qtw.QLabel(self.master.captions['C_WID'], self),
alignment=core.Qt.AlignVCenter)
hsizer.addSpacing(8) # 84)
hsizer.addWidget(qtw.QLabel(self.master.captions['C_IND'], self),
alignment=core.Qt.AlignVCenter)
hsizer.addWidget(qtw.QLabel(self.master.captions['C_SEQ'], self),
alignment=core.Qt.AlignVCenter)
hsizer.addStretch()
self.sizer.addLayout(hsizer)
pnl = qtw.QFrame(self)
self.scrl = qtw.QScrollArea(self)
self.scrl.setWidget(pnl)
self.scrl.setAlignment(core.Qt.AlignBottom)
self.scrl.setWidgetResizable(True)
self.bar = self.scrl.verticalScrollBar()
self.gsizer = qtw.QVBoxLayout()
self.rownum = 0 # indicates the number of rows in the gridlayout
self.data, self.checks = [], []
self.col_textids, self.col_names = self.master.col_textids, self.master.col_names
for ix, item in enumerate(self.master.book.page.column_info):
item.append(ix)
self.add_row(*item)
box = qtw.QVBoxLayout()
box.addLayout(self.gsizer)
box.addStretch()
pnl.setLayout(box)
self.sizer.addWidget(self.scrl)
buttonbox = qtw.QDialogButtonBox()
btn = buttonbox.addButton(self.master.captions['C_ADDCOL'],
qtw.QDialogButtonBox.ActionRole)
btn.clicked.connect(self.add_column)
btn = buttonbox.addButton(self.master.captions['C_REMCOL'],
qtw.QDialogButtonBox.ActionRole)
btn.clicked.connect(self.remove_columns)
buttonbox.addButton(qtw.QDialogButtonBox.Ok)
buttonbox.addButton(qtw.QDialogButtonBox.Cancel)
buttonbox.accepted.connect(self.accept)
buttonbox.rejected.connect(self.reject)
hsizer = qtw.QHBoxLayout()
hsizer.addStretch()
hsizer.addWidget(buttonbox)
hsizer.addStretch()
self.sizer.addLayout(hsizer)
self.setLayout(self.sizer)
self.initializing = False
def add_row(self, name='', width='', is_flag=False, colno=''):
"""create a row for defining column settings
"""
self.rownum += 1
rownum = self.rownum
colnum = 0
check = qtw.QCheckBox(self)
ghsizer = qtw.QHBoxLayout()
ghsizer.addWidget(check, rownum)
self.checks.append(check)
colnum += 1
w_name = qtw.QComboBox(self)
w_name.addItems(self.col_names)
w_name.setEditable(True)
if name:
w_name.setCurrentIndex(self.col_textids.index(name))
else:
w_name.clearEditText()
w_name.editTextChanged.connect(self.on_text_changed)
ghsizer.addWidget(w_name, rownum)
colnum += 1
hsizer = qtw.QHBoxLayout()
hsizer.addSpacing(20)
w_width = qtw.QSpinBox(self)
w_width.setMaximum(999)
if width:
w_width.setValue(width)
w_width.setFixedWidth(48)
hsizer.addWidget(w_width)
hsizer.addSpacing(20)
ghsizer.addLayout(hsizer, rownum)
colnum += 1
hsizer = qtw.QHBoxLayout()
hsizer.addSpacing(40)
w_flag = qtw.QCheckBox(self)
w_flag.setChecked(is_flag)
w_flag.setFixedWidth(32)
hsizer.addWidget(w_flag)
hsizer.addSpacing(24)
ghsizer.addLayout(hsizer, rownum)
colnum += 1
hsizer = qtw.QHBoxLayout()
hsizer.addSpacing(68)
val = self.rownum if colno == '' else colno + 1
w_colno = qtw.QSpinBox(self)
w_colno.setMinimum(1)
w_colno.setMaximum(99)
w_colno.setValue(val)
w_colno.setFixedWidth(36)
hsizer.addWidget(w_colno)
hsizer.addStretch()
ghsizer.addLayout(hsizer, rownum)
self.gsizer.addLayout(ghsizer)
old_colno = "new" if colno == '' else colno
self.data.append((w_name, w_width, w_colno, w_flag, old_colno))
vbar = self.scrl.verticalScrollBar()
vbar.setMaximum(vbar.maximum() + 62)
vbar.setValue(vbar.maximum())
def delete_row(self, rownum):
"""remove a column settings row
"""
self.rownum -= 1
check = self.checks[rownum]
for widgets in self.data[rownum:]:
w_colno = widgets[2]
w_colno.setValue(w_colno.value() - 1)
w_name, w_width, w_colno, w_flag, _ = self.data[rownum]
for widget in check, w_name, w_width, w_colno, w_flag:
self.gsizer.removeWidget(widget)
widget.close()
self.gsizer.removeItem(self.gsizer.itemAt(rownum))
self.checks.pop(rownum)
self.data.pop(rownum)
def on_text_changed(self, text):
"adjust column width based on length of column title"
for w_name, w_width, *dummy in self.data:
column_text = w_name.currentText()
if column_text == text:
w_width.setValue(10 * len(text))
break
def add_column(self):
"""nieuwe rij aanmaken in self.gsizer"""
self.add_row()
def remove_columns(self):
"""alle aangevinkte items verwijderen uit self.gsizer"""
test = [x.isChecked() for x in self.checks]
checked = [x for x, y in enumerate(test) if y]
if not any(test):
return
if ask_question(self.parent, 'Q_REMCOL'):
for row in reversed(checked):
self.delete_row(row)
def accept(self):
"""save the changed settings and leave
"""
data = [(x.currentText(), y.value(), a.text(), b.isChecked(), c)
for x, y, a, b, c in self.data]
ok, cancel = self.master.accept_columnsettings(data)
if ok:
super().accept()
# elif cancel:
# super().reject()
class NewColumnsDialog(qtw.QDialog):
"""dialoog voor aanmaken nieuwe kolom-ids
"""
def __init__(self, parent, master):
self.parent = parent
self.master = master
self.initializing = True
super().__init__(parent)
self.setWindowTitle(self.master.title)
self.sizer = qtw.QVBoxLayout()
text = '\n'.join((self.master.captions['T_TRANS'].split(' / ')))
hsizer = qtw.QHBoxLayout()
hsizer.addWidget(qtw.QLabel(text, self))
self.sizer.addLayout(hsizer)
# create a header for the id and one for each supported language
gsizer = qtw.QGridLayout()
row = col = 0
gsizer.addWidget(qtw.QLabel('text id', self), row, col)
for name in self.master.dialog_data['languages']:
col += 1
gsizer.addWidget(qtw.QLabel(name.split('.')[0].title(), self), row, col)
# create a row for each new title and copy its value
# into the column that matches the current language setting;
# also make that text entry read-only
self.widgets = []
for item in self.master.dialog_data['new_titles']:
row += 1
entry_row = []
for col in range(len(self.master.dialog_data['languages']) + 1):
entry = qtw.QLineEdit(self)
if col == 0:
text = self.master.dialog_data['textid']
else:
text = item
if col == self.master.dialog_data['colno']:
entry.setEnabled(False)
entry.setText(text)
gsizer.addWidget(entry, row, col)
entry_row.append(entry)
self.widgets.append(entry_row)
self.sizer.addLayout(gsizer)
buttonbox = qtw.QDialogButtonBox()
buttonbox.addButton(qtw.QDialogButtonBox.Ok)
buttonbox.addButton(qtw.QDialogButtonBox.Cancel)
buttonbox.accepted.connect(self.accept)
buttonbox.rejected.connect(self.reject)
hsizer = qtw.QHBoxLayout()
hsizer.addStretch()
hsizer.addWidget(buttonbox)
hsizer.addStretch()
self.sizer.addLayout(hsizer)
self.setLayout(self.sizer)
self.initializing = False
def accept(self):
"""save the changed settings and leave
"""
entries = [[col.text() for col in row] for row in self.widgets]
ok = self.master.accept_newcolumns(entries)
if ok:
super().accept()
class ExtraSettingsDialog(qtw.QDialog):
"""dialoog voor invullen tool specifieke instellingen
"""
def __init__(self, parent, master):
self.parent = parent
self.master = master
# self.title = self.master.title
self.captions = self.master.captions
super().__init__(parent)
## self.resize(680, 400)
self.setWindowTitle(self.master.title)
self.sizer = qtw.QVBoxLayout()
pnl = qtw.QFrame()
vsizer = qtw.QVBoxLayout()
hsizer = qtw.QHBoxLayout()
text = qtw.QLabel(self.master.captions['S_PLGNAM'], self)
self.t_program = qtw.QLineEdit(self.master.book.page.settings[shared.SettType.PLG.value],
self)
hsizer.addWidget(text)
hsizer.addWidget(self.t_program)
vsizer.addLayout(hsizer)
hsizer = qtw.QHBoxLayout()
text = qtw.QLabel(self.master.captions['S_PNLNAM'], self)
self.t_title = qtw.QLineEdit(self.master.book.page.settings[shared.SettType.PNL.value],
self)
hsizer.addWidget(text)
hsizer.addWidget(self.t_title)
vsizer.addLayout(hsizer)
hsizer = qtw.QHBoxLayout()
self.c_rebuild = qtw.QCheckBox(self.master.captions['T_MAKE'].format(
self.master.captions['S_RBLD']), self)
if self.master.book.page.settings[shared.SettType.RBLD.value] == '1':
self.c_rebuild.toggle()
hsizer.addWidget(self.c_rebuild)
vsizer.addLayout(hsizer)
hsizer = qtw.QHBoxLayout()
self.c_showdet = qtw.QCheckBox(self.master.captions['S_DETS'], self)
try:
if self.master.book.page.settings[shared.SettType.DETS.value] == '1':
self.c_showdet.toggle()
except KeyError:
shared.log_exc()
hsizer.addWidget(self.c_showdet)
vsizer.addLayout(hsizer)
hsizer = qtw.QHBoxLayout()
self.c_redef = qtw.QCheckBox(self.master.captions['T_MAKE'].format(
self.master.captions['S_RSAV']), self)
if self.master.book.page.settings[shared.SettType.RDEF.value] == '1':
self.c_redef.toggle()
hsizer.addWidget(self.c_redef)
vsizer.addLayout(hsizer)
pnl.setLayout(vsizer)
pnl.setFrameStyle(qtw.QFrame.Box | qtw.QFrame.Raised)
self.sizer.addWidget(pnl)
pnl = qtw.QFrame(self)
vsizer = qtw.QVBoxLayout()
text = self.master.captions['T_XTRASET'].format(
self.master.book.page.settings[shared.SettType.PNL.value])
hsizer = qtw.QHBoxLayout()
label = qtw.QLabel(text, self)
hsizer.addStretch()
hsizer.addWidget(label)
hsizer.addStretch()
vsizer.addLayout(hsizer)
hsizer = qtw.QHBoxLayout()
hsizer.addSpacing(41)
hsizer.addWidget(qtw.QLabel(self.master.captions['C_NAM'], self),
alignment=core.Qt.AlignHCenter)
hsizer.addSpacing(52)
hsizer.addWidget(qtw.QLabel(self.master.captions['C_VAL'], self),
alignment=core.Qt.AlignHCenter)
hsizer.addStretch()
vsizer.addLayout(hsizer)
pnl2 = qtw.QFrame(self)
self.scrl = qtw.QScrollArea(self)
self.scrl.setWidget(pnl2)
self.scrl.setWidgetResizable(True)
self.bar = self.scrl.verticalScrollBar()
self.gsizer = qtw.QGridLayout()
rownum = 0
self.rownum = rownum
self.data, self.checks = [], []
for name, value in self.master.book.page.settings.items():
if name not in shared.csv_settingnames and name != 'extra':
try:
desc = self.master.book.page.settings['extra'][name]
except KeyError:
desc = ''
self.add_row(name, value, desc)
pnl2.setLayout(self.gsizer)
pnl.setFrameStyle(qtw.QFrame.Box)
self.scrl.ensureVisible(0, 0)
vsizer.addWidget(self.scrl)
hsizer = qtw.QHBoxLayout()
hsizer.addStretch()
btn = qtw.QPushButton(self.master.captions['C_ADDSET'], self)
btn.clicked.connect(self.add_setting)
hsizer.addWidget(btn)
btn = qtw.QPushButton(self.master.captions['C_REMSET'], self)
btn.clicked.connect(self.remove_settings)
hsizer.addWidget(btn)
hsizer.addStretch()
vsizer.addLayout(hsizer)
pnl.setLayout(vsizer)
pnl.setFrameStyle(qtw.QFrame.Box | qtw.QFrame.Raised)
self.sizer.addWidget(pnl)
buttonbox = qtw.QDialogButtonBox()
btn = buttonbox.addButton(qtw.QDialogButtonBox.Ok)
btn = buttonbox.addButton(qtw.QDialogButtonBox.Cancel)
buttonbox.accepted.connect(self.accept)
buttonbox.rejected.connect(self.reject)
hsizer = qtw.QHBoxLayout()
hsizer.addStretch()
hsizer.addWidget(buttonbox)
hsizer.addStretch()
self.sizer.addLayout(hsizer)
self.setLayout(self.sizer)
def add_row(self, name='', value='', desc=''):
"""add a row for defining a setting (name, value)
"""
self.rownum += 1
colnum = 0
check = qtw.QCheckBox(self)
self.gsizer.addWidget(check, self.rownum, colnum)
self.checks.append(check)
colnum += 1
w_name = qtw.QLineEdit(name, self)
w_name.setFixedWidth(88)
if name:
w_name.setReadOnly(True)
## w_name.setMaxLength(50)
self.gsizer.addWidget(w_name, self.rownum, colnum)
colnum += 1
w_value = qtw.QLineEdit(value, self)
self.gsizer.addWidget(w_value, self.rownum, colnum)
self.rownum += 1
w_desc = qtw.QLineEdit(desc, self)
self.gsizer.addWidget(w_desc, self.rownum, colnum)
self.data.append((w_name, w_value, w_desc))
vbar = self.scrl.verticalScrollBar()
vbar.setMaximum(vbar.maximum() + 62)
vbar.setValue(vbar.maximum())
def delete_row(self, rownum):
"""delete a setting definition row
"""
check = self.checks[rownum]
w_name, w_value, w_desc = self.data[rownum]
for widget in check, w_name, w_value, w_desc:
self.gsizer.removeWidget(widget)
widget.close()
self.checks.pop(rownum)
self.data.pop(rownum)
def add_setting(self):
"""nieuwe rij aanmaken in self.gsizer"""
self.add_row()
def remove_settings(self):
"""alle aangevinkte items verwijderen uit self.gsizer"""
test = [x.isChecked() for x in self.checks]
checked = [x for x, y in enumerate(test) if y]
if any(test):
if ask_question(self.parent, 'Q_REMSET'):
for row in reversed(checked):
self.delete_row(row)
def accept(self):
"""update settings and leave
"""
data = [(x.text(), y.text(), z.text()) for x, y, z in self.data]
ok = self.master.accept_extrasettings(self.t_program.text(), self.t_title.text(),
self.c_rebuild.isChecked(),
self.c_showdet.isChecked(),
self.c_redef.isChecked(), data)
if not ok:
self.c_showdet.setChecked(False)
self.c_redef.setChecked(False)
else:
super().accept()
class EntryDialog(qtw.QDialog):
"""Dialog for Manual Entry
"""
def __init__(self, parent, master):
self.parent = parent
self.master = master
self.captions = self.master.captions
super().__init__(parent)
self.resize(680, 400)
self.setWindowTitle(self.master.title)
self.sizer = qtw.QVBoxLayout()
hsizer = qtw.QHBoxLayout()
self.p0list = qtw.QTableWidget(self)
# use self.parent.page.column_info to define grid
names, widths = [], []
for row in self.master.book.page.column_info:
names.append(self.captions[row[0]])
widths.append(row[1])
self.p0list.setColumnCount(len(names))
self.p0list.setHorizontalHeaderLabels(names)
p0hdr = self.p0list.horizontalHeader()
for indx, wid in enumerate(widths):
p0hdr.resizeSection(indx, wid)
# use self.master.page.data to populate grid
self.data = self.master.book.page.data
num_rows = 0
for rowkey, row in self.data.items():
self.p0list.insertRow(num_rows)
for i, element in enumerate(row):
new_item = qtw.QTableWidgetItem()
new_item.setText(element)
self.p0list.setItem(num_rows, i, new_item)
num_rows += 1
self.numrows = num_rows
hsizer.addWidget(self.p0list)
self.sizer.addLayout(hsizer)
hsizer = qtw.QHBoxLayout()
hsizer.addStretch()
buttonbox = qtw.QDialogButtonBox()
btn = buttonbox.addButton(self.captions['C_ADDKEY'],
qtw.QDialogButtonBox.ActionRole)
btn.clicked.connect(self.add_key)
btn = buttonbox.addButton(self.captions['C_REMKEY'],
qtw.QDialogButtonBox.ActionRole)
btn.clicked.connect(self.delete_key)
buttonbox.addButton(qtw.QDialogButtonBox.Ok)
buttonbox.addButton(qtw.QDialogButtonBox.Cancel)
buttonbox.accepted.connect(self.accept)
buttonbox.rejected.connect(self.reject)
hsizer.addWidget(buttonbox)
hsizer.addStretch()
self.sizer.addLayout(hsizer)
self.setLayout(self.sizer)
def add_key(self):
"add a line to the grid"
self.p0list.insertRow(self.numrows)
for i in range(self.p0list.columnCount()):
new_item = qtw.QTableWidgetItem()
new_item.setText("")
self.p0list.setItem(self.numrows, i, new_item)
self.numrows += 1
## self.p0list.scrollToItem(new_item)
self.p0list.scrollToBottom()
def delete_key(self):
"remove selected line(s) from the grid"
selected_rows = []
for item in self.p0list.selectedRanges():
for increment in range(item.rowCount()):
selected_rows.append(item.topRow() + increment)
for row in reversed(sorted(selected_rows)):
self.p0list.removeRow(row)
def accept(self):
"""send updates to parent and leave
"""
new_values = collections.defaultdict(list)
display = False
for rowid in range(self.p0list.rowCount()):
for colid in range(self.p0list.columnCount()):
try:
value = self.p0list.item(rowid, colid).text()
except AttributeError:
value = ''
new_values[rowid + 1].append(value.replace('\\n', '\n'))
self.master.book.page.data = new_values
super().accept()
class CompleteDialog(qtw.QDialog):
"""Model dialog for entering / completing command descriptions
"""
def __init__(self, parent, master):
self.parent = parent
self.master = master
## self.captions = self.parent.captions
super().__init__(parent)
self.resize(680, 400)
self.read_data() # client exit to get start data for dialog
self.p0list = qtw.QTableWidget(len(self.cmds), 2, self)
self.p0list.setHorizontalHeaderLabels([shared.get_text(self.parent, 'C_CMD'),
shared.get_text(self.parent, 'C_DESC')])
hdr | |
import os
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn.neighbors import (
NearestNeighbors, radius_neighbors_graph, kneighbors_graph)
from sklearn.utils.graph import graph_shortest_path
from scipy.spatial import distance_matrix
from scipy.spatial.distance import pdist, squareform
from sklearn import datasets
from ._fast_utils import distance_matrix
class Shape(object):
def __init__(self,
X=None,
name='random',
seed=42,
n_neighbors=12,
dim=3,
use_noise=False,
noise_std=1e-2,
n_jobs=4):
np.random.seed(seed)
self.points = X
self.seed = seed
self.name = name
self.n_neighbors = n_neighbors
self.dim = dim
self.n_jobs = n_jobs
self.euclidean_d = None
self.sqeuclidean_d = None
self.geodesic_d = None
self.use_noise = use_noise
self.noise_std = noise_std
self.color = None
def generate(self, npoints, use_cache=True):
if (use_cache and
self.points is not None and
npoints == self.points.shape[0]):
return self.points
self.points = np.random.rand(npoints, self.dim)
return self.points
def add_noise(self, x):
if self.use_noise:
n = np.random.normal(0, self.noise_std, x.shape)
x = x + n
return x
def noise_round_points(self, p):
if self.use_noise:
noise_x = np.random.normal(0, self.noise_std, p.shape[0])
noise_y = np.random.normal(0, self.noise_std, p.shape[0])
noise_z = np.random.normal(0, self.noise_std, p.shape[0])
noise = np.stack((noise_x, noise_y, noise_z), axis=1)
p += noise
p = np.around(p, decimals=6)
return p
def euclidean_distances(self, points=None, use_cache=True):
if use_cache and self.euclidean_d is not None:
return self.euclidean_d
if points is None:
points = self.points
self.euclidean_d = distance_matrix(points)
return self.euclidean_d
def sqeuclidean_distances(self, points=None, use_cache=True):
if use_cache and self.sqeuclidean_d is not None:
return self.sqeuclidean_d
if points is None:
points = self.points
self.sqeuclidean_d = squareform(pdist(points, metric='sqeuclidean'))
return self.sqeuclidean_d
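# The two geodesic_* methods below approximate geodesic distances Isomap-style:
# connect each point to nearby points (by radius or by k nearest neighbors)
# with Euclidean edge weights, then take all-pairs graph shortest paths
# (Dijkstra, method='D') over that graph as the geodesic distance estimate.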
def geodesic_radius(self, points=None, use_cache=True):
if use_cache and self.geodesic_d is not None:
return self.geodesic_d
if points is None:
points = self.points
dist = self.euclidean_distances()
nbrs_inc = np.argsort(dist, axis=1)
max_dist = -1
for i in range(dist.shape[0]):
achieved_neighbors = 0
while achieved_neighbors < min(self.n_neighbors, dist.shape[0]):
j = achieved_neighbors
if max_dist < dist[i][nbrs_inc[i][j]]:
max_dist = dist[i][nbrs_inc[i][j]]
achieved_neighbors += 1
nbrs = (NearestNeighbors(algorithm='auto',
n_neighbors=self.n_neighbors,
radius=max_dist,
n_jobs=self.n_jobs)
.fit(points))
kng = radius_neighbors_graph(
nbrs, max_dist, mode='distance', n_jobs=self.n_jobs)
self.geodesic_d = graph_shortest_path(kng, method='D', directed=False)
return self.geodesic_d
def geodesic_neighbors(self, points=None, use_cache=True):
if use_cache and self.geodesic_d is not None:
return self.geodesic_d
if points is None:
points = self.points
nbrs = (NearestNeighbors(algorithm='auto',
n_neighbors=self.n_neighbors,
n_jobs=self.n_jobs)
.fit(points))
kng = kneighbors_graph(nbrs,
self.n_neighbors,
mode='distance',
n_jobs=self.n_jobs)
self.geodesic_d = graph_shortest_path(kng, method='D', directed=False)
return self.geodesic_d
def _save_data(self, x, data_dir='./'):
if x is not None:
filename = '{}_{}_{}_{}'.format(
self.name,
self.points.shape[0],
self.dim,
'noise' if self.use_noise else 'no_noise')
save_file = os.path.join(
data_dir, filename)
np.savetxt(save_file, x, delimiter=',')
def save(self):
self._save_data(self.points, '{}_coords.dat')
self._save_data(self.euclidean_d, 'Euclidean_{}.dat')
self._save_data(self.geodesic_d, 'Geodesic_{}.dat')
def instance(self, npoints=0, distance='euclidean', geomethod='neigh'):
if self.points is None:
points = self.generate(npoints)
points = self.add_noise(points)
self.points = points
else:
points = self.add_noise(self.points)
self.points = points
if distance == 'euclidean':
dist = self.euclidean_distances()
elif distance == 'sqeuclidean':
dist = self.sqeuclidean_distances()
else:
if geomethod == 'radius':
dist = self.geodesic_radius()
else:
dist = self.geodesic_neighbors()
return points, dist
def plot3d(self, report_dir='./'):
if self.points is None:
return
xx = self.points[:, 0]
yy = self.points[:, 1]
zz = self.points[:, 2]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xx, yy, zz)
filename = '{}_{}_{}_{}'.format(
self.name,
self.points.shape[0],
self.dim,
'noise' if self.use_noise else 'no_noise')
plt.savefig(os.path.join(report_dir, filename))
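# Minimal usage sketch (illustrative only; Ball is defined just below):
#
#     shape = Ball(radius=0.9, use_noise=False, n_neighbors=8)
#     points, dist = shape.instance(npoints=500, distance='geodesic',
#                                   geomethod='neigh')
#     shape.plot3d(report_dir='./')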
class Ball(Shape):
def __init__(self,
X=None,
radius=0.9,
name='ball',
use_noise=True,
noise_std=1e-2,
seed=42,
n_neighbors=8,
dim=3,
n_jobs=4):
super(Ball, self).__init__(
name=name,
seed=seed,
n_neighbors=n_neighbors,
dim=dim,
use_noise=use_noise,
noise_std=noise_std,
n_jobs=n_jobs)
self.radius = radius
def generate(self, npoints, use_cache=True):
if (use_cache and
self.points is not None and
npoints == self.points.shape[0]):
return self.points
phi = np.random.uniform(0, 2.0 * np.pi, npoints)
costheta = np.random.uniform(-1.0, 1.0, npoints)
u = np.random.uniform(0.0, 1.0, npoints)
theta = np.arccos(costheta)
r = self.radius * np.cbrt(u)
sintheta = np.sin(theta)
x = r * sintheta * np.cos(phi)
y = r * sintheta * np.sin(phi)
z = r * costheta
p = np.stack((x, y, z), axis=1)
self.points = self.noise_round_points(p)
return self.points
class Sphere(Shape):
def __init__(self,
X=None,
radius=0.9,
name='sphere',
use_noise=True,
noise_std=1e-2,
seed=42,
n_neighbors=8,
dim=3,
n_jobs=4):
super(Sphere, self).__init__(
name=name,
seed=seed,
n_neighbors=n_neighbors,
dim=dim,
use_noise=use_noise,
noise_std=noise_std,
n_jobs=n_jobs)
self.radius = radius
@staticmethod
def _get_coords(theta, phi):
x = np.cos(theta) * np.sin(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(phi)
return x, y, z
def generate(self, npoints, use_cache=True):
phi = np.random.uniform(0, 2.0 * np.pi, npoints)
costheta = np.random.uniform(-1.0, 1.0, npoints)
theta = np.arccos(costheta)
x, y, z = self._get_coords(theta, phi)
p = np.stack((x, y, z), axis=1)
self.points = self.noise_round_points(p)
return self.points
class CutSphere(Shape):
def __init__(self,
X=None,
radius=0.9,
cut_theta=0.5 * np.pi,
name='cut-sphere',
use_noise=True,
noise_std=1e-2,
seed=42,
n_neighbors=8,
dim=3,
n_jobs=4):
super(CutSphere, self).__init__(
name=name,
seed=seed,
n_neighbors=n_neighbors,
dim=dim,
use_noise=use_noise,
noise_std=noise_std,
n_jobs=n_jobs)
self.radius = radius
self.cut_theta = cut_theta
@staticmethod
def _get_coords(theta, phi):
x = np.cos(theta) * np.sin(phi)
y = np.sin(theta) * np.sin(phi)
z = np.cos(phi)
return x, y, z
def generate(self, npoints, use_cache=True):
phi = np.random.uniform(0, 2.0 * np.pi, npoints)
costheta = np.random.uniform(np.cos(self.cut_theta), 1.0, npoints)
theta = np.arccos(costheta)
# cut_theta = theta[theta < self.cut_theta]
x, y, z = self._get_coords(theta, phi)
p = np.stack((x, y, z), axis=1)
self.points = self.noise_round_points(p)
return self.points
class Spiral(Shape):
def __init__(self,
X=None,
name='spiral',
angle_start=np.pi,
angle_stop=4*np.pi,
r_stop=0.9,
r_start=0.1,
depth=12,
use_noise=True,
noise_std=1e-2,
seed=42,
n_neighbors=8,
dim=3,
n_jobs=4):
super(Spiral, self).__init__(
name=name,
seed=seed,
n_neighbors=n_neighbors,
dim=dim,
use_noise=use_noise,
noise_std=noise_std,
n_jobs=n_jobs)
self.angle_start = angle_start
self.angle_stop = angle_stop
self.r_start = r_start
self.r_stop = r_stop
self.depth = depth
def generate(self, npoints, use_cache=True):
rows = np.round(npoints / self.depth) - 1
angle_step = float(self.angle_stop - self.angle_start) / rows
distance_step = float(self.r_stop - self.r_start) / rows
angle = self.angle_start
distance = self.r_start
points = []
while angle <= self.angle_stop:
for i in range(self.depth):
x = -0.9 + (1.8 * i) / (self.depth - 1)
y = distance * np.cos(angle)
z = distance * np.sin(angle)
points.append([x, y, z])
distance += distance_step
angle += angle_step
p = np.array(points)
self.points = self.noise_round_points(p)
self.color = self.points[:, 0]
return self.points
class SpiralHole(Shape):
def __init__(self,
X=None,
name='spiral-with-hole',
angle_start=np.pi,
angle_stop=4*np.pi,
r_stop=0.9,
r_start=0.1,
depth=10,
use_noise=True,
noise_std=1e-2,
seed=42,
n_neighbors=8,
dim=3,
n_jobs=4):
super(SpiralHole, self).__init__(
name=name,
seed=seed,
n_neighbors=n_neighbors,
dim=dim,
use_noise=use_noise,
noise_std=noise_std,
n_jobs=n_jobs)
self.angle_start = angle_start
self.angle_stop = angle_stop
self.r_start = r_start
self.r_stop = r_stop
self.depth = depth
self.angle_hole_start = float(360 + 45) * np.pi / 180
self.angle_hole_stop = float(360 + 135) * np.pi / 180
def generate(self, npoints, use_cache=True):
rows = np.round(npoints / self.depth) - 1
angle_step = float(self.angle_stop - self.angle_start) / rows
distance_step = float(self.r_stop - self.r_start) / rows
angle = self.angle_start
distance = self.r_start
points = []
while angle <= self.angle_stop:
for i in range(self.depth):
x = -0.9 + (1.8 * i) / (self.depth - 1)
y = distance * np.cos(angle)
z = distance * np.sin(angle)
min_hole = np.round(int(2 * self.depth / 3))
max_hole = np.round(int(self.depth / 3))
if (self.angle_hole_stop >= angle >= self.angle_hole_start and
min_hole > i >= max_hole):
pass
else:
points.append([x, y, z])
distance += distance_step
angle += angle_step
p = np.array(points)
self.points = self.noise_round_points(p)
return self.points
class SwissRoll(Shape):
def generate(self, npoints, use_cache=True):
noise_std = 0 if not self.use_noise else self.noise_std
self.points, self.color = datasets.samples_generator.make_swiss_roll(
n_samples=npoints, noise=noise_std, random_state=self.seed)
return self.points
class SCurve(Shape):
def generate(self, npoints, use_cache=True):
noise_std = 0 if not self.use_noise else self.noise_std
self.points, self.color = datasets.samples_generator.make_s_curve(
n_samples=npoints, noise=noise_std, random_state=self.seed)
return self.points
class ToroidalHelix(Shape):
def generate(self, npoints, use_cache=True):
param = -1
t = np.arange(1, npoints) / float(npoints)
e_t = t ** (param * 2.0 * np.pi)
self.color = e_t
p = np.array([
(2 + np.cos(8 * e_t)) * np.cos(e_t),
(2 + np.cos(8 * e_t)) * np.sin(e_t),
np.sin(8 * e_t)]).T
self.points = self.noise_round_points(p)
return self.points
class SwissHole(Shape):
def generate(self, npoints, use_cache=True):
param = 1
tt = (3 * np.pi / 2.0) * (1 + 2.0 * np.random.rand(2 * npoints))
h = 21 * np.random.rand(2 * npoints)
kl = np.zeros(2 * npoints)
for ii in range(2 * npoints):
if 9 < tt[ii] < 12:
if 9 < h[ii] < 14:
kl[ii] = 1
tt = tt[kl == 0]
h = h[kl == 0]
p = np.array([tt * np.cos(tt), h, param * tt * np.sin(tt)]).T
self.points = self.noise_round_points(p)
self.color = tt
return self.points
class PuncturedSphere(Shape):
def generate(self, npoints, use_cache=True):
param = .5
inc = 9.0 / np.sqrt(npoints)
yy, xx = map(lambda z: z.flatten(),
np.mgrid[-5:5:inc, -5:5:inc])
rr2 = xx ** 2 + yy ** 2
ii = np.argsort(rr2)
y = np.array([xx[ii[:npoints]].T, yy[ii[:npoints]].T])
a = 4.0 / (4 + np.sum(y ** 2, axis=0))
p = np.array([a * y[0, :], a * y[1, :], param * 2 * (1 - a)]).T
self.points = self.noise_round_points(p)
self.color = self.points[:, 2]
return self.points
class CornerPlane(Shape):
def generate(self, npoints, use_cache=True):
k = 0
| |
Register('csrce2', 8, 0x90006710),
Register('csrce3', 8, 0x90006718),
Register('csrce4', 8, 0x90006720),
Register('csrce5', 8, 0x90006728),
Register('csrce6', 8, 0x90006730),
Register('csrce7', 8, 0x90006738),
Register('csrce8', 8, 0x90006740),
Register('csrce9', 8, 0x90006748),
Register('csrcea', 8, 0x90006750),
Register('csrceb', 8, 0x90006758),
Register('csrcec', 8, 0x90006760),
Register('csrced', 8, 0x90006768),
Register('csrcee', 8, 0x90006770),
Register('csrcef', 8, 0x90006778),
Register('csrcf0', 8, 0x90006780),
Register('csrcf1', 8, 0x90006788),
Register('csrcf2', 8, 0x90006790),
Register('csrcf3', 8, 0x90006798),
Register('csrcf4', 8, 0x900067a0),
Register('csrcf5', 8, 0x900067a8),
Register('csrcf6', 8, 0x900067b0),
Register('csrcf7', 8, 0x900067b8),
Register('csrcf8', 8, 0x900067c0),
Register('csrcf9', 8, 0x900067c8),
Register('csrcfa', 8, 0x900067d0),
Register('csrcfb', 8, 0x900067d8),
Register('csrcfc', 8, 0x900067e0),
Register('csrcfd', 8, 0x900067e8),
Register('csrcfe', 8, 0x900067f0),
Register('csrcff', 8, 0x900067f8),
Register('csrd00', 8, 0x90006800),
Register('csrd01', 8, 0x90006808),
Register('csrd02', 8, 0x90006810),
Register('csrd03', 8, 0x90006818),
Register('csrd04', 8, 0x90006820),
Register('csrd05', 8, 0x90006828),
Register('csrd06', 8, 0x90006830),
Register('csrd07', 8, 0x90006838),
Register('csrd08', 8, 0x90006840),
Register('csrd09', 8, 0x90006848),
Register('csrd0a', 8, 0x90006850),
Register('csrd0b', 8, 0x90006858),
Register('csrd0c', 8, 0x90006860),
Register('csrd0d', 8, 0x90006868),
Register('csrd0e', 8, 0x90006870),
Register('csrd0f', 8, 0x90006878),
Register('csrd10', 8, 0x90006880),
Register('csrd11', 8, 0x90006888),
Register('csrd12', 8, 0x90006890),
Register('csrd13', 8, 0x90006898),
Register('csrd14', 8, 0x900068a0),
Register('csrd15', 8, 0x900068a8),
Register('csrd16', 8, 0x900068b0),
Register('csrd17', 8, 0x900068b8),
Register('csrd18', 8, 0x900068c0),
Register('csrd19', 8, 0x900068c8),
Register('csrd1a', 8, 0x900068d0),
Register('csrd1b', 8, 0x900068d8),
Register('csrd1c', 8, 0x900068e0),
Register('csrd1d', 8, 0x900068e8),
Register('csrd1e', 8, 0x900068f0),
Register('csrd1f', 8, 0x900068f8),
Register('csrd20', 8, 0x90006900),
Register('csrd21', 8, 0x90006908),
Register('csrd22', 8, 0x90006910),
Register('csrd23', 8, 0x90006918),
Register('csrd24', 8, 0x90006920),
Register('csrd25', 8, 0x90006928),
Register('csrd26', 8, 0x90006930),
Register('csrd27', 8, 0x90006938),
Register('csrd28', 8, 0x90006940),
Register('csrd29', 8, 0x90006948),
Register('csrd2a', 8, 0x90006950),
Register('csrd2b', 8, 0x90006958),
Register('csrd2c', 8, 0x90006960),
Register('csrd2d', 8, 0x90006968),
Register('csrd2e', 8, 0x90006970),
Register('csrd2f', 8, 0x90006978),
Register('csrd30', 8, 0x90006980),
Register('csrd31', 8, 0x90006988),
Register('csrd32', 8, 0x90006990),
Register('csrd33', 8, 0x90006998),
Register('csrd34', 8, 0x900069a0),
Register('csrd35', 8, 0x900069a8),
Register('csrd36', 8, 0x900069b0),
Register('csrd37', 8, 0x900069b8),
Register('csrd38', 8, 0x900069c0),
Register('csrd39', 8, 0x900069c8),
Register('csrd3a', 8, 0x900069d0),
Register('csrd3b', 8, 0x900069d8),
Register('csrd3c', 8, 0x900069e0),
Register('csrd3d', 8, 0x900069e8),
Register('csrd3e', 8, 0x900069f0),
Register('csrd3f', 8, 0x900069f8),
Register('csrd40', 8, 0x90006a00),
Register('csrd41', 8, 0x90006a08),
Register('csrd42', 8, 0x90006a10),
Register('csrd43', 8, 0x90006a18),
Register('csrd44', 8, 0x90006a20),
Register('csrd45', 8, 0x90006a28),
Register('csrd46', 8, 0x90006a30),
Register('csrd47', 8, 0x90006a38),
Register('csrd48', 8, 0x90006a40),
Register('csrd49', 8, 0x90006a48),
Register('csrd4a', 8, 0x90006a50),
Register('csrd4b', 8, 0x90006a58),
Register('csrd4c', 8, 0x90006a60),
Register('csrd4d', 8, 0x90006a68),
Register('csrd4e', 8, 0x90006a70),
Register('csrd4f', 8, 0x90006a78),
Register('csrd50', 8, 0x90006a80),
Register('csrd51', 8, 0x90006a88),
Register('csrd52', 8, 0x90006a90),
Register('csrd53', 8, 0x90006a98),
Register('csrd54', 8, 0x90006aa0),
Register('csrd55', 8, 0x90006aa8),
Register('csrd56', 8, 0x90006ab0),
Register('csrd57', 8, 0x90006ab8),
Register('csrd58', 8, 0x90006ac0),
Register('csrd59', 8, 0x90006ac8),
Register('csrd5a', 8, 0x90006ad0),
Register('csrd5b', 8, 0x90006ad8),
Register('csrd5c', 8, 0x90006ae0),
Register('csrd5d', 8, 0x90006ae8),
Register('csrd5e', 8, 0x90006af0),
Register('csrd5f', 8, 0x90006af8),
Register('csrd60', 8, 0x90006b00),
Register('csrd61', 8, 0x90006b08),
Register('csrd62', 8, 0x90006b10),
Register('csrd63', 8, 0x90006b18),
Register('csrd64', 8, 0x90006b20),
Register('csrd65', 8, 0x90006b28),
Register('csrd66', 8, 0x90006b30),
Register('csrd67', 8, 0x90006b38),
Register('csrd68', 8, 0x90006b40),
Register('csrd69', 8, 0x90006b48),
Register('csrd6a', 8, 0x90006b50),
Register('csrd6b', 8, 0x90006b58),
Register('csrd6c', 8, 0x90006b60),
Register('csrd6d', 8, 0x90006b68),
Register('csrd6e', 8, 0x90006b70),
Register('csrd6f', 8, 0x90006b78),
Register('csrd70', 8, 0x90006b80),
Register('csrd71', 8, 0x90006b88),
Register('csrd72', 8, 0x90006b90),
Register('csrd73', 8, 0x90006b98),
Register('csrd74', 8, 0x90006ba0),
Register('csrd75', 8, 0x90006ba8),
Register('csrd76', 8, 0x90006bb0),
Register('csrd77', 8, 0x90006bb8),
Register('csrd78', 8, 0x90006bc0),
Register('csrd79', 8, 0x90006bc8),
Register('csrd7a', 8, 0x90006bd0),
Register('csrd7b', 8, 0x90006bd8),
Register('csrd7c', 8, 0x90006be0),
Register('csrd7d', 8, 0x90006be8),
Register('csrd7e', 8, 0x90006bf0),
Register('csrd7f', 8, 0x90006bf8),
Register('csrd80', 8, 0x90006c00),
Register('csrd81', 8, 0x90006c08),
Register('csrd82', 8, 0x90006c10),
Register('csrd83', 8, 0x90006c18),
Register('csrd84', 8, 0x90006c20),
Register('csrd85', 8, 0x90006c28),
Register('csrd86', 8, 0x90006c30),
Register('csrd87', 8, 0x90006c38),
Register('csrd88', 8, 0x90006c40),
Register('csrd89', 8, 0x90006c48),
Register('csrd8a', 8, 0x90006c50),
Register('csrd8b', 8, 0x90006c58),
Register('csrd8c', 8, 0x90006c60),
Register('csrd8d', 8, 0x90006c68),
Register('csrd8e', 8, 0x90006c70),
Register('csrd8f', 8, 0x90006c78),
Register('csrd90', 8, 0x90006c80),
Register('csrd91', 8, 0x90006c88),
Register('csrd92', 8, 0x90006c90),
Register('csrd93', 8, 0x90006c98),
Register('csrd94', 8, 0x90006ca0),
Register('csrd95', 8, 0x90006ca8),
Register('csrd96', 8, 0x90006cb0),
Register('csrd97', 8, 0x90006cb8),
Register('csrd98', 8, 0x90006cc0),
Register('csrd99', 8, 0x90006cc8),
Register('csrd9a', 8, 0x90006cd0),
Register('csrd9b', 8, 0x90006cd8),
Register('csrd9c', 8, 0x90006ce0),
Register('csrd9d', 8, 0x90006ce8),
Register('csrd9e', 8, 0x90006cf0),
Register('csrd9f', 8, 0x90006cf8),
Register('csrda0', 8, 0x90006d00),
Register('csrda1', 8, 0x90006d08),
Register('csrda2', 8, 0x90006d10),
Register('csrda3', 8, 0x90006d18),
Register('csrda4', 8, 0x90006d20),
Register('csrda5', 8, 0x90006d28),
Register('csrda6', 8, 0x90006d30),
Register('csrda7', 8, 0x90006d38),
Register('csrda8', 8, 0x90006d40),
Register('csrda9', 8, 0x90006d48),
Register('csrdaa', 8, 0x90006d50),
Register('csrdab', 8, 0x90006d58),
Register('csrdac', 8, 0x90006d60),
Register('csrdad', 8, 0x90006d68),
Register('csrdae', 8, 0x90006d70),
Register('csrdaf', 8, 0x90006d78),
Register('csrdb0', 8, 0x90006d80),
Register('csrdb1', 8, 0x90006d88),
Register('csrdb2', 8, 0x90006d90),
Register('csrdb3', 8, 0x90006d98),
Register('csrdb4', 8, 0x90006da0),
Register('csrdb5', 8, 0x90006da8),
Register('csrdb6', 8, 0x90006db0),
Register('csrdb7', 8, 0x90006db8),
Register('csrdb8', 8, 0x90006dc0),
Register('csrdb9', 8, 0x90006dc8),
Register('csrdba', 8, 0x90006dd0),
Register('csrdbb', 8, 0x90006dd8),
Register('csrdbc', 8, 0x90006de0),
Register('csrdbd', 8, 0x90006de8),
Register('csrdbe', 8, 0x90006df0),
Register('csrdbf', 8, 0x90006df8),
Register('csrdc0', 8, 0x90006e00),
Register('csrdc1', 8, 0x90006e08),
Register('csrdc2', 8, 0x90006e10),
Register('csrdc3', 8, 0x90006e18),
Register('csrdc4', 8, 0x90006e20),
Register('csrdc5', 8, 0x90006e28),
Register('csrdc6', 8, 0x90006e30),
Register('csrdc7', 8, 0x90006e38),
Register('csrdc8', 8, 0x90006e40),
Register('csrdc9', 8, 0x90006e48),
Register('csrdca', 8, 0x90006e50),
Register('csrdcb', 8, 0x90006e58),
Register('csrdcc', 8, 0x90006e60),
Register('csrdcd', 8, 0x90006e68),
Register('csrdce', 8, 0x90006e70),
Register('csrdcf', 8, 0x90006e78),
Register('csrdd0', 8, 0x90006e80),
Register('csrdd1', 8, 0x90006e88),
Register('csrdd2', 8, 0x90006e90),
Register('csrdd3', 8, 0x90006e98),
Register('csrdd4', 8, 0x90006ea0),
Register('csrdd5', 8, 0x90006ea8),
Register('csrdd6', 8, 0x90006eb0),
Register('csrdd7', 8, 0x90006eb8),
Register('csrdd8', 8, 0x90006ec0),
Register('csrdd9', 8, 0x90006ec8),
Register('csrdda', 8, 0x90006ed0),
Register('csrddb', 8, 0x90006ed8),
Register('csrddc', 8, 0x90006ee0),
Register('csrddd', 8, 0x90006ee8),
Register('csrdde', 8, 0x90006ef0),
Register('csrddf', 8, 0x90006ef8),
Register('csrde0', 8, 0x90006f00),
Register('csrde1', 8, 0x90006f08),
Register('csrde2', 8, 0x90006f10),
Register('csrde3', 8, 0x90006f18),
Register('csrde4', 8, 0x90006f20),
Register('csrde5', 8, 0x90006f28),
Register('csrde6', 8, 0x90006f30),
Register('csrde7', 8, 0x90006f38),
Register('csrde8', 8, 0x90006f40),
Register('csrde9', 8, 0x90006f48),
Register('csrdea', 8, 0x90006f50),
Register('csrdeb', 8, 0x90006f58),
Register('csrdec', 8, 0x90006f60),
Register('csrded', 8, 0x90006f68),
Register('csrdee', 8, 0x90006f70),
Register('csrdef', 8, 0x90006f78),
Register('csrdf0', 8, 0x90006f80),
Register('csrdf1', 8, 0x90006f88),
Register('csrdf2', 8, 0x90006f90),
Register('csrdf3', 8, 0x90006f98),
Register('csrdf4', 8, 0x90006fa0),
Register('csrdf5', 8, 0x90006fa8),
Register('csrdf6', 8, 0x90006fb0),
Register('csrdf7', 8, 0x90006fb8),
Register('csrdf8', 8, 0x90006fc0),
Register('csrdf9', 8, 0x90006fc8),
Register('csrdfa', 8, 0x90006fd0),
Register('csrdfb', 8, 0x90006fd8),
Register('csrdfc', 8, 0x90006fe0),
Register('csrdfd', 8, 0x90006fe8),
Register('csrdfe', 8, 0x90006ff0),
Register('csrdff', 8, 0x90006ff8),
Register('csre00', 8, 0x90007000),
Register('csre01', 8, 0x90007008),
Register('csre02', 8, 0x90007010),
Register('csre03', 8, 0x90007018),
Register('csre04', 8, 0x90007020),
Register('csre05', 8, 0x90007028),
Register('csre06', 8, 0x90007030),
Register('csre07', 8, 0x90007038),
Register('csre08', 8, 0x90007040),
Register('csre09', 8, 0x90007048),
Register('csre0a', 8, 0x90007050),
Register('csre0b', 8, 0x90007058),
Register('csre0c', 8, 0x90007060),
Register('csre0d', 8, 0x90007068),
Register('csre0e', 8, 0x90007070),
Register('csre0f', 8, 0x90007078),
Register('csre10', 8, 0x90007080),
Register('csre11', 8, 0x90007088),
Register('hgeip', 8, 0x90007090),
Register('csre13', 8, 0x90007098),
Register('csre14', 8, 0x900070a0),
Register('csre15', 8, 0x900070a8),
Register('csre16', 8, 0x900070b0),
Register('csre17', 8, 0x900070b8),
Register('csre18', 8, 0x900070c0),
Register('csre19', 8, 0x900070c8),
Register('csre1a', 8, 0x900070d0),
Register('csre1b', 8, 0x900070d8),
Register('csre1c', 8, 0x900070e0),
Register('csre1d', 8, 0x900070e8),
Register('csre1e', 8, 0x900070f0),
Register('csre1f', 8, 0x900070f8),
Register('csre20', 8, 0x90007100),
Register('csre21', 8, 0x90007108),
Register('csre22', 8, 0x90007110),
Register('csre23', 8, 0x90007118),
Register('csre24', 8, 0x90007120),
Register('csre25', 8, 0x90007128),
Register('csre26', 8, 0x90007130),
Register('csre27', 8, 0x90007138),
Register('csre28', 8, 0x90007140),
Register('csre29', 8, 0x90007148),
Register('csre2a', 8, 0x90007150),
Register('csre2b', 8, 0x90007158),
Register('csre2c', 8, 0x90007160),
Register('csre2d', 8, 0x90007168),
Register('csre2e', 8, 0x90007170),
Register('csre2f', 8, 0x90007178),
Register('csre30', 8, 0x90007180),
Register('csre31', 8, 0x90007188),
Register('csre32', 8, 0x90007190),
Register('csre33', 8, 0x90007198),
Register('csre34', 8, 0x900071a0),
Register('csre35', 8, 0x900071a8),
Register('csre36', 8, 0x900071b0),
Register('csre37', 8, 0x900071b8),
Register('csre38', 8, 0x900071c0),
Register('csre39', 8, 0x900071c8),
Register('csre3a', 8, 0x900071d0),
Register('csre3b', 8, 0x900071d8),
Register('csre3c', 8, 0x900071e0),
Register('csre3d', 8, 0x900071e8),
Register('csre3e', 8, 0x900071f0),
Register('csre3f', 8, 0x900071f8),
Register('csre40', 8, 0x90007200),
Register('csre41', 8, 0x90007208),
Register('csre42', 8, 0x90007210),
Register('csre43', 8, 0x90007218),
Register('csre44', 8, 0x90007220),
Register('csre45', 8, 0x90007228),
Register('csre46', 8, 0x90007230),
Register('csre47', 8, 0x90007238),
Register('csre48', 8, 0x90007240),
Register('csre49', 8, 0x90007248),
Register('csre4a', 8, 0x90007250),
Register('csre4b', 8, 0x90007258),
Register('csre4c', 8, 0x90007260),
Register('csre4d', 8, 0x90007268),
Register('csre4e', 8, 0x90007270),
Register('csre4f', 8, 0x90007278),
Register('csre50', 8, 0x90007280),
Register('csre51', 8, 0x90007288),
Register('csre52', 8, 0x90007290),
Register('csre53', 8, 0x90007298),
Register('csre54', 8, 0x900072a0),
Register('csre55', 8, 0x900072a8),
Register('csre56', 8, 0x900072b0),
Register('csre57', 8, 0x900072b8),
Register('csre58', 8, 0x900072c0),
Register('csre59', 8, 0x900072c8),
Register('csre5a', 8, 0x900072d0),
Register('csre5b', 8, 0x900072d8),
Register('csre5c', 8, 0x900072e0),
Register('csre5d', 8, 0x900072e8),
Register('csre5e', 8, 0x900072f0),
Register('csre5f', 8, 0x900072f8),
Register('csre60', 8, 0x90007300),
Register('csre61', 8, 0x90007308),
Register('csre62', 8, 0x90007310),
Register('csre63', 8, 0x90007318),
Register('csre64', 8, 0x90007320),
Register('csre65', 8, 0x90007328),
Register('csre66', 8, 0x90007330),
Register('csre67', 8, 0x90007338),
Register('csre68', 8, 0x90007340),
Register('csre69', 8, 0x90007348),
Register('csre6a', 8, 0x90007350),
Register('csre6b', 8, 0x90007358),
Register('csre6c', 8, 0x90007360),
Register('csre6d', 8, 0x90007368),
Register('csre6e', 8, 0x90007370),
Register('csre6f', 8, 0x90007378),
Register('csre70', 8, 0x90007380),
Register('csre71', 8, 0x90007388),
Register('csre72', 8, 0x90007390),
Register('csre73', 8, 0x90007398),
Register('csre74', 8, 0x900073a0),
Register('csre75', 8, 0x900073a8),
Register('csre76', 8, 0x900073b0),
Register('csre77', 8, 0x900073b8),
Register('csre78', 8, 0x900073c0),
Register('csre79', 8, 0x900073c8),
Register('csre7a', 8, 0x900073d0),
Register('csre7b', 8, 0x900073d8),
Register('csre7c', 8, 0x900073e0),
Register('csre7d', 8, 0x900073e8),
Register('csre7e', 8, 0x900073f0),
Register('csre7f', 8, 0x900073f8),
Register('csre80', 8, 0x90007400),
Register('csre81', 8, 0x90007408),
Register('csre82', 8, 0x90007410),
Register('csre83', 8, 0x90007418),
Register('csre84', 8, 0x90007420),
Register('csre85', 8, 0x90007428),
Register('csre86', 8, 0x90007430),
Register('csre87', 8, 0x90007438),
Register('csre88', 8, 0x90007440),
Register('csre89', 8, 0x90007448),
Register('csre8a', 8, 0x90007450),
Register('csre8b', 8, 0x90007458),
Register('csre8c', 8, 0x90007460),
#! python
## Copyright (c) 2018-2021, Carnegie Mellon University
## See LICENSE for details
## This script reads a file, cube-sizes.txt, that contains several cube size
## specifications for the 3D DFT. This script will:
## Generate a list of source file names for CMake to build
## Create the source files (by running Spiral), writing them
## to directory lib_<stem>_srcs
## Create the prototype definitions in a private library include file: <stem>_decls.h
## Create the public header file for the library: <stem>_public.h
## Compiling the sources and building the library are handled by CMake.
import sys
import subprocess
import os, stat
import re
import shutil
## The file stem may be given as an argument specifying the library to build
if len ( sys.argv ) < 2:
## No library name stem provided, default to mddft3d_
_file_stem = 'mddft3d_'
else:
## Use given argument as the library stem name
_file_stem = sys.argv[1]
if not re.search ( '_$', _file_stem ): ## append an underscore if one is not already present (re.search, since re.match only anchors at the start)
_file_stem = _file_stem + '_'
## Code to build -- Hip or CUDA (default) governs file suffix etc.
_code_type = 'CUDA'
_file_suffix = '.cu'
if len ( sys.argv ) >= 3:
## Code type specified
_code_type = sys.argv[2]
if re.match ( 'cuda', _code_type, re.IGNORECASE ):
## CUDA selected
_code_type = 'CUDA'
_file_suffix = '.cu'
if re.match ( 'hip', _code_type, re.IGNORECASE ):
## HIP selected
_code_type = 'HIP'
_file_suffix = '.cpp'
## Create the library sources directory (if it doesn't exist)
_srcs_dir = 'lib_' + _file_stem + 'srcs'
isdir = os.path.isdir ( _srcs_dir )
if not isdir:
os.mkdir ( _srcs_dir )
_cmake_srcs = open ( _srcs_dir + '/SourceList.cmake', 'w' )
_cmake_srcs.write ( 'set ( _source_files ${_source_files} \n' )
## Build a header file for the library with the declarations and tables to
## manage the entry points in the library
_lib_hdrfname = _srcs_dir + '/' + _file_stem + 'decls.h'
_lib_pubfname = _srcs_dir + '/' + _file_stem + 'public.h'
_lib_apifname = _srcs_dir + '/' + _file_stem + 'libentry' + _file_suffix
_lib_cmakefil = _srcs_dir + '/CMakeLists.txt'
def start_header_file ( type ):
"Sets up the common stuff for both header files"
_str = '#ifndef ' + _file_stem + type + '_HEADER_INCLUDED\n'
_str = _str + '#define ' + _file_stem + type + '_HEADER_INCLUDED\n\n'
_str = _str + '// Copyright (c) 2018-2021, Carnegie Mellon University\n'
_str = _str + '// See LICENSE for details\n\n'
_str = _str + '#ifndef INITTRANSFORMFUNC\n'
_str = _str + '#define INITTRANSFORMFUNC\n'
_str = _str + 'typedef void ( * initTransformFunc ) ( void );\n'
_str = _str + '#endif\n\n'
_str = _str + '#ifndef DESTROYTRANSFORMFUNC\n'
_str = _str + '#define DESTROYTRANSFORMFUNC\n'
_str = _str + 'typedef void ( * destroyTransformFunc ) ( void );\n'
_str = _str + '#endif\n\n'
_str = _str + '#ifndef RUNTRANSFORMFUNC\n'
_str = _str + '#define RUNTRANSFORMFUNC\n'
_str = _str + 'typedef void ( * runTransformFunc ) ( double *output, double *input );\n'
_str = _str + '#endif\n\n'
_str = _str + '#ifndef CUBESIZE_T\n'
_str = _str + '#define CUBESIZE_T\n'
_str = _str + 'typedef struct cubesize { int dimx, dimy, dimz; } cubesize_t;\n'
_str = _str + '#endif\n\n'
_str = _str + '#ifndef TRANSFORMTUPLE_T\n'
_str = _str + '#define TRANSFORMTUPLE_T\n'
_str = _str + 'typedef struct transformTuple {\n'
_str = _str + ' initTransformFunc initfp;\n'
_str = _str + ' destroyTransformFunc destroyfp;\n'
_str = _str + ' runTransformFunc runfp;\n'
_str = _str + '} transformTuple_t;\n'
_str = _str + '#endif\n\n'
return _str;
def body_public_header ():
"Add the body details for the public header file"
_str = '// Query the list of sizes available from the library; returns a pointer to an\n'
_str = _str + '// array of sizes; each element is a struct of type cubesize specifying the X,\n'
_str = _str + '// Y, and Z dimensions\n\n'
_str = _str + 'cubesize_t * ' + _file_stem + 'QuerySizes ();\n\n'
_str = _str + '// Run an ' + _file_stem + ' transform once: run the init functions, run the transform,\n'
_str = _str + '// and finally tear down by calling the destroy function. Accepts cubesize_t\n'
_str = _str + '// specifying size, and pointers to the output (returned) data and the input\n'
_str = _str + '// data.\n\n'
_str = _str + 'void ' + _file_stem + 'Run ( cubesize_t req, double * output, double * input );\n\n'
_str = _str + '// Get a transform tuple -- a set of pointers to the init, destroy, and run\n'
_str = _str + '// functions for a specific size ' + _file_stem + ' transform. Using this information the\n'
_str = _str + '// user may call the init function to set up for the transform, then run the\n'
_str = _str + '// transform repeatedly, and finally tear down (using the destroy function).\n\n'
_str = _str + 'transformTuple_t * ' + _file_stem + 'Tuple ( cubesize_t req );\n\n'
_str = _str + '#endif\n\n'
return _str;
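## Hedged illustration (not written to any generated file): a C caller of the built
## library would use the public API declared above roughly as follows, assuming the
## default 'mddft3d_' stem; the struct fields and function names come from the
## strings assembled in start_header_file() and body_public_header().
# cubesize_t req = { 64, 64, 64 };
# transformTuple_t *tp = mddft3d_Tuple ( req );
# if ( tp != NULL ) { tp->initfp (); tp->runfp ( output, input ); tp->destroyfp (); free ( tp ); }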
def library_api ( ):
"Sets up the public API for the library"
_str = '// Copyright (c) 2018-2021, Carnegie Mellon University\n'
_str = _str + '// See LICENSE for details\n\n'
_str = _str + '#include <stdio.h>\n'
_str = _str + '#include <stdlib.h>\n'
_str = _str + '#include <string.h>\n'
_str = _str + '#include "' + _file_stem + 'decls.h"\n'
_str = _str + '#include "' + _file_stem + 'public.h"\n\n'
_str = _str + '// Query the list of sizes available from the library; returns a pointer to an\n'
_str = _str + '// array of sizes; each element is a struct of type cubesize specifying the X,\n'
_str = _str + '// Y, and Z dimensions\n\n'
_str = _str + 'cubesize_t * ' + _file_stem + 'QuerySizes ()\n'
_str = _str + '{\n'
_str = _str + ' cubesize_t *wp = (cubesize_t *) malloc ( sizeof ( AllCubes ) );\n'
_str = _str + ' if ( wp != NULL)\n'
_str = _str + ' memcpy ( (void *) wp, (const void *) AllCubes, sizeof ( AllCubes ) );\n\n'
_str = _str + ' return wp;\n'
_str = _str + '}\n\n'
_str = _str + '// Get a transform tuple -- a set of pointers to the init, destroy, and run\n'
_str = _str + '// functions for a specific size ' + _file_stem + ' transform. Using this information the\n'
_str = _str + '// user may call the init function to set up for the transform, then run the\n'
_str = _str + '// transform repeatedly, and finally tear down (using the destroy function).\n'
_str = _str + '// Returns NULL if requested size is not found\n\n'
_str = _str + 'transformTuple_t * ' + _file_stem + 'Tuple ( cubesize_t req )\n'
_str = _str + '{\n'
_str = _str + ' int indx;\n'
_str = _str + ' int numentries = sizeof ( AllCubes ) / sizeof ( cubesize_t ) - 1; // last entry is { 0, 0, 0 }\n'
_str = _str + ' transformTuple_t *wp = NULL;\n\n'
_str = _str + ' for ( indx = 0; indx < numentries; indx++ ) {\n'
_str = _str + ' if ( req.dimx == AllCubes[indx].dimx &&\n'
_str = _str + ' req.dimy == AllCubes[indx].dimy &&\n'
_str = _str + ' req.dimz == AllCubes[indx].dimz ) {\n'
_str = _str + ' // found a match\n'
_str = _str + ' wp = (transformTuple_t *) malloc ( sizeof ( transformTuple_t ) );\n'
_str = _str + ' if ( wp != NULL) {\n'
_str = _str + ' *wp = ' + _file_stem + 'Tuples[indx];\n'
_str = _str + ' }\n'
_str = _str + ' break;\n'
_str = _str + ' }\n'
_str = _str + ' }\n\n'
_str = _str + ' return wp;\n'
_str = _str + '}\n\n'
_str = _str + '// Run an ' + _file_stem + ' transform once: run the init functions, run the transform,\n'
_str = _str + '// and finally tear down by calling the destroy | |
from medpy.io import load
from medpy.io import save
import numpy as np
from sklearn import utils
from os import listdir,makedirs
from os.path import isfile, join, isdir,exists
import os
from medpy.features import indices
import pickle
import medpy.metric
import scipy.ndimage as ndimage
import math
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestClassifier
from sklearn import mixture
import scipy.stats as stats
from medpy import filter
import matplotlib.pyplot as plt
import xlsxwriter
from scipy.ndimage.morphology import binary_dilation
class subject_class:
def __init__(self, types_sequences ,path_sequences,path_subject):
self.dir = path_subject
self.T1= 0
self.T1_path = ''
self.T2 = 0
self.T2_path= ''
self.FLAIR = 0
self.FLAIR_path = ''
self.DP = 0
self.DP_path = ''
self.GADO = 0
self.GADO_path = ''
for n_paths in range(len(path_sequences)):
if types_sequences [n_paths] =='T1':
self.T1 = 1
self.T1_path = path_sequences[n_paths]
if types_sequences [n_paths] =='T2':
self.T2 = 1
self.T2_path = path_sequences[n_paths]
if types_sequences [n_paths] =='FLAIR':
self.FLAIR = 1
self.FLAIR_path = path_sequences[n_paths]
if types_sequences [n_paths] =='GADO':
self.GADO = 1
self.GADO_path = path_sequences[n_paths]
if types_sequences [n_paths] =='DP':
self.DP = 1
self.DP_path = path_sequences[n_paths]
self.T1_irs = 0
self.T1_irs_path = ''
self.T2_irs = 0
self.T2_irs_path = ''
self.FLAIR_irs = 0
self.FLAIR_irs_path = ''
self.DP_irs = 0
self.DP_irs_path = ''
self.GADO_irs = 0
self.GADO_irs_path = ''
self.brain_mask = 0
self.brain_mask_path = ''
self.lesion_mask = 0
self.lesion_mask_path = ''
self.csf_ext_train = 0
self.csf_ext_train_path = ''
def add_intermediate(self,path_intermediate):
self.intermediate = 1
self.intermediate_path = path_intermediate
def add_brain_mask(self,path_brain_mask):
self.brain_mask = 1
self.brain_mask_path = path_brain_mask
def add_pp_sequences(self,types_sequences ,path_pp_sequences):
for n_paths in range(len(path_pp_sequences)):
if types_sequences[n_paths] == 'T1':
self.T1_pp = 1
self.T1_pp_path = path_pp_sequences[n_paths]
if types_sequences[n_paths] == 'T2':
self.T2_pp = 1
self.T2_pp_path = path_pp_sequences[n_paths]
if types_sequences[n_paths] == 'FLAIR':
self.FLAIR_pp = 1
self.FLAIR_pp_path = path_pp_sequences[n_paths]
if types_sequences[n_paths] == 'GADO':
self.GADO_pp = 1
self.GADO_pp_path = path_pp_sequences[n_paths]
if types_sequences[n_paths] == 'DP':
self.DP_pp = 1
self.DP_pp_path = path_pp_sequences[n_paths]
def add_tissue_segmentation(self, path_GM,path_WM,path_CSF):
self.GM = 1
self.GM_path = path_GM
self.WM = 1
self.WM_path = path_WM
self.CSF = 1
self.CSF_path = path_CSF
def add_lesion_mask(self,path_lesion_mask):
self.lesion_mask = 1
self.lesion_mask_path = path_lesion_mask
def return_sequences(self):
sequences = []
if self.T1 == 1:
sequences.append(self.T1_path)
if self.T2 == 1:
sequences.append(self.T2_path)
if self.FLAIR == 1:
sequences.append(self.FLAIR_path)
if self.DP == 1:
sequences.append(self.DP_path)
if self.GADO == 1:
sequences.append(self.GADO_path)
return sequences
def return_irs_sequences(self):
sequences = []
if self.T1_irs == 1:
sequences.append(self.T1_irs_path)
if self.T2_irs == 1:
sequences.append(self.T2_irs_path)
if self.FLAIR_irs == 1:
sequences.append(self.FLAIR_irs_path)
if self.DP_irs == 1:
sequences.append(self.DP_irs_path)
if self.GADO_irs == 1:
sequences.append(self.GADO_irs_path)
return sequences
def add_ext_csf_classifier(self,class_ext_csf_path):
self.classifier_ext_csf = 1
self.classifier_ext_csf_path = class_ext_csf_path
def add_ms_classifier(self, class_ms_path):
self.classifier_ms = 1
self.classifier_ms_path = class_ms_path
def create_subject(flags_sequences,path_sequences,subject_name,subject_path):
subject = subject_class(flags_sequences, path_sequences, subject_path)
return subject
def create_brain_mask(subject):
if subject.T1_pp == 1:
vol, header = load(subject.T1_pp_path)
mask_brain = vol > 0
save(mask_brain, join(subject.intermediate_path, 'brain_mask.nii.gz'), header)
else:
c1, headerc1 = load(join(subject.intermediate_path,'c1T1.nii'))
c2, headerc2 = load(join(subject.intermediate_path, 'c2T1.nii'))
c3, headerc3 = load(join(subject.intermediate_path, 'c3T1.nii'))
c4, headerc4 = load(join(subject.intermediate_path, 'c4T1.nii'))
c5, headerc5 = load(join(subject.intermediate_path, 'c5T1.nii'))
t1,t1_header = load(subject.T1_path)
mask_brain = ((c1+c2+c3) > 0.95) * (c4 + c5 < 0.1 )
save(mask_brain, join(subject.intermediate_path,'brain_mask.nii.gz'), t1_header)
subject.add_brain_mask(join(subject.intermediate_path, 'brain_mask.nii.gz'))
return subject
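## Hedged note on the rule above: when no preprocessed T1 is available, a voxel is kept
## when its combined GM+WM+CSF probability (c1+c2+c3) exceeds 0.95 and its combined
## skull/background probability (c4+c5) stays below 0.1; both thresholds are taken
## directly from the expression above, not from any configuration file.
# brain_prob, bone_air_prob = 0.97, 0.05 # illustrative values for a single voxel
# assert (brain_prob > 0.95) and (bone_air_prob < 0.1) # such a voxel ends up in the mask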
def retrain_intensity(image,seq_type):
path = join(os.path.dirname(os.path.realpath(__file__)),'train_int')
subdirectories = os.listdir(path)
folders = []
for dir in subdirectories:
if isdir(join(path, dir)):
folders.append(dir)
images=[]
if seq_type == 1:
seq = 'T1_preprocessed.nii.gz'
if seq_type == 2:
seq = 'T2_preprocessed.nii.gz'
if seq_type == 3:
seq = 'FLAIR_preprocessed.nii.gz'
if seq_type == 4:
seq = 'DP_preprocessed.nii.gz'
if seq_type == 5:
seq = 'GADO_preprocessed.nii.gz'
for subject in folders:
im, im_header = medpy.io.load(join(path, subject, seq))
mask, mask_header = medpy.io.load(join(path, subject, 'Mask_registered.nii.gz'))
images.append(im[mask > 0])
images.append(image)
irs = medpy.filter.IntensityRangeStandardization()
trained_model, transformed_images = irs.train_transform(images)
return transformed_images[-1] # the input image was appended last, so its transformed version is the final entry
def intensity_correction(subject):
# if not exists(join(subject.dir, subject.irs_dir)):
# makedirs(join(subject.dir, subject.irs_dir))
# if not exists(join(subject.dir, subject.irs_dir,subject.name)):
# makedirs(join(subject.dir, subject.irs_dir,subject.name))
if subject.T1 == 1:
with open(join(os.path.dirname(os.path.realpath(__file__)),'models/model_T1.pkl'), 'r') as f:
irs = pickle.load(f)
vol, header = load(subject.T1_pp_path)
mask, mask_header = load(subject.brain_mask_path)
spacing_indices = (1, 1, 1)
vol_indices = indices(vol, spacing_indices, mask)
try:
intensities_corrected = irs.transform(vol[mask > 0])
except:
intensities_corrected= retrain_intensity(vol[mask > 0], 1)
vol_irs = np.zeros(vol.shape)
for n_voxel in range(len(vol[mask > 0])):
vol_irs[vol_indices[n_voxel][0], vol_indices[n_voxel][1], vol_indices[n_voxel][2]] = \
intensities_corrected[n_voxel]
save(vol_irs, join(subject.intermediate_path, 'T1_corrected.nii.gz'), header)
subject.T1_irs = 1
subject.T1_irs_path = join(subject.intermediate_path, 'T1_corrected.nii.gz')
if subject.T2 == 1:
with open(join(os.path.dirname(os.path.realpath(__file__)),'models/model_T2.pkl'), 'r') as f:
irs = pickle.load(f)
vol, header = load(subject.T2_pp_path)
mask, mask_header = load(subject.brain_mask_path)
spacing_indices = (1, 1, 1)
vol_indices = indices(vol, spacing_indices, mask)
try:
intensities_corrected = irs.transform(vol[mask > 0])
except:
intensities_corrected = retrain_intensity(vol[mask > 0], 2)
vol_irs = np.zeros(vol.shape)
for n_voxel in range(len(vol[mask > 0])):
vol_irs[vol_indices[n_voxel][0], vol_indices[n_voxel][1], vol_indices[n_voxel][2]] = \
intensities_corrected[n_voxel]
save(vol_irs, join(subject.intermediate_path, 'T2_corrected.nii.gz'), header)
subject.T2_irs = 1
subject.T2_irs_path = join(subject.intermediate_path,'T2_corrected.nii.gz')
if subject.FLAIR == 1:
with open(join(os.path.dirname(os.path.realpath(__file__)),'models/model_FLAIR.pkl'), 'r') as f:
irs = pickle.load(f)
vol, header = load(subject.FLAIR_pp_path)
mask, mask_header = load(subject.brain_mask_path)
spacing_indices = (1, 1, 1)
vol_indices = indices(vol, spacing_indices, mask)
try:
intensities_corrected = irs.transform(vol[mask > 0])
except:
intensities_corrected = retrain_intensity(vol[mask > 0], 3)
vol_irs = np.zeros(vol.shape)
for n_voxel in range(len(vol[mask > 0])):
vol_irs[vol_indices[n_voxel][0], vol_indices[n_voxel][1], vol_indices[n_voxel][2]] = \
intensities_corrected[n_voxel]
save(vol_irs, join(subject.intermediate_path,'FLAIR_corrected.nii.gz'), header)
subject.FLAIR_irs = 1
subject.FLAIR_irs_path = join(subject.intermediate_path, 'FLAIR_corrected.nii.gz')
if subject.DP == 1:
with open(join(os.path.dirname(os.path.realpath(__file__)),'models/model_DP.pkl'), 'r') as f:
irs = pickle.load(f)
vol, header = load(subject.DP_pp_path)
mask, mask_header = load(subject.brain_mask_path)
spacing_indices = (1, 1, 1)
vol_indices = indices(vol, spacing_indices, mask)
try:
intensities_corrected = irs.transform(vol[mask > 0])
except:
intensities_corrected = retrain_intensity(vol[mask > 0], 4)
vol_irs = np.zeros(vol.shape)
for n_voxel in range(len(vol[mask > 0])):
vol_irs[vol_indices[n_voxel][0], vol_indices[n_voxel][1], vol_indices[n_voxel][2]] = \
intensities_corrected[n_voxel]
save(vol_irs, join(subject.intermediate_path, 'DP_corrected.nii.gz'), header)
subject.DP_irs = 1
subject.DP_irs_path = join(subject.intermediate_path, 'DP_corrected.nii.gz')
if subject.GADO== 1:
with open(join(os.path.dirname(os.path.realpath(__file__)),'models/model_GADO.pkl'), 'r') as f:
irs = pickle.load(f)
vol, header = load(subject.GADO_pp_path)
mask, mask_header = load(subject.brain_mask_path)
spacing_indices = (1, 1, 1)
vol_indices = indices(vol, spacing_indices, mask)
try:
intensities_corrected = irs.transform(vol[mask > 0])
except:
intensities_corrected = retrain_intensity(vol[mask > 0], 5)
vol_irs = np.zeros(vol.shape)
for n_voxel in range(len(vol[mask > 0])):
vol_irs[vol_indices[n_voxel][0], vol_indices[n_voxel][1], vol_indices[n_voxel][2]] = \
intensities_corrected[n_voxel]
save(vol_irs, join(subject.intermediate_path, 'GADO_corrected.nii.gz'), header)
subject.GADO_irs = 1
subject.GADO_irs_path = join(subject.intermediate_path, 'GADO_corrected.nii.gz')
return subject
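## Note: the five per-sequence blocks above repeat the same steps (load the trained
## IRS model, load the preprocessed volume and brain mask, transform the masked
## intensities, scatter them back into a volume, save it). A hedged refactoring
## sketch -- illustrative only, not used by this pipeline -- could look like:
# def apply_irs(model_path, vol_path, mask_path, out_path):
#     with open(model_path, 'r') as f:
#         irs = pickle.load(f)
#     vol, header = load(vol_path)
#     mask, _ = load(mask_path)
#     vol_irs = np.zeros(vol.shape)
#     vol_irs[mask > 0] = irs.transform(vol[mask > 0])
#     save(vol_irs, out_path, header)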
def gray_matter_threshold(flair,GM_mask):
GM=medpy.features.intensities(flair,GM_mask)
n, bins, patch = plt.hist(GM, bins=200, label="Flair WM",histtype='stepfilled')
maximo = np.max(n)
paso=bins[1]-bins[0]
pico=bins[np.argmax(n)]+paso/2
test=n>(maximo/2)
iter=0
modo=0
for j in test:
if modo==0:
if j == True:
indice1=iter
modo=1
else:
if j==False:
indice2=iter-1
break
iter+=1
fwhm=(bins[indice2]-bins[indice1])/2
# plt.axvline(pico, color='r', linestyle='dashed', linewidth=2)
# gamma_int=1
gamma_int =0.2
# plt.axvline(pico+gamma_int*fwhm, color='r', linestyle='dashed', linewidth=5,label = "Threshold")
Tint=pico + gamma_int*fwhm
return flair>Tint
def kernel_subs_creator(tam,spacing):
kernel_shape = np.asarray([(int(round(tam/spacing[0]))),(int(round(tam/spacing[1]))),(int(round(tam/spacing[2])))])
for shape_kernel in range(len(kernel_shape)):
if kernel_shape[shape_kernel] % 2 ==0:
kernel_shape[shape_kernel] +=1
kernel = np.ones(kernel_shape.tolist())*-1/((kernel_shape[0] * kernel_shape[1] *kernel_shape[2]) -1)
kernel_center = [ math.floor(elem/2.0) for elem in kernel.shape ]
kernel[int(kernel_center[0]),int(kernel_center[1]),int(kernel_center[2])] = 1
return kernel
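## The kernel built above is a centre-surround subtraction filter: the centre voxel
## keeps weight 1 and every other voxel in an (approximately) tam x tam x tam mm
## neighbourhood gets weight -1/(N-1), so convolving with it yields "voxel intensity
## minus local mean". Hedged usage sketch (the spacing value below is illustrative):
# example_kernel = kernel_subs_creator(3, (1.0, 1.0, 1.0)) # 3 mm box at 1 mm isotropic spacing -> 3x3x3
# assert abs(example_kernel.sum()) < 1e-6 # the weights cancel out on a constant image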
def subsampling(path,sampling):
random_data=utils.shuffle(np.load(path,'r'))
subset=random_data[1:int(sampling*random_data.shape[0])][:].copy()
return subset
def create_features_csfext(subject,flag):
print "------------Creating csf ext features-----------------"
mask, mask_header = load(subject.brain_mask_path)
pv1, pv1_header = load(subject.GM_path)
pv2, pv2_header = load(subject.WM_path)
pv0, pv0_header = load(subject.CSF_path)
flair, fl_header = load(subject.FLAIR_irs_path)
if flag == 0:
mask_class, mask_class_header = load(subject.gt_extcsf_path)
mask_class = (pv0 > pv1) * (pv0 > pv2) * (mask_class == 0)
data_name = 'vent'
if flag == 1:
mask_class,mask_class_header = load(subject.gt_extcsf_path)
data_name = 'csf_ext'
if flag == 2:
mask_class = ((pv0 > pv1) * (pv0 > pv2))
data_name = 'csf'
if flag > 2:
print 'error: invalid flag'
return -1
mask_voxels = mask * mask_class
iter = 0
for vol_path in subject.return_irs_sequences():
print 'Generating csf features ---< ' + ' ' + vol_path
image_data, image_header = load(vol_path)
spacing = medpy.io.header.get_pixel_spacing(image_header)
intensities = medpy.features.intensity.intensities(image_data, mask_voxels)
gaussian1 = medpy.features.intensity.local_mean_gauss(image_data, 3, spacing, mask_voxels)
gaussian2 = medpy.features.intensity.local_mean_gauss(image_data, 5, spacing, mask_voxels)
gaussian3 = medpy.features.intensity.local_mean_gauss(image_data, 7, spacing, mask_voxels)
kernel1 = kernel_subs_creator(3, spacing)
subs1 = medpy.features.intensity.intensities(ndimage.filters.convolve(image_data, kernel1), mask_voxels)
kernel2 = kernel_subs_creator(5, spacing)
subs2 = medpy.features.intensity.intensities(ndimage.filters.convolve(image_data, kernel2), mask_voxels)
kernel3 = kernel_subs_creator(7, spacing)
subs3 = medpy.features.intensity.intensities(ndimage.filters.convolve(image_data, kernel3), mask_voxels)
if iter == 0:
joined = medpy.features.utilities.join(intensities, gaussian1, gaussian2, gaussian3, subs1, subs2, subs3)
else:
joined = medpy.features.utilities.join(joined, intensities, gaussian1, gaussian2, gaussian3, subs1, subs2,
subs3)
iter += 1
print 'Generating tissue and distance features'
spacing_indices = (1, 1, 1)
spacing = medpy.io.header.get_pixel_spacing(fl_header)
indices = medpy.features.indices(flair, spacing_indices, mask_voxels)
distances_0 = medpy.features.intensity.centerdistance_xdminus1(flair, 0, spacing, mask_voxels)
distances_1 = medpy.features.intensity.centerdistance_xdminus1(flair, 1, spacing, mask_voxels)
distances_2 = medpy.features.intensity.centerdistance_xdminus1(flair, 2, spacing, mask_voxels)
flair_unpp, fl_unpp_header = load(subject.FLAIR_path)
skull = (flair_unpp * (mask==0))>10
spacing = medpy.io.header.get_pixel_spacing(fl_header)
dist_transform_vol = ndimage.distance_transform_edt(skull == 0, sampling=spacing)
dist_transform_feature = medpy.features.intensity.intensities(dist_transform_vol, mask_voxels)
intensities_pv0 = medpy.features.intensity.intensities(pv0, mask_voxels)
intensities_pv1 = medpy.features.intensity.intensities(pv1, mask_voxels)
intensities_pv2 = medpy.features.intensity.intensities(pv2, mask_voxels)
gaussian1_pv0 = medpy.features.intensity.local_mean_gauss(pv0, 3, spacing, mask_voxels)
gaussian2_pv0 = medpy.features.intensity.local_mean_gauss(pv0, 7, spacing, mask_voxels)
gaussian3_pv0 = medpy.features.intensity.local_mean_gauss(pv0, 15, spacing, mask_voxels)
gaussian1_pv1 = medpy.features.intensity.local_mean_gauss(pv1, 3, spacing, mask_voxels)
gaussian2_pv1 = medpy.features.intensity.local_mean_gauss(pv1, 7, spacing, mask_voxels)
| |
('credit', False, Credit),
('credit-words', True, CreditWords),
('encoding', False, Encoding),
('software', True, Software),
('supports', False, Supports),
('encoding-date', True),
('part-list', False, PartList),
('part-group', False, PartGroup),
('group-name', True),
('group-symbol', True),
('group-barline', True),
('group-name-display', False),
('group-abbreviation', False),
('group-abbreviation-display', False),
('group-time', False),
('solo', False),
('ensemble', False),
('score-part', False, ScorePart),
('score-instrument', False, ScoreInstrument),
('instrument-name', True),
('instrument-abbreviation', True),
('part-name', True),
('harmony', False, Harmony),
('inversion', True),
('function', True),
('root', False, Root),
('root-step', True),
('root-alter', True),
('kind', True, Kind),
('bass', False, Bass),
('bass-step', True),
('bass-alter', True),
('degree', False, Degree),
('degree-value', True, DegreeValue),
('degree-alter', True, DegreeAlter),
('degree-type', True, DegreeType),
('midi-instrument', False, MIDIInstrument),
('midi-channel', True),
('midi-program', True),
('volume', False),
('pan', False),
('elevation', False),
('midi-name', False),
('midi-bank', False),
('midi-unpitched', False),
('double', False),
]
# order matters: keep order here
self.tagsCharData = [] # note: this may no longer be needed
self.tagsAll = []
for data in _tags:
# some cases have a string w/o a class definition
if isinstance(data, str):
# if common.isStr(data):
if data in DYNAMIC_MARKS:
data = [data, False, DynamicMark]
elif data in ARTICULATION_MARKS:
data = [data, False, ArticulationMark]
elif data in TECHNICAL_MARKS:
data = [data, False, TechnicalMark]
else:
raise MusicXMLException('got tag without any information on it: %s' % data)
tagName = data[0]
charDataBool = data[1]
if len(data) > 2:
className = data[2]
else: # not all tags define a class name
className = None
# store tag names in order
self.tagsAll.append(tagName)
if charDataBool:
self.tagsCharData.append(tagName)
self._t[tagName] = Tag(tagName, charDataBool, className)
# utility
self._stat = None
self._statMapWidth = 80
def __getitem__(self, key):
return self._t[key]
def getClassName(self, key):
'''
Get the class or name, or None if none defined.
>>> tl = musicxml.mxObjects.TagLib()
>>> tl.getClassName('voice')
'''
return self._t[key].className # may be None
def keys(self):
return self._t.keys()
#---------------------------------------------------------------------------
# utilities for error checking and debugging
def _statTabulate(self):
self._stat = {}
tags = list(self._t.keys())
tags.sort()
maxCount = 0
maxTag = 0
for tag in tags:
if self._t[tag].count > maxCount:
maxCount = self._t[tag].count
if len(tag) > maxTag:
maxTag = len(tag)
for tag in tags:
# get magnitude string
if maxCount > 0:
if maxCount > self._statMapWidth:
scalar = self._statMapWidth * (self._t[tag].count /
float(maxCount))
scalar = int(round(scalar))
else:
scalar = self._t[tag].count
magStr = scalar * '.'
else: magStr = ''
# get formatted tag str
tagStr = tag.ljust(maxTag+1)
# store count, tag string, magnitude string
self._stat[tag] = [self._t[tag].count, tagStr, magStr]
def statClear(self):
tags = self._t.keys()
for tag in tags:
self._t[tag].count = 0
def _statMapActive(self):
'''Display method for tag audit checks
'''
tags = list(self._t.keys())
tags.sort()
sortOrder = []
for tag in tags:
if self._stat[tag][0] > 0:
# count, tagStr, magStr
sortOrder.append(self._stat[tag])
sortOrder.sort()
msg = []
for count, tagStr, magStr in sortOrder:
msg.append(tagStr + str(count).ljust(4) + magStr)
print('\n'.join(msg))
def statRun(self):
self._statTabulate()
self._statMapActive()
def audit(self):
'''
A way to check for errors in SAX parsing. Assumes that
all start() calls have been paired with an end() call,
and that all element data has been cleared.
'''
errors = []
header = 'TagLib audit: '
for key in self._t:
if self._t[key].status: # if true
errors.append('tag <%s> left open' % key)
if self._t[key].cdFlag:
sample = self._t[key].charData
if sample != '':
errors.append('tag <%s> left element data: %s' % (key, sample))
if len(errors) != 0:
ok = False
return ok, header + ('%s errors found:\n' % len(errors)) + '\n'.join(errors)
else:
ok = True
return ok, header + 'no errors found.'
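## Hedged usage sketch (illustrative only, in the spirit of the doctests above):
# tl = TagLib()
# ok, report = tl.audit() # a freshly built TagLib has no open tags or leftover data
# assert ok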
#-------------------------------------------------------------------------------
class MusicXMLElement(xmlnode.XMLNode):
'''MusicXML elements are an abstraction of MusicXML
into an object oriented framework. Some, not all, of MusicXML
elements are represented as objects. Some sub-elements are much more
simply placed as attributes of parent objects. These simple elements
have only a tag and character data. Elements that have attributes
and/or sub-elements, however, must be represented as objects.
'''
def __init__(self):
'''
These tests are module specific and should be loaded as unittests, below
>>> a = musicxml.mxObjects.MusicXMLElement()
>>> a._convertNameToXml('groupAbbreviation')
'group-abbreviation'
>>> a._convertNameToXml('midiUnpitched')
'midi-unpitched'
>>> a._convertNameToXml('groupNameDisplay')
'group-name-display'
>>> a._convertNameToXml('group-name-display')
'group-name-display'
>>> a = musicxml.mxObjects.MusicXMLElement()
>>> a._convertNameFromXml('group-abbreviation')
'groupAbbreviation'
>>> a._convertNameFromXml('midi-unpitched')
'midiUnpitched'
>>> a._convertNameFromXml('midiUnpitched')
'midiUnpitched'
>>> a._convertNameFromXml('opus')
'opus'
>>> a._convertNameFromXml('group-name-display')
'groupNameDisplay'
>>> a = musicxml.mxObjects.MusicXMLElement()
>>> len(a._publicAttributes())
3
>>> print(a._publicAttributes())
['charData', 'external', 'tag']
'''
xmlnode.XMLNode.__init__(self)
self.external = {} # references to external objects
self._attr = {} # store attributes in dictionary
self._tag = None # name of tag
self.charData = None # obtained by assignment from a Tag
self._doctypeName = 'score-partwise'
self._doctypePublic = '-//Recordare//DTD MusicXML 2.0 Partwise//EN'
self._doctypeSystem = 'http://www.musicxml.org/dtds/partwise.dtd'
# dictionary of local Python name and possible names assumed
# from music xml. used in get and set methods
# specialize in subclassess
self._crossReference = {'charData': ['characterData', 'content']}
class MusicXMLElementList(MusicXMLElement):
'''A MusicXML element that is designed to function as a list. In general,
this is an element that is only used to contain other elements.
List operations permit easy access and manipulation.
Note that the design here mirrors that of node.py NodeList, but this needs
to be a subclass of MusicXMLElement
'''
def __init__(self):
MusicXMLElement.__init__(self)
# basic storage location
self.componentList = []
# additional attributes and elements will be defined in subclass
def _getComponents(self):
return self.componentList
def append(self, item):
self.componentList.append(item)
def insert(self, position, item):
self.componentList.insert(position, item)
def __len__(self):
return len(self.componentList)
def __iter__(self):
return common.Iterator(self.componentList)
def __getitem__(self, key):
'''Get item via index value
'''
return self.componentList[key]
def __add__(self, other):
'''Used to combine component lists of objects. There may be other object
attributes not on component list that are not 'added' with this method.
>>> a = musicxml.mxObjects.MusicXMLElementList()
>>> a.componentList.append(1)
>>> b = musicxml.mxObjects.MusicXMLElementList()
>>> b.componentList.append(2)
>>> c = a + b
>>> c.componentList
[1, 2]
>>> a.componentList # original is not changed
[1]
'''
new = copy.deepcopy(self)
new.componentList += other.componentList
return new
#-------------------------------------------------------------------------------
class Score(MusicXMLElementList):
'''Score is used to collect score header elements and,
if available, all other MusicXML data. Score can be used for
partwise or timewise scores. This object includes all MusicXML score
information.
'''
def __init__(self, m21Version=None):
'''
>>> a = musicxml.mxObjects.Score()
>>> a.tag
'score-partwise'
>>> a.setDefaults()
>>> b = musicxml.mxObjects.Identification()
>>> b.setDefaults()
>>> a.set('identification', b)
>>> c = musicxml.mxObjects.Score()
>>> d = c.merge(a)
'''
MusicXMLElementList.__init__(self)
self._tag = 'score-partwise' # assumed for now
# attributes
self._attr['version'] = None
# elements
self.movementTitle = None
self.movementNumber = None
# component objects
self.workObj = None
self.identificationObj = None
self.defaultsObj = None
self.encodingObj = None
self.partListObj = None
self.creditList = [] # store a list of credit objects
self.componentList = [] # list of Part objects
self._crossReference['partListObj'] = ['partlist', 'part-list']
self._crossReference['identificationObj'] = ['identification']
# the score, as the outermost container, stores the m21 version
# number that it was made with when written to disc
# this value is only relevant in comparing pickled files
self.m21Version = m21Version
def _getComponents(self):
c = []
c.append(self.workObj)
c.append(('movement-number', self.movementNumber))
c.append(('movement-title', self.movementTitle))
c.append(self.identificationObj)
c.append(self.defaultsObj)
c = c + self.creditList
c.append(self.partListObj)
c = c + self.componentList
return c
def setDefaults(self):
self.set('movementTitle', defaults.title)
#---------------------------------------------------------------------------
# utility methods unique to the score
def getPartIds(self):
'''
A quick way to get all valid part ids in the componentList
'''
post = []
for part in self.componentList:
post.append(part.get('id'))
return post
def getPartIdsFromPartListObj(self):
'''
A quick way to get all valid part ids in the partListObj
for each one that is a ScorePart
'''
post = []
for part in self.partListObj:
if isinstance(part, ScorePart):
post.append(part.get('id'))
return post
def partIdToNameDict(self):
'''
A quick way to get a mapping of valid part ids to the part names
from a :class:`~music21.musicxml.Score` (musicxml.Score)
object in the `.partListObj` property.
Returns a dictionary mapping part id to part-name for each :class:`~music21.musicxml.ScorePart`
in `.partListObj`.
| |
'\U0001d54a',
'sopf;': '\U0001d564',
'spades;': '\u2660',
'spadesuit;': '\u2660',
'spar;': '\u2225',
'sqcap;': '\u2293',
'sqcaps;': '\u2293\ufe00',
'sqcup;': '\u2294',
'sqcups;': '\u2294\ufe00',
'Sqrt;': '\u221a',
'sqsub;': '\u228f',
'sqsube;': '\u2291',
'sqsubset;': '\u228f',
'sqsubseteq;': '\u2291',
'sqsup;': '\u2290',
'sqsupe;': '\u2292',
'sqsupset;': '\u2290',
'sqsupseteq;': '\u2292',
'squ;': '\u25a1',
'Square;': '\u25a1',
'square;': '\u25a1',
'SquareIntersection;': '\u2293',
'SquareSubset;': '\u228f',
'SquareSubsetEqual;': '\u2291',
'SquareSuperset;': '\u2290',
'SquareSupersetEqual;': '\u2292',
'SquareUnion;': '\u2294',
'squarf;': '\u25aa',
'squf;': '\u25aa',
'srarr;': '\u2192',
'Sscr;': '\U0001d4ae',
'sscr;': '\U0001d4c8',
'ssetmn;': '\u2216',
'ssmile;': '\u2323',
'sstarf;': '\u22c6',
'Star;': '\u22c6',
'star;': '\u2606',
'starf;': '\u2605',
'straightepsilon;': '\u03f5',
'straightphi;': '\u03d5',
'strns;': '\xaf',
'Sub;': '\u22d0',
'sub;': '\u2282',
'subdot;': '\u2abd',
'subE;': '\u2ac5',
'sube;': '\u2286',
'subedot;': '\u2ac3',
'submult;': '\u2ac1',
'subnE;': '\u2acb',
'subne;': '\u228a',
'subplus;': '\u2abf',
'subrarr;': '\u2979',
'Subset;': '\u22d0',
'subset;': '\u2282',
'subseteq;': '\u2286',
'subseteqq;': '\u2ac5',
'SubsetEqual;': '\u2286',
'subsetneq;': '\u228a',
'subsetneqq;': '\u2acb',
'subsim;': '\u2ac7',
'subsub;': '\u2ad5',
'subsup;': '\u2ad3',
'succ;': '\u227b',
'succapprox;': '\u2ab8',
'succcurlyeq;': '\u227d',
'Succeeds;': '\u227b',
'SucceedsEqual;': '\u2ab0',
'SucceedsSlantEqual;': '\u227d',
'SucceedsTilde;': '\u227f',
'succeq;': '\u2ab0',
'succnapprox;': '\u2aba',
'succneqq;': '\u2ab6',
'succnsim;': '\u22e9',
'succsim;': '\u227f',
'SuchThat;': '\u220b',
'Sum;': '\u2211',
'sum;': '\u2211',
'sung;': '\u266a',
'sup1': '\xb9',
'sup1;': '\xb9',
'sup2': '\xb2',
'sup2;': '\xb2',
'sup3': '\xb3',
'sup3;': '\xb3',
'Sup;': '\u22d1',
'sup;': '\u2283',
'supdot;': '\u2abe',
'supdsub;': '\u2ad8',
'supE;': '\u2ac6',
'supe;': '\u2287',
'supedot;': '\u2ac4',
'Superset;': '\u2283',
'SupersetEqual;': '\u2287',
'suphsol;': '\u27c9',
'suphsub;': '\u2ad7',
'suplarr;': '\u297b',
'supmult;': '\u2ac2',
'supnE;': '\u2acc',
'supne;': '\u228b',
'supplus;': '\u2ac0',
'Supset;': '\u22d1',
'supset;': '\u2283',
'supseteq;': '\u2287',
'supseteqq;': '\u2ac6',
'supsetneq;': '\u228b',
'supsetneqq;': '\u2acc',
'supsim;': '\u2ac8',
'supsub;': '\u2ad4',
'supsup;': '\u2ad6',
'swarhk;': '\u2926',
'swArr;': '\u21d9',
'swarr;': '\u2199',
'swarrow;': '\u2199',
'swnwar;': '\u292a',
'szlig': '\xdf',
'szlig;': '\xdf',
'Tab;': '\t',
'target;': '\u2316',
'Tau;': '\u03a4',
'tau;': '\u03c4',
'tbrk;': '\u23b4',
'Tcaron;': '\u0164',
'tcaron;': '\u0165',
'Tcedil;': '\u0162',
'tcedil;': '\u0163',
'Tcy;': '\u0422',
'tcy;': '\u0442',
'tdot;': '\u20db',
'telrec;': '\u2315',
'Tfr;': '\U0001d517',
'tfr;': '\U0001d531',
'there4;': '\u2234',
'Therefore;': '\u2234',
'therefore;': '\u2234',
'Theta;': '\u0398',
'theta;': '\u03b8',
'thetasym;': '\u03d1',
'thetav;': '\u03d1',
'thickapprox;': '\u2248',
'thicksim;': '\u223c',
'ThickSpace;': '\u205f\u200a',
'thinsp;': '\u2009',
'ThinSpace;': '\u2009',
'thkap;': '\u2248',
'thksim;': '\u223c',
'THORN': '\xde',
'thorn': '\xfe',
'THORN;': '\xde',
'thorn;': '\xfe',
'Tilde;': '\u223c',
'tilde;': '\u02dc',
'TildeEqual;': '\u2243',
'TildeFullEqual;': '\u2245',
'TildeTilde;': '\u2248',
'times': '\xd7',
'times;': '\xd7',
'timesb;': '\u22a0',
'timesbar;': '\u2a31',
'timesd;': '\u2a30',
'tint;': '\u222d',
'toea;': '\u2928',
'top;': '\u22a4',
'topbot;': '\u2336',
'topcir;': '\u2af1',
'Topf;': '\U0001d54b',
'topf;': '\U0001d565',
'topfork;': '\u2ada',
'tosa;': '\u2929',
'tprime;': '\u2034',
'TRADE;': '\u2122',
'trade;': '\u2122',
'triangle;': '\u25b5',
'triangledown;': '\u25bf',
'triangleleft;': '\u25c3',
'trianglelefteq;': '\u22b4',
'triangleq;': '\u225c',
'triangleright;': '\u25b9',
'trianglerighteq;': '\u22b5',
'tridot;': '\u25ec',
'trie;': '\u225c',
'triminus;': '\u2a3a',
'TripleDot;': '\u20db',
'triplus;': '\u2a39',
'trisb;': '\u29cd',
'tritime;': '\u2a3b',
'trpezium;': '\u23e2',
'Tscr;': '\U0001d4af',
'tscr;': '\U0001d4c9',
'TScy;': '\u0426',
'tscy;': '\u0446',
'TSHcy;': '\u040b',
'tshcy;': '\u045b',
'Tstrok;': '\u0166',
'tstrok;': '\u0167',
'twixt;': '\u226c',
'twoheadleftarrow;': '\u219e',
'twoheadrightarrow;': '\u21a0',
'Uacute': '\xda',
'uacute': '\xfa',
'Uacute;': '\xda',
'uacute;': '\xfa',
'Uarr;': '\u219f',
'uArr;': '\u21d1',
'uarr;': '\u2191',
'Uarrocir;': '\u2949',
'Ubrcy;': '\u040e',
'ubrcy;': '\u045e',
'Ubreve;': '\u016c',
'ubreve;': '\u016d',
'Ucirc': '\xdb',
'ucirc': '\xfb',
'Ucirc;': '\xdb',
'ucirc;': '\xfb',
'Ucy;': '\u0423',
'ucy;': '\u0443',
'udarr;': '\u21c5',
'Udblac;': '\u0170',
'udblac;': '\u0171',
'udhar;': '\u296e',
'ufisht;': '\u297e',
'Ufr;': '\U0001d518',
'ufr;': '\U0001d532',
'Ugrave': '\xd9',
'ugrave': '\xf9',
'Ugrave;': '\xd9',
'ugrave;': '\xf9',
'uHar;': '\u2963',
'uharl;': '\u21bf',
'uharr;': '\u21be',
'uhblk;': '\u2580',
'ulcorn;': '\u231c',
'ulcorner;': '\u231c',
'ulcrop;': '\u230f',
'ultri;': '\u25f8',
'Umacr;': '\u016a',
'umacr;': '\u016b',
'uml': '\xa8',
'uml;': '\xa8',
'UnderBar;': '_',
'UnderBrace;': '\u23df',
'UnderBracket;': '\u23b5',
'UnderParenthesis;': '\u23dd',
'Union;': '\u22c3',
'UnionPlus;': '\u228e',
'Uogon;': '\u0172',
'uogon;': '\u0173',
'Uopf;': '\U0001d54c',
'uopf;': '\U0001d566',
'UpArrow;': '\u2191',
'Uparrow;': '\u21d1',
'uparrow;': '\u2191',
'UpArrowBar;': '\u2912',
'UpArrowDownArrow;': '\u21c5',
'UpDownArrow;': '\u2195',
'Updownarrow;': '\u21d5',
'updownarrow;': '\u2195',
'UpEquilibrium;': '\u296e',
'upharpoonleft;': '\u21bf',
'upharpoonright;': '\u21be',
'uplus;': '\u228e',
'UpperLeftArrow;': '\u2196',
'UpperRightArrow;': '\u2197',
'Upsi;': '\u03d2',
'upsi;': '\u03c5',
'upsih;': '\u03d2',
'Upsilon;': '\u03a5',
'upsilon;': '\u03c5',
'UpTee;': '\u22a5',
'UpTeeArrow;': '\u21a5',
'upuparrows;': '\u21c8',
'urcorn;': '\u231d',
'urcorner;': '\u231d',
'urcrop;': '\u230e',
'Uring;': '\u016e',
'uring;': '\u016f',
'urtri;': '\u25f9',
'Uscr;': '\U0001d4b0',
'uscr;': '\U0001d4ca',
'utdot;': '\u22f0',
'Utilde;': '\u0168',
'utilde;': '\u0169',
'utri;': '\u25b5',
'utrif;': '\u25b4',
'uuarr;': '\u21c8',
'Uuml': '\xdc',
'uuml': '\xfc',
'Uuml;': '\xdc',
'uuml;': '\xfc',
'uwangle;': '\u29a7',
'vangrt;': '\u299c',
'varepsilon;': '\u03f5',
'varkappa;': '\u03f0',
'varnothing;': '\u2205',
'varphi;': '\u03d5',
'varpi;': '\u03d6',
'varpropto;': '\u221d',
'vArr;': '\u21d5',
'varr;': '\u2195',
'varrho;': '\u03f1',
'varsigma;': '\u03c2',
'varsubsetneq;': '\u228a\ufe00',
'varsubsetneqq;': '\u2acb\ufe00',
'varsupsetneq;': '\u228b\ufe00',
'varsupsetneqq;': '\u2acc\ufe00',
'vartheta;': '\u03d1',
'vartriangleleft;': '\u22b2',
'vartriangleright;': '\u22b3',
'Vbar;': '\u2aeb',
'vBar;': '\u2ae8',
'vBarv;': '\u2ae9',
'Vcy;': '\u0412',
'vcy;': '\u0432',
'VDash;': '\u22ab',
'Vdash;': '\u22a9',
'vDash;': '\u22a8',
'vdash;': '\u22a2',
'Vdashl;': '\u2ae6',
'Vee;': '\u22c1',
'vee;': '\u2228',
'veebar;': '\u22bb',
'veeeq;': '\u225a',
'vellip;': '\u22ee',
'Verbar;': '\u2016',
'verbar;': '|',
'Vert;': '\u2016',
'vert;': '|',
'VerticalBar;': '\u2223',
'VerticalLine;': '|',
'VerticalSeparator;': '\u2758',
'VerticalTilde;': '\u2240',
'VeryThinSpace;': '\u200a',
'Vfr;': '\U0001d519',
'vfr;': '\U0001d533',
'vltri;': '\u22b2',
'vnsub;': '\u2282\u20d2',
'vnsup;': '\u2283\u20d2',
'Vopf;': '\U0001d54d',
'vopf;': '\U0001d567',
'vprop;': '\u221d',
'vrtri;': '\u22b3',
'Vscr;': '\U0001d4b1',
'vscr;': '\U0001d4cb',
'vsubnE;': '\u2acb\ufe00',
'vsubne;': '\u228a\ufe00',
'vsupnE;': '\u2acc\ufe00',
'vsupne;': '\u228b\ufe00',
'Vvdash;': '\u22aa',
'vzigzag;': '\u299a',
'Wcirc;': '\u0174',
'wcirc;': '\u0175',
'wedbar;': '\u2a5f',
'Wedge;': '\u22c0',
'wedge;': '\u2227',
'wedgeq;': '\u2259',
'weierp;': '\u2118',
'Wfr;': '\U0001d51a',
'wfr;': '\U0001d534',
'Wopf;': '\U0001d54e',
'wopf;': '\U0001d568',
'wp;': '\u2118',
'wr;': '\u2240',
'wreath;': '\u2240',
'Wscr;': '\U0001d4b2',
'wscr;': '\U0001d4cc',
'xcap;': '\u22c2',
'xcirc;': '\u25ef',
'xcup;': '\u22c3',
'xdtri;': '\u25bd',
'Xfr;': '\U0001d51b',
'xfr;': '\U0001d535',
'xhArr;': '\u27fa',
'xharr;': '\u27f7',
'Xi;': '\u039e',
'xi;': '\u03be',
'xlArr;': '\u27f8',
'xlarr;': '\u27f5',
'xmap;': '\u27fc',
'xnis;': '\u22fb',
'xodot;': '\u2a00',
'Xopf;': '\U0001d54f',
'xopf;': '\U0001d569',
'xoplus;': '\u2a01',
'xotime;': '\u2a02',
'xrArr;': '\u27f9',
'xrarr;': '\u27f6',
'Xscr;': '\U0001d4b3',
'xscr;': '\U0001d4cd',
'xsqcup;': '\u2a06',
'xuplus;': '\u2a04',
'xutri;': '\u25b3',
'xvee;': '\u22c1',
'xwedge;': '\u22c0',
'Yacute': '\xdd',
'yacute': '\xfd',
'Yacute;': '\xdd',
'yacute;': '\xfd',
'YAcy;': '\u042f',
'yacy;': '\u044f',
'Ycirc;': '\u0176',
'ycirc;': '\u0177',
'Ycy;': '\u042b',
'ycy;': '\u044b',
'yen': '\xa5',
'yen;': '\xa5',
'Yfr;': '\U0001d51c',
'yfr;': '\U0001d536',
'YIcy;': '\u0407',
'yicy;': '\u0457',
'Yopf;': '\U0001d550',
'yopf;': '\U0001d56a',
'Yscr;': '\U0001d4b4',
'yscr;': '\U0001d4ce',
'YUcy;': '\u042e',
'yucy;': '\u044e',
'yuml': '\xff',
'Yuml;': '\u0178',
'yuml;': '\xff',
'Zacute;': '\u0179',
'zacute;': '\u017a',
'Zcaron;': '\u017d',
'zcaron;': '\u017e',
'Zcy;': '\u0417',
'zcy;': '\u0437',
'Zdot;': '\u017b',
'zdot;': '\u017c',
'zeetrf;': '\u2128',
'ZeroWidthSpace;': '\u200b',
'Zeta;': '\u0396',
'zeta;': '\u03b6',
'Zfr;': '\u2128',
'zfr;': '\U0001d537',
'ZHcy;': '\u0416',
'zhcy;': '\u0436',
'zigrarr;': '\u21dd',
'Zopf;': '\u2124',
'zopf;': '\U0001d56b',
'Zscr;': '\U0001d4b5',
'zscr;': '\U0001d4cf',
'zwj;': '\u200d',
'zwnj;': '\u200c',
}
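# Hedged illustration (not part of the module): the mapping above goes from entity
# name (with, or for legacy names also without, a trailing semicolon) to its Unicode
# value, e.g.
# assert html5['trade;'] == '\u2122' and html5['yen'] == '\xa5'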
class EntitySubstitution(object):
"""The ability to substitute XML or HTML entities for certain characters."""
def _populate_class_variables():
"""Initialize variables used by this class to manage the plethora of
HTML5 named entities.
This function returns a 3-tuple containing two dictionaries
and a regular expression:
unicode_to_name - A mapping of Unicode strings like "⦨" to
entity names like "angmsdaa". When a single Unicode string has
multiple entity names, we try to choose the most commonly-used
name.
name_to_unicode: A mapping of entity names like "angmsdaa" to
Unicode strings like "⦨".
named_entity_re: A regular expression matching (almost) any
Unicode string that corresponds to an HTML5 named entity.
"""
unicode_to_name = {}
name_to_unicode = {}
short_entities = set()
long_entities_by_first_character = defaultdict(set)
for name_with_semicolon, character in sorted(html5.items()):
# "It is intentional, for legacy compatibility, that many
# code points have multiple character reference names. For
# example, some appear both with and without the trailing
# semicolon, or with different capitalizations."
# - https://html.spec.whatwg.org/multipage/named-characters.html#named-character-references
#
# The parsers are in charge of handling (or not) character
# references with no trailing semicolon, so we remove the
# semicolon whenever it appears.
if name_with_semicolon.endswith(';'):
name = name_with_semicolon[:-1]
else:
name = name_with_semicolon
# When parsing HTML, we want to recognize any known named
# entity and convert it to a sequence of Unicode
# characters.
if name not in name_to_unicode:
name_to_unicode[name] = character
# When _generating_ HTML, we want to recognize special
# character sequences that _could_ be converted to named
# entities.
unicode_to_name[character] = name
# We also need to build a regular expression that lets us
# _find_ those characters in output strings so we can
# replace them.
#
# This is tricky, for two reasons.
if (len(character) == 1 and ord(character) < 128
and character not in '<>&'):
# First, it would be annoying to turn single ASCII
# characters like | into named entities like
# &verbar;. The exceptions are <>&, which we _must_
# turn into named entities to produce valid HTML.
continue
if len(character) > 1 and all(ord(x) < 128 for x in character):
# We also do not want to turn _combinations_ of ASCII
# characters like 'fj' into named entities like '&fjlig;',
# though that's more debatable.
continue
# Second, some named entities have a Unicode value that's
# a subset of the Unicode value for some _other_ named
# entity. As an example, '\u2267' is &GreaterFullEqual;,
# but '\u2267\u0338' is | |
None:
if "buffer" not in args: args = {"buffer":""}
return args
args["buffer"]+=data
for iteration in range(args["buffer"].count("|")):
# Isolate a particular command
length = args["buffer"].index("|")
if length==0:
args["buffer"] = args["buffer"][1:]
continue
data = args["buffer"][0:length]
args["buffer"] = args["buffer"][length+1:]
if data[0]=="<" and self._mainchat: self._mainchat(data+"\n")
elif data[0]=="$":
x = data.split()
if x[0]=="$Lock": self._socket.send("$Supports UserCommand UserIP2 TTHSearch GetZBlock |$Key "+self.lock2key(x[1])+"|$ValidateNick "+self._config["nick"]+"|")
elif x[0]=="$Supports": self._config["hub_supports"] = x[1:]
elif x[0]=="$HubName":
self._config["hubname"] = x[-1]
self._mainchat("Hub Name : "+self._config["hubname"]+"\n")
elif x[0]=="$GetPass": self._socket.send("$MyPass "+self._config["pass"]+"|")
elif x[0]=="$BadPass":
self.disconnect()
elif x[0]=="$Hello":
if x[1]==self._config["nick"]:
self._socket.send("$Version "+self._config["version"]+"|$MyINFO $ALL "+self._config["nick"]+" "+self._config["desc"]+" <"+self._config["client"]+" V:"+str(self._config["version"])+",M:"+("A" if self._config["mode"] else "P")+",H:"+self._config["hubcount"]+",S:"+str(self._download["maxupslots"])+">$ $"+self._config["connection"]+chr(self._config["status"])+"$"+self._config["email"]+"$"+str(self._config["sharesize"])+"$|$GetNickList|")
else:
try: self._nicklist[x[1]]
except: self._nicklist[x[1]] = {"operator":False,"bot":False} # $OpList and $BotList commands will soon follow (if required), so we can make this assumption here.
elif x[0]=="$LogedIn": self._config["operator"] = True
elif x[0]=="$HubTopic":
self._config["topic"] = data[10:]
self._mainchat("Hub Topic : "+self._config["topic"]+"\n")
elif x[0]=="$NickList":
self._nicklock.acquire()
for nick in data[10:].split("$$"):
if nick=="": continue
try: self._nicklist[nick]
except KeyError: self._nicklist[nick] = {"operator":False,"bot":False}
try: self._nicklist[nick]["ip"] = self._userips[nick]
except KeyError: pass
self._socket.send("$UserIP "+data[9:]+"|")
self._nicklock.release()
elif x[0]=="$UserIP":
for item in data[8:].split("$$"):
if item=="": continue
nick,ip = item.split()
self._userips[nick] = ip
elif x[0]=="$OpList":
ops = data[8:].split("$$")
for nick in self._nicklist:
if nick=="": continue
self._nicklist[nick]["operator"] = (True if nick in ops else False)
elif x[0]=="$BotList":
bots = data[9:].split("$$")
for nick in self._nicklist:
if nick=="": continue
self._nicklist[nick]["bot"] = (True if nick in bots else False)
elif x[0]=="$MyINFO":
nick,desc,conn,flag,email,share = re.findall("^\$MyINFO \$ALL ([^ ]*) ([^\$]*)\$.\$([^\$]*)([^\$])\$([^\$]*)\$([^\$]*)\$",data)[0]
try: self._config["nicklist"][nick]
except KeyError: self._nicklist[nick] = {"operator":False,"bot":False}
self._nicklist[nick]["desc"] = desc
self._nicklist[nick]["conn"] = conn
self._nicklist[nick]["flag"] = flag
self._nicklist[nick]["email"] = email
self._nicklist[nick]["share"] = share
elif x[0]=="$To:":
info2 = re.findall("^\$To\: ([^ ]*) From: ([^ ]*) \$(.*)$",data)
if len(info2)==0: continue
else: info2 = info2[0]
if self._config["nick"]!=info2[0]: continue
try: self._pm( info2[1] , time.strftime("%d-%b-%Y %H:%M:%S",time.localtime())+" "+info2[2] )
except TypeError: pass
elif x[0]=="$Quit":
try: del self._nicklist[x[1]]
except KeyError: pass
elif x[0]=="$ForceMove":
if x[1].count(":")==0: addr = (x[1],411)
elif x[1].count(":")==1: addr = tuple(x.split(":"))
else:
self.debug("Invalid Redirection Address")
continue
if self._config["host"]==addr[0] and self._config["port"]==addr[1]:
self.debug("Redirected to the same hub : "+x[1])
continue
self._config["host"],self._config["port"] = addr
self.reconnect()
elif x[0]=="$Search": self.search_result_generate(data)
elif x[0]=="$SR": self.search_result_process(data)
elif x[0]=="$ConnectToMe":
continue # SHERIFFBOT
remote = x[2] # This client's mode does not matter here
d = {"host":remote.split(":")[0], "port":remote.split(":")[1] }
d["socket"] = Connection({ "name":remote,"host":remote.split(":")[0],"port":remote.split(":")[1],"role":"client","type":"tcp","handler":self.transfer_handler,"args":{"role":"client","transfer":d},"debug":self._debug })
self._transfer.append(d)
elif x[0]=="$RevConnectToMe":
continue # SHERIFFBOT
self.connect_remote(x[1],False)
else: self.debug("Unrecognized Command : "+data)
# end of iteration
return args
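## Hedged illustration of the framing handled above (not executed by the client):
## NMDC hub traffic is a stream of '|'-terminated commands, and any trailing partial
## command stays in args["buffer"] until more data arrives, e.g.
# assert '$Hello nick|<nick> hi all|'.split('|')[:-1] == ['$Hello nick', '<nick> hi all']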
################################################## Interaction Functions ##################################################
def mc_send(self,data): # Write to the mainchat for all users to see
self._socket.send("<"+self._config["nick"]+"> "+self.escape(data)+"|") # Sending a raw command containing another nick here causes the server to reject it.
return self
def pm_send(self,nick,data): # Sends a private message to the specified user
self._socket.send("$To: %s From: %s $<%s> %s|" %(nick,self._config["nick"],self._config["nick"],self.escape(data)))
print "$To: %s From: %s $<%s> %s|" %(nick,self._config["nick"],self._config["nick"],self.escape(data))
return self
def download_tth(self,tth,name=None,location=None,success_callback=None,success_callback_args=None,failure_callback=None,failure_callback_args=None): # INCOMPLETE : Validate TTH
self._queue.append({"id":tth,"incomplete":tth,"parts":-1,"type":"tth","nick":[],"priority":3,"name":name,"location":location,"active":False,"considered":False,"success_callback":success_callback,"success_callback_args":success_callback_args,"failure_callback":failure_callback,"failure_callback_args":failure_callback_args})
return self
def download_filelist(self,nick,success_callback=None,success_callback_args=None,failure_callback=None,failure_callback_args=None): # Downloads the filelist of a specific user
flag = True
for item in self._queue:
if item["id"]==self._config["filelist"] and item["incomplete"]==self.escape_filename(nick)+".filelist" and item["name"]=="@"+self.escape_filename(nick)+".xml.bz2": flag = False
if flag:
self._queue.append({"id":self._config["filelist"],"incomplete":self.escape_filename(nick)+".filelist","part":0,"parts":1,"type":"file","nick":[nick],"offset":"0","length":-1,"priority":5,"name":"@"+self.escape_filename(nick)+".xml.bz2","size":-1,"location":self._dir["filelist"], "active":False,"considered":False,"success_callback":success_callback,"success_callback_args":success_callback_args,"failure_callback":failure_callback,"failure_callback_args":failure_callback_args})
return self
def download_manager(self): # An infinite loop that keeps trying to start queued downloads.
while self._download["active"]: # Keep doing this as long as the client runs
flag = False; # Initially assume that new search will be performed, so wait for a while before the next cycle
for item in self._queue: # For each item in queue
if not self._download["active"]: # Check if the client isnt being shut down
flag = True; break # Download Manager is being terminated
# print "Download Queue :", [i["name"]+":"+str(i["part"]) if "part" in i else "-1" for i in self._queue] # NOTICE : DEBUG only
if self._download["downslots"]==self._download["maxdownslots"]: break # If slots are not available, wait for a while
if item["active"]==True or item["considered"]==True: continue # If item isnt already being downloaded
if item["type"]=="file": # Filelist Downloads
item["considered"] = True
def fail():
item["considered"] = False
self.debug("Removing filelist from queue as "+str(item["nick"])+" is not responding.")
self._queue.remove(item) # SHERIFFBOT : Delete this item from queue
if item["failure_callback"]!=None:
try:
if item["failure_callback_args"]!=None: item["failure_callback"](item["failure_callback_args"])
else: item["failure_callback"]()
except:
self.debug("Failure Callback Function Error : "+str(args["get"]))
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, limit=10, file=(sys.stdout))
self.connect_remote(item["nick"],True,fail) # Connect to the peer. Filelists always have only one part, an assumption made in filelist_get.
elif item["type"]=="tth": # TTH Downloads
if item["parts"]==-1: # What to do if no other information about the file is available
flag = True # No need to wait after one cycle
result = [] # List to hold search results from sources
self.search("TTH:"+item["id"],lambda x:result.append(x),{"type":"tth","mode":"auto"}) # Start a search for sources
time.sleep(self._config["searchtime_auto"]) # Assume that search results will arrive in this much time
if len(result)==0: continue # No results - cant do anything about that :(
if item["name"] is None: item["name"] = re.split("/",result[0][2].replace("\\","/") )[-1] # If name isnt provided, use the one from the first search result to arrive.
item["size"] = int(result[0][3]); # Set total file size, to be used during rebuilding
item["nick"] = self.unique(item["nick"]+[x[1] for x in result]) # Initialize/Expand source list, without redundancy
parts = int(math.ceil(float(item["size"])/self._config["segment_size"])) # Calculate the number of blocks this file is to be divided into, based on the preconfigured block size.
if parts==0: # If the file is empty (size 0), write it immediately.
open(self.transfer_filename(item),"wb").close() # Create and close an empty file.
continue # We can assume after this point that at least one part is present.
item["parts"] = parts; item["length"] = self._config["segment_size"] # Setting general infomration applicable to all parts, except last.
for part in range(parts-1): # Leaving out the last part, given that the length may be different.
item["part"] = part; item["offset"] = part*self._config["segment_size"]; # Set part-specific information
self._queue.append(copy.deepcopy(item)) # All parts now have all necessary information, and may now be treated individually.
item["part"] = parts-1; item["offset"] = (parts-1)*self._config["segment_size"]; # It is not necessary to append the last block again, as we can transform the current one into that.
item["length"] = ((item["size"]+self._config["segment_size"]-1)%self._config["segment_size"])+1 # Get the exact length of the last part
print "added "+str(parts)+" items"
if not self.transfer_verify(item): # Check whether or not this item has already been downloaded.
x = [i for i in self._queue if (item["id"]==i["id"] and "part" in i and item["part"]==i["part"])] # Isolate item with matching signature
if len(x)==1 and x[0] in self._queue: self._queue.remove(x[0]) # Remove item from queue
self.transfer_rebuild(item); continue # Try rebuilding it, but invariably move on
connected = [transfer["nick"] for transfer in self._transfer if "nick" in transfer] # Generate list of nicks to which we are already connected.
nick = filter(lambda n: n not in connected and n in self._nicklist.keys(),item["nick"]) # Select only those to which we aren't connected and that are online. The original list isn't touched so it can be reused for later attempts.
if len(nick)==0: continue # No one left :(
print "Actually being considered ...",item["part"]
item["considered"] = True
def fail(): item["considered"] = False
nick=random.choice(nick); # Randomly select a nickname
# INCOMPLETE (possible) : Failure callbacks before file removal
self.spawn("RemoteConnection:"+nick,self.connect_remote,(nick,True,fail)) # Connect to the nick. transfer_next deals with determining which file to download from the peer.
if not flag: time.sleep(self._config["download_time"]) # If no searches have been performed, wait for a while before starting next cycle
return self # Allows more functions to be chained in the same line
################################################## Transfer Functions ##################################################
def connect_remote(self,nick,rev=True,failure=None): # Sets up a connection with nick
if type(nick) is list:
if len(nick)==0: return self
else: nick=nick[0]
if self._config["mode"]: # Nothing can be done if both are passive
port = random.randint(1000,2**16-1) # Randomly select a positive integer for a port number in the given range
d = { "nick":nick } # This is the prototype for the transfer object, created so that the connection object it will contain will have a reference to it.
self.debug("Sending connection request to "+nick+" ...")
while True: # Keeping trying to bind to different port numbers
try:
d["socket"] = Connection({"name":nick,"host":self._config["localhost"],"port":port,"role":"server","type":"tcp","handler":self.transfer_handler,"args":{"role":"server","transfer":d,"failure":failure,"nick":nick},"debug":self._debug})
break # Terminate loop only after binding to a specific port. Connection objects that could not bind lose their only reference and are discarded.
except ConnectionError: port = random.randint(1000,2**16-1) # If this particular port is occupied, try another one randomly
self._transfer.append(d)
for retry in range(self._config["retry"]):
self._socket.send("$ConnectToMe "+nick+" "+self._config["localhost"]+":"+str(port)+"|")
time.sleep(self._config["wait"])
if d["socket"].clients(): return self # Connection Successful
self.debug("No response from "+nick+" after waiting for "+str(self._config["wait"])+" seconds.")
self.debug("Connection to "+nick+" failed - timeout.")
d["socket"].close() # Terminate the server
if failure!=None: failure()
return self
elif rev:
self._socket.send("$RevConnectToMe "+self._config["nick"]+" "+nick+"|")
return self
def transfer_verify(self,get): # Checks whether or not it is safe to download this file
tempname = self._dir["incomplete"]+os.sep+self.escape_filename(get["incomplete"])+".part"+str(get["part"]) # Generate the name of the temporary file in which to store data before joining and transferring it to the target.
if not get["active"] and (not os.path.exists(tempname) or os.path.getsize(tempname)<get["length"]): return True # If the file doesnt exist, or if the size hasent reached the target, start this download.
x = [item for item in self._queue if item["id"]==get["id"] and item["incomplete"]==get["incomplete"] and "part" in item and item["part"]==get["part"]] # Locate items in the queue with the same signature as the current one.
if len(x)==1 and x[0] in self._queue: self._queue.remove(x[0]) # As the file is already available completely, we can remove the corresponding item from the queue.
return False # This item was not cleared for download, meaning it has already been downloaded, and rebuilding should be attempted.
def transfer_next(self,args,info): # Check if we need to download something from this peer
self._download["lock"].acquire() # | |
# qmpy/materials/entry.py
from datetime import datetime
import time
import os
import logging
from django.db import models
from django.db import transaction
import networkx as nx
from qmpy.db.custom import *
from qmpy.materials.composition import *
from qmpy.materials.element import Element, Species
from qmpy.materials.structure import Structure, StructureError
from qmpy.utils import *
from qmpy.computing.resources import Project
from qmpy.data.meta_data import *
import qmpy.io as io
import qmpy.computing.scripts as scripts
import qmpy.analysis.vasp as vasp
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
k_desc = 'Descriptive keyword for looking up entries'
h_desc = 'A note indicating a reason the entry should not be calculated'
@add_meta_data('keyword', description=k_desc)
@add_meta_data('hold', description=h_desc)
class Entry(models.Model):
"""Base class for a database entry.
The core model for typical database entries. An Entry model represents an
input structure to the database, and can be created from any input file.
The Entry also ties together all of the associated :mod:`qmpy.Structure`,
:mod:`qmpy.Calculation`, :mod:`qmpy.Reference`,
:mod:`qmpy.FormationEnergies`, and other associated database entries.
Relationships:
| :mod:`~qmpy.Calculation` via calculation_set
| :mod:`~qmpy.DOS` via dos_set
| :mod:`~qmpy.Entry` via duplicate_of
| :mod:`~qmpy.Entry` via duplicates
| :mod:`~qmpy.Element` via element_set
| :mod:`~qmpy.FormationEnergy` via formationenergy_set
| :mod:`~qmpy.Job` via job_set
| :mod:`~qmpy.MetaData` via meta_data
| :mod:`~qmpy.Project` via project_set
| :mod:`~qmpy.Prototype` via prototype
| :mod:`~qmpy.Species` via species_set
| :mod:`~qmpy.Structure` via structure_set
| :mod:`~qmpy.Task` via task_set
| :mod:`~qmpy.Reference` via reference
| :mod:`~qmpy.Composition` via composition
Attributes:
| id: Primary key (auto-incrementing int)
| label: An identifying name for the structure. e.g. icsd-1001 or A3
"""
### structure properties
path = models.CharField(max_length=255, unique=True)
meta_data = models.ManyToManyField('MetaData')
label = models.CharField(max_length=20, null=True)
### record keeping
duplicate_of = models.ForeignKey('Entry', related_name='duplicates',
null=True)
ntypes = models.IntegerField(blank=True, null=True)
natoms = models.IntegerField(blank=True, null=True)
### links
element_set = models.ManyToManyField('Element')
species_set = models.ManyToManyField('Species')
project_set = models.ManyToManyField('Project')
composition = models.ForeignKey('Composition', blank=True, null=True)
reference = models.ForeignKey('Reference', null=True, blank=True)
prototype = models.ForeignKey('Prototype', null=True, blank=True)
class Meta:
app_label = 'qmpy'
db_table = 'entries'
def __str__(self):
return '%s - %s' % (self.id, self.name)
@transaction.atomic
def save(self, *args, **kwargs):
"""Saves the Entry, as well as all associated objects."""
if not self.reference is None:
if self.reference.id is None:
self.reference.save()
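# Re-assign the reference so that the primary key created by the save above
# is propagated to this Entry's reference_id field.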
self.reference = self.reference
super(Entry, self).save(*args, **kwargs)
if not self.duplicate_of:
self.duplicate_of = self
super(Entry, self).save(*args, **kwargs)
if self._structures:
for k, v in self.structures.items():
v.label = k
v.entry = self
v.save()
#self.structure_set = self.structures.values()
if self._calculations:
for k, v in self.calculations.items():
v.label = k
v.entry = self
v.save()
#self.calculation_set = self.calculations.values()
if self._elements:
self.element_set = self.elements
if self._species:
self.species_set = self.species
if self._projects:
self.project_set = self.projects
if self._keywords or self._holds:
self.meta_data = self.hold_objects + self.keyword_objects
@staticmethod
def create(source, keywords=[], projects=[], prototype=None, **kwargs):
"""
Attempts to create an Entry object from a provided input file.
Processed in the following way:
#. If an Entry exists at the specified path, returns that Entry.
#. Create an Entry, and assign all fundamental attributes. (natoms,
ntypes, input, path, elements, keywords, projects).
#. If the input file is a CIF, which carries additional composition and
reference information, an additional test is performed to check that the
reported composition matches the composition of the resulting structure.
The reference for the work is also created and assigned to the entry.
#. Attempt to identify another entry that this is either exactly
equivalent to, or a defect cell of.
Keywords:
keywords: list of keywords to associate with the entry.
projects: list of project names to associate with the entry.
"""
source_file = os.path.abspath(source)
if 'libraries_v2_0/libraries_v1_overflow' in source_file:
source_file = source_file.replace("libraries_v2_0/libraries_v1_overflow", "libraries")
path = os.path.dirname(source_file)
# Step 1
if Entry.objects.filter(path=path).exists():
return Entry.objects.get(path=path)
# Step 2
entry = Entry(**kwargs)
try:
structure = io.poscar.read(source_file)
except ValueError:
structure = io.cif.read(source_file)
structure.make_primitive()
entry.source_file = source_file
entry.path = os.path.dirname(source_file)
entry.input = structure
entry.ntypes = structure.ntypes
entry.natoms = len(structure.sites)
entry.elements = entry.comp.keys()
entry.composition = Composition.get(structure.comp)
for kw in keywords:
entry.add_keyword(kw)
entry.projects = projects
entry.prototype = prototype
# Step 3
c1 = structure.composition
if 'cif' in source_file:
c2 = structure.reported_composition
if not c1.compare(c2, 5e-2):
entry.add_hold("composition mismatch in cif")
entry.composition = c2
entry.reference = io.cif.read_reference(source_file)
# check for perfect crystals
if not any([ s.partial for s in structure.sites ]):
dup = Entry.get(structure)
if dup is not None:
entry.duplicate_of = dup
entry.add_hold('duplicate')
return entry
# detect solid solution
if all([ s.occupancy > 0.99 for s in structure.sites ]):
if any([ len(s) > 1 for s in structure.sites ]):
entry.add_keyword('solid solution')
if any([ s.partial for s in structure.sites ]):
entry.add_hold('partial occupancy')
return entry
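# A minimal usage sketch (hypothetical path and labels; assumes a configured
# Django/qmpy environment):
# entry = Entry.create("icsd-1001/POSCAR", keywords=["icsd"], projects=["icsd"])
# entry.save()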
@staticmethod
def get(structure, tol=1e-1):
if isinstance(structure, Structure):
return Entry.search_by_structure(structure, tol=tol)
@staticmethod
def search_by_structure(structure, tol=1e-2):
c = Composition.get(structure.comp)
for e in c.entries:
if e.structure.compare(structure, tol=tol):
return e
return None
_elements = None
@property
def elements(self):
"""List of Elements"""
if self._elements is None:
self._elements = [ Element.get(e) for e in self.comp.keys() ]
return self._elements
@elements.setter
def elements(self, elements):
self._elements = [ Element.get(e) for e in elements ]
_species = None
@property
def species(self):
"""List of Species"""
if self._species is None:
self._species = [ Species.get(s) for s in self.spec_comp.keys() ]
return self._species
@species.setter
def species(self, species):
self._species = [ Species.get(e) for e in species ]
_projects = None
@property
def projects(self):
"""List of Projects"""
if self._projects is None:
self._projects = list(self.project_set.all())
return self._projects
@projects.setter
def projects(self, projects):
self._projects = [ Project.get(p) for p in projects ]
_structures = None
@property
def structures(self):
if self._structures is None:
if self.id is None:
self._structures = {}
else:
structs = {}
for s in self.structure_set.exclude(label=''):
structs[s.label] = s
self._structures = structs
return self._structures
s = structures
@structures.setter
def structures(self, structs):
if not isinstance(structs, dict):
raise TypeError('structures must be a dict')
if not all( isinstance(v, Structure) for v in structs.values()):
raise TypeError('structures must be a dict of Structures')
self._structures = structs
@structures.deleter
def structures(self, struct):
self._structures[struct].delete()
del self._structures[struct]
_calculations = None
@property
def calculations(self):
"""Dictionary of label:Calculation pairs."""
if self._calculations is None:
if self.id is None:
self._calculations = {}
else:
calcs = {}
for c in self.calculation_set.exclude(label=''):
calcs[c.label] = c
self._calculations = calcs
return self._calculations
c = calculations
@calculations.setter
def calculations(self, calcs):
if not isinstance(calcs, dict):
raise TypeError('calculations must be a dict')
if not all( isinstance(v, vasp.Calculation) for v in calcs.values()):
raise TypeError('calculations must be a dict of Calculations')
self._calculations = calcs
@calculations.deleter
def calculations(self, calc):
self._calculations[calc].delete()
del self._calculations[calc]
@property
def input(self):
return self.structures.get('input')
@property
def structure(self):
if 'final' in self.structures:
return self.structures['final']
elif 'relaxed' in self.structures:
return self.structures['relaxed']
elif 'relaxation' in self.structures:
return self.structures['relaxation']
elif 'standard' in self.structures:
return self.structures['standard']
elif 'fine_relax' in self.structures:
return self.structures['fine_relax']
else:
try:
return self.structures['input']
except KeyError:
return None
@input.setter
def input(self, structure):
self.structures['input'] = structure
@property
def tasks(self):
return list(self.task_set.all())
@property
def jobs(self):
return list(self.job_set.all())
@property
def comp(self):
if not self.composition_id is None:
return parse_comp(self.composition_id)
elif not self.input is None:
return self.input.comp
else:
return {}
@property
def spec_comp(self):
"""
Composition dictionary, using species (element + oxidation state)
instead of just the elements.
"""
if self.input is None:
return {}
else:
return self.input.spec_comp
@property
def unit_comp(self):
"""Composition dictionary, normalized to 1 atom."""
return unit_comp(self.comp)
@property
def red_comp(self):
"""Composition dictionary, in reduced form."""
return reduce_comp(self.comp)
@property
def name(self):
"""Unformatted name"""
return format_comp(reduce_comp(self.comp))
@property
def latex(self):
"""LaTeX formatted name"""
return format_latex(reduce_comp(self.comp))
@property
def html(self):
"""HTML formatted name"""
return format_html(reduce_comp(self.comp))
@property
def proto_label(self):
#if not self.prototype is None:
# return self.prototype.name
protos = []
for e in self.duplicates.all():
if not e.prototype is None:
protos.append(e.prototype.name)
protos = list(set(protos))
if len(protos) == 1:
return protos[0]
else:
return ', '.join(protos)
@property
def space(self):
"""Return the set of elements in the input structure.
Examples::
>>> e = Entry.create("fe2o3/POSCAR") # an input containing Fe2O3
>>> e.space
set(["Fe", "O"])
"""
return set([ e.symbol for e in self.elements])
@property
def total_energy(self):
"""
If a converged static or standard calculation exists, returns its total
energy per atom. Otherwise, returns None.
"""
es = []
if 'static' in self.calculations:
if self.calculations['static'].converged:
return self.calculations['static'].energy_pa
#es.append(self.calculations['static'].energy_pa)
if 'standard' in self.calculations:
if self.calculations['standard'].converged:
return self.calculations['standard'].energy_pa
#es.append(self.calculations['standard'].energy_pa)
if not es:
return None
#else:
# return min(es)
_energy = None
@property
def energy(self):
"""
If the structure has been relaxed, returns the formation energy of the
final relaxed structure. Otherwise, returns None.
"""
if self._energy is None:
fes | |
Return a boolean FastArray set to True where duplicate rows exist,
optionally only considering certain columns
Parameters
----------
subset : str or list of str, optional
A column label or list of column labels to inspect for duplicate values.
When ``None``, all columns will be examined.
keep : {'first', 'last', False}, default 'first'
* ``first`` : mark duplicates as True, except for the first occurrence.
* ``last`` : mark duplicates as True, except for the last occurrence.
* False : mark all rows that have any duplicate as True.
Examples
--------
>>> ds=rt.Dataset({'somenans': [0., 1., 2., rt.nan, 0., 5.], 's2': [0., 1., rt.nan, rt.nan, 0., 5.]})
>>> ds
# somenans s2
- -------- ----
0 0.00 0.00
1 1.00 1.00
2 2.00 nan
3 nan nan
4 0.00 0.00
5 5.00 5.00
>>> ds.duplicated()
FastArray([False, False, False, False, True, False])
Notes
-----
Consider using ``rt.Grouping(subset).ifirstkey`` as a fancy index to pull in unique rows.
"""
if subset is None:
subset = list(self.keys())
else:
if not isinstance(subset, list):
subset = [subset]
g = self.gbu(subset).get_groupings()
igroup = g['iGroup']
ifirstgroup = g['iFirstGroup']
ncountgroup = g['nCountGroup']
result = ones(igroup.shape, dtype=bool)
# return row of first occurrence
if keep == 'first':
# remove invalid bin
ifirstgroup = ifirstgroup[1:]
result[igroup[ifirstgroup]]=False
# return row of last occurrence (however, keys will be in order of their first occurrence)
elif keep == 'last':
lastindex = ifirstgroup[-1] + ncountgroup[-1] -1
# skip invalid and shift everything
ilast = ifirstgroup[2:]
ilast -=1
result[igroup[ilast]]=False
# set the last one
result[lastindex]=False
# only return rows that occur once
elif keep is False:
ifirstgroup = ifirstgroup[ncountgroup==1]
result[igroup[ifirstgroup]]=False
return result
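# A rough sketch of the alternative mentioned in the Notes above (assumes the
# rt.Grouping(subset).ifirstkey API behaves as described there; illustrative only):
# unique_rows = ds[rt.Grouping(subset).ifirstkey, :]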
# -------------------------------------------------------------
def drop_duplicates(self, subset=None, keep: Union[bool, str] = 'first', inplace: bool = False) -> 'Dataset':
"""
Return Dataset with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : Dataset
Notes
-----
If `keep` is 'last', the rows in the result will match pandas, but the order will be based
on first occurrence of the unique key.
Examples
--------
>>> np.random.seed(12345)
>>> ds = rt.Dataset({
... 'strcol' : np.random.choice(['a','b','c','d'], 15),
... 'intcol' : np.random.randint(0, 3, 15),
... 'rand' : np.random.rand(15)
... })
>>> ds
# strcol intcol rand
-- ------ ------ ----
0 c 2 0.05
1 b 1 0.81
2 b 2 0.93
3 b 0 0.36
4 a 2 0.69
5 b 1 0.13
6 c 1 0.83
7 c 2 0.32
8 b 1 0.74
9 c 2 0.60
10 b 2 0.36
11 b 1 0.79
12 c 0 0.70
13 b 1 0.82
14 d 1 0.90
<BLANKLINE>
[15 rows x 3 columns] total bytes: 195.0 B
Keep only the row of the first occurrence:
>>> ds.drop_duplicates(['strcol','intcol'])
# strcol intcol rand
- ------ ------ ----
0 c 2 0.05
1 b 1 0.81
2 b 2 0.93
3 b 0 0.36
4 a 2 0.69
5 c 1 0.83
6 c 0 0.70
7 d 1 0.90
<BLANKLINE>
[8 rows x 3 columns] total bytes: 104.0 B
Keep only the row of the last occurrence:
>>> ds.drop_duplicates(['strcol','intcol'], keep='last')
# strcol intcol rand
- ------ ------ ----
0 c 2 0.60
1 b 1 0.82
2 b 2 0.36
3 b 0 0.36
4 a 2 0.69
5 c 1 0.83
6 c 0 0.70
7 d 1 0.90
<BLANKLINE>
[8 rows x 3 columns] total bytes: 104.0 B
Keep only the rows which only occur once:
>>> ds.drop_duplicates(['strcol','intcol'], keep=False)
# strcol intcol rand
- ------ ------ ----
0 b 0 0.36
1 a 2 0.69
2 c 1 0.83
3 c 0 0.70
4 d 1 0.90
<BLANKLINE>
[5 rows x 3 columns] total bytes: 65.0 B
"""
if self.shape[0] == 0:
if inplace:
return self
else:
return TypeRegister.Dataset(self)
if subset is None:
subset = list(self.keys())
else:
if not isinstance(subset, list):
subset = [subset]
gb = self.gbu(subset)
# return row of first occurrence
if keep == 'first':
deduplicated = gb.first()
deduplicated.label_remove()
# return row of last occurrence (however, keys will be in order of their first occurrence)
elif keep == 'last':
deduplicated = gb.last()
deduplicated.label_remove()
# only return rows that occur once
elif keep is False:
non_duplicated = gb.count().Count == 1
deduplicated = gb.first()
deduplicated.label_remove()
deduplicated = deduplicated[non_duplicated,:]
else:
raise ValueError(f"Got unexpected value for keep {keep}.")
# replace all columns in dictionary
if inplace is True:
if deduplicated._nrows != self._nrows:
# swap out all column data
self._nrows = deduplicated._nrows
self._col_sortlist = None
self.col_replace_all(deduplicated, check_exists=False)
return self
return deduplicated
# -------------------------------------------------------------
def col_replace_all(self, newdict, check_exists: bool = True) -> None:
"""
Replace the data for each item in the item dict. Original attributes
will be retained. Useful for internal routines that need to swap out all columns quickly.
Parameters
----------
newdict : dictionary of item names -> new item data (can also be a Dataset)
check_exists : bool
if True, all newdict keys and old item keys will be compared to ensure a match
"""
self._all_items.item_replace_all(newdict, check_exists=check_exists)
# -------------------------------------------------------------
def all(self, axis=0, as_dataset: bool = True):
"""
Returns truth value 'all' along axis. Behavior for ``axis=None`` differs from pandas!
Parameters
----------
axis : int, optional
* axis=0 (dflt.) -> over columns (returns Struct (or Dataset) of bools)
string synonyms: c, C, col, COL, column, COLUMN
* axis=1 -> over rows (returns array of bools)
string synonyms: r, R, row, ROW
* axis=None -> over rows and columns (returns bool)
string synonyms: all, ALL
as_dataset : bool
When ``axis=0``, return a Dataset instead of a Struct. Defaults to True.
Returns
-------
Struct (or Dataset) or list or bool
"""
def _col_all(_col):
try:
return bool(_col.all())
except TypeError:
return all(_col)
axis = self._axis_key(axis)
cond_rtn_type = type(self) if as_dataset else Struct
if axis == 0:
return cond_rtn_type({_cn: _col_all(_val) for _cn, _val in self.items()})
if axis is None:
return all(_col_all(_val) for _cn, _val in self.items())
if axis == 1:
# for each col, !=0 to get back bool array. then inplace AND all those results, careful with string arrays
temparray=ones(len(self), dtype=bool)
for arr in self.values():
if arr.dtype.num <= 13:
# inplace AND for numerical data
# for cats we will assume 0 is the invalid and !=0 check works
temparray *= arr != 0
else:
# care about string array?
if arr.dtype.char in 'US':
temparray *= arr != ''
else:
# skip this datatype
pass
return temparray
raise NotImplementedError('Dataset.all(axis=<0, 1, None>)')
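# For example, ds.all(axis=0) yields one boolean per column (as a Dataset or
# Struct), ds.all(axis=1) yields one boolean per row, and ds.all(axis=None)
# reduces everything to a single bool.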
def sorts_on(self) -> None:
"""
Turns on all row/column sorts for display (off by default).
sort_view must have been called beforehand.
:return: None
"""
if self._col_sortlist is None:
warnings.warn(f"sort_view was not called first. Display sorting will remain off.")
return
self._sort_display = True
def sorts_off(self) -> None:
"""
Turns off all row/column sorts for display (happens when sort_view is called)
If sort is cached, it will remain in cache in case sorts are toggled back on.
:return: None
"""
self._col_sortlist = None
self._sort_display = False
# -------------------------------------------------------
def get_row_sort_info(self):
sortdict = None
# general row sort will take precedence
if self._col_sortlist is not None:
for col in self._col_sortlist:
if col not in self:
print(str(col), "is not a valid key to sort by.")
# clear invalid sort from dataset
self._col_sortlist = None
break
else:
#sortdict = {col: self.__getattribute__(col) for col in self._col_sortlist}
sortdict = {col: self.col_get_value(col) for col in self._col_sortlist}
return self._uniqueid, self._nrows, sortdict
# -------------------------------------------------------
def _sort_lexsort(self, by, ascending=True):
bylist = by
if not isinstance(by, list):
bylist = [bylist]
sortkeys = []
for col in bylist:
sortkeys.append(self.col_get_value(col))
# larger | |
# -*- test-case-name: flocker.node.agents.functional.test_ebs -*-
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
An EBS implementation of the ``IBlockDeviceAPI``.
"""
from subprocess import check_output
import threading
import time
import logging
from uuid import UUID
from bitmath import Byte, GiB
from pyrsistent import PRecord, field, pset, pmap, thaw
from zope.interface import implementer
from boto import ec2
from boto import config
from boto.ec2.connection import EC2Connection
from boto.utils import get_instance_metadata
from boto.exception import EC2ResponseError
from twisted.python.constants import (
Names, NamedConstant, Values, ValueConstant
)
from twisted.python.filepath import FilePath
from eliot import Message
from .blockdevice import (
IBlockDeviceAPI, BlockDeviceVolume, UnknownVolume, AlreadyAttachedVolume,
UnattachedVolume, StorageProfileAttributes
)
from ...control import pmap_field
from ._logging import (
AWS_ACTION, BOTO_EC2RESPONSE_ERROR, NO_AVAILABLE_DEVICE,
NO_NEW_DEVICE_IN_OS, WAITING_FOR_VOLUME_STATUS_CHANGE,
BOTO_LOG_HEADER, IN_USE_DEVICES,
)
DATASET_ID_LABEL = u'flocker-dataset-id'
METADATA_VERSION_LABEL = u'flocker-metadata-version'
CLUSTER_ID_LABEL = u'flocker-cluster-id'
BOTO_NUM_RETRIES = u'20'
VOLUME_STATE_CHANGE_TIMEOUT = 300
MAX_ATTACH_RETRIES = 3
class ProfileAttributeNames(Values):
"""
Storage profile attribute names.
"""
SNAPSHOT = ValueConstant(u'snapshot')
VOLUME_TYPE = ValueConstant(u'volume_type')
IOPS = ValueConstant(u'iops')
ENCRYPTED = ValueConstant(u'encrypted')
class VolumeOperations(Names):
"""
Supported EBS backend operations on a volume.
"""
CREATE = NamedConstant()
ATTACH = NamedConstant()
DETACH = NamedConstant()
DESTROY = NamedConstant()
class VolumeStates(Values):
"""
Expected EBS volume states during ``VolumeOperations``.
"""
EMPTY = ValueConstant('')
CREATING = ValueConstant(u'creating')
AVAILABLE = ValueConstant(u'available')
ATTACHING = ValueConstant(u'attaching')
IN_USE = ValueConstant(u'in-use')
DETACHING = ValueConstant(u'detaching')
DELETING = ValueConstant(u'deleting')
class VolumeStateFlow(PRecord):
"""
Expected EBS volume state flow during ``VolumeOperations``.
"""
start_state = field(mandatory=True, type=ValueConstant)
transient_state = field(mandatory=True, type=ValueConstant)
end_state = field(mandatory=True, type=ValueConstant)
# Boolean flag to indicate if a volume state transition
# results in non-empty ``attach_data.device`` and
# ``attach_data.instance_id`` for the EBS volume.
sets_attach = field(mandatory=True, type=bool)
unsets_attach = field(mandatory=True, type=bool)
class VolumeStateTable(PRecord):
"""
Map of volume operation to expected volume state transitions
and expected update to volume's ``attach_data``.
"""
def _populate_volume_state_table():
"""
Initialize volume state table with transitions for ``create_volume``,
``attach_volume``, ``detach_volume``, ``delete_volume`` operations.
"""
O = VolumeOperations
S = VolumeStates
table = pmap()
def add_flow(operation, start, transient, end, sets_attach,
unsets_attach):
"""
Helper to add expected volume states for given operation.
"""
return table.set(operation,
VolumeStateFlow(start_state=start,
transient_state=transient,
end_state=end,
sets_attach=sets_attach,
unsets_attach=unsets_attach))
table = add_flow(O.CREATE, S.EMPTY, S.CREATING, S.AVAILABLE,
False, False)
table = add_flow(O.ATTACH, S.AVAILABLE, S.ATTACHING, S.IN_USE,
True, False)
table = add_flow(O.DETACH, S.IN_USE, S.DETACHING, S.AVAILABLE,
False, True)
table = add_flow(O.DESTROY, S.AVAILABLE, S.DELETING, S.EMPTY,
False, False)
return table
table = pmap_field(NamedConstant, VolumeStateFlow,
initial=_populate_volume_state_table())
VOLUME_STATE_TABLE = VolumeStateTable()
class TimeoutException(Exception):
"""
A timeout on waiting for volume to reach destination end state.
:param unicode blockdevice_id: Unique identifier for a volume.
:param NamedConstant operation: Operation performed on volume.
:param unicode start_state: Volume's start state before operation.
:param unicode transient_state: Expected transient state during operation.
:param unicode end_state: Expected end state on operation completion.
:param unicode current_state: Volume's state at timeout.
"""
def __init__(self, blockdevice_id, operation,
start_state, transient_state, end_state, current_state):
Exception.__init__(self, blockdevice_id)
self.blockdevice_id = blockdevice_id
self.operation = operation
self.start_state = start_state
self.transient_state = transient_state
self.end_state = end_state
self.current_state = current_state
class UnexpectedStateException(Exception):
"""
An unexpected state was encountered by a volume as a result of operation.
:param unicode blockdevice_id: Unique identifier for a volume.
:param NamedConstant operation: Operation performed on volume.
:param unicode start_state: Volume's start state before operation.
:param unicode transient_state: Expected transient state during operation.
:param unicode end_state: Expected end state on operation completion.
:param unicode current_state: Volume's state at timeout.
"""
def __init__(self, blockdevice_id, operation,
start_state, transient_state, end_state, current_state):
Exception.__init__(self, blockdevice_id)
self.blockdevice_id = blockdevice_id
self.operation = operation
self.start_state = start_state
self.transient_state = transient_state
self.end_state = end_state
self.current_state = current_state
class EliotLogHandler(logging.Handler):
# Whitelist ``"msg": "Params:`` field for logging.
_to_log = {"Params"}
def emit(self, record):
fields = vars(record)
# Only log certain things. The log is massively too verbose
# otherwise.
if fields.get("msg", ":").split(":")[0] in self._to_log:
Message.new(
message_type=BOTO_LOG_HEADER, **fields
).write()
def _enable_boto_logging():
"""
Make boto log activity using Eliot.
"""
logger = logging.getLogger("boto")
logger.addHandler(EliotLogHandler())
# It seems as though basically all boto log messages are at the same
# level. Either we can see all of them or we can see none of them.
# We'll do some extra filtering in the handler.
logger.setLevel(logging.DEBUG)
_enable_boto_logging()
class AttachedUnexpectedDevice(Exception):
"""
A volume was attached to a device other than the one we expected.
:ivar str _template: A native string giving the template into which to
format attributes for the string representation.
"""
_template = "AttachedUnexpectedDevice(requested={!r}, discovered={!r})"
def __init__(self, requested, discovered):
"""
:param FilePath requested: The requested device name.
:param FilePath discovered: The device which was discovered on the
system.
"""
self.requested = requested
self.discovered = discovered
def __str__(self):
return self._template.format(
self.requested.path, self.discovered.path,
)
__repr__ = __str__
def _expected_device(requested_device):
"""
Given a device we requested from AWS EBS, determine the OS device path that
will actually be created.
This maps the EBS-required ``/dev/sdX`` names to the ``/dev/xvdX`` names that are
used by currently supported platforms (Ubuntu 14.04 and CentOS 7).
"""
prefix = b"/dev/sd"
if requested_device.startswith(prefix):
return FilePath(b"/dev").child(b"xvd" + requested_device[len(prefix):])
raise ValueError(
"Unsupported requested device {!r}".format(requested_device)
)
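# Illustrative mapping performed by _expected_device, derived from the code above:
# _expected_device(b"/dev/sdf") -> FilePath(b"/dev/xvdf")
# _expected_device(b"/dev/hdf") -> raises ValueError (unsupported prefix)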
def ec2_client(region, zone, access_key_id, secret_access_key):
"""
Establish connection to EC2 client.
:param str region: The name of the EC2 region to connect to.
:param str zone: The zone for the EC2 region to connect to.
:param str access_key_id: "aws_access_key_id" credential for EC2.
:param str secret_access_key: "aws_secret_access_key" EC2 credential.
:return: An ``_EC2`` giving information about EC2 client connection
and EC2 instance zone.
"""
# Set 2 retry knobs in Boto to BOTO_NUM_RETRIES:
# 1. ``num_retries``:
# Request automatic exponential backoff and retry
# attempts by Boto if an EC2 API call fails with
# ``RequestLimitExceeded`` due to system load.
# 2. ``metadata_service_num_attempts``:
# Request for retry attempts by Boto to
# retrieve data from Metadata Service used to retrieve
# credentials for IAM roles on EC2 instances.
if not config.has_section('Boto'):
config.add_section('Boto')
config.set('Boto', 'num_retries', BOTO_NUM_RETRIES)
config.set('Boto', 'metadata_service_num_attempts', BOTO_NUM_RETRIES)
# Get Boto EC2 connection with ``EC2ResponseError`` logged by Eliot.
connection = ec2.connect_to_region(region,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
return _EC2(zone=zone,
connection=_LoggedBotoConnection(connection=connection))
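# A minimal usage sketch (placeholder region, zone and credentials, not real values):
# ec2 = ec2_client(
#     region="us-west-1", zone="us-west-1a",
#     access_key_id="<access-key-id>", secret_access_key="<secret-access-key>")
# Calls made through ec2.connection (e.g. ec2.connection.get_all_volumes()) are
# wrapped so that EC2ResponseError details are logged via Eliot.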
def _boto_logged_method(method_name, original_name):
"""
Run a boto.ec2.connection.EC2Connection method and
log additional information about any exceptions that are raised.
:param str method_name: The name of the method of the wrapped object to
call.
:param str original_name: The name of the attribute of self where the
wrapped object can be found.
:return: A function which will call the method of the wrapped object and do
the extra exception logging.
"""
def _run_with_logging(self, *args, **kwargs):
"""
Run given boto.ec2.connection.EC2Connection method with exception
logging for ``EC2ResponseError``.
"""
original = getattr(self, original_name)
method = getattr(original, method_name)
# Trace IBlockDeviceAPI ``method`` as Eliot Action.
# See https://clusterhq.atlassian.net/browse/FLOC-2054
# for ensuring all method arguments are serializable.
with AWS_ACTION(operation=[method_name, args, kwargs]):
try:
return method(*args, **kwargs)
except EC2ResponseError as e:
BOTO_EC2RESPONSE_ERROR(
aws_code=e.code,
aws_message=e.message,
aws_request_id=e.request_id,
).write()
raise
return _run_with_logging
def boto_logger(*args, **kwargs):
"""
Decorator to log all callable boto.ec2.connection.EC2Connection
methods.
:return: A function that will decorate all methods of the given
class with Boto exception logging.
"""
def _class_decorator(cls):
for attr in EC2Connection.__dict__:
# Log wrap all callable methods except `__init__`.
if attr != '__init__':
attribute = getattr(EC2Connection, attr)
if callable(attribute):
setattr(cls, attr,
_boto_logged_method(attr, *args, **kwargs))
return cls
return _class_decorator
@boto_logger("connection")
class _LoggedBotoConnection(PRecord):
"""
Wrapper ``PRecord`` around ``boto.ec2.connection.EC2Connection``
to facilitate logging of exceptions from Boto APIs.
:ivar boto.ec2.connection.EC2Connection connection: Object
representing connection to an EC2 instance with logged
``EC2ConnectionError``.
"""
connection = field(mandatory=True)
class _EC2(PRecord):
"""
:ivar str zone: The name of the zone for the connection.
:ivar boto.ec2.connection.EC2Connection connection: Object
representing connection to an EC2 instance.
"""
zone = field(mandatory=True)
connection = field(mandatory=True)
def _blockdevicevolume_from_ebs_volume(ebs_volume):
"""
Helper function to convert Volume information from
EBS format to Flocker block device format.
:param boto.ec2.volume ebs_volume: Volume in EC2 format.
:return: Input volume in BlockDeviceVolume format.
"""
return BlockDeviceVolume(
blockdevice_id=unicode(ebs_volume.id),
size=int(GiB(ebs_volume.size).to_Byte().value),
attached_to=ebs_volume.attach_data.instance_id,
dataset_id=UUID(ebs_volume.tags[DATASET_ID_LABEL])
)
def _get_ebs_volume_state(volume):
"""
Fetch input EBS volume's latest state from backend.
:param boto.ec2.volume volume: Volume that needs state update.
:returns: EBS volume with latest state known to backend.
:rtype: boto.ec2.volume
"""
return volume.update()
def _should_finish(operation, volume, update, start_time,
timeout=VOLUME_STATE_CHANGE_TIMEOUT):
"""
Helper function to determine if wait for volume's state transition
resulting from given operation is over.
The method completes if volume reached expected end state, or, failed
to reach expected end state, or we timed out waiting for the volume to
reach expected end state.
:param NamedConstant operation: Operation performed on given volume.
:param boto.ec2.volume volume: Target volume of | |
[]
starting_time = time.time()
self.DurationPerTrial = pd.Series(index=np.arange(repeats), dtype=float)
self.RetPerTrial = pd.Series(index=np.arange(repeats), dtype=float)
ret_per_trial = []
# Try as many times as required by the integer 'repeats'
for i in np.arange(repeats):
start_trial = time.time()
if not pick_ml and progress:
print(f"\nInstance {self.instance}: Trial {i}\n")
# RUN HEURISTIC
new, reduced_trees, seq_heights, df_pred = Heuristic(progress=progress, reduce_trivial=reduce_trivial,
pick_lowest_cherry=pick_lowest_cherry, pick_ml=pick_ml,
model_name=model_name, str_features=str_features)
if progress:
print(f"Instance {self.instance}: found sequence of length: {len(new)}")
print(f"Instance {self.instance}: adding roots")
# complete partial sequence
new, reduced_trees = sequence_add_roots(new, reduced_trees)
for k in np.arange(len(new) - len(seq_heights)):
seq_heights += [seq_heights[-1]]
if progress:
print(f"Instance {self.instance}: final length: {len(new)}")
self.CPS_Compute_Reps += 1
self.DurationPerTrial[i] = time.time() - start_trial
# FIND RETICULATION NUMBER
converted_new_seq = [(self.labels_reversed[pair[0]], self.labels_reversed[pair[1]]) for pair in new]
# Output the raw network
if self.distances:
network = PhN(seq=converted_new_seq,
reduced_trees=reduced_trees,
heights=seq_heights)
else:
network = PhN(seq=converted_new_seq, reduced_trees=reduced_trees)
# ret_per_trial.append(len(network.reticulations()))
self.RetPerTrial[i] = sum(network.reticulations_non_binary())
# store best sequence
if best is None or len(new) < len(best):
best = new
red_trees_best = reduced_trees
heights_best = seq_heights
if progress:
print(f"Instance {self.instance}: best sequence has length: {len(best)}")
if time_limit and time.time() - starting_time > time_limit:
break
# storing stuff of heuristic
self.CPS_Compute_Time += time.time() - starting_time
# storing best network
new_seq = best
if not self.best_seq_with_lengths or len(new_seq) < len(self.best_seq_with_lengths):
converted_new_seq = []
for pair in new_seq:
converted_new_seq += [(self.labels_reversed[pair[0]], self.labels_reversed[pair[1]])]
self.best_seq_with_lengths = converted_new_seq
self.best_seq_with_lengths_red_trees = red_trees_best
self.best_seq_with_lengths_heights = heights_best
return self.best_seq_with_lengths, df_pred
def CPHeuristic(self, progress=False, reduce_trivial=True, pick_lowest_cherry=True, pick_ml=False, model_name=None,
str_features=None):
# Works in a copy of the input trees, copy_of_inputs, because trees have to be reduced somewhere.
copy_of_inputs = deepcopy(self)
CPS = []
reduced_trees = []
heights_seq = []
candidate_leaves = deepcopy(self.leaves)
# Make dict of reducible pairs
reducible_pairs = self.find_all_pairs()
current_heights = dict() # for each reducible pair: [0] gives height, [1] the number of trees it was computed in.
if pick_ml:
# create initial features
start_time_init = time.time()
features = Features(reducible_pairs, copy_of_inputs.trees, root=2, str_features=str_features)
df_pred = pd.DataFrame(columns=["x", "y", "no_cher_pred", "cher_pred", "ret_cher_pred", "trees_reduced"])
if progress:
print(
f"Instance {self.instance}: Initial features found in {np.round(time.time() - start_time_init, 3)}s")
# open prediction model
if model_name is None:
model_name = "../LearningCherries/RFModels/rf_cherries_lgt_no_cat.joblib"
rf_model = joblib.load(model_name)
else:
df_pred = None
while copy_of_inputs.trees:
if progress:
print(f"Instance {self.instance}: Sequence has length {len(CPS)}")
print(f"Instance {self.instance}: {len(copy_of_inputs.trees)} trees left.\n")
if reduce_trivial:
new_seq, new_red_trees, reducible_pairs, new_heights_seq = copy_of_inputs.reduce_trivial_pairs_lengths(
candidate_leaves, reducible_pairs)
if progress:
print(f"Instance {self.instance}: Trivial cherries reduced")
CPS += new_seq
reduced_trees += new_red_trees
heights_seq += new_heights_seq
if len(copy_of_inputs.trees) == 0:
break
current_heights = copy_of_inputs.update_heights(current_heights, reducible_pairs)
if pick_lowest_cherry:
lowest_height = None
lowest_heights_found = 1
for pair in reducible_pairs:
height_pair_tuple = current_heights[pair][0]
height_pair = float(height_pair_tuple[0] + height_pair_tuple[1]) / 2
new_found = False
if (not lowest_height) or lowest_height > height_pair:
new_found = True
lowest_heights_found = 1
elif lowest_height == height_pair:
lowest_heights_found += 1
if random.random() < 1 / float(lowest_heights_found):
new_found = True
if new_found:
lowest_cherry = pair
lowest_height = height_pair
lowest_height_tuple = height_pair_tuple
chosen_cherry = lowest_cherry
chosen_height_tuple = lowest_height_tuple
elif pick_ml:
# predict if cherry
prediction = pd.DataFrame(np.array([p[:, 1] for p in rf_model.predict_proba(features.data)]).transpose(), index=features.data.index)
max_cherry = prediction[1].argmax()
max_cherry_prob = prediction.iloc[max_cherry, 1]
chosen_cherry = prediction.index[max_cherry]
if max_cherry_prob < 1:
max_ret_cherry = (prediction[1] + prediction[2]).argmax()
chosen_cherry = prediction.index[max_ret_cherry]
chosen_height_tuple = current_heights[chosen_cherry][0]
df_pred.loc[len(df_pred)] = [*chosen_cherry, *prediction.loc[chosen_cherry], np.nan]
if progress:
print(f"Instance {self.instance}: chosen cherry = {chosen_cherry}, "
f"ML prediction = {list(prediction.loc[chosen_cherry])}")
else:
random_cherry_num = np.random.choice(np.arange(len(reducible_pairs)))
chosen_cherry = list(reducible_pairs)[random_cherry_num]
chosen_height_tuple = current_heights[chosen_cherry][0]
CPS += [chosen_cherry]
heights_seq += [chosen_height_tuple]
# update cherries before reducing trees with chosen cherry
if pick_ml:
start_time_update_before = time.time()
features.update_cherry_features_before(chosen_cherry, reducible_pairs, copy_of_inputs.trees)
if progress:
print(f"Instance {self.instance}: Update BEFORE features found in {np.round(time.time() - start_time_update_before, 3)}s")
elif progress:
print(f"Instance {self.instance}: chosen cherry = {chosen_cherry}")
# reduce trees with chosen cherry
new_reduced = copy_of_inputs.reduce_pair_in_all(chosen_cherry, reducible_pairs=reducible_pairs)
reducible_pairs = copy_of_inputs.update_reducible_pairs(reducible_pairs, new_reduced)
if progress:
print(f"Instance {self.instance}: {len(reducible_pairs)} reducible pairs left")
if pick_ml:
df_pred.loc[df_pred.index[-1], "trees_reduced"] = len(new_reduced)  # avoid chained-indexing assignment, which may not write back
start_time_update_after = time.time()
features.update_cherry_features_after(chosen_cherry, reducible_pairs, copy_of_inputs.trees, new_reduced)
if progress:
print(f"Instance {self.instance}: Update AFTER features found in {np.round(time.time() - start_time_update_after, 3)}s")
reduced_trees += [new_reduced]
if reduce_trivial:
candidate_leaves = set(chosen_cherry)
return CPS, reduced_trees, heights_seq, df_pred
# when using machine learning, update the topological/combinatorial length of nodes
def update_node_comb_length(self, x, y, reduced_trees):
for t in reduced_trees:
try:
self.trees[t].nw.nodes[y]["node_comb"] -= 1
except KeyError:
continue
# todo: Returns an updated dictionary of heights of the reducible pairs
def update_heights(self, current_heights, reducible_pairs):
for pair, trees in reducible_pairs.items():
# updating is only necessary when the set of trees for that pair is changed or
# the reducible pair was not reducible before.
if pair not in current_heights or not current_heights[pair][1] == len(trees):
height_pair = self.height_pair(pair, trees)
current_heights[pair] = (height_pair, len(trees))
return current_heights
# Returns the average height of a pair in a set of trees
# The pair must be reducible in each tree in 'trees'
def height_pair(self, pair, trees):
height_pair = [0, 0]
for t in trees:
height_in_t = self.trees[t].height_of_cherry(*pair)
height_pair[0] += height_in_t[0]
height_pair[1] += height_in_t[1]
return [height_pair[0] / float(len(trees)), height_pair[1] / float(len(trees))]
# Finds the set of reducible pairs in all trees
# Returns a dictionary with reducible pairs as keys, and the trees they reduce as values.
def find_all_pairs(self):
reducible_pairs = dict()
for i, t in self.trees.items():
red_pairs_t = t.find_all_reducible_pairs()
for pair in red_pairs_t:
if pair in reducible_pairs:
reducible_pairs[pair].add(i)
else:
reducible_pairs[pair] = {i}
return reducible_pairs
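# For example, if the pair (a, b) is reducible in trees 0 and 3 while (b, c) is
# reducible only in tree 1, find_all_pairs returns {(a, b): {0, 3}, (b, c): {1}}.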
# Returns the updated dictionary of reducible pairs in all trees after a reduction (with the trees they reduce as values)
# we only need to update for the trees that got reduced: 'new_red_treed'
def update_reducible_pairs(self, reducible_pairs, new_red_trees):
# Remove trees to update from all pairs
pair_del = []
for pair, trees in reducible_pairs.items():
trees.difference_update(new_red_trees)
if len(trees) == 0:
pair_del.append(pair)
for pair in pair_del:
del reducible_pairs[pair]
# Add the trees to the right pairs again
for index in new_red_trees:
if index in self.trees:
t = self.trees[index]
red_pairs_t = t.find_all_reducible_pairs()
for pair in red_pairs_t:
if pair in reducible_pairs:
reducible_pairs[pair].add(index)
else:
reducible_pairs[pair] = {index}
return reducible_pairs
# reduces the given pair in all trees
# Returns the set of trees that were reduced
# CHANGES THE SET OF TREES, ONLY PERFORM IN A COPY OF THE CLASS INSTANCE
def reduce_pair_in_all(self, pair, reducible_pairs=None):
if reducible_pairs is None:
    reducible_pairs = dict()
if not len(reducible_pairs):
    print("no reducible pairs")
reduced_trees_for_pair = []
if pair in reducible_pairs:
trees_to_reduce = reducible_pairs[pair]
else:
trees_to_reduce = deepcopy(self.trees)
for t in trees_to_reduce:
if t in self.trees:
tree = self.trees[t]
if tree.reduce_pair(*pair):
reduced_trees_for_pair += [t]
if len(tree.nw.edges()) <= 2:
del self.trees[t]
return set(reduced_trees_for_pair)
# reduces the trivial pairs in the current set of trees with branch lengths
# runs efficiently by giving a set of leaves 'candidate_leaves' that may be involved in trivial pairs
# this set must be given; after a reduction of the pair (a,b), checking only the leaves a and b suffices
# Returns the reduced pairs and the sets of trees that were reduced; also updates the reducible pairs and their heights.
# CHANGES THE SET OF TREES, ONLY PERFORM IN A COPY OF THE CLASS INSTANCE
def reduce_trivial_pairs_lengths(self, candidate_leaves, reducible_pairs):
seq = []
reduced_tree_sets = []
heights_seq = []
while candidate_leaves:
l = candidate_leaves.pop()
new_pairs = list(self.trivial_pair_with(l))
if new_pairs:
# print("found a trivial pair")
seq += new_pairs
for p in new_pairs:
height_p = self.height_pair(p, reducible_pairs[p])
red_trees_p = self.reduce_pair_in_all(p, reducible_pairs=reducible_pairs)
heights_seq += [height_p]
reducible_pairs = self.update_reducible_pairs(reducible_pairs, red_trees_p)
reduced_tree_sets += [red_trees_p]
candidate_leaves = candidate_leaves | set(p)
return seq, reduced_tree_sets, reducible_pairs, heights_seq
# Returns all trivial pairs involving the leaf l
def trivial_pair_with(self, l):
pairs = set()
# Go through all trees t with index i.
for i, t in self.trees.items():
# If the leaf occurs in t
if l in t.leaves:
# Compute reducible pairs of t with the leaf as first coordinate
pairs_in_t = t.find_pairs_with_first(l)
# If we did not have a set of candidate pairs yet, use pairs_in_t
if not pairs:
pairs = pairs_in_t
# Else, the candidate pairs must also be in t, so take intersection
else:
pairs = pairs & pairs_in_t
# If we do not have any candidate | |
KB = 0.53 * x_T
BMT = ((0.085 * x_CB - 0.002) * x_B * x_B) / (x_T * x_CB)
KG = 1.0 + 0.52 * x_D
constraintFuncs[8] = (KB + BMT - KG) - (0.07 * x_B)
constraintFuncs = np.where(constraintFuncs < 0, -constraintFuncs, 0)
f[3] = constraintFuncs[0] + constraintFuncs[1] + constraintFuncs[2] + constraintFuncs[3] + constraintFuncs[4] + constraintFuncs[5] + constraintFuncs[6] + constraintFuncs[7] + constraintFuncs[8]
return f
class RE61():
def __init__(self):
self.problem_name = 'RE61'
self.n_objectives = 6
self.n_variables = 3
self.n_constraints = 0
self.n_original_constraints = 7
self.lbound = np.zeros(self.n_variables)
self.ubound = np.zeros(self.n_variables)
self.lbound[0] = 0.01
self.lbound[1] = 0.01
self.lbound[2] = 0.01
self.ubound[0] = 0.45
self.ubound[1] = 0.10
self.ubound[2] = 0.10
def evaluate(self, x):
f = np.zeros(self.n_objectives)
g = np.zeros(self.n_original_constraints)
# First original objective function
f[0] = 106780.37 * (x[1] + x[2]) + 61704.67
#Second original objective function
f[1] = 3000 * x[0]
# Third original objective function
f[2] = 305700 * 2289 * x[1] / np.power(0.06*2289, 0.65)
# Fourth original objective function
f[3] = 250 * 2289 * np.exp(-39.75*x[1]+9.9*x[2]+2.74)
# Fifth original objective function
f[4] = 25 * (1.39 /(x[0]*x[1]) + 4940*x[2] -80)
# Constraint functions
g[0] = 1 - (0.00139/(x[0]*x[1])+4.94*x[2]-0.08)
g[1] = 1 - (0.000306/(x[0]*x[1])+1.082*x[2]-0.0986)
g[2] = 50000 - (12.307/(x[0]*x[1]) + 49408.24*x[2]+4051.02)
g[3] = 16000 - (2.098/(x[0]*x[1])+8046.33*x[2]-696.71)
g[4] = 10000 - (2.138/(x[0]*x[1])+7883.39*x[2]-705.04)
g[5] = 2000 - (0.417*x[0]*x[1] + 1721.26*x[2]-136.54)
g[6] = 550 - (0.164/(x[0]*x[1])+631.13*x[2]-54.48)
g = np.where(g < 0, -g, 0)
f[5] = g[0] + g[1] + g[2] + g[3] + g[4] + g[5] + g[6]
return f
class RE91():
def __init__(self, set_random_seed=False):
self.problem_name = 'RE91'
self.n_objectives = 9
self.n_variables = 7
self.n_constraints = 0
self.n_original_constraints = 0
self.lbound = np.zeros(self.n_variables)
self.ubound = np.zeros(self.n_variables)
self.lbound[0] = 0.5
self.lbound[1] = 0.45
self.lbound[2] = 0.5
self.lbound[3] = 0.5
self.lbound[4] = 0.875
self.lbound[5] = 0.4
self.lbound[6] = 0.4
self.ubound[0] = 1.5
self.ubound[1] = 1.35
self.ubound[2] = 1.5
self.ubound[3] = 1.5
self.ubound[4] = 2.625
self.ubound[5] = 1.2
self.ubound[6] = 1.2
if set_random_seed:
np.random.seed(seed=0)
def evaluate(self, x):
f = np.zeros(self.n_objectives)
g = np.zeros(self.n_original_constraints)
x1 = x[0]
x2 = x[1]
x3 = x[2]
x4 = x[3]
x5 = x[4]
x6 = x[5]
x7 = x[6]
# stochastic variables
x8 = 0.006 * (np.random.normal(0, 1)) + 0.345
x9 = 0.006 * (np.random.normal(0, 1)) + 0.192
x10 = 10 * (np.random.normal(0, 1)) + 0.0
x11 = 10 * (np.random.normal(0, 1)) + 0.0
# First function
f[0] = 1.98 + 4.9 * x1 + 6.67 * x2 + 6.98 * x3 + 4.01 * x4 + 1.75 * x5 + 0.00001 * x6 + 2.73 * x7
# Second function
f[1] = max(0.0, (1.16 - 0.3717* x2 * x4 - 0.00931 * x2 * x10 - 0.484 * x3 * x9 + 0.01343 * x6 * x10 )/1.0)
# Third function
f[2] = max(0.0, (0.261 - 0.0159 * x1 * x2 - 0.188 * x1 * x8 - 0.019 * x2 * x7 + 0.0144 * x3 * x5 + 0.87570001 * x5 * x10 + 0.08045 * x6 * x9 + 0.00139 * x8 * x11 + 0.00001575 * x10 * x11)/0.32)
# Fourth function
f[3] = max(0.0, (0.214 + 0.00817 * x5 - 0.131 * x1 * x8 - 0.0704 * x1 * x9 + 0.03099 * x2 * x6 - 0.018 * x2 * x7 + 0.0208 * x3 * x8 + 0.121 * x3 * x9 - 0.00364 * x5 * x6 + 0.0007715 * x5 * x10 - 0.0005354 * x6 * x10 + 0.00121 * x8 * x11 + 0.00184 * x9 * x10 - 0.018 * x2 * x2)/0.32)
# Fifth function
f[4] = max(0.0, (0.74 - 0.61* x2 - 0.163 * x3 * x8 + 0.001232 * x3 * x10 - 0.166 * x7 * x9 + 0.227 * x2 * x2)/0.32)
# Sixth function
tmp = (( 28.98 + 3.818 * x3 - 4.2 * x1 * x2 + 0.0207 * x5 * x10 + 6.63 * x6 * x9 - 7.77 * x7 * x8 + 0.32 * x9 * x10) + (33.86 + 2.95 * x3 + 0.1792 * x10 - 5.057 * x1 * x2 - 11 * x2 * x8 - 0.0215 * x5 * x10 - 9.98 * x7 * x8 + 22 * x8 * x9) + (46.36 - 9.9 * x2 - 12.9 * x1 * x8 + 0.1107 * x3 * x10) )/3
f[5] = max(0.0, tmp/32)
# Seventh function
f[6] = max(0.0, (4.72 - 0.5 * x4 - 0.19 * x2 * x3 - 0.0122 * x4 * x10 + 0.009325 * x6 * x10 + 0.000191 * x11 * x11)/4.0)
# Eighth function
f[7] = max(0.0, (10.58 - 0.674 * x1 * x2 - 1.95 * x2 * x8 + 0.02054 * x3 * x10 - 0.0198 * x4 * x10 + 0.028 * x6 * x10)/9.9)
# Ninth function
f[8] = max(0.0, (16.45 - 0.489 * x3 * x7 - 0.843 * x5 * x6 + 0.0432 * x9 * x10 - 0.0556 * x9 * x11 - 0.000786 * x11 * x11)/15.7)
return f
class CRE21():
def __init__(self):
self.problem_name = 'CRE21'
self.n_objectives = 2
self.n_variables = 3
self.n_constraints = 3
self.ubound = np.zeros(self.n_variables)
self.lbound = np.zeros(self.n_variables)
self.lbound[0] = 0.00001
self.lbound[1] = 0.00001
self.lbound[2] = 1.0
self.ubound[0] = 100.0
self.ubound[1] = 100.0
self.ubound[2] = 3.0
def evaluate(self, x):
f = np.zeros(self.n_objectives)
g = np.zeros(self.n_constraints)
x1 = x[0]
x2 = x[1]
x3 = x[2]
# First original objective function
f[0] = x1 * np.sqrt(16.0 + (x3 * x3)) + x2 * np.sqrt(1.0 + x3 * x3)
# Second original objective function
f[1] = (20.0 * np.sqrt(16.0 + (x3 * x3))) / (x1 * x3)
# Constraint functions
g[0] = 0.1 - f[0]
g[1] = 100000.0 - f[1]
g[2] = 100000 - ((80.0 * np.sqrt(1.0 + x3 * x3)) / (x3 * x2))
g = np.where(g < 0, -g, 0)
return f, g
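def _example_evaluate_cre21():
    # A minimal sketch, not part of the original benchmark suite: evaluate CRE21
    # at the midpoint of its box constraints (assumes numpy is imported as np at
    # module level, as it is used throughout this file).
    problem = CRE21()
    x = (problem.lbound + problem.ubound) / 2.0
    f, g = problem.evaluate(x)
    return f, g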
class CRE22():
def __init__(self):
self.problem_name = 'CRE22'
self.n_objectives = 2
self.n_variables = 4
self.n_constraints = 4
self.ubound = np.zeros(self.n_variables)
self.lbound = np.zeros(self.n_variables)
self.lbound[0] = 0.125
self.lbound[1] = 0.1
self.lbound[2] = 0.1
self.lbound[3] = 0.125
self.ubound[0] = 5.0
self.ubound[1] = 10.0
self.ubound[2] = 10.0
self.ubound[3] = 5.0
def evaluate(self, x):
f = np.zeros(self.n_objectives)
g = np.zeros(self.n_constraints)
x1 = x[0]
x2 = x[1]
x3 = x[2]
x4 = x[3]
P = 6000
L = 14
E = 30 * 1e6
# // deltaMax = 0.25
G = 12 * 1e6
tauMax = 13600
sigmaMax = 30000
# First original objective function
f[0] = (1.10471 * x1 * x1 * x2) + (0.04811 * x3 * x4) * (14.0 + x2)
# Second original objective function
f[1] = (4 * P * L * L * L) / (E * x4 * x3 * x3 * x3)
# Constraint functions
M = P * (L + (x2 / 2))
tmpVar = ((x2 * x2) / 4.0) + np.power((x1 + x3) / 2.0, 2)
R = np.sqrt(tmpVar)
tmpVar = ((x2 * x2) / 12.0) + np.power((x1 + x3) / 2.0, 2)
J = 2 * np.sqrt(2) * x1 * x2 * tmpVar
tauDashDash = (M * R) / J
tauDash = P / (np.sqrt(2) * x1 * x2)
tmpVar = tauDash * tauDash + ((2 * tauDash * tauDashDash * x2) / (2 * R)) + (tauDashDash * tauDashDash)
tau = np.sqrt(tmpVar)
sigma = (6 * P * L) / (x4 * x3 * x3)
tmpVar = 4.013 * E * np.sqrt((x3 * x3 * x4 * x4 * x4 * x4 * | |
import re
from flask import jsonify
from sqlalchemy import text
from db import db
class IcuDevelopments:
__build_obj = """
json_build_object(
'timestamp',
agg.timestamp,
'inserted',
agg.last_insert_date,
'last_updated',
agg.last_update,
'num_hospitals',
agg.num_hospitals,
'icu_low_state',
json_build_object(
'Verfügbar',
icu_low_v,
'Begrenzt',
icu_low_b,
'Ausgelastet',
icu_low_a,
'Nicht verfügbar',
icu_low_nv
),
'icu_high_state',
json_build_object(
'Verfügbar',
icu_high_v,
'Begrenzt',
icu_high_b,
'Ausgelastet',
icu_high_a,
'Nicht verfügbar',
icu_high_nv
),
'ecmo_state',
json_build_object(
'Verfügbar',
ecmo_v,
'Begrenzt',
ecmo_b,
'Ausgelastet',
ecmo_a,
'Nicht verfügbar',
ecmo_nv
)
)::jsonb
"""
__agg_cols = """
r.ids as ids,
r.name as name,
c.timestamp as timestamp,
MAX(c.last_update) as last_update,
MAX(c.last_insert_date) as last_insert_date,
r.geom as geom,
SUM(icu_low_v) as icu_low_v,
SUM(icu_low_b) as icu_low_b,
SUM(icu_low_a) as icu_low_a,
SUM(icu_low_nv) as icu_low_nv,
SUM(icu_high_v) as icu_high_v,
SUM(icu_high_b) as icu_high_b,
SUM(icu_high_a) as icu_high_a,
SUM(icu_high_nv) as icu_high_nv,
SUM(ecmo_v) as ecmo_v,
SUM(ecmo_b) as ecmo_b,
SUM(ecmo_a) as ecmo_a,
SUM(ecmo_nv) as ecmo_nv,
COUNT(r.ids) as num_hospitals
"""
def get_hospital(self, from_time, to_time, max_days_old, id_hospital, want_geom: bool):
"""
Return the development of one hospital
"""
return self.__res_single(self.__get_hospitals(from_time, to_time, max_days_old, id_hospital),
from_time, to_time, max_days_old, id_hospital, self.__get_feature_hospital,
want_geom=want_geom)
def get_hospitals(self, from_time, to_time, max_days_old, want_geom: bool):
"""
Return the development of all hospitals
"""
return self.__res_collection(self.__get_hospitals(from_time, to_time, max_days_old, None), from_time, to_time,
max_days_old, self.__get_feature_hospital, want_geom=want_geom)
def get_county(self, from_time, to_time, max_days_old, id_county, want_geom: bool):
"""
Return the development of icu capacities for one county
"""
return self.__res_single(
self.__agg_query('landkreise_extended', 'county_id', from_time, to_time, max_days_old, id_county),
from_time, to_time, max_days_old, id_county, self.__get_feature_agg,
want_geom=want_geom)
def get_by_counties(self, from_time, to_time, max_days_old, want_geom: bool):
"""
Return the development of icu capacities by counties
"""
return self.__res_collection(
self.__agg_query('landkreise_extended', 'county_id', from_time, to_time, max_days_old, None),
from_time, to_time, max_days_old, self.__get_feature_agg, want_geom=want_geom)
def get_district(self, from_time, to_time, max_days_old, id_district, want_geom: bool):
"""
Return the development of icu capacities for one district
"""
return self.__res_single(
self.__agg_query('regierungsbezirke', 'gd_id', from_time, to_time, max_days_old, id_district),
from_time, to_time, max_days_old, id_district, self.__get_feature_agg,
want_geom=want_geom)
def get_by_districts(self, from_time, to_time, max_days_old, want_geom: bool):
"""
Return the development of icu capacities by districts
"""
return self.__res_collection(
self.__agg_query('regierungsbezirke', 'gd_id', from_time, to_time, max_days_old, None),
from_time, to_time, max_days_old, self.__get_feature_agg, want_geom=want_geom)
def get_state(self, from_time, to_time, max_days_old, id_state, want_geom: bool):
"""
Return the development of icu capacities for one state
"""
return self.__res_single(
self.__agg_query('bundeslaender', 'state_id', from_time, to_time, max_days_old, id_state),
from_time, to_time, max_days_old, id_state, self.__get_feature_agg,
want_geom=want_geom)
def get_by_states(self, from_time, to_time, max_days_old, want_geom: bool):
"""
Return the development of icu capacities by states
"""
return self.__res_collection(
self.__agg_query('bundeslaender', 'state_id', from_time, to_time, max_days_old, None),
from_time, to_time, max_days_old, self.__get_feature_agg, want_geom=want_geom)
def get_country(self, from_time, to_time, max_days_old, id_country, want_geom: bool):
"""
Return the development of icu capacities for one country
"""
return self.__res_single(
self.__agg_query('germany', 'country_id', from_time, to_time, max_days_old, id_country),
from_time, to_time, max_days_old, id_country, self.__get_feature_agg,
want_geom=want_geom)
def get_by_countries(self, from_time, to_time, max_days_old, want_geom: bool):
"""
Return the development of icu capacities by countries
"""
return self.__res_collection(self.__agg_query('germany', 'country_id', from_time, to_time, max_days_old, None),
from_time, to_time, max_days_old, self.__get_feature_agg, want_geom=want_geom)
def get_aggregated(self, agg_dict: dict, from_time: str, to_time: str, want_geom: bool):
return self.__res_single(sql_stmt=self.__agg_region_query(agg_dict, from_time, to_time),
from_time=from_time, to_time=to_time, max_days_old=5, cb=self.__get_feature_agg,
id_hospital=None, want_geom=want_geom)
@staticmethod
def __get_feature_hospital(r, want_geom: bool):
# agg.id,
# agg.name,
# agg.address,
# agg.state,
# agg.contact,
# agg.helipad_nearby,
# st_asgeojson(agg.location) :: jsonb AS geom,
# CASE
# WHEN min(agg.timestamp) IS NULL THEN NULL
# ELSE json_agg(
# {buildObj}
# ORDER BY
# agg.timestamp
# )::jsonb
# END AS development
feature = {
"type": 'Feature',
"properties": {
"id": r[0],
"name": r[1],
"address": r[2],
"contact": r[4],
"helipad_nearby": r[5],
"developments": r[7]
}
}
if want_geom:
feature['geometry'] = r[6]
return feature
@staticmethod
def __res_collection(sql_stmt, from_time, to_time, max_days_old, cb, want_geom: bool):
sql_result = db.engine.execute(sql_stmt, from_time=from_time, to_time=to_time, max_days_old=max_days_old) \
.fetchall()
features = []
for r in sql_result:
feature = cb(r, want_geom=want_geom)
features.append(feature)
featurecollection = {"type": "FeatureCollection", "features": features}
return jsonify(featurecollection), 200
@staticmethod
def __res_single(sql_stmt, from_time, to_time, max_days_old, id_hospital, cb, want_geom: bool):
r = db.engine.execute(sql_stmt, from_time=from_time, to_time=to_time, max_days_old=max_days_old,
id_obj=id_hospital).fetchone()
if r is None:
return jsonify({'error': 'not found'}), 404
feature = cb(r, want_geom=want_geom)
return jsonify(feature), 200
@staticmethod
def __get_feature_agg(r, want_geom):
# agg.ids,
# agg.name,
# st_asgeojson(agg.geom) :: jsonb AS geom,
# st_asgeojson(st_centroid(agg.geom)):: jsonb AS centroid,
# AS development,
# AS developmentDays
feature = {
"type": 'Feature',
"properties": {
"id": r[0],
"name": r[1],
"centroid": r[3],
"developments": r[4],
"developmentDays": r[5]
}
}
if want_geom:
feature['geometry'] = r[2]
return feature
@staticmethod
def __get_agg_cols(show_region=True):
region_cols = ""
if show_region:
region_cols = """
r.ids as ids,
r.name as name,
r.geom as geom,
"""
return f"""
{region_cols}
c.timestamp as timestamp,
MAX(c.last_update) as last_update,
MAX(c.last_insert_date) as last_insert_date,
SUM(icu_low_v) as icu_low_v,
SUM(icu_low_b) as icu_low_b,
SUM(icu_low_a) as icu_low_a,
SUM(icu_low_nv) as icu_low_nv,
SUM(icu_high_v) as icu_high_v,
SUM(icu_high_b) as icu_high_b,
SUM(icu_high_a) as icu_high_a,
SUM(icu_high_nv) as icu_high_nv,
SUM(ecmo_v) as ecmo_v,
SUM(ecmo_b) as ecmo_b,
SUM(ecmo_a) as ecmo_a,
SUM(ecmo_nv) as ecmo_nv,
COUNT(c.hospital_id) as num_hospitals
"""
def __agg_query(self, agg_table, region_id_col, from_time, to_time, max_days_old, id_obj):
sql_from_time = ""
sql_to_time = ""
sql_max_days_old = ""
sql_id_obj = ""
if from_time:
sql_from_time = "AND agg.timestamp >= :from_time"
if to_time:
sql_to_time = "AND agg.timestamp <= :to_time"
if max_days_old:
sql_max_days_old = "AND c.age <= (:max_days_old || ' days') ::interval"
if id_obj:
sql_id_obj = "AND agg.ids = :id_obj"
# noinspection SqlResolve
stmnt = text("""
WITH agg AS (
SELECT {agg_cols}
FROM filled_hospital_timeseries_with_fix c
JOIN {agg_table} r ON c.{region_id_col} = r.ids
WHERE landkreis_id IS NOT NULL
{sql_max_days_old}
GROUP BY r.ids,
r.name,
r.geom,
c.timestamp
)
SELECT agg.ids,
agg.name,
st_asgeojson(agg.geom) :: jsonb AS geom,
st_asgeojson(st_centroid(agg.geom)):: jsonb AS centroid,
-- check if the first value is null, can ONLY happen if there are no values for the landkreis,
-- then we return null
CASE
WHEN min(agg.timestamp) IS NULL THEN NULL
ELSE json_agg(
{build_obj}
ORDER BY
agg.timestamp
)::jsonb
END AS development,
CASE
WHEN min(agg.timestamp) IS NULL THEN NULL
ELSE json_object_agg(
agg.timestamp::date,
{build_obj}
ORDER BY
agg.timestamp
)::jsonb
END AS developmentDays
FROM agg
WHERE 1 = 1
{sql_from_time}
{sql_to_time}
{sql_id_obj}
GROUP BY agg.ids,
agg.name,
agg.geom
""".format(agg_table=agg_table, agg_cols=self.__agg_cols, build_obj=self.__build_obj,
sql_from_time=sql_from_time, sql_to_time=sql_to_time, sql_max_days_old=sql_max_days_old,
sql_id_obj=sql_id_obj, region_id_col=region_id_col)
)
# current_app.logger.debug(stmnt)
return stmnt
def __get_hospitals(self, from_time, to_time, max_days_old, id_hospital):
"""
Build the SQL statement for the development of ICU capacities
per hospital
"""
sql_from_time = ""
sql_to_time = ""
sql_max_days_old = ""
sql_id_county = ""
if from_time:
sql_from_time = "AND agg.timestamp >= :from_time"
if to_time:
sql_to_time = "AND agg.timestamp <= :to_time"
if max_days_old:
sql_max_days_old = "AND agg.age <= (:max_days_old || ' days') ::interval"
if id_hospital:
sql_id_county = "AND agg.hospital_id = :id_obj"
# noinspection SqlUnused
sql_stmt = text("""
SELECT
agg.hospital_id,
agg.name,
agg.address,
agg.state,
agg.contact,
agg.helipad_nearby,
st_asgeojson(agg.geom) :: jsonb AS geom,
CASE
WHEN min(agg.timestamp) IS NULL THEN NULL
ELSE json_agg(
{build_obj}
ORDER BY
agg.timestamp
)::jsonb
END AS development,
CASE
WHEN min(agg.timestamp) IS NULL THEN NULL
ELSE json_object_agg(
agg.timestamp::date,
{build_obj}
ORDER BY
agg.timestamp
)::jsonb
END AS developmentDays
FROM
(SELECT *, 1 AS num_hospitals FROM filled_hospital_timeseries_with_fix) agg
WHERE landkreis_id IS NOT NULL
{sql_from_time}
{sql_to_time}
{sql_id_county}
{sql_max_days_old}
GROUP BY
agg.hospital_id,
agg.name,
agg.address,
agg.state,
agg.contact,
agg.geom,
agg.helipad_nearby
""".format(build_obj=self.__build_obj, sql_from_time=sql_from_time, sql_to_time=sql_to_time,
sql_max_days_old=sql_max_days_old, sql_id_county=sql_id_county))
# current_app.logger.debug(f'Counties: {sql_stmt}')
return sql_stmt
# noinspection DuplicatedCode
def __agg_region_query(self, agg_table_dict: dict[str, str], from_time: str, to_time: str):
sql_from_time = ""
sql_to_time = ""
sql_join_union = []
regex = re.compile(r"[^0-9]", re.IGNORECASE)
all_ids = []
number_of_ids = 0
ids: str
agg_table = ''
region_filter_arr = []
for agg_table, ids in agg_table_dict.items():
if ids is None:
continue
ids_sanitized = list(map(lambda d: re.sub(regex, "", d.strip()), ids.split(",")))
number_of_ids += len(ids_sanitized)
all_ids += ids_sanitized
ids_sanitized_sql = "('" + ("', '".join(ids_sanitized)) + "')"
desc = ''
if agg_table == 'landkreise':
agg_table = 'landkreise_extended'
desc = "bez AS description"
region_filter_arr.append(f'county_id IN {ids_sanitized_sql}')
elif agg_table == 'regierungsbezirke':
desc = "'RB' AS description"
region_filter_arr.append(f'gd_id IN {ids_sanitized_sql}')
elif agg_table == 'bundeslaender':
desc = "'BL' AS description"
region_filter_arr.append(f'state_id IN {ids_sanitized_sql}')
elif agg_table == 'laender':
agg_table = 'germany'
ids_sanitized_sql = '(\'de\')'
all_ids.append('de')
region_filter_arr.append(f'country_id IN {ids_sanitized_sql}')
desc = "'L' AS description"
# noinspection SqlResolve
sql_join_union += [f"SELECT ids, geom, name, {desc} FROM {agg_table} WHERE ids IN {ids_sanitized_sql}"]
sql_joins = " ( " + (" UNION ".join(sql_join_union)) + " ) AS r"
region_filters = " OR ".join(region_filter_arr)
if from_time:
sql_from_time = "AND agg.timestamp >= :from_time"
if to_time:
sql_to_time = "AND agg.timestamp <= :to_time"
# noinspection SqlResolve
sql_stmt = text("""
WITH
regions_arr AS (
SELECT
array_agg(DISTINCT r.ids) as ids,
array_agg(DISTINCT (r.description || ' ' || r.name)) as name,
array_agg(DISTINCT r.description) as "desc",
st_union(DISTINCT r.geom) as geom
FROM {sql_joins}
| |
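# Illustrative wiring sketch (added; the blueprint name, route path and query-parameter
# names below are assumptions for demonstration, not taken from the original project).
# Each IcuDevelopments getter already returns a (flask.Response, status_code) tuple,
# so it can be returned directly from a Flask view function.
from flask import Blueprint, request

icu_routes = Blueprint('icu_routes', __name__)
icu_developments = IcuDevelopments()

@icu_routes.route('/counties/development')
def counties_development():
    # Pass the optional time window and geometry flag straight through to the query builder.
    return icu_developments.get_by_counties(
        from_time=request.args.get('from'),
        to_time=request.args.get('to'),
        max_days_old=request.args.get('max_days_old', 5, type=int),
        want_geom=request.args.get('geom', 'false').lower() == 'true')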
import sys
from ctypes import *
import platform
from .types import *
from ._platform import DLL_PATH, is_windows
def load_libtiepie():
"""Load libtiepie library and import all functions."""
if is_windows:
from ctypes.wintypes import HANDLE, HWND, LPARAM, WPARAM
api = CDLL(DLL_PATH)
api.LibInit.restype = None
api.LibInit.argtypes = []
api.LibIsInitialized.restype = c_uint8
api.LibIsInitialized.argtypes = []
api.LibExit.restype = None
api.LibExit.argtypes = []
api.LibGetVersion.restype = c_uint64
api.LibGetVersion.argtypes = []
api.LibGetVersionExtra.restype = c_char_p
api.LibGetVersionExtra.argtypes = []
api.LibGetConfig.restype = c_uint32
api.LibGetConfig.argtypes = [c_void_p, c_uint32]
api.LibGetLastStatus.restype = c_int32
api.LibGetLastStatus.argtypes = []
api.LibGetLastStatusStr.restype = c_char_p
api.LibGetLastStatusStr.argtypes = []
api.LstUpdate.restype = None
api.LstUpdate.argtypes = []
api.LstGetCount.restype = c_uint32
api.LstGetCount.argtypes = []
api.LstOpenDevice.restype = c_uint32
api.LstOpenDevice.argtypes = [c_uint32, c_uint32, c_uint32]
api.LstOpenOscilloscope.restype = c_uint32
api.LstOpenOscilloscope.argtypes = [c_uint32, c_uint32]
api.LstOpenGenerator.restype = c_uint32
api.LstOpenGenerator.argtypes = [c_uint32, c_uint32]
api.LstOpenI2CHost.restype = c_uint32
api.LstOpenI2CHost.argtypes = [c_uint32, c_uint32]
api.LstCreateCombinedDevice.restype = c_uint32
api.LstCreateCombinedDevice.argtypes = [c_void_p, c_uint32]
api.LstCreateAndOpenCombinedDevice.restype = c_uint32
api.LstCreateAndOpenCombinedDevice.argtypes = [c_void_p, c_uint32]
api.LstRemoveDevice.restype = None
api.LstRemoveDevice.argtypes = [c_uint32]
api.LstRemoveDeviceForce.restype = None
api.LstRemoveDeviceForce.argtypes = [c_uint32]
api.LstDevCanOpen.restype = c_uint8
api.LstDevCanOpen.argtypes = [c_uint32, c_uint32, c_uint32]
api.LstDevGetProductId.restype = c_uint32
api.LstDevGetProductId.argtypes = [c_uint32, c_uint32]
api.LstDevGetVendorId.restype = c_uint32
api.LstDevGetVendorId.argtypes = [c_uint32, c_uint32]
api.LstDevGetName.restype = c_uint32
api.LstDevGetName.argtypes = [c_uint32, c_uint32, c_char_p, c_uint32]
api.LstDevGetNameShort.restype = c_uint32
api.LstDevGetNameShort.argtypes = [c_uint32, c_uint32, c_char_p, c_uint32]
api.LstDevGetNameShortest.restype = c_uint32
api.LstDevGetNameShortest.argtypes = [c_uint32, c_uint32, c_char_p, c_uint32]
api.LstDevGetDriverVersion.restype = c_uint64
api.LstDevGetDriverVersion.argtypes = [c_uint32, c_uint32]
api.LstDevGetRecommendedDriverVersion.restype = c_uint64
api.LstDevGetRecommendedDriverVersion.argtypes = [c_uint32, c_uint32]
api.LstDevGetFirmwareVersion.restype = c_uint64
api.LstDevGetFirmwareVersion.argtypes = [c_uint32, c_uint32]
api.LstDevGetRecommendedFirmwareVersion.restype = c_uint64
api.LstDevGetRecommendedFirmwareVersion.argtypes = [c_uint32, c_uint32]
api.LstDevGetCalibrationDate.restype = c_uint32
api.LstDevGetCalibrationDate.argtypes = [c_uint32, c_uint32]
api.LstDevGetSerialNumber.restype = c_uint32
api.LstDevGetSerialNumber.argtypes = [c_uint32, c_uint32]
api.LstDevGetIPv4Address.restype = c_uint32
api.LstDevGetIPv4Address.argtypes = [c_uint32, c_uint32]
api.LstDevGetIPPort.restype = c_uint16
api.LstDevGetIPPort.argtypes = [c_uint32, c_uint32]
api.LstDevHasServer.restype = c_uint8
api.LstDevHasServer.argtypes = [c_uint32, c_uint32]
api.LstDevGetServer.restype = c_uint32
api.LstDevGetServer.argtypes = [c_uint32, c_uint32]
api.LstDevGetTypes.restype = c_uint32
api.LstDevGetTypes.argtypes = [c_uint32, c_uint32]
api.LstDevGetContainedSerialNumbers.restype = c_uint32
api.LstDevGetContainedSerialNumbers.argtypes = [c_uint32, c_uint32, c_void_p, c_uint32]
api.LstCbDevGetProductId.restype = c_uint32
api.LstCbDevGetProductId.argtypes = [c_uint32, c_uint32, c_uint32]
api.LstCbDevGetVendorId.restype = c_uint32
api.LstCbDevGetVendorId.argtypes = [c_uint32, c_uint32, c_uint32]
api.LstCbDevGetName.restype = c_uint32
api.LstCbDevGetName.argtypes = [c_uint32, c_uint32, c_uint32, c_char_p, c_uint32]
api.LstCbDevGetNameShort.restype = c_uint32
api.LstCbDevGetNameShort.argtypes = [c_uint32, c_uint32, c_uint32, c_char_p, c_uint32]
api.LstCbDevGetNameShortest.restype = c_uint32
api.LstCbDevGetNameShortest.argtypes = [c_uint32, c_uint32, c_uint32, c_char_p, c_uint32]
api.LstCbDevGetDriverVersion.restype = c_uint64
api.LstCbDevGetDriverVersion.argtypes = [c_uint32, c_uint32, c_uint32]
api.LstCbDevGetFirmwareVersion.restype = c_uint64
api.LstCbDevGetFirmwareVersion.argtypes = [c_uint32, c_uint32, c_uint32]
api.LstCbDevGetCalibrationDate.restype = c_uint32
api.LstCbDevGetCalibrationDate.argtypes = [c_uint32, c_uint32, c_uint32]
api.LstCbScpGetChannelCount.restype = c_uint16
api.LstCbScpGetChannelCount.argtypes = [c_uint32, c_uint32, c_uint32]
api.LstSetCallbackDeviceAdded.restype = None
api.LstSetCallbackDeviceAdded.argtypes = [CallbackDeviceList, c_void_p]
api.LstSetCallbackDeviceRemoved.restype = None
api.LstSetCallbackDeviceRemoved.argtypes = [CallbackDeviceList, c_void_p]
api.LstSetCallbackDeviceCanOpenChanged.restype = None
api.LstSetCallbackDeviceCanOpenChanged.argtypes = [CallbackDeviceList, c_void_p]
if platform.system() == 'Linux':
api.LstSetEventDeviceAdded.restype = None
api.LstSetEventDeviceAdded.argtypes = [c_int]
api.LstSetEventDeviceRemoved.restype = None
api.LstSetEventDeviceRemoved.argtypes = [c_int]
api.LstSetEventDeviceCanOpenChanged.restype = None
api.LstSetEventDeviceCanOpenChanged.argtypes = [c_int]
if platform.system() == 'Windows':
api.LstSetEventDeviceAdded.restype = None
api.LstSetEventDeviceAdded.argtypes = [HANDLE]
api.LstSetEventDeviceRemoved.restype = None
api.LstSetEventDeviceRemoved.argtypes = [HANDLE]
api.LstSetEventDeviceCanOpenChanged.restype = None
api.LstSetEventDeviceCanOpenChanged.argtypes = [HANDLE]
api.LstSetMessageDeviceAdded.restype = None
api.LstSetMessageDeviceAdded.argtypes = [HWND]
api.LstSetMessageDeviceRemoved.restype = None
api.LstSetMessageDeviceRemoved.argtypes = [HWND]
api.LstSetMessageDeviceCanOpenChanged.restype = None
api.LstSetMessageDeviceCanOpenChanged.argtypes = [HWND]
api.NetGetAutoDetectEnabled.restype = c_uint8
api.NetGetAutoDetectEnabled.argtypes = []
api.NetSetAutoDetectEnabled.restype = c_uint8
api.NetSetAutoDetectEnabled.argtypes = [c_uint8]
api.NetSrvAdd.restype = c_uint8
api.NetSrvAdd.argtypes = [c_char_p, c_uint32, c_void_p]
api.NetSrvRemove.restype = c_uint8
api.NetSrvRemove.argtypes = [c_char_p, c_uint32, c_uint8]
api.NetSrvGetCount.restype = c_uint32
api.NetSrvGetCount.argtypes = []
api.NetSrvGetByIndex.restype = c_uint32
api.NetSrvGetByIndex.argtypes = [c_uint32]
api.NetSrvGetByURL.restype = c_uint32
api.NetSrvGetByURL.argtypes = [c_char_p, c_uint32]
api.NetSrvSetCallbackAdded.restype = None
api.NetSrvSetCallbackAdded.argtypes = [CallbackHandle, c_void_p]
if platform.system() == 'Linux':
api.NetSrvSetEventAdded.restype = None
api.NetSrvSetEventAdded.argtypes = [c_int]
if platform.system() == 'Windows':
api.NetSrvSetEventAdded.restype = None
api.NetSrvSetEventAdded.argtypes = [HANDLE]
api.NetSrvSetMessageAdded.restype = None
api.NetSrvSetMessageAdded.argtypes = [HWND]
api.ObjClose.restype = None
api.ObjClose.argtypes = [c_uint32]
api.ObjIsRemoved.restype = c_uint8
api.ObjIsRemoved.argtypes = [c_uint32]
api.ObjGetInterfaces.restype = c_uint64
api.ObjGetInterfaces.argtypes = [c_uint32]
api.ObjSetEventCallback.restype = None
api.ObjSetEventCallback.argtypes = [c_uint32, CallbackEvent, c_void_p]
api.ObjGetEvent.restype = c_uint8
api.ObjGetEvent.argtypes = [c_uint32, c_void_p, c_void_p]
if platform.system() == 'Linux':
api.ObjSetEventEvent.restype = None
api.ObjSetEventEvent.argtypes = [c_uint32, c_int]
if platform.system() == 'Windows':
api.ObjSetEventEvent.restype = None
api.ObjSetEventEvent.argtypes = [c_uint32, HANDLE]
api.ObjSetEventWindowHandle.restype = None
api.ObjSetEventWindowHandle.argtypes = [c_uint32, HWND]
api.DevClose.restype = None
api.DevClose.argtypes = [c_uint32]
api.DevIsRemoved.restype = c_uint8
api.DevIsRemoved.argtypes = [c_uint32]
api.DevGetDriverVersion.restype = c_uint64
api.DevGetDriverVersion.argtypes = [c_uint32]
api.DevGetFirmwareVersion.restype = c_uint64
api.DevGetFirmwareVersion.argtypes = [c_uint32]
api.DevGetCalibrationDate.restype = c_uint32
api.DevGetCalibrationDate.argtypes = [c_uint32]
api.DevGetCalibrationToken.restype = c_uint32
api.DevGetCalibrationToken.argtypes = [c_uint32, c_char_p, c_uint32]
api.DevGetSerialNumber.restype = c_uint32
api.DevGetSerialNumber.argtypes = [c_uint32]
api.DevGetIPv4Address.restype = c_uint32
api.DevGetIPv4Address.argtypes = [c_uint32]
api.DevGetIPPort.restype = c_uint16
api.DevGetIPPort.argtypes = [c_uint32]
api.DevGetProductId.restype = c_uint32
api.DevGetProductId.argtypes = [c_uint32]
api.DevGetVendorId.restype = c_uint32
api.DevGetVendorId.argtypes = [c_uint32]
api.DevGetType.restype = c_uint32
api.DevGetType.argtypes = [c_uint32]
api.DevGetName.restype = c_uint32
api.DevGetName.argtypes = [c_uint32, c_char_p, c_uint32]
api.DevGetNameShort.restype = c_uint32
api.DevGetNameShort.argtypes = [c_uint32, c_char_p, c_uint32]
api.DevGetNameShortest.restype = c_uint32
api.DevGetNameShortest.argtypes = [c_uint32, c_char_p, c_uint32]
api.DevHasBattery.restype = c_uint8
api.DevHasBattery.argtypes = [c_uint32]
api.DevGetBatteryCharge.restype = c_int8
api.DevGetBatteryCharge.argtypes = [c_uint32]
api.DevGetBatteryTimeToEmpty.restype = c_int32
api.DevGetBatteryTimeToEmpty.argtypes = [c_uint32]
api.DevGetBatteryTimeToFull.restype = c_int32
api.DevGetBatteryTimeToFull.argtypes = [c_uint32]
api.DevIsBatteryChargerConnected.restype = c_uint8
api.DevIsBatteryChargerConnected.argtypes = [c_uint32]
api.DevIsBatteryCharging.restype = c_uint8
api.DevIsBatteryCharging.argtypes = [c_uint32]
api.DevIsBatteryBroken.restype = c_uint8
api.DevIsBatteryBroken.argtypes = [c_uint32]
api.DevSetCallbackRemoved.restype = None
api.DevSetCallbackRemoved.argtypes = [c_uint32, Callback, c_void_p]
if platform.system() == 'Linux':
api.DevSetEventRemoved.restype = None
api.DevSetEventRemoved.argtypes = [c_uint32, c_int]
if platform.system() == 'Windows':
api.DevSetEventRemoved.restype = None
api.DevSetEventRemoved.argtypes = [c_uint32, HANDLE]
api.DevSetMessageRemoved.restype = None
api.DevSetMessageRemoved.argtypes = [c_uint32, HWND, WPARAM, LPARAM]
api.DevTrGetInputCount.restype = c_uint16
api.DevTrGetInputCount.argtypes = [c_uint32]
api.DevTrGetInputIndexById.restype = c_uint16
api.DevTrGetInputIndexById.argtypes = [c_uint32, c_uint32]
api.ScpTrInIsTriggered.restype = c_uint8
api.ScpTrInIsTriggered.argtypes = [c_uint32, c_uint16]
api.DevTrInGetEnabled.restype = c_uint8
api.DevTrInGetEnabled.argtypes = [c_uint32, c_uint16]
api.DevTrInSetEnabled.restype = c_uint8
api.DevTrInSetEnabled.argtypes = [c_uint32, c_uint16, c_uint8]
api.DevTrInGetKinds.restype = c_uint64
api.DevTrInGetKinds.argtypes = [c_uint32, c_uint16]
api.ScpTrInGetKindsEx.restype = c_uint64
api.ScpTrInGetKindsEx.argtypes = [c_uint32, c_uint16, c_uint32]
api.DevTrInGetKind.restype = c_uint64
api.DevTrInGetKind.argtypes = [c_uint32, c_uint16]
api.DevTrInSetKind.restype = c_uint64
api.DevTrInSetKind.argtypes = [c_uint32, c_uint16, c_uint64]
api.DevTrInIsAvailable.restype = c_uint8
api.DevTrInIsAvailable.argtypes = [c_uint32, c_uint16]
api.ScpTrInIsAvailableEx.restype = c_uint8
api.ScpTrInIsAvailableEx.argtypes = [c_uint32, c_uint16, c_uint32]
api.DevTrInGetId.restype = c_uint32
api.DevTrInGetId.argtypes = [c_uint32, c_uint16]
api.DevTrInGetName.restype = c_uint32
api.DevTrInGetName.argtypes = [c_uint32, c_uint16, c_char_p, c_uint32]
api.DevTrGetOutputCount.restype = c_uint16
api.DevTrGetOutputCount.argtypes = [c_uint32]
api.DevTrGetOutputIndexById.restype = c_uint16
api.DevTrGetOutputIndexById.argtypes = [c_uint32, c_uint32]
api.DevTrOutGetEnabled.restype = c_uint8
api.DevTrOutGetEnabled.argtypes = [c_uint32, c_uint16]
api.DevTrOutSetEnabled.restype = c_uint8
api.DevTrOutSetEnabled.argtypes = [c_uint32, c_uint16, c_uint8]
api.DevTrOutGetEvents.restype = c_uint64
api.DevTrOutGetEvents.argtypes = [c_uint32, c_uint16]
api.DevTrOutGetEvent.restype = c_uint64
api.DevTrOutGetEvent.argtypes = [c_uint32, c_uint16]
api.DevTrOutSetEvent.restype = c_uint64
api.DevTrOutSetEvent.argtypes = [c_uint32, c_uint16, c_uint64]
api.DevTrOutGetId.restype = c_uint32
api.DevTrOutGetId.argtypes = [c_uint32, c_uint16]
api.DevTrOutGetName.restype = c_uint32
api.DevTrOutGetName.argtypes = [c_uint32, c_uint16, c_char_p, c_uint32]
api.DevTrOutTrigger.restype = c_uint8
api.DevTrOutTrigger.argtypes = [c_uint32, c_uint16]
api.ScpGetChannelCount.restype = c_uint16
api.ScpGetChannelCount.argtypes = [c_uint32]
api.ScpChIsAvailable.restype = c_uint8
api.ScpChIsAvailable.argtypes = [c_uint32, c_uint16]
api.ScpChIsAvailableEx.restype = c_uint8
api.ScpChIsAvailableEx.argtypes = [c_uint32, c_uint16, c_uint32, c_double, c_uint8, c_void_p, c_uint16]
api.ScpChGetConnectorType.restype = c_uint32
api.ScpChGetConnectorType.argtypes = [c_uint32, c_uint16]
api.ScpChIsDifferential.restype = c_uint8
api.ScpChIsDifferential.argtypes = [c_uint32, c_uint16]
api.ScpChGetImpedance.restype = c_double
api.ScpChGetImpedance.argtypes = [c_uint32, c_uint16]
api.ScpChGetBandwidths.restype = c_uint32
api.ScpChGetBandwidths.argtypes = [c_uint32, c_uint16, c_void_p, c_uint32]
api.ScpChGetBandwidth.restype = c_double
api.ScpChGetBandwidth.argtypes = [c_uint32, c_uint16]
api.ScpChSetBandwidth.restype = c_double
api.ScpChSetBandwidth.argtypes = [c_uint32, c_uint16, c_double]
api.ScpChGetCouplings.restype = c_uint64
api.ScpChGetCouplings.argtypes = [c_uint32, c_uint16]
api.ScpChGetCoupling.restype = c_uint64
api.ScpChGetCoupling.argtypes = [c_uint32, c_uint16]
api.ScpChSetCoupling.restype = c_uint64
api.ScpChSetCoupling.argtypes = [c_uint32, c_uint16, c_uint64]
api.ScpChGetEnabled.restype = c_uint8
api.ScpChGetEnabled.argtypes = [c_uint32, c_uint16]
api.ScpChSetEnabled.restype = c_uint8
api.ScpChSetEnabled.argtypes = [c_uint32, c_uint16, c_uint8]
api.ScpChGetProbeGain.restype = c_double
api.ScpChGetProbeGain.argtypes = [c_uint32, c_uint16]
api.ScpChSetProbeGain.restype = c_double
api.ScpChSetProbeGain.argtypes = [c_uint32, c_uint16, c_double]
api.ScpChGetProbeOffset.restype = c_double
api.ScpChGetProbeOffset.argtypes = [c_uint32, c_uint16]
api.ScpChSetProbeOffset.restype = c_double
api.ScpChSetProbeOffset.argtypes = [c_uint32, c_uint16, c_double]
api.ScpChGetAutoRanging.restype = c_uint8
api.ScpChGetAutoRanging.argtypes = [c_uint32, c_uint16]
api.ScpChSetAutoRanging.restype = c_uint8
api.ScpChSetAutoRanging.argtypes = [c_uint32, c_uint16, c_uint8]
api.ScpChGetRanges.restype = c_uint32
api.ScpChGetRanges.argtypes = [c_uint32, c_uint16, c_void_p, c_uint32]
api.ScpChGetRangesEx.restype = c_uint32
api.ScpChGetRangesEx.argtypes = [c_uint32, c_uint16, c_uint64, c_void_p, c_uint32]
api.ScpChGetRange.restype = c_double
api.ScpChGetRange.argtypes = [c_uint32, c_uint16]
api.ScpChSetRange.restype = c_double
api.ScpChSetRange.argtypes = [c_uint32, c_uint16, c_double]
api.ScpChHasSafeGround.restype = c_uint8
api.ScpChHasSafeGround.argtypes = [c_uint32, c_uint16]
api.ScpChGetSafeGroundEnabled.restype = c_uint8
api.ScpChGetSafeGroundEnabled.argtypes = [c_uint32, c_uint16]
api.ScpChSetSafeGroundEnabled.restype = c_uint8
api.ScpChSetSafeGroundEnabled.argtypes = [c_uint32, c_uint16, c_uint8]
api.ScpChGetSafeGroundThresholdMin.restype = c_double
api.ScpChGetSafeGroundThresholdMin.argtypes = [c_uint32, c_uint16]
api.ScpChGetSafeGroundThresholdMax.restype = c_double
api.ScpChGetSafeGroundThresholdMax.argtypes = [c_uint32, c_uint16]
api.ScpChGetSafeGroundThreshold.restype = c_double
api.ScpChGetSafeGroundThreshold.argtypes = [c_uint32, c_uint16]
api.ScpChSetSafeGroundThreshold.restype = c_double
api.ScpChSetSafeGroundThreshold.argtypes = [c_uint32, c_uint16, c_double]
api.ScpChVerifySafeGroundThreshold.restype = c_double
api.ScpChVerifySafeGroundThreshold.argtypes = [c_uint32, c_uint16, c_double]
api.ScpChHasTrigger.restype = c_uint8
api.ScpChHasTrigger.argtypes = [c_uint32, c_uint16]
api.ScpChHasTriggerEx.restype = c_uint8
api.ScpChHasTriggerEx.argtypes = [c_uint32, c_uint16, c_uint32]
api.ScpChTrIsAvailable.restype = c_uint8
api.ScpChTrIsAvailable.argtypes = [c_uint32, c_uint16]
api.ScpChTrIsAvailableEx.restype = c_uint8
api.ScpChTrIsAvailableEx.argtypes = [c_uint32, c_uint16, c_uint32, c_double, c_uint8, c_void_p, c_void_p, c_uint16]
api.ScpChTrIsTriggered.restype = c_uint8
api.ScpChTrIsTriggered.argtypes = [c_uint32, c_uint16]
api.ScpChTrGetEnabled.restype = c_uint8
api.ScpChTrGetEnabled.argtypes = [c_uint32, c_uint16]
api.ScpChTrSetEnabled.restype = c_uint8
api.ScpChTrSetEnabled.argtypes = [c_uint32, c_uint16, c_uint8]
api.ScpChTrGetKinds.restype = c_uint64
api.ScpChTrGetKinds.argtypes = [c_uint32, c_uint16]
api.ScpChTrGetKindsEx.restype = c_uint64
api.ScpChTrGetKindsEx.argtypes = [c_uint32, c_uint16, c_uint32]
api.ScpChTrGetKind.restype = c_uint64
api.ScpChTrGetKind.argtypes = [c_uint32, c_uint16]
api.ScpChTrSetKind.restype = c_uint64
api.ScpChTrSetKind.argtypes = [c_uint32, c_uint16, c_uint64]
api.ScpChTrGetLevelModes.restype = c_uint32
api.ScpChTrGetLevelModes.argtypes = [c_uint32, c_uint16]
api.ScpChTrGetLevelMode.restype = c_uint32
api.ScpChTrGetLevelMode.argtypes = [c_uint32, c_uint16]  # handle, channel index (inferred from the surrounding getter declarations)
import pickle
from abc import ABC, abstractmethod
from io import BytesIO
from db_classes import PRG
from geo_utilities import *
class XmlParser(ABC):
def __init__(self, xml_path: str, tags_tuple: tuple, event_type: str) -> None:
self.xml_path = xml_path
self.tags_tuple = tags_tuple
self.event_type = event_type
@property
def get_xml_path(self) -> str:
return f"{self.xml_path}"
@abstractmethod
def check_path(self) -> None:
pass
@abstractmethod
def parse_xml(self) -> None:
pass
class BDOT10kDictsParser(XmlParser):
def __init__(self, xml_path: str, tags_tuple: tuple, event_type: str) -> None:
super().__init__(xml_path, tags_tuple, event_type)
self.bdot10k_dicts = {}
self.check_path()
self.parse_xml()
def check_path(self) -> None:
"""" Method that checks if path to file is valid """
if not os.path.isfile(self.xml_path):
raise Exception("Pod adresem: '" + self.xml_path + "' nie ma pliku '" + os.environ['BDOT10K_DICTS_NAME'] +
"'. Uzupełnij ten plik i uruchom program ponownie!")
def parse_xml(self) -> None:
""" Method that parses xml file to dictionairy object """
xml_contex = etree.iterparse(self.xml_path, events=(self.event_type,), tag=self.tags_tuple)
curr_dict = {}
c_val = ""
for _, curr_node in xml_contex:
c_text = curr_node.text
curr_attrib = curr_node.attrib
if c_text is not None and not c_text.isspace():
c_text1 = "".join([lett if not lett.isupper() else " " + lett.lower() for lett in c_text])
curr_dict[c_val] = c_text1
if 'value' in curr_attrib:
c_val = curr_attrib['value']
if 'name' in curr_attrib:
curr_dict = {}
self.bdot10k_dicts[curr_attrib['name']] = curr_dict
def get_bdot10k_dicts(self) -> dict:
""" Method that returns final dicts """
return self.bdot10k_dicts
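# Illustrative note (added): get_bdot10k_dicts() returns one nested dict per dictionary
# name found in the source file, mapping each coded value to its human-readable,
# lower-cased description. The key and value shown below are made-up placeholders:
#   dicts = BDOT10kDictsParser(dicts_path, all_tags, 'start').get_bdot10k_dicts()
#   dicts["someAttributeName"]["SomeCode"]  # -> "some readable description"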
class BDOT10kDataParser(XmlParser):
def __init__(self, xml_path: str, tags_tuple: tuple, event_type: str, dicts_tags: dict, tags_dict: dict) -> None:
super().__init__(xml_path, tags_tuple, event_type)
self.dicts_tags = dicts_tags
self.tags_dict = tags_dict
self.check_path()
self.bdot10k_dicts = read_bdot10k_dicts()
self.parse_xml()
def check_path(self) -> None:
"""" Method that checks if path to file is valid """
if not os.path.isfile(self.xml_path):
raise Exception("Pod adresem: '" + self.xml_path + "' nie ma pliku '" + os.environ['BDOT10K_NAME'] +
"'. Pobierz ten plik ze strony: '" + os.environ['BDOT10K_LINK'] + "' i uruchom program" +
" ponownie!")
def parse_xml(self) -> None:
""" Method that parses xml file and saves data to SQL database """
with zipfile.ZipFile(self.xml_path, "r") as zfile:
for woj_name in zfile.namelist():
woj_zip = BytesIO(zfile.read(woj_name))
logging.info(woj_name)
bdot10k_woj_rows = []
with zipfile.ZipFile(woj_zip, "r") as zfile2:
for pow_name in zfile2.namelist():
pow_zip = BytesIO(zfile2.read(pow_name))
with zipfile.ZipFile(pow_zip, "r") as zfile3:
for xml_file in zfile3.namelist():
if "BUBD" in xml_file:
# Extract the information we need from the XML file and store it in a list
bd_xml = BytesIO(zfile3.read(xml_file))
xml_contex = etree.iterparse(bd_xml, events=(self.event_type,),
tag=self.tags_tuple)
fin_row = ['', '', '', '', 0, 0, '', 0.0, 0.0, 0.0, '']
bdot10k_woj_rows += self.parse_bdot10k_xml(xml_contex, fin_row)
# Save the building information for the given voivodeship to the database
bdot10k_rows = []
db_save_freq = int(os.environ['DB_SAVE_FREQ'])
with sa.orm.Session(SQL_ENGINE) as db_session:
for i, c_row in enumerate(bdot10k_woj_rows):
bdot10k_rows.append(BDOT10K(*c_row))
if i % db_save_freq == 0:
db_session.bulk_save_objects(bdot10k_rows)
db_session.commit()
bdot10k_rows = []
if bdot10k_rows:
db_session.bulk_save_objects(bdot10k_rows)
db_session.commit()
def parse_bdot10k_xml(self, xml_contex: etree.iterparse, fin_row: list) -> list:
""" Method that exctrats data from BDOT10k XML file """
# Tworzymy liste przechowujaca dane z XML o powiatach
bdot10k_pow_rows = []
all_tags = self.tags_tuple
for _, curr_node in xml_contex:
c_tag = curr_node.tag
row_idx = self.tags_dict[c_tag]
c_text = curr_node.text if curr_node.text is not None else ''
if c_tag == all_tags[7] and c_text and not c_text.isspace():
# Read the WKT geometry
crds_lst = c_text.split(" ")
coords_str = "".join([crds_lst[i] + " " + crds_lst[i + 1] + ", " for i in range(0, len(crds_lst), 2)])
poly_wkt = "POLYGON((" + coords_str[:-2] + "))"
poly_geom = ogr.CreateGeometryFromWkt(poly_wkt)
# Calculate the building area by multiplying the polygon area by the number of storeys
poly_area = poly_geom.GetArea()
fin_row[-4] = int(poly_area) if fin_row[4] == 0 else int(poly_area * fin_row[4])
# Convert the coordinates from the Polish map CRS to the Google Maps CRS
coord_trans = create_coords_transform(int(os.environ["PL_CRDS"]), int(os.environ["WORLD_CRDS"]), True)
poly_geom.Transform(coord_trans)
# Calculate the centroid of the building polygon
poly_centroid = poly_geom.Centroid()
poly_centr_y = np.asarray(poly_centroid.GetY())
poly_centr_x = np.asarray(poly_centroid.GetX())
coords_prec = int(os.environ["COORDS_PREC"])
fin_row[-3] = np.round(poly_centr_y, coords_prec)
fin_row[-2] = np.round(poly_centr_x, coords_prec)
# Convert the geometry to GeoJSON
geojson_poly = poly_geom.ExportToJson()
reduced_geojson = reduce_coordinates_precision(geojson_poly, coords_prec)
fin_row[-1] = reduced_geojson
# Append the new row to the combined list
c_sekt_tpl = get_sector_codes(poly_centr_y, poly_centr_x)
c_sekt_szer = c_sekt_tpl[0]
c_sekt_dl = c_sekt_tpl[1]
kod_sektora = str(c_sekt_szer).zfill(3) + "_" + str(c_sekt_dl).zfill(3)
fin_row2 = [kod_sektora] + fin_row
bdot10k_pow_rows.append(fin_row2)
fin_row = ['', '', '', '', 0, 0, '', 0.0, 0.0, 0.0, '']
elif c_tag == all_tags[5]:
fin_row[row_idx] = 1 if c_text == 'true' else 0
elif c_tag == all_tags[4]:
fin_row[row_idx] = int(c_text) if c_text.isnumeric() else 0
elif c_tag in (all_tags[0], all_tags[1], all_tags[2], all_tags[3]):
c_dict = self.bdot10k_dicts[self.dicts_tags[c_tag]]
if c_text in c_dict:
if fin_row[row_idx] == '':
fin_row[row_idx] = c_dict[c_text]
else:
fin_row[row_idx] += " | " + c_dict[c_text]
else:
fin_row[row_idx] = ""
else:
fin_row[row_idx] = c_text
# Clear processed XML node objects from memory
clear_xml_node(curr_node)
return bdot10k_pow_rows
@time_decorator
def read_bdot10k_dicts() -> dict:
""" Function that reads BDOT10K dicts into dictionairy"""
# Parsujemy plik XML do postaci słowika
dicts_path = os.path.join(os.environ["PARENT_PATH"], os.environ['SLOWS_PATH'])
all_tags = tuple(os.environ['BDOT10K_DICTS_TAGS'].split(";"))
bdot10k_dicts = BDOT10kDictsParser(dicts_path, all_tags, 'start').get_bdot10k_dicts()
# Import additional dictionaries not included in the default dictionary set
bubd_codes_path = os.path.join(os.environ["PARENT_PATH"], os.environ['BUBD_CODES_PATH'])
bdot10k_dicts["x_kod"] = csv_to_dict(bubd_codes_path)
# Import additional dictionaries not included in the default dictionary set
karto10k_path = os.path.join(os.environ["PARENT_PATH"], os.environ['KARTO10K_PATH'])
bdot10k_dicts["x_kodKarto10k"] = csv_to_dict(karto10k_path)
return bdot10k_dicts
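# Illustrative usage sketch (added; not part of the original pipeline). The relative
# paths and tag string are placeholder arguments - the real values come from the
# project's environment configuration read by the functions above.
def example_load_dicts(parent_path, dicts_rel_path, dicts_tags, bubd_codes_rel_path, karto10k_rel_path):
    # read_bdot10k_dicts() is driven entirely by environment variables, so they have to
    # be populated before the call.
    os.environ["PARENT_PATH"] = parent_path
    os.environ["SLOWS_PATH"] = dicts_rel_path
    os.environ["BDOT10K_DICTS_TAGS"] = dicts_tags
    os.environ["BUBD_CODES_PATH"] = bubd_codes_rel_path
    os.environ["KARTO10K_PATH"] = karto10k_rel_path
    return read_bdot10k_dicts()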
class PRGDataParser(XmlParser):
def __init__(self, xml_path: str, tags_tuple: tuple, event_type: str, perms_dict: dict) -> None:
super().__init__(xml_path, tags_tuple, event_type)
self.perms_dict = perms_dict
self.check_path()
self.addr_phrs_list = []
self.addr_phrs_len = 0
self.parse_xml()
def check_path(self) -> None:
"""" Method that checks if path to file is valid """
if not os.path.isfile(self.xml_path):
raise Exception("Pod adresem: '" + self.xml_path + "' nie ma pliku '" + os.environ['PRG_NAME'] +
"'. Pobierz ten plik ze strony: '" + os.environ['PRG_LINK'] +
"' i uruchom program ponownie!")
def parse_xml(self) -> None:
""" Method that parses xml file and saves data to SQL database """
# Define the basic parameters
x_path, x_filename = os.path.split(self.xml_path)
os.chdir(x_path)
# Create the coordinate transformation
wrld_pl_trans = create_coords_transform(int(os.environ['WORLD_CRDS']), int(os.environ['PL_CRDS']), True)
sekt_num = int(os.environ["SEKT_NUM"])
sekt_addr_phrs = np.full(shape=(sekt_num, sekt_num), fill_value='', dtype=object)
# Open an SQL engine session
with sa.orm.Session(SQL_ENGINE) as db_session:
teryt_arr = pd.read_sql(db_session.query(TerytCodes.teryt_name, TerytCodes.teryt_code).statement,
SQL_ENGINE).to_numpy()
json_arr = pd.read_sql(db_session.query(RegJSON.json_teryt, RegJSON.json_shape).statement,
SQL_ENGINE).to_numpy()
with zipfile.ZipFile(x_filename, "r") as zfile:
woj_names = zfile.namelist()
if len(list(os.listdir(x_path))) < 3:
zfile.extractall()
for woj_name in woj_names:
# Read the XML data for the given voivodeship
xml_contex = etree.iterparse(woj_name, events=(self.event_type,), tag=self.tags_tuple[:-1])
# Create the list of PRG address points
points_list = self.create_points_list(xml_contex)
points_arr = np.empty(shape=(len(points_list), 11), dtype=object)
points_arr[:] = points_list[:]
# Convert the PRG coordinates from the Polish CRS to the Google Maps CRS and check whether they lie
# inside the shapefile of their municipality
self.check_prg_pts_add_db(points_arr, woj_name, teryt_arr, json_arr, wrld_pl_trans, sekt_addr_phrs)
# Save the set of unique addresses to disk
with open(os.path.join(os.environ["PARENT_PATH"], os.environ['ADDRS_PATH']), 'wb') as f:
pickle.dump(sekt_addr_phrs, f, pickle.HIGHEST_PROTOCOL)
def create_points_list(self, xml_contex: etree.iterparse) -> list:
""" Creating list of data points """
# Definiujemy podstawowe parametry
c_ind = 0
c_row = [''] * 11
points_list = []
coords_prec = int(os.environ["COORDS_PREC"])
all_tags = self.tags_tuple
num_dict = {all_tags[1]: 3, all_tags[2]: 4, all_tags[3]: 5, all_tags[4]: 6, all_tags[5]: 7, all_tags[6]: 8}
rep_dict = {"ul. ": "", "ulica ": "", "al.": "Aleja", "Al.": "Aleja", "pl.": "Plac", "Pl.": "Plac",
"wTrakcieBudowy": "w trakcie budowy"}
rep_dict_keys = np.asarray(list(rep_dict.keys()))
with sa.orm.Session(SQL_ENGINE) as db_session:
addr_phrs_uniq = db_session.query(UniqPhrs.uniq_phrs).all()[0][0]
for _, curr_node in xml_contex:
c_val = curr_node.text
c_tag = curr_node.tag
if c_tag == all_tags[0] and c_val != "Polska":
c_row[c_ind] = c_val
c_ind += 1
elif c_tag in [all_tags[1], all_tags[2], all_tags[3], all_tags[4], all_tags[5], all_tags[6]]:
c_val = c_val if c_val is not None else ""
sub_in = [substring in c_val for substring in rep_dict_keys]
if sum(sub_in) > 0:
c_key = rep_dict_keys[sub_in][0]
c_val = c_val.replace(c_key, rep_dict[c_key])
c_row[num_dict[c_tag]] = c_val
c_ind = 0
elif c_tag == all_tags[7] or c_tag == all_tags[8]:
c_val = c_val.split()
c_row[-2:] = [round(float(c_val[0]), coords_prec), round(float(c_val[1]), coords_prec)]
if c_row[0] != '' and c_row[1] != '' and c_row[2] != '':
points_list.append(c_row)
uniq_addr, uniq_ids = np.unique(np.asarray([unidecode(c_row[i]).upper() for i in (3, 4, 5, 6, 7)
if c_row[i] != ""]), return_index=True)
addr_arr = uniq_addr[np.argsort(uniq_ids)]
self.addr_phrs_list.append(addr_arr[self.perms_dict[len(addr_arr)]].tolist())
for el in addr_arr:
if el not in addr_phrs_uniq:
addr_phrs_uniq += el + " "
c_ind = 0
c_row = [''] * 11
# Clear processed XML node objects from memory
clear_xml_node(curr_node)
with sa.orm.Session(SQL_ENGINE) as db_session:
db_session.query(UniqPhrs).filter(UniqPhrs.uniq_id == 1).update({'uniq_phrs': addr_phrs_uniq})
db_session.commit()
return points_list
@time_decorator
def check_prg_pts_add_db(self, points_arr: np.ndarray, woj_name: str, teryt_arr: np.ndarray, json_arr: np.ndarray,
wrld_pl_trans: osr.CoordinateTransformation, sekt_addr_phrs: np.ndarray) -> None:
""" Function that converts spatial reference |