descriptions)
# print('self.ready: %r' % self.ready)
# input('pause')
return (titles,descriptions)
def get_string_from_middle_children_1(self,node=None):
'''Combine strings of tags into one string, excluding first heading tag
and table tags.'''
string = None
if self.ready:
if node:
children = self.get_children(node=node)
if children:
# remove table tags from children
children = [c for c in children if c.name != 'table']
# remove the first child if heading tag
if children[0].name in self.heading_tag_names:
children = children[1:]
else:
self.ready = False
self.logger.error('Unable to get_string_from_middle_children_1. ' +
'first tag not in self.heading_tag_names. ' +
'children: {}. '.format(children))
if self.ready:
string = '\n\n'.join([self.get_string(node=child)
for child in children])
else:
self.ready = False
self.logger.error('Unable to get_string_from_middle_children_1. ' +
'children: {}.'.format(children)
)
else:
self.ready = False
self.logger.error('Unable to get_string_from_middle_children_1. ' +
'node: {}.'.format(node)
)
return string
def check_found_hdus(self,title=str(),description=str()):
'''Check if the title and description indicate that the hdus have been found.'''
found_hdus = False
if title or description:
split_title = set([s.lower() for s in title.split()])
description_title = set([s.lower() for s in description.split()])
string_set = split_title | description_title
if ({'required','keywords'}.issubset(string_set) or
{'required','column'}.issubset(string_set)
):
found_hdus = True
else:
self.ready = False
self.logger.error('Unable to check_found_hdus. ' +
'title: {}, '.format(title) +
'description: {}.'.format(description)
)
return found_hdus
def get_hdu_number_and_hdu_title_from_heading_tag(self,node=None):
'''Get hdu_number and hdu_title from first heading tag in BeautifulSoup node.'''
(hdu_number,hdu_title) = (None,None)
if self.ready:
if node:
child_names = self.get_child_names(node=node) if node else None
heading_tag_names = [name for name in child_names
if name in self.heading_tag_names] if child_names else None
heading_tag_name = heading_tag_names[0] if heading_tag_names else None
heading_tag = node.find(heading_tag_name) if heading_tag_name else None
heading = self.get_string(node=heading_tag).strip() if heading_tag else None
if heading_tag and heading:
# hdu_number
# hdu_number from node['id']
node_id = (node.attrs['id']
if node.attrs and 'id' in node.attrs else str())
regex = '(?i)hdu\s*\d+'
matches1 = self.get_matches(regex=regex,string=node_id) if node_id else list()
match1 = matches1[0] if matches1 else str()
regex = '\d+'
matches2 = self.get_matches(regex=regex,string=match1) if match1 else None
node_id_hdu_number = int(matches2[0]) if matches2 else None
# hdu_number from heading_tag['id']
heading_id = (heading_tag.attrs['id']
if heading_tag.attrs and 'id' in heading_tag.attrs else str())
regex = '(?i)hdu\s*\d+'
matches3 = self.get_matches(regex=regex,string=heading_id) if heading_id else list()
match2 = matches3[0] if matches3 else str()
regex = '\d+'
matches4 = self.get_matches(regex=regex,string=match2) if match2 else None
heading_id_hdu_number = int(matches4[0]) if matches4 else None
# hdu_number from hdu_title
regex = '(?i)hdu\s*\d+'
matches5 = (self.get_matches(regex=regex,string=heading)
if heading else list())
heading_hdu_N = matches5[0] if matches5 else str()
regex = '\d+'
matches6 = (self.get_matches(regex=regex,string=heading_hdu_N)
if heading_hdu_N else list())
heading_hdu_number = int(matches6[0]) if matches6 else None
if heading_hdu_number is None:
regex = '(?i)primary'
heading_hdu_number = (0 if self.check_match(regex=regex,string=heading)
else None)
# put hdu_number together
hdu_number = (node_id_hdu_number
if node_id_hdu_number is not None
else heading_id_hdu_number
if heading_id_hdu_number is not None
else heading_hdu_number
if heading_hdu_number is not None
else None)
# put hdu_title together
hdu_title = heading.strip()
# print('\nnode_id: %r' % node_id)
# print('matches1: %r' % matches1)
# print('match1: %r' % match1)
# print('matches2: %r' % matches2)
# print('node_id_hdu_number: %r' % node_id_hdu_number)
#
# print('\nheading_id: %r' % heading_id)
# print('matches3: %r' % matches3)
# print('match2: %r' % match2)
# print('matches4: %r' % matches4)
# print('heading_id_hdu_number: %r' % heading_id_hdu_number)
#
# print('\nheading: %r' % heading)
# print('matches5: %r' % matches5)
# print('heading_hdu_N: %r' % heading_hdu_N)
# print('matches6: %r' % matches6)
# print('heading_hdu_number: %r' % heading_hdu_number)
#
# print('\nhdu_number: %r' % hdu_number)
# print('hdu_title: %r' % hdu_title)
# input('pause')
else:
self.ready = False
self.logger.error('Unable to get_hdu_number_and_hdu_title_from_heading_tag from first heading. ' +
'heading_tag: {}, '.format(heading_tag) +
'heading: {}.'.format(heading)
)
if (hdu_number,hdu_title) == (None,None):
self.ready = False
self.logger.error('Unable to get_hdu_number_and_hdu_title_from_heading_tag. ' +
'hdu_number: {}, '.format(hdu_number) +
'hdu_title: {}'.format(hdu_title)
)
else:
self.ready = False
self.logger.error('Unable to get_hdu_number_and_hdu_title_from_heading_tag. ' +
'node: {} '.format(node) )
return (hdu_number,hdu_title)
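# Standalone sketch of the regex logic used above (get_matches is assumed to wrap
# re.findall); an id or heading such as 'HDU2: Spectra' yields the number 2:
import re
_heading_example = 'HDU2: Spectra'
_hdu_matches = re.findall(r'(?i)hdu\s*\d+', _heading_example) # ['HDU2']
_hdu_number_example = int(re.findall(r'\d+', _hdu_matches[0])[0]) if _hdu_matches else None # 2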
def get_hdu_number_and_hdu_title_from_p_tags_1(self,node=None):
'''Get hdu_number and hdu_title from the p tags of the BeautifulSoup node.'''
(hdu_number,hdu_title) = (None,None)
if self.ready:
if node:
hdu_numbers = list()
for p in node.find_all('p'):
(title,description) = self.get_title_and_description_from_p(p=p)
if title:
regex1 = self.get_table_title_regex_1()
regex2 = '(?i)hdu\s*\d+'
match1 = (self.check_match(regex=regex1,string=title)
if title else None)
matches2 = (self.get_matches(regex=regex2,string=title)
if title else None)
match2 = matches2[0] if matches2 else None
if match1:
if not match2:
hdu_numbers.append(0)
else:
regex3 = '\d+'
matches3 = (self.get_matches(regex=regex3,string=title)
if title else None)
match3 = matches3[0] if matches3 else None
if match3:
hdu_numbers.append(int(match3))
if len(set(hdu_numbers)) == 1: # if all entries in hdu_numbers are the same
hdu_number = hdu_numbers[0]
hdu_title = 'HDU' + str(hdu_number)
if (hdu_number,hdu_title) == (None,None):
self.ready = False
self.logger.error('Unable to get_hdu_number_and_hdu_title_from_p_tags_1. ' +
'hdu_number: {}, '.format(hdu_number) +
'hdu_title: {}'.format(hdu_title)
)
else:
self.ready = False
self.logger.error('Unable to get_hdu_number_and_hdu_title_from_p_tags_1. ' +
'node: {} '.format(node) )
return (hdu_number,hdu_title)
def get_single_digit(self,string=None):
'''Get single digit 0-9 from the given string.'''
digit = None
if self.ready:
if string:
digits = list(filter(str.isdigit, string))
if len(digits) == 1:
digit = int(digits[0])
else:
self.ready = False
self.logger.error('Unable to get_single_digit. ' +
'len(digits) != 1. ' +
'digits: {}, '.format(digits) +
'string: {}'.format(string))
else:
self.ready = False
self.logger.error('Unable to get_single_digit. ' +
'string: {0}'.format(string))
return digit
def get_hdu_divs(self,node=None):
'''Get a list of divs with id containing 'hdu', from the given node.'''
hdu_divs = list()
if self.ready:
if node:
divs = node.find_all('div')
for div in [div for div in divs
if not self.get_string(node=div).isspace()]:
div_id = (div.attrs['id']
if div.attrs and 'id' in div.attrs else None)
if div_id and div_id.lower().startswith('hdu'):
hdu_divs.append(div)
else:
self.ready = False
self.logger.error('Unable to get_hdu_divs. ' +
'node: {}'.format(node))
return hdu_divs
def get_intro_div(self,node=None):
'''Get a list of divs with id containing 'intro', from the given node.'''
intro_divs = list()
if self.ready:
if node:
# create intro_divs list
divs = node.find_all('div')
for div in [div for div in divs
if not self.get_string(node=div).isspace()]:
div_id = (div.attrs['id']
if div.attrs and 'id' in div.attrs else None)
if div_id and div_id.startswith('intro'):
intro_divs.append(div)
# check one and only one intro div
if not intro_divs:
self.ready = False
self.logger.error("Unable to get_intro_divs. " +
"Not found: 'intro' in div['id']. " +
"intro_divs: {}".format(intro_divs)
)
if len(intro_divs) > 1:
self.ready = False
self.logger.error("Unable to get_intro_divs. " +
"len(intro_divs) > 1. " +
"intro_divs: {}".format(intro_divs)
)
else:
self.ready = False
self.logger.error('Unable to get_intro_div. ' +
'node: {}'.format(node))
return intro_divs[0] if self.ready else None
def get_heading_tag_sibling_names(self,node=None):
'''Get a list of heading tags, which are siblings of the given node.'''
heading_tags = list()
if self.ready:
if node:
siblings = set(self.get_sibling_names(node=node))
heading_tags = list(set(self.heading_tag_names) & siblings)
else:
self.ready = False
self.logger.error('Unable to get_heading_tag_sibling_names. ' +
'node: {0}'.format(node))
return heading_tags
def get_heading_tag_child_names(self,node=None):
'''Get a list of heading tags, which are children of the given node.'''
heading_tag_names = list()
if self.ready:
if node:
children = self.get_child_names(node=node)
heading_tag_names = [child for child in children
if child in self.heading_tag_names]
else:
self.ready = False
self.logger.error('Unable to get_heading_tag_child_names. ' +
'node: {0}'.format(node))
return heading_tag_names
def get_heading_tag_children(self,node=None):
'''Get a list of heading tags, which are children of the given node.'''
heading_tags = list()
if self.ready:
if node:
heading_tags = [c for c in node.children
if c.name and c.name in self.heading_tag_names]
else:
self.ready = False
self.logger.error('Unable to get_heading_tag_children. ' +
'node: {0}'.format(node))
return heading_tags
def get_all_strings(self,node=None):
'''Get all strings from the given BeautifulSoup node.'''
all_strings = list()
if self.ready:
if node:
for string in [str(s) for s in node.strings if not s.isspace()]:
all_strings.append(str(string))
else:
self.ready = False
self.logger.error('Unable to get_all_strings. ' +
'node: {0}'.format(node))
return all_strings
def get_datatype_and_hdu_size(self,node=None):
'''Get datatype and hdu_size from the given BeautifulSoup node.'''
(datatype,hdu_size) = (None,None)
if self.ready:
if node:
all_strings = self.get_all_strings(node=node)
if self.check_match(regex='(?i)hdu type',string=all_strings[0]):
datatype = all_strings[1].strip().strip(punctuation).strip()
if self.check_match(regex='(?i)hdu size',string=all_strings[2]):
hdu_size = all_strings[3].strip().strip(punctuation).strip()
else:
self.ready = False
self.logger.error('Unable to get_datatype_and_hdu_size. ' +
'node: {0}'.format(node))
if (datatype,hdu_size) == (None,None):
self.ready = False
self.logger.error('Unable to get_datatype_and_hdu_size. ' +
'datatype: {0}. '.format(datatype) +
'hdu_size: {0}. '.format(hdu_size)
)
return (datatype,hdu_size)
def get_datatype_and_hdu_size_from_dl(self,dl=None):
'''Get datatype and hdu_size from the given BeautifulSoup dl tag.'''
(datatype,hdu_size)=(None,None)
if self.ready:
if dl:
(definitions,descriptions) = self.get_dts_and_dds_from_dl(dl=dl)
for (definition,description) in list(zip(definitions,descriptions)):
if 'hdu type' in definition.lower(): datatype = description
if 'hdu size' in definition.lower(): hdu_size = description
else:
self.ready = False
self.logger.error('Unable to get_datatype_and_hdu_size_from_dl. ' +
'dl: {0}'.format(dl))
return (datatype,hdu_size)
def get_children(self,node=None,names=list()):
'''Get the children from the BeautifulSoup node, excluding line endings.'''
children = None
if self.ready:
if node:
# NOTE: the original body is truncated here; the list below is a
# reconstruction that keeps tag/text children, drops bare line endings,
# and (assumption) filters by tag name when 'names' is given.
children = [child for child in node.children
if str(child) != '\n'
and (not names or child.name in names)]
else:
self.ready = False
self.logger.error('Unable to get_children. ' +
'node: {}.'.format(node))
return children
def string_wrapping(self):
import textwrap
print(textwrap.wrap("HelloWorld", 5))
#Hello
#World
def string_stripping(self):
"00005".lstrip("0") # 5
"50000".rstrip("0") # 5
def string_numerical_values____kwds_unicode(self):
ord('a') # 97 # returns the Unicode code point (ASCII is a subset of Unicode).
chr(97) # 'a' # inverse of ord(); Python 3 accepts any code point up to 0x10FFFF. anki-done
# (Python 2's chr() was limited to [0 ... 255]; unichr() covered [0 ... 65535].)
# ref: https://www.geeksforgeeks.org/ord-function-python/
# prob: https://www.hackerrank.com/challenges/most-commons/problem
def string_substring_counting(self):
# Count substring
":-| ^_^ :-) -_- :-) *_* :-)".count(":-)") #3
"aaaa".count("a", 1, 3) # 2 [start]/[end]
def string_index_of_a_letter(self):
"abcd".index("b") # 1
def every_substring(s): # e.g abba -> ['a', 'b'...,'bba', 'abba']
ss = []
for l in range(1, len(s) + 1): # l = 1..4
for i in range(0, len(s) - l + 1): # l=1 -> i=(0, 4-1+1-1=3)
ss.append(s[i:i + l]) # e.g 0:1 > a 0:2 -> ab # for generator: yield s[i:i+l]
return ss
print(every_substring("abba")) # ['a', 'b', 'b', 'a', 'ab', 'bb', 'ba', 'abb', 'bba', 'abba']
n = len("abba")
substring_count = (n * (n + 1)) // 2
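# Sanity check: the closed form matches the enumeration above (4 + 3 + 2 + 1 = 10).
assert substring_count == len(every_substring("abba")) == 10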
def List_():
L = [1, 4, 2, 1, 2, "A"] # [] for empty list.
# Append:
L.append(99)
L += ["abc"] # Note, += <Iterable>. if you += "abc" you get ["a","b","c"] rather than ["abc].
# Note1: += [i] is 2x slower than .append()
# Insert:
L.insert(0, 5) # insert 5 at start of list.
# Index:
L[0] # first element
L[-1] # last element
L[-3] # 3rd last.
# Remove
L.remove(5)
# Pop (read & remove from right).
L.pop()
# Ref: https://www.geeksforgeeks.org/python-list/
# List has .count() function to count # of items inside it:
[1, 1, 5].count(1)
#2
# Empty list is False. 1+ is True.
L = []
if not L:
print("List is empty.")
# Useful if you want to set default value in functions if function did not do anything with list.
# L2 = L or ["default]
def multi_dimensional_lists():
# List of lists..
L = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
L[0][1]
#2
def deduplicating_list():
L = [3, 1, 2, 3, 1] # example input (L is not otherwise defined in this snippet)
uniquearr = list(dict.fromkeys(L)) # preserves first-seen order (Python 3.7+) #anki-todo
uniquearr.sort(reverse=True)
print(uniquearr)
def list_comprehension():
# Single level
nums = [1, 2, 3, 4, 5, 6]
double_evens = [n * 2 for n in nums if n % 2 == 0]
print("Single level ", double_evens) # [4, 8, 12]
# Nested
first_list = [2, 4, 6]
second_list = [1, 2, 3]
product_first_second = [a * b for a in first_list for b in second_list] # "if a EXPR b" filter possible.
print("Nested", product_first_second) # [2, 4, 6, 4, 8, 12, 6, 12, 18]
# First loop, 2nd 3rd
indent_list_a = [1, 2, 3]
indent_list_b = [10, 20, 30]
added_list = [a + b
for a in indent_list_a
for b in indent_list_b
if a % 2 != 0] # [11, 21, 31, 13, 23, 33]
print("Multi-Line", added_list)
# Practice Problem: https://www.hackerrank.com/challenges/list-comprehensions/problem
# Reference: https://hackernoon.com/list-comprehension-in-python-8895a785550b
def mapping_func_to_iter():
# map(func, iter1 [, iter2]) # the function takes 1 arg per iterable.
numbers = (1, 2, 3, 4)
result = map(lambda x: x + x, numbers) # apply a function (or lambda) to each item. RETURNS: iterator (Python 3).
print(list(result))
# src: https://www.geeksforgeeks.org/python-map-function/
# See src for multi-arg, string looping etc..
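# Sketch of the multi-arg form mentioned above: map() takes one argument per iterable.
sums = map(lambda a, b: a + b, [1, 2, 3], [10, 20, 30])
print(list(sums)) # [11, 22, 33]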
def bisect_insert_sorted(): # TAG__List TAG__Insertion TAG__Sorted binary insert
# Useful to insert an element such that list stays sorted.
# Bisect is O(log n), but insertion is O(n).
from bisect import insort
L = [1, 3]
insort(L, 2)
#L == [1, 2, 3]
def bisect_bisect():
from bisect import bisect
# bisect (== bisect_right) returns the index at which to insert the item so the list stays sorted.
L = [1, 3, 5, 7]
to_ins = 4
L.insert(bisect(L, to_ins), to_ins)
# [1, 3, 4, 5, 7]
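# bisect_left vs bisect (== bisect_right): they differ only when the value is already present.
from bisect import bisect_left
bisect_left([1, 3, 3, 5], 3) # 1 (insertion point before the existing equal items)
bisect([1, 3, 3, 5], 3) # 3 (insertion point after the existing equal items)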
def generator_expressions():
# Create a generator to iterate over (like a yielding function).
gen = (i * 2 for i in range(10))
for i in gen:
print(i)
# 0 2 4 6 ..
# src: https://dbader.org/blog/python-generator-expressions
L = list(range(10))
L_iter = (-L[i] for i in range(len(L)))
for i in L_iter:
print(i)
# 0, -1, -2, -3 ....
def sorting(): # 828a4bad40234324ba24bd02f6595334 -> CheatSheet.
L = [2, 1, 3]
# Opt 1) In-Place:
# myList.sort([key=lambda x: EXPR][, reverse=False]):
L.sort()
# Prob: https://www.hackerrank.com/challenges/python-sort-sort/problem
# Opt 2) Return a new sorted list:
# sorted(<Iterable>, [key=lambda x: EXPR], [reverse=False])
# new_sorted_list = sorted([obj1,obj2,obj3], [key=lambda x: x.attribute], reverse=True)
# Dealing with situation where you need to sort an on index
L = [[2,"a"], [1, "b"]]
L.sort(key=lambda x:x[0]) # -> [[1, 'b'], [2, 'a']] # sort by number at index 0.
L.sort(key=lambda x:x[1]) # -> [[2, 'a'], [1, 'b']] # sort by the letter at index 1
# ref: https://docs.python.org/2/library/functions.html#sorted
# Note: in-place sort can't sort string. sorted(s) can. To sort a string:
"".join(sorted("bac")) # anki-todo
def dict_examples():
mydic = {'jack': 999, 'jennie': 111}
mydic['leo'] = 123
mydic['bob'] = 234
mydic['leo'] # accessing
mydic.pop('bob') # deleting
print(mydic)
# src: https://www.geeksforgeeks.org/python-dictionary/
# See also OrderedDict in collections.
def set_examples():
L = [1,2,3]
B = [2,3,4]
# A set is an unordered collection of items.
# To modify an item, remove(item) & add(item).
myset = set() # ref: https://www.geeksforgeeks.org/python-sets/
myset = set([1,2,3])
myset = {8,9}
myset.add(1)
myset.add(2)
myset.add(3)
myset.update([1,2,3]) # like add(), but takes an iterable.
myset.intersection_update(set()) # Modifies myset.
myset.difference_update(set())
myset.symmetric_difference_update(set())
myset.discard(999) # works even if e not in set.
myset.remove(2) # Raises KeyError if e doesn't exist.
myset.pop() # Removes and returns an arbitrary element; raises KeyError if the set is empty.
if 2 in myset:
print("2 is in set!")
len(myset)
# Return new sets:
# Set Theory:
setA, setB = set(), set()
setA.union(setB) #(A | B) -> returns new set...
setA.intersection(setB) # (A & B) # ref: https://www.programiz.com/python-programming/set
setA.difference(setB) # (A - B) # values that exist in A but not in B.
setA.symmetric_difference(setB) # (A ^ B) # in either a or b, but not both.
# Diagrams: https://www.hackerrank.com/challenges/py-set-symmetric-difference-operation/problem
# setA.issubset(t) s <= b
# setA.issuperset(t) s >= b
# ref: https://docs.python.org/2/library/sets.html
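# Operator forms vs. method forms (same results; the operators require both operands to be sets):
A, B = {1, 2, 3}, {2, 3, 4}
assert A | B == A.union(B) == {1, 2, 3, 4}
assert A & B == {2, 3}
assert A - B == {1}
assert A ^ B == {1, 4}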
def heaps_with_heapq():
# 1ed485d0f6614735afbf9a7efc834caf
# Methods that take an Array as arguments:
from heapq import heapify, heappush, heappop, heapreplace
heap = []
heapify(heap) # transforms the list into a heap, in place. O(n)
heappush(heap, 5) # O(log n)
heappush(heap, 2)
heap[0] # peek at the smallest item (2).
heappop(heap) # pops and returns the smallest item. O(log n)
# heapq is a min heap.
# For max heap, negate input & output "heappush(h, -val)" "-heappop(h)"
# Small classes to wrap min/max heaps are good/clean way to deal with heaps.
class maxheap:
def __init__(self):
self.h = []
self.push = lambda x: heappush(self.h, -x)
self.pop = lambda: -heappop(self.h)
self.peek = lambda: -self.h[0]
self.len = lambda: len(self.h)
class minheap:
def __init__(self):
self.h = []
self.push = lambda x: heappush(self.h, x)
self.pop = lambda: heappop(self.h)
self.peek = lambda: self.h[0]
self.len = lambda: len(self.h)
def kth_min_max():
from heapq import heappush, heapreplace, nsmallest, nlargest
# kth smallest: keep a max heap of size k (heapq is a min heap, so negate values). O(n log k)
h, k = [], 3
for i in [5, 2, 1, 7, 4, 2, 8, 10, 2]:
if len(h) < k:
heappush(h, -i)
elif -i > h[0]: # i is smaller than the current k-th smallest
heapreplace(h, -i)
print("3rd smallest: ", -h[0]) # 2 # Ex: 65fdd808fcad43b2b2726062aeaa108d
# Built-in support for k-min/max in O(n log k) time:
L = [1,2,3,2,5,6,8]
nsmallest(2, L) # [1, 2] (optional key=lambda x: ...)
nlargest(2, L) # [8, 6] (optional key=lambda x: ...)
# Performance wise, you get O(n log k) as expected compared to sort:
# L = rand_list(10000000)
# timeit(lambda: sorted(L)[0:6], number=50)
# 44.241248495000036
# timeit(lambda: heapq.nsmallest(6, L), number=50)
# 14.27249390999998
# Time complexity, indirect uses nlargest ref: https://stackoverflow.com/questions/29240807/python-collections-counter-most-common-complexity/29240949
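# Runnable version of the timing note above (rand_list() is not defined in this file,
# so a random list is built inline; absolute numbers vary by machine):
from random import random
from timeit import timeit
from heapq import nsmallest
L_big = [random() for _ in range(100000)]
print(timeit(lambda: sorted(L_big)[0:6], number=50))
print(timeit(lambda: nsmallest(6, L_big), number=50))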
def heapq_with_classes():
# In general,
import heapq
class Person:
def __init__(self, name, age, height):
self.name = name
self.age = age
self.height = height
def __lt__(self, other):
return self.age < other.age # set to > for max heap.
def __repr__(self):
return "{} {} {}".format(self.name, self.age, self.height)
people = [
Person("Leo", 31, 168),
Person("Valarie", 19, 157),
Person("Jane", 20, 150),
Person("Bob", 40, 170),
Person("M.Jordon", 45, 210)
]
print(heapq.nlargest(2, people, key=lambda x: x.height)[1].name) # bob.
heapq.heapify(people)
while people:
print(heapq.heappop(people))
# Bob
# Valarie 19 157
# Jane 20 150
# Leo 31 168
# Bob 40 170
# M.Jordon 45 210
def runtime_complexities_O(n):
# Ref: Text
# https://www.ics.uci.edu/~pattis/ICS-33/lectures/complexitypython.txt
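# Rough rules of thumb from the reference above (CPython):
# list: append / pop() amortized O(1); insert(0, x) / pop(0) O(n); "x in list" O(n); sort O(n log n)
# dict / set: get, set, membership test average O(1)
# heapq: heappush / heappop O(log n); heapify O(n)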
pass
#!/usr/bin/env python3
import json
import logging
import sys
from itertools import groupby
from random import randint
from time import sleep
from typing import Dict, Iterator, List
import requests
from bs4 import BeautifulSoup, PageElement
assert sys.version_info >= (3, 6), 'Install Python 3.6 or higher'
log = logging.getLogger('philly')
log.setLevel(logging.DEBUG)
fh = logging.FileHandler('./philly.log', 'w', 'utf-8')
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
log.addHandler(fh)
log.addHandler(ch)
class PhillyScraper():
'''
Scrapes all available concerts and films from "The Digital Concert Hall" and
writes the results as json files.
This was possible in March 2020 during the corona outbreak when every concert and film was accessible for free.
'''
MIN_DELAY = 2
MAX_DELAY = 5
def __init__(self, user_token: str, concert_ids_path: str = None, film_ids_path: str = None):
self.__data = []
self.__concert_id_list = []
if concert_ids_path:
with open(concert_ids_path, 'r') as infile:
self.__concert_id_list = infile.read().splitlines()
self.__film_id_list = []
if film_ids_path:
with open(film_ids_path, 'r') as infile:
self.__film_id_list = infile.read().splitlines()
self.__sess = requests.Session()
self.__sess.headers.update({
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0 Safari/605.1.15'
})
self.__sess.cookies.update(dict(hasBeenLoggedIn='1', dch_user_token=user_token))
def __write_output(self, out_name: str = 'all'):
'''
Helper to create the final json file
'''
with open(f'{out_name}.json', 'w') as out:
json.dump(self.__data, out)
def __get_seasons(self) -> List[Dict]:
'''
Returns a list of dicts containing information about each season
'''
log.debug('Getting seasons...')
try:
r = self.__sess.get('https://www.digitalconcerthall.com/json_cacheable_services/get_seasons?language=en')
r.raise_for_status()
except requests.HTTPError as httpe:
log.error(f'Get seasons failed with http error: {httpe}')
sys.exit(-1)
except requests.exceptions.ConnectionError as cerr:
log.error(f'Get seasons failed with network problems: {cerr}')
sys.exit(-1)
except requests.exceptions.Timeout:
log.error('Get seasons timed out!')
sys.exit(-1)
except requests.exceptions.RequestException as err:
log.error(f'Get seasons failed with request error: {err}')
sys.exit(-1)
try:
seasons_dict = r.json()
except ValueError:
log.error(f'Get seasons returned non-json data: {r.text}')
sys.exit(-1)
if len(seasons_dict['items']) != seasons_dict['count']:
log.warning(f'API returned a season count of {seasons_dict["count"]}, but {len(seasons_dict["items"])} were found!')
return seasons_dict['items']
def __extract_text(self, page_element: PageElement) -> str:
'''
Helper to extract the text content from a bs4 page element.
Whitespace between words is trimmed
'''
try:
return ' '.join(page_element.text.split())
except AttributeError:
return ' '.join(page_element.split())
def __make_dict(self, groups: Iterator) -> Dict[str, str]:
'''
Helper to create a dict from an itertools.groupby group
'''
ret = dict(role='KEINE ROLLE', player='KEINER') # German sentinel values: 'no role' / 'nobody'
for tag in groups:
if tag.name == 'strong':
ret['player'] = tag.text.strip()
elif tag.name == 'em':
ret['role'] = tag.text.strip()
return ret
def __extract_metadata(self, concert_id: str, soup: BeautifulSoup) -> Dict:
'''
Extracts all available metadata for a concert and returns it as a dict
'''
log.debug(f'Extracting metadata for concert with ID {concert_id}...')
streams = self.__get_streams(concert_id)
metaDict = dict(concertId=concert_id)
concertTitleTag = soup.select_one('h1[itemprop="name"]')
if concertTitleTag:
metaDict['concertTitle'] = concertTitleTag.text.replace(u'\u2019', "'").strip()
concertProgrammeTag = soup.select_one('div[itemprop="description"]')
if concertProgrammeTag:
metaDict['concertProgramme'] = concertProgrammeTag.text.replace(u'\u2019', "'").strip()
programmeGuideTag = soup.select_one('div#tabs-1')
if programmeGuideTag:
metaDict['concertProgrammeGuide'] = programmeGuideTag.text.replace(u'\u2019', "'").strip()
concertMetaTag = soup.select_one('p.concertMeta')
metaElms = concertMetaTag.contents
metaDict['concertDate'] = metaElms[0].replace(u'\u2013', '-').strip()
if len(metaElms) == 3:
metaDict['concertMeta'] = ' '.join(metaElms[2].split()).replace(u'\u2019', "'")
mainArtistTag = soup.select_one('p.mainArtist')
mainElms = mainArtistTag.contents
try:
metaDict['mainArtist'] = mainElms[0].strip()
except TypeError:
metaDict['mainArtist'] = mainElms[0].text.strip()
except IndexError:
pass
if len(mainElms) == 3:
metaDict['conductor'] = ' '.join(mainElms[2].text.split())
starArtists = soup.select('p.starArtist span[itemprop="name"]')
if len(starArtists):
metaDict['starArtists'] = [' '.join(spanTag.text.split()) for spanTag in starArtists]
supportTag = soup.select_one('div#concert-support')
if supportTag:
metaDict['support'] = supportTag.text.strip()
metaDict['pieces'] = []
for piece in soup.select('ul.list-lines > li'):
concert_piece_id = piece.select_one('div.jsConcertWork')['id']
pieceDict = dict(pieceId=concert_piece_id)
if concert_piece_id in streams:
pieceDict['streamUrl'] = streams[concert_piece_id]
else:
log.warning(f'No stream url found for concert piece with ID {concert_piece_id}')
pieceDict['streamUrl'] = 'not-found'
headers = piece.find('h2').contents
for idx, tag in enumerate(headers):
if tag.name == 'strong':
pieceDict['composer'] = tag.text.strip()
elif tag.name == 'br':
pieceDict['description'] = ''.join(map(self.__extract_text, headers[idx + 1:])).strip()
break
if 'composer' not in pieceDict and 'description' not in pieceDict:
pieceDict['description'] = ''.join(map(self.__extract_text, headers)).strip()
artists = piece.find('p')
if not artists:
metaDict['pieces'].append(pieceDict)
continue
artistList = [ self.__make_dict(g[1]) for g in groupby(artists.contents, key=lambda x: str(x).strip() != ',') if g[0] ]
if len(artistList) != 1 or artistList[0]['role'] != 'KEINE ROLLE' or artistList[0]['player'] != 'KEINER':
temp = dict()
for d in artistList:
role = d['role']
if role in temp:
temp[role].append(d['player'])
else:
temp[role] = [d['player']]
pieceArtists = [ dict(role=k, names=v) for k,v in temp.items() ]
if len(pieceArtists):
pieceDict['artists'] = pieceArtists
metaDict['pieces'].append(pieceDict)
return metaDict
def __get_streams(self, content_id: str) -> Dict[str, str]:
'''
Returns all available stream links for a content id as a dict
'''
log.debug(f'Getting streams for content with ID {content_id}...')
try:
r = self.__sess.get(f'https://www.digitalconcerthall.com/json_services/get_stream_urls?id={content_id}&language=en')
r.raise_for_status()
except requests.HTTPError as httpe:
log.error(f'Get streams for content with ID {content_id} failed with http error: {httpe}')
sys.exit(-1)
except requests.exceptions.ConnectionError as cerr:
log.error(f'Get streams for content with ID {content_id} failed with network problems: {cerr}')
sys.exit(-1)
except requests.exceptions.Timeout:
log.error(f'Get streams for content with ID {content_id} timed out!')
sys.exit(-1)
except requests.exceptions.RequestException as err:
log.error(f'Get streams for content with ID {content_id} failed with request error: {err}')
sys.exit(-1)
try:
urls_dict = r.json()
except ValueError:
log.error(f'Get streams for content with ID {content_id} returned non-json data: {r.text}')
sys.exit(-1)
if not urls_dict['success']:
log.error(f'Get streams failed with message: {urls_dict["message"]}')
sys.exit(-1)
manifest_dict = { k:v[0]['url'] for k, v in urls_dict['urls'].items() }
log.debug(f'Extracted {len(manifest_dict)} streams for content with ID {content_id}')
return manifest_dict
def __handle_concert(self, concert_id: str) -> Dict:
if concert_id not in self.__concert_id_list:
log.debug(f'Scraping concert with ID {concert_id}...')
try:
r = self.__sess.get(f'https://www.digitalconcerthall.com/en/concert/{concert_id}')
r.raise_for_status()
except requests.HTTPError as httpe:
log.error(f'Get concert with ID {concert_id} failed with http error: {httpe}')
sys.exit(-1)
except requests.exceptions.ConnectionError as cerr:
log.error(f'Get concert with ID {concert_id} failed with network problems: {cerr}')
sys.exit(-1)
except requests.exceptions.Timeout:
log.error(f'Get concert with ID {concert_id} timed out!')
sys.exit(-1)
except requests.exceptions.RequestException as err:
log.error(f'Get concert with ID {concert_id} failed with request error: {err}')
sys.exit(-1)
soup = BeautifulSoup(r.content, 'lxml')
return self.__extract_metadata(concert_id, soup)
else:
log.debug(f'Skipping concert with ID {concert_id} because it already exists')
return None
def __handle_season(self, season: Dict):
season_id = season['id']
log.debug(f'Scraping season {season["label"]} with ID {season_id}...')
try:
r = self.__sess.get(f'https://www.digitalconcerthall.com/en/concerts/season_{season_id}')
r.raise_for_status()
except requests.HTTPError as httpe:
log.error(f'Get season with ID {season_id} failed with http error: {httpe}')
sys.exit(-1)
except requests.exceptions.ConnectionError as cerr:
log.error(f'Get season with ID {season_id} failed with network problems: {cerr}')
sys.exit(-1)
except requests.exceptions.Timeout:
log.error(f'Get season with ID {season_id} timed out!')
sys.exit(-1)
except requests.exceptions.RequestException as err:
log.error(f'Get season with ID {season_id} failed with request error: {err}')
sys.exit(-1)
soup = BeautifulSoup(r.content, 'lxml')
concerts = soup.select('li.archive')
season_dict = dict(seasonId=season_id, season=season["label"].replace(u'\u2013', '-'), concerts=[])
for concert in concerts:
concert_dict = self.__handle_concert(concert['id'][8:])
# if none, concert already existed
if concert_dict:
season_dict['concerts'].append(concert_dict)
sleep(randint(self.MIN_DELAY, self.MAX_DELAY))
self.__data.append(season_dict)
def scrape_seasons(self):
'''
Scrapes all concerts for all seasons
'''
self.__data = []
all_seasons = self.__get_seasons()
for season in all_seasons:
self.__handle_season(season)
sleep(randint(self.MIN_DELAY, self.MAX_DELAY))
log.info('Writing to file...')
self.__write_output('seasons')
log.info('Done')
def __extract_film_data(self, tag: PageElement) -> Dict[str, str]:
link = tag.select_one('a')
film_id = link['href'].split('/')[-1]
return dict(film_id=film_id, title=link['title'])
def __get_films(self) -> Iterator[Dict]:
'''
Returns a list of dicts containing information about each film
'''
log.debug('Getting films...')
try:
r = self.__sess.get('https://www.digitalconcerthall.com/en/films')
r.raise_for_status()
except requests.HTTPError as httpe:
log.error(f'Get films failed with http error: {httpe}')
sys.exit(-1)
except requests.exceptions.ConnectionError as cerr:
log.error(f'Get films failed with network problems: {cerr}')
sys.exit(-1)
except requests.exceptions.Timeout:
log.error('Get films timed out!')
sys.exit(-1)
except requests.exceptions.RequestException as err:
log.error(f'Get films failed with request error: {err}')
sys.exit(-1)
soup = BeautifulSoup(r.content, 'lxml')
films = soup.select('li.item')
log.debug(f'Found {len(films)} films')
return map(self.__extract_film_data, films)
def __handle_film(self, film_dict: Dict[str, str]):
film_id = film_dict['film_id']
if film_id not in self.__film_id_list:
log.debug(f'Scraping film with ID {film_id}...')
try:
r = self.__sess.get(f'https://www.digitalconcerthall.com/en/film/{film_id}')
r.raise_for_status()
except requests.HTTPError as httpe:
log.error(f'Get film with ID {film_id} failed with http error: {httpe}')
sys.exit(-1)
except requests.exceptions.ConnectionError as cerr:
log.error(f'Get film with ID {film_id} failed with network problems: {cerr}')
sys.exit(-1)
except requests.exceptions.Timeout:
log.error(f'Get film with ID {film_id} timed out!')
sys.exit(-1)
except requests.exceptions.RequestException as err:
log.error(f'Get film with ID {film_id} failed with request error: {err}')
sys.exit(-1)
soup = BeautifulSoup(r.content, 'lxml')
streams = self.__get_streams(film_id)
if film_id in streams:
film_dict['streamUrl'] = streams[film_id]
else:
log.warning(f'No stream url found for film with ID {film_id}')
film_dict['streamUrl'] = 'not-found'
subTitleTag = soup.select_one('div.margin-15 p')
if subTitleTag:
film_dict['subtitle'] = subTitleTag.text.strip()
actorsTag = soup.select('div.box-50 strong')
if len(actorsTag):
film_dict['actors'] = [actor.text.strip() for actor in actorsTag]
descTag = soup.select_one('div#tabs-0')
if descTag:
film_dict['description'] = descTag.text.strip()
creditsTag = soup.select_one('div#tabs-2')
if creditsTag:
film_dict['credits'] = creditsTag.text.strip()
self.__data.append(film_dict)
else:
log.debug(f'Skipping film with ID {film_id} because it already exists')
def scrape_films(self):
'''
Scrapes all films
'''
self.__data = []
all_films = self.__get_films()
for film in all_films:
self.__handle_film(film)
sleep(randint(self.MIN_DELAY, self.MAX_DELAY))
log.info('Writing to file...')
self.__write_output('films')
log.info('Done')
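# Minimal usage sketch (the token below is a placeholder, not a real credential;
# a valid 'dch_user_token' cookie from a logged-in browser session is required):
if __name__ == '__main__':
    scraper = PhillyScraper(user_token='PLACEHOLDER_TOKEN')
    scraper.scrape_seasons() # writes seasons.json
    scraper.scrape_films() # writes films.json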
import requests as _requests
from goldsberry._apiFunc import *
class demographics:
def __init__(self, playerid):
self._url = 'http://stats.nba.com/stats/commonplayerinfo?'
self._api_param = {'PlayerID':playerid}
self._pull = _requests.get(self._url, params=self._api_param)
def player_info(self):
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def headline_stats(self):
_headers = self._pull.json()['resultSets'][1]['headers']
_values = self._pull.json()['resultSets'][1]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
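# Minimal usage sketch (commented out to avoid a network call on import; 201939 is
# assumed here to be a valid PlayerID on stats.nba.com):
# player = demographics(playerid=201939)
# info = player.player_info() # list of dicts, one per row of the first result set
# stats = player.headline_stats()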
class career_stats:
def __init__(self, playerid, league='NBA',permode=1):
self._url = "http://stats.nba.com/stats/playerprofilev2?"
self._api_param = {'PlayerID':playerid,
'LeagueID':_nbaLeague(league),
'PerMode':_PerModeSmall36(permode)}
self._pull = _requests.get(self._url, params = self._api_param)
def season_totals_regular(self):
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def career_totals_regular(self):
_headers = self._pull.json()['resultSets'][1]['headers']
_values = self._pull.json()['resultSets'][1]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def season_totals_post(self):
_headers = self._pull.json()['resultSets'][2]['headers']
_values = self._pull.json()['resultSets'][2]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def career_totals_post(self):
_headers = self._pull.json()['resultSets'][3]['headers']
_values = self._pull.json()['resultSets'][3]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def season_totals_allstar(self):
_headers = self._pull.json()['resultSets'][4]['headers']
_values = self._pull.json()['resultSets'][4]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def career_totals_allstar(self):
_headers = self._pull.json()['resultSets'][5]['headers']
_values = self._pull.json()['resultSets'][5]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def season_totals_college(self):
_headers = self._pull.json()['resultSets'][6]['headers']
_values = self._pull.json()['resultSets'][6]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def career_totals_college(self):
_headers = self._pull.json()['resultSets'][7]['headers']
_values = self._pull.json()['resultSets'][7]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def season_rankings_regular(self):
_headers = self._pull.json()['resultSets'][8]['headers']
_values = self._pull.json()['resultSets'][8]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def season_rankings_post(self):
_headers = self._pull.json()['resultSets'][9]['headers']
_values = self._pull.json()['resultSets'][9]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def season_high(self):
_headers = self._pull.json()['resultSets'][10]['headers']
_values = self._pull.json()['resultSets'][10]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def career_high(self):
_headers = self._pull.json()['resultSets'][11]['headers']
_values = self._pull.json()['resultSets'][11]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def next_game(self):
_headers = self._pull.json()['resultSets'][12]['headers']
_values = self._pull.json()['resultSets'][12]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
class general_splits:
def __init__(self, playerid, season='2015',seasontype=1, league='NBA',
dateto='', datefrom='', gamesegment=1, lastngames=0, location=1, measuretype=1,
month=0, opponentteamid=0, outcome=1, paceadjust=1, permode=1, period=0,
plusminus=1, rank=1, seasonsegment=1, vsconf=1, vsdiv=1):
self._url = "http://stats.nba.com/stats/playerdashboardbygeneralsplits?"
self._api_param = {'PlayerID':playerid,
'SeasonType': _SeasonType(seasontype),
'Season': _nbaSeason(season),
'LeagueID': _nbaLeague(league),
'DateTo':_valiDate(dateto),
'DateFrom':_valiDate(datefrom),
'GameSegment':_GameSegment(gamesegment),
'LastNGames':lastngames,
'Location':_Location(location),
'MeasureType':_measureType(measuretype),
'Month':month,
'OpponentTeamID':opponentteamid,
'Outcome':_Outcome(outcome),
'PaceAdjust':_PaceAdjust(paceadjust),
'PerMode':_PerModeLarge(permode),
'Period':period,
'PlusMinus':_PlusMinus(plusminus),
'Rank':_Rank(rank),
'SeasonSegment':_SeasonSegment(seasonsegment),
'VsConference':_VsConference(vsconf),
'VsDivision':_VsDivision(vsdiv)}
self._pull = _requests.get(self._url, params = self._api_param)
def overall(self):
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def location(self):
_headers = self._pull.json()['resultSets'][1]['headers']
_values = self._pull.json()['resultSets'][1]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def wins_losses(self):
_headers = self._pull.json()['resultSets'][2]['headers']
_values = self._pull.json()['resultSets'][2]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def month(self):
_headers = self._pull.json()['resultSets'][3]['headers']
_values = self._pull.json()['resultSets'][3]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def pre_post_allstar(self):
_headers = self._pull.json()['resultSets'][4]['headers']
_values = self._pull.json()['resultSets'][4]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def starting_position(self):
_headers = self._pull.json()['resultSets'][5]['headers']
_values = self._pull.json()['resultSets'][5]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def days_rest(self):
_headers = self._pull.json()['resultSets'][6]['headers']
_values = self._pull.json()['resultSets'][6]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
class game_logs:
def __init__(self, playerid, season='2015',seasontype=1, league='NBA'):
self._url = "http://stats.nba.com/stats/playergamelog?"
self._api_param = {'PlayerID':playerid,
'SeasonType': _SeasonType(seasontype),
'Season': _nbaSeason(season),
}
self._pull = _requests.get(self._url, params=self._api_param)
def logs(self):
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
class shot_dashboard:
def __init__(self,playerid,league='NBA',season='2015', seasontype=1,teamid=0,
outcome=1,location=1,month=0,seasonsegment=1,datefrom='',
dateto='',opponentteamid=0,vsconf=1,vsdiv=1,gamesegment=1,
period=0,lastngames=0, permode=1):
self._url = "http://stats.nba.com/stats/playerdashptshots?"
self._api_param = {'PlayerID' : playerid,
'LeagueID': _nbaLeague(league),
'Season' : _nbaSeason(season),
'SeasonType' : _SeasonType(seasontype),
'TeamID' : teamid,
'Outcome' : _Outcome(outcome),
'Location' : _Location(location),
'Month' : month,
'SeasonSegment' : _SeasonSegment(seasonsegment),
'DateFrom' : _valiDate(datefrom),
'DateTo' : _valiDate(dateto),
'OpponentTeamID' : opponentteamid,
'VsConference' : _VsConference(vsconf),
'VsDivision' : _VsDivision(vsdiv),
'GameSegment' : _GameSegment(gamesegment),
'Period' : period,
'LastNGames' : lastngames,
'PerMode' : _PerModeMini(permode)
}
self._pull = _requests.get(self._url, params=self._api_param)
def overall(self):
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def general(self):
_headers = self._pull.json()['resultSets'][1]['headers']
_values = self._pull.json()['resultSets'][1]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def shot_clock(self):
_headers = self._pull.json()['resultSets'][2]['headers']
_values = self._pull.json()['resultSets'][2]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def dribble(self):
_headers = self._pull.json()['resultSets'][3]['headers']
_values = self._pull.json()['resultSets'][3]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def closest_defender(self):
_headers = self._pull.json()['resultSets'][4]['headers']
_values = self._pull.json()['resultSets'][4]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def closest_defender_10ft(self):
_headers = self._pull.json()['resultSets'][5]['headers']
_values = self._pull.json()['resultSets'][5]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def touch_time(self):
_headers = self._pull.json()['resultSets'][6]['headers']
_values = self._pull.json()['resultSets'][6]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
class rebound_dashboard:
def __init__(self,playerid,league='NBA',season='2015', seasontype=1,teamid=0,
outcome=1,location=1,month=0,seasonsegment=1,datefrom='',
dateto='',opponentteamid=0,vsconf=1,vsdiv=1,gamesegment=1,
period=0,lastngames=0,permode=1):
self._url = "http://stats.nba.com/stats/playerdashptreb?"
self._api_param = {'PlayerID' : playerid,
'LeagueID': _nbaLeague(league),
'Season' : _nbaSeason(season),
'SeasonType' : _SeasonType(seasontype),
'TeamID' : teamid,
'Outcome' : _Outcome(outcome),
'Location' : _Location(location),
'Month' : month,
'SeasonSegment' : _SeasonSegment(seasonsegment),
'DateFrom' : _valiDate(datefrom),
'DateTo' : _valiDate(dateto),
'OpponentTeamID' : opponentteamid,
'VsConference' : _VsConference(vsconf),
'VsDivision' : _VsDivision(vsdiv),
'GameSegment' : _GameSegment(gamesegment),
'Period' : period,
'LastNGames' : lastngames,
'PerMode' : _PerModeMini(permode)
}
self._pull = _requests.get(self._url, params=self._api_param)
def overall(self):
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def shot_type(self):
_headers = self._pull.json()['resultSets'][1]['headers']
_values = self._pull.json()['resultSets'][1]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def contesting_rebounders(self):
_headers = self._pull.json()['resultSets'][2]['headers']
_values = self._pull.json()['resultSets'][2]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def shot_distance(self):
_headers = self._pull.json()['resultSets'][3]['headers']
_values = self._pull.json()['resultSets'][3]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def rebound_distance(self):
_headers = self._pull.json()['resultSets'][4]['headers']
_values = self._pull.json()['resultSets'][4]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
class passing_dashboard:
def __init__(self,playerid,league='NBA',season='2015', seasontype=1,teamid=0,
outcome=1,location=1,month=0,seasonsegment=1,datefrom='',
dateto='',opponentteamid=0,vsconf=1,vsdiv=1,gamesegment=1,
period=0,lastngames=0,permode=1):
self._url = "http://stats.nba.com/stats/playerdashptpass?"
self._api_param = {'PlayerID' : playerid,
'LeagueID': _nbaLeague(league),
'Season' : _nbaSeason(season),
'SeasonType' : _SeasonType(seasontype),
'TeamID' : teamid,
'Outcome' : _Outcome(outcome),
'Location' : _Location(location),
'Month' : month,
'SeasonSegment' : _SeasonSegment(seasonsegment),
'DateFrom' : _valiDate(datefrom),
'DateTo' : _valiDate(dateto),
'OpponentTeamID' : opponentteamid,
'VsConference' : _VsConference(vsconf),
'VsDivision' : _VsDivision(vsdiv),
'GameSegment' : _GameSegment(gamesegment),
'Period' : period,
'LastNGames' : lastngames,
'PerMode' : _PerModeMini(permode)
}
self._pull = _requests.get(self._url, params=self._api_param)
def passes_made(self):
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def passes_received(self):
_headers = self._pull.json()['resultSets'][1]['headers']
_values = self._pull.json()['resultSets'][1]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
class defense_dashboard:
def __init__(self,playerid,league='NBA',season='2015', seasontype=1,teamid=0,
outcome=1,location=1,month=0,seasonsegment=1,datefrom='',
dateto='',opponentteamid=0,vsconf=1,vsdiv=1,gamesegment=1,
period=0,lastngames=0,permode=1):
self._url = "http://stats.nba.com/stats/playerdashptreb?"
self._api_param = {'PlayerID' : playerid,
'LeagueID': _nbaLeague(league),
'Season' : _nbaSeason(season),
'SeasonType' : _SeasonType(seasontype),
'TeamID' : teamid,
'Outcome' : _Outcome(outcome),
'Location' : _Location(location),
'Month' : month,
'SeasonSegment' : _SeasonSegment(seasonsegment),
'DateFrom' : _valiDate(datefrom),
'DateTo' : _valiDate(dateto),
'OpponentTeamID' : opponentteamid,
'VsConference' : _VsConference(vsconf),
'VsDivision' : _VsDivision(vsdiv),
'GameSegment' : _GameSegment(gamesegment),
'Period' : period,
'LastNGames' : lastngames,
'PerMode' : _PerModeMini(permode)
}
self._pull = _requests.get(self._url, params=self._api_param)
def defending_shot(self):
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
class shot_log:
def __init__(self,playerid,league='NBA',season='2015',seasontype=1,teamid=0,
outcome=1,location=1,month=0,seasonsegment=1,datefrom='',
dateto='',opponentteamid=0,vsconf=1,vsdiv=1,gamesegment=1,
period=0,lastngames=0):
self._url = "http://stats.nba.com/stats/playerdashptshotlog?"
self._api_param = {'PlayerID' : playerid,
'LeagueID': _nbaLeague(league),
'Season' : _nbaSeason(season),
'SeasonType' : _SeasonType(seasontype),
'TeamID' : teamid,
'Outcome' : _Outcome(outcome),
'Location' : _Location(location),
'Month' : month,
'SeasonSegment' : _SeasonSegment(seasonsegment),
'DateFrom' : _valiDate(datefrom),
'DateTo' : _valiDate(dateto),
'OpponentTeamID' : opponentteamid,
'VsConference' : _VsConference(vsconf),
'VsDivision' : _VsDivision(vsdiv),
'GameSegment' : _GameSegment(gamesegment),
'Period' : period,
'LastNGames' : lastngames
}
self._pull = _requests.get(self._url, params=self._api_param)
def log(self):
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
class rebound_log:
def __init__(self,playerid,league='NBA',season='2015',seasontype=1,teamid=0,
outcome=1,location=1,month=0,seasonsegment=1,datefrom='',
dateto='',opponentteamid=0,vsconf=1,vsdiv=1,gamesegment=1,
period=0,lastngames=0):
self._url = "http://stats.nba.com/stats/playerdashptreboundlogs?"
self._api_param = {'PlayerID' : playerid,
'LeagueID': _nbaLeague(league),
'Season' : _nbaSeason(season),
'SeasonType' : _SeasonType(seasontype),
'TeamID' : teamid,
'Outcome' : _Outcome(outcome),
'Location' : _Location(location),
'Month' : month,
'SeasonSegment' : _SeasonSegment(seasonsegment),
'DateFrom' : _valiDate(datefrom),
'DateTo' : _valiDate(dateto),
'OpponentTeamID' : opponentteamid,
'VsConference' : _VsConference(vsconf),
'VsDivision' : _VsDivision(vsdiv),
'GameSegment' : _GameSegment(gamesegment),
'Period' : period,
'LastNGames' : lastngames
}
self._pull = _requests.get(self._url, params=self._api_param)
def log(self):
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
class shot_chart:
def __init__(self,playerid,leagueid='',season='2015', seasontype=1,teamid=0,
gameid='',outcome=1,location=1,month=0,seasonsegment=1,
datefrom='',dateto='',opponentteamid=0,vsconf=1,vsdiv=1,
position=1,period=0,lastngames=0,aheadbehind=1,
contextmeasure=1,clutchtime=7,rookieyear='',
contextfilter='',startperiod='1',endperiod='10',startrange='0',
endrange='28800', gamesegment=1, rangetype='2'):
if not rookieyear == '':
rookieyear = _nbaSeason(rookieyear)
self._url = "http://stats.nba.com/stats/shotchartdetail?"
self._api_param = {'LeagueID': leagueid,
'Season' : _nbaSeason(season),
'SeasonType' : _SeasonType(seasontype),
'TeamID' : teamid,
'PlayerID' : playerid,
'GameID' : gameid,
'Outcome' : _Outcome(outcome),
'Location' : _Location(location),
'Month' : month,
'SeasonSegment' : _SeasonSegment(seasonsegment),
'DateFrom' : _valiDate(datefrom),
'DateTo' : _valiDate(dateto),
'OpponentTeamID' : opponentteamid,
'VsConference' : _VsConference(vsconf),
'VsDivision' : _VsDivision(vsdiv),
'Position' : _Position(position),
'GameSegment' : _GameSegment(gamesegment),
'Period' : period,
'LastNGames' : lastngames,
'AheadBehind' : _AheadBehind(aheadbehind),
'ContextMeasure' : _ContextMeasure(contextmeasure),
'ClutchTime' : _ClutchTime(clutchtime),
'RookieYear' : rookieyear,
'ContextFilter':contextfilter,
'StartPeriod':startperiod,
'EndPeriod':endperiod,
'StartRange':startrange,
'EndRange':endrange,
'RangeType':rangetype,
}
print(self._api_param)
self._pull = _requests.get(self._url, params=self._api_param)
def chart(self):
_headers = self._pull.json()['resultSets'][0]['headers']
_values = self._pull.json()['resultSets'][0]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def leagueaverage(self):
_headers = self._pull.json()['resultSets'][1]['headers']
_values = self._pull.json()['resultSets'][1]['rowSet']
return [dict(zip(_headers, value)) for value in _values]
def PlayerList(season='2015', AllTime=False, league='NBA'):
if AllTime:
_url = "http://stats.nba.com/stats/commonallplayers?"
_api_param = {'IsOnlyCurrentSeason':"0",
# dependencies/quex-0.34.1/quex/core_engine/generator/transition_block.py
import sys
from copy import deepcopy # used by the __DEBUG_CHECK_ACTIVE_F consistency check below
import quex.core_engine.generator.transition as transition
from quex.input.setup import setup as Setup
LanguageDB = Setup.language_db
__DEBUG_CHECK_ACTIVE_F = False # Use this flag to double check that intervals are adjacent
class __info:
def __init__(self, StateIdx, IsInitStateF, DSM):
assert DSM == None or DSM.__class__.__name__ == "StateMachineDecorator"
self.state_index = StateIdx
self.is_init_state_f = IsInitStateF
self.dsm = DSM
def do(TriggerMap, StateIdx, InitStateF, DSM):
assert type(TriggerMap) == list
assert DSM == None or DSM.__class__.__name__ == "StateMachineDecorator"
# If a state has no transitions, no new input needs to be eaten => no reload.
#
# NOTE: The only case where the buffer reload is not required are empty states,
# AND states during backward input position detection!
# Empty states do not exist any longer, the backward input position is
# essential though for pseudo ambiguous post contexts.
assert TriggerMap != [] # states with empty trigger maps are 'dead end states'. those
# # are not to be coded at this place.
info = __info(StateIdx=StateIdx, IsInitStateF=InitStateF, DSM=DSM)
if len(TriggerMap) > 1:
return __get_code(TriggerMap, info) + "\n"
else:
# We can actually be sure, that the Buffer Limit Code is filtered
# out, since this is the task of the regular expression parser.
# In case of backward lexing in pseudo-ambiguous post conditions,
# it makes absolutely sense that there is only one interval that
# covers all characters (see the discussion there).
assert TriggerMap[0][0].begin == -sys.maxint
assert TriggerMap[0][0].end == sys.maxint
return " " + transition.do(StateIdx, TriggerMap[0][0], TriggerMap[0][1], DSM) + "\n"
def __get_code(TriggerMap, info):
"""Creates code for state transitions from this state. This function is very
similar to the function creating code for a 'NumberSet' condition
(see 'interval_handling').
Writes code that does a mapping according to 'binary search' by
means of if-else-blocks.
"""
TriggerSetN = len(TriggerMap)
if TriggerSetN > 1 and __DEBUG_CHECK_ACTIVE_F:
# -- check that the trigger map consist of sorted adjacent intervals
# This assumption is critical because it is assumed that for any isolated
# interval the bordering intervals have bracketed the remaining cases!
previous_interval = TriggerMap[0][0]
for trigger_interval, target_state_index in TriggerMap[1:]:
assert trigger_interval.begin == previous_interval.end, \
"non-adjacent intervals in TriggerMap\n" + \
"TriggerMap = " + repr(TriggerMap)
assert trigger_interval.end > previous_interval.begin, \
"unsorted intervals in TriggerMap\n" + \
"TriggerMap = " + repr(TriggerMap)
previous_interval = deepcopy(trigger_interval)
#________________________________________________________________________________
txt = " "
if TriggerSetN == 1 :
# (*) Only one interval
# (all boundaring cases must have been dealt with already => case is clear)
# If the input falls into this interval the target trigger is identified!
txt += __create_transition_code(TriggerMap[0], info)
else:
simple_txt = __try_very_simplest_case(TriggerMap, info)
if simple_txt != None:
txt += simple_txt
else:
# two or more intervals => cut in the middle
MiddleTrigger_Idx = int(TriggerSetN / 2)
middle = TriggerMap[MiddleTrigger_Idx]
# input < 0 is impossible, since unicode codepoints start at 0!
if middle[0].begin == 0: txt += __get_code(TriggerMap[MiddleTrigger_Idx:], info)
elif TriggerSetN == 2: txt += __bracket_two_intervals(TriggerMap, info)
elif TriggerSetN == 3: txt += __bracket_three_intervals(TriggerMap, info)
else: txt += __bracket_normally(MiddleTrigger_Idx, TriggerMap, info)
# (*) indent by four spaces (nested blocks are correctly indented)
# delete the last newline, to prevent additional indentation
if txt[-1] == "\n": txt = txt[:-1]
txt = txt.replace("\n", "\n ") + "\n"
return txt
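# Standalone illustration (not quex code) of the binary bracketing performed above:
# a sorted, adjacent interval->target map is cut in the middle and emitted as nested if/else.
def _bracket_sketch(trigger_map):
    # trigger_map: list of ((begin, end), target_state) with sorted, adjacent intervals
    if len(trigger_map) == 1:
        return "goto %s;" % trigger_map[0][1]
    middle = len(trigger_map) // 2
    boundary = trigger_map[middle][0][0]
    return ("if( input < %s ) { %s } else { %s }"
            % (boundary,
               _bracket_sketch(trigger_map[:middle]),
               _bracket_sketch(trigger_map[middle:])))
# Example: three adjacent intervals that trigger to states 5, 6 and 7:
# _bracket_sketch([((0, 65), 5), ((65, 91), 6), ((91, 256), 7)])
# -> 'if( input < 65 ) { goto 5; } else { if( input < 91 ) { goto 6; } else { goto 7; } }'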
def __create_transition_code(TriggerMapEntry, info, IndentF=False):
"""Creates the transition code to a given target based on the information in
the trigger map entry.
"""
interval = TriggerMapEntry[0]
target_state_index = TriggerMapEntry[1]
# target state != None, then the machine is still eating
# => transition to subsequent state.
#
# target state == None, drop into a terminal state (defined by origins).
#
# for details about $transition, see the __transition() function of the
# respective language module.
#
txt = " " + transition.do(info.state_index, interval, target_state_index, info.dsm)
if interval != None:
txt += " " + LanguageDB["$comment"](interval.get_utf8_string()) + "\n"
else:
txt += "\n"
if IndentF:
txt = txt[:-1].replace("\n", "\n ") + "\n" # don't replace last '\n'
return txt
def __try_very_simplest_case(TriggerMap, info):
"""Assume the following setup:
if( input == Char1 ) goto X;
if( input == Char2 ) goto X;
...
if( input == CharN ) goto X;
If the input is equally distributed over the characters 1 to N then the
average number of comparisons for N = 3 will be 2.333. For N = 4, the
average number of comparisons will be 2.75. Binary bracketing requires
ld(N), so for N = 4 the number of comparisons is 2. Thus, up to N = 3
it is advantageous to compare step by step. Also, for N = 1 a simple
comparison is, most likely, more efficient than an 'or' operation over
a list of length '1'.
This function is trying to identify the case where there are only two or
three characters that trigger to the same target state.
RETURNS: 'None' if the very simple implementation does not make sense.
A string if it could be implemented that way
"""
# return None
character_list = []
common_target_state_index = -1
for trigger in TriggerMap:
interval = trigger[0]
target_state_index = trigger[1]
if target_state_index == None: continue
assert target_state_index != -1
# All must have the same target state
if common_target_state_index == -1:
common_target_state_index = target_state_index
elif common_target_state_index != target_state_index:
return None
# For memory reasons it is not wise to expand an interval spanning up to
# sys.maxint characters. Since we do not allow more than three characters
# anyway, do a little sanity pre-check:
if interval.size() > 3: return None
character_list.extend(range(interval.begin, interval.end))
# More than three characters does not make sense
if len(character_list) > 3: return None
if len(character_list) < 2: return None
assert common_target_state_index != -1
txt = LanguageDB["$if in-set"](character_list)
# TriggerInfo = [None, TargetStateIndex] because the interval does not matter.
txt += __create_transition_code([None, common_target_state_index], info, IndentF=True)
txt += LanguageDB["$endif-else"]
txt += __create_transition_code([None, None], info, IndentF=True)
txt += LanguageDB["$end-else"]
return txt
def __bracket_two_intervals(TriggerMap, info):
assert len(TriggerMap) == 2
first = TriggerMap[0]
second = TriggerMap[1]
# If the first interval causes a 'drop out' then make it the second.
## If the second interval is a 'drop out' the 'goto drop out' can be spared,
## since it lands there anyway.
## if second[0] < 0: # target state index < 0 ==> drop out
## tmp = first; first = second; second = tmp
# find interval of size '1'
first_interval = first[0]
second_interval = second[0]
# We only need one comparison at the border between the two intervals
if first_interval.size() == 1: txt = LanguageDB["$if =="](repr(first_interval.begin))
elif second_interval.size() == 1: txt = LanguageDB["$if !="](repr(second_interval.begin))
else: txt = LanguageDB["$if <"](repr(second_interval.begin))
txt += __create_transition_code(first, info, IndentF=True)
txt += LanguageDB["$endif-else"]
txt += __create_transition_code(second, info, IndentF=True)
txt += LanguageDB["$end-else"]
return txt
def __bracket_three_intervals(TriggerMap, info):
assert len(TriggerMap) == 3
# does one interval have the size '1'?
size_one_map = [False, False, False] # size_one_map[i] == True if interval 'i' has size '1'
for i in range(len(TriggerMap)):
interval = TriggerMap[i][0]
if interval.size() == 1: size_one_map[i] = True
target_state_0 = TriggerMap[0][1]
target_state_2 = TriggerMap[2][1]
if target_state_0 == target_state_2:
if TriggerMap[1][0].size() == 1:
# (1) Special Trick I only holds for one single case:
# -- the interval in the middle has size 1
# -- the outer two intervals trigger to the same target state
# if inner character is matched: goto its target
# else: goto alternative target
txt = LanguageDB["$if =="](repr(TriggerMap[1][0].begin))
else:
# (2) Special Trick II only holds for:
# -- the outer two intervals trigger to the same target state
# if character in inner interval: goto its target
# else: goto alternative target
txt = LanguageDB["$if in-interval"](TriggerMap[1][0])
txt += __create_transition_code(TriggerMap[1], info, IndentF=True)
txt += LanguageDB["$endif-else"]
# TODO: Add somehow a mechanism to report that here the intervals 0 | |
# If one schema's cover is contained in the other's, drop the contained (redundant) one.
if a_in_b and not b_in_a:
del TSs[i]
elif b_in_a and not a_in_b:
del TSs[j]
elif a_in_b and b_in_a:
# Keep most complex
if cx_a < cx_b:
del TSs[i]
elif cx_b < cx_a:
del TSs[j]
else:
# they are equal, delete either one
del TSs[i]
if verbose:
print('>>> TWO-SYMBOLS (simplified):')
for i, (tss) in TSs.items():
print("F''-%d: %s | Perms: %s | CX: %s" % (i, tss['tss'], tss['perms'], tss['cx']))
# Final List (from simplified)
TSf = [(tss['tss'][0], tss['perms'], []) for tss in TSs.values()]
# Check if all PI are being covered. If not, include the PI on the TS list
if verbose:
print('>>> Check all PI are accounted for in the TS')
for i, pi in enumerate(pi_matrix, start=0):
if not any([_check_schema_within_schema([pi.tolist()], tss['xl'], dir='a', verbose=verbose)[0] for tss in TSs.values()]):
if verbose:
print("PI-%d '%s' Not in list, ADDING." % (i, pi.tolist()))
TSf.append((pi.tolist(), [], []))
else:
if verbose:
print("PI-%d '%s' OK." % (i, pi.tolist()))
if verbose:
print('>>> Check for Same-Symbol permutables')
# NEW: Step to include same-symbol permutables
for ts, perms, sames in TSf:
# Indices of permutables inputs
idxs = list(set([idx for idxs in perms for idx in idxs]))
# Makes the F'' into a column array so it can be used by '_count_cols_symbols_vX'
ts_matrix = np.array([ts]).T
# Remove Inputs (columns) that already have permutable symbols. Only if there are permutables
if len(idxs):
# Build a boolean row mask so the inputs already covered by a permutable
# group are excluded from the same-symbol search.
rmask = np.zeros(ts_matrix.shape[0], dtype=bool)
rmask[idxs] = True
ts_matrix_left = ts_matrix[~rmask, :]
else:
ts_matrix_left = ts_matrix
if verbose and verbose_level > 10:
print("> F'' Original:")
print(ts_matrix)
print("> Permutables: %s" % (perms))
print("> F'' without permutables:")
print(ts_matrix_left)
counts_matrix = _count_cols_symbols_v2(pi_matrix=ts_matrix_left.T, verbose=False, verbose_level=verbose_level)
perm_groups = _check_identical_cols_count_symbols_v2(counts_matrix=counts_matrix, verbose=verbose, verbose_level=verbose_level)
sames.extend(perm_groups)
# Step to convert the pi list to string
for i, (ts, perms, sames) in enumerate(TSf, start=0):
ts = ''.join(map(str, ts))
TSf[i] = (ts, perms, sames)
# Final list after all PI were accounted for
if verbose:
print('>>> TS (final list):')
for tss, perms, sms in TSf:
print("TS: '%s' | Perm Idx: %s | Sms Idx: %s" % (tss, perms, sms))
return TSf
def _calc_ts_complexity(tss, pers):
""" Calculates the complexity of a TS schema
Complexity = (Number of Schemas + Number of Permutable Symbols + Length of each Permutable Symbol)
"""
return len(tss) + sum([len(per) for ts, per in zip(tss, pers)])
def _check_schema_within_schema(la, lb, dir=None, verbose=False):
""" Check is a Two-Symbol schemata is covered by another.
This is used to simplify the number of TS schematas returned.
The arguments for this function are generated by `_expand_ts_logic`.
Args:
tsa (list) : A list of :math:`F'` schematas that a Two-Symbol :math:`F''` schemata can cover.
tsb (list) : A list of :math:`F'` schematas that a Two-Symbol :math:`F''` schemata can cover.
dir (string) : The direction to check, either ``a`` or ``b`` is in the other.
Defaults to both directions.
"""
a_in_b, b_in_a = None, None
#
if dir != 'b':
a_in_b = all([(xa in lb) for xa in la])
if verbose:
print('%s in %s : %s' % (la, lb, a_in_b))
if dir != 'a':
b_in_a = all([(xb in la) for xb in lb])
if verbose:
print('%s in %s : %s' % (lb, la, b_in_a))
#
return a_in_b, b_in_a
def _expand_ts_logic(two_symbols, permut_indexes):
""" Expands the Two-Symbol logic to all possible prime-implicants variations being covered.
Args:
two_symbols (list) : Two-Symbol schematas list-of-lists.
Returns:
(list) : a list of :math:`F'` covered by this Two-Symbol.
"""
# If receiving a binary string, convert to list of lists
if isinstance(two_symbols, str):
two_symbols = [list(two_symbols)]
# Queue
Q = deque()
Q.extend(two_symbols)
logics = []
#
while Q:
implicant = np.array(Q.pop())
for idxs in permut_indexes:
# Permutation of all possible combinations of the values that are permutable.
for vals in itertools.permutations(implicant[idxs], len(idxs)):
# Generate a new schema
_implicant = copy.copy(implicant)
_implicant[idxs] = vals
# Insert to list of logics if not already there
if not(_implicant.tolist() in logics):
logics.append(_implicant.tolist())
Q.append(_implicant.tolist())
return logics
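# Minimal usage sketch (inputs are illustrative, not from the original module):
# expanding one schema '012' with positions 0 and 1 marked permutable yields the
# schema itself plus the swapped variant:
#   _expand_ts_logic([['0', '1', '2']], [[0, 1]])
#   -> [['0', '1', '2'], ['1', '0', '2']]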
def _check_schemata_permutations_v2(schematas, perm_groups, verbose=False, verbose_level=0):
""" Checks if the permutations are possible
Note:
Not sure if this is really needed.
"""
if verbose and verbose_level > 20:
print("-- Check Schemata Permutations (v2) : g(H',L) --")
allowed_perm_groups = []
all_indices = set([i_index for x_group in perm_groups for i_index in x_group])
for x_group in perm_groups:
sofar = []
for i_index in range(len(x_group) - 1):
x_index = x_group[i_index]
small_group = [x_index]
if not (x_index in sofar):
sofar.append(x_index)
for y_index in x_group[(i_index + 1)::]:
if (not(y_index in sofar)) and _can_swap_v2(schematas[:, [x_index, y_index]], verbose=verbose, verbose_level=verbose_level):
small_group.append(y_index)
sofar.append(y_index)
if len(small_group) > 1:
allowed_perm_groups.append(small_group)
if verbose and verbose_level > 30:
print('> allowed_perm_groups', allowed_perm_groups)
if set([i_index for x_group in allowed_perm_groups for i_index in x_group]) == all_indices:
return allowed_perm_groups
return None
def _can_swap_v2(schemata_subset, verbose=False, verbose_level=0):
"""Determines if two schemata subsets can be swapped"""
if verbose and verbose_level > 40:
print('> Can Swap?:',)
can_switch = 1
for row in schemata_subset[:, [1, 0]]:
can_switch *= np.any(np.all(schemata_subset == row, axis=1))
if verbose and verbose_level > 40:
print(can_switch)
return can_switch
def _check_col_counts_v3(counts_matrix, verbose=False, verbose_level=0):
""" This function is used to find permutable symbols.
Args:
counts_matrix (numpy.ndarray) : a matrix where rows are inputs and columns are possible input types (0,1 or #)
Returns:
perm_groups (list) : a list of the indexes that can be permuted.
"""
if verbose and verbose_level > 30:
print('-- Check Col Counts (v3) --')
counts = {} # Multi Counts
perm_groups = [] # A list of groups of Permutable Indexes
for i, row in enumerate(counts_matrix, start=0):
# a tuple (hashable) version of the row counts
row_tuple = tuple(row)
if row_tuple in counts:
# we have seen this one before, so add it to the permutation group
counts[row_tuple].append(i)
elif np.count_nonzero(row) >= 2:
# we have not seen this count before, it is not a fixed variable, so create a new entry for it
counts[row_tuple] = [i]
else:
# we will skip fixed variables
pass
# Append non-constants that have permutable positions
for col, idxs in counts.items():
if verbose and verbose_level > 40:
print(col, ':', idxs)
if len(idxs) == 1:
return -1
elif len(idxs) >= 1:
perm_groups.append(idxs)
if verbose and verbose_level > 40:
print('counts:', counts)
print('perm_groups:', perm_groups)
if len(perm_groups):
return perm_groups
else:
return -1
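# Illustrative behaviour (input values assumed): rows 0 and 1 below share the
# same (count_0, count_1, count_2) signature and have at least two non-zero
# entries, so they form a permutable group; row 2 has a single non-zero entry
# (a fixed variable) and is skipped.
#   _check_col_counts_v3(np.array([[1, 1, 0],
#                                  [1, 1, 0],
#                                  [0, 2, 0]]))
#   -> [[0, 1]]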
def _check_identical_cols_count_symbols_v2(counts_matrix, verbose=False, verbose_level=0):
""" This function is used to find same symbol permutables. In practice it is a variance of `_check_cols_symbols_vX`
Args:
counts_matrix (numpy.ndarray) : a matrix where rows are inputs and columns are possible input types (0,1 or #)
Returns:
perm_groups (list) : a list of the indexes that can be permuted
"""
if verbose and verbose_level > 20:
print('-- Check Identical Col Counts (v2) --')
counts = {} # Multi Counts
perm_groups = [] # A list of groups of Permutable Indexes
for i, row in enumerate(counts_matrix, start=0):
# a tuple (hashable) version of the row counts
row = row.tolist()
row_tuple = tuple(row)
if verbose and verbose_level > 30:
print('RC: %s : %s' % (i, row_tuple))
if row_tuple in counts:
# we have seen this one before, so add it to the permutation group
counts[row_tuple].append(i)
else:
# we have not seen this count before, so create a new entry for it
counts[row_tuple] = [i]
# Append non-constants that have permutable positions
for col, idxs in counts.items():
if verbose and verbose_level > 30:
print(col, ':', idxs)
if len(idxs) >= 2:
perm_groups.append(idxs)
if verbose and verbose_level > 30:
print('counts:', counts)
print('sames_groups:', perm_groups)
if len(perm_groups):
return perm_groups
else:
return []
def _count_cols_symbols_v2(pi_matrix=None, verbose=False, verbose_level=0):
""" Given a matrix, where each row is a prime implicant, counts how many 0's, 1's and 2's are found in each column.
Args:
pi_matrix (numpy.ndarray) : a matrix ``n \times k`` of ``n`` prime implicants.
Returns:
counts (numpy.ndarray) : a matrix ``n \times 3`` where the entries are counts.
"""
if verbose and verbose_level > 20:
print(' -- Count Cols (v2) --')
# How many PI?
n = pi_matrix.shape[1]
# Instantiate count matrix
counts = np.zeros((n, 3), dtype=int)
for i, col in enumerate(pi_matrix.T):
# Count how many values | |
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some basic layers."""
import gin
import copy
import numpy as np
import torch
import torch.nn as nn
from alf.initializers import variance_scaling_init
from alf.nest.utils import get_outer_rank
from alf.tensor_specs import TensorSpec
from alf.utils import common
from alf.utils.math_ops import identity
def normalize_along_batch_dims(x, mean, variance, variance_epsilon):
"""Normalizes a tensor by ``mean`` and ``variance``, which are expected to have
the same tensor spec as the inner dims of ``x``.
Args:
x (Tensor): a tensor of (``[D1, D2, ..] + shape``), where ``D1``, ``D2``, ..
are arbitrary leading batch dims (can be empty).
mean (Tensor): a tensor of ``shape``
variance (Tensor): a tensor of ``shape``
variance_epsilon (float): A small float number to avoid dividing by 0.
Returns:
Normalized tensor.
"""
spec = TensorSpec.from_tensor(mean)
assert spec == TensorSpec.from_tensor(variance), \
"The specs of mean and variance must be equal!"
bs = BatchSquash(get_outer_rank(x, spec))
x = bs.flatten(x)
variance_epsilon = torch.as_tensor(variance_epsilon).to(variance.dtype)
inv = torch.rsqrt(variance + variance_epsilon)
x = (x - mean.to(x.dtype)) * inv.to(x.dtype)
x = bs.unflatten(x)
return x
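# Minimal usage sketch (shapes are assumptions, not from this file):
#   x = torch.randn(32, 8, 5)                  # leading batch dims [32, 8]
#   mean, var = torch.zeros(5), torch.ones(5)  # spec matches the inner dims of x
#   y = normalize_along_batch_dims(x, mean, var, variance_epsilon=1e-6)
#   y.shape                                    # -> torch.Size([32, 8, 5])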
class BatchSquash(object):
"""Facilitates flattening and unflattening batch dims of a tensor. Copied
from `tf_agents`.
Exposes a pair of matched flatten and unflatten methods. After flattening
only 1 batch dimension will be left. This facilitates evaluating networks
that expect inputs to have only 1 batch dimension.
"""
def __init__(self, batch_dims):
"""Create two tied ops to flatten and unflatten the front dimensions.
Args:
batch_dims (int): Number of batch dimensions the flatten/unflatten
ops should handle.
Raises:
ValueError: if batch dims is negative.
"""
if batch_dims < 0:
raise ValueError('Batch dims must be non-negative.')
self._batch_dims = batch_dims
self._original_tensor_shape = None
def flatten(self, tensor):
"""Flattens and caches the tensor's batch_dims."""
if self._batch_dims == 1:
return tensor
self._original_tensor_shape = tensor.shape
return torch.reshape(tensor,
(-1, ) + tuple(tensor.shape[self._batch_dims:]))
def unflatten(self, tensor):
"""Unflattens the tensor's batch_dims using the cached shape."""
if self._batch_dims == 1:
return tensor
if self._original_tensor_shape is None:
raise ValueError('Please call flatten before unflatten.')
return torch.reshape(
tensor, (tuple(self._original_tensor_shape[:self._batch_dims]) +
tuple(tensor.shape[1:])))
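# Minimal usage sketch (shapes are assumptions):
#   bs = BatchSquash(batch_dims=2)
#   flat = bs.flatten(torch.zeros(4, 3, 7))    # -> shape (12, 7)
#   bs.unflatten(flat).shape                   # -> (4, 3, 7)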
@gin.configurable
class OneHot(nn.Module):
def __init__(self, num_classes):
super().__init__()
self._num_classes = num_classes
def forward(self, input):
return nn.functional.one_hot(
input, num_classes=self._num_classes).to(torch.float32)
@gin.configurable
class FixedDecodingLayer(nn.Module):
def __init__(self,
input_size,
output_size,
basis_type="rbf",
sigma=1.,
tau=0.5):
"""A layer that uses a set of fixed basis for decoding the inputs.
Args:
input_size (int): the size of input to be decoded, representing the
number of representation coefficients
output_size (int): the size of the decoded output
basis_type (str): the type of basis to be used for decoding
- "poly": polynomial basis using Vandermonde matrix
- "cheb": polynomial basis using Chebyshev polynomials
- "rbf": radial basis functions
- "haar": Haar wavelet basis
sigma (float): the bandwidth parameter used for RBF basis.
If None, a default value of 1. will be used.
tau (float): a factor for weighting the basis exponentially
according to the order (``n``) of the basis, i.e., ``tau**n``
"""
# get the argument list with vals
self._kwargs = copy.deepcopy(locals())
self._kwargs.pop('self')
self._kwargs.pop('__class__')
super(FixedDecodingLayer, self).__init__()
assert input_size > 0, "input_size should be at least one"
assert basis_type in {"poly", "cheb", "rbf", "haar"
}, ("the specified method "
"{} is not supported".format(basis_type))
self._B = nn.Linear(input_size, output_size, bias=False)
def _polyvander_matrix(n, D, tau=tau):
# non-square matrix [n, D + 1]
x = torch.linspace(-1, 1, n)
B = torch.as_tensor(np.polynomial.polynomial.polyvander(x, D))
# weight for encoding the preference to low-frequency basis
exp_factor = torch.arange(D + 1).float()
basis_weight = tau**exp_factor
return B * basis_weight
def _chebvander_matrix(n, D, tau=tau):
# non-square matrix [n, D + 1]
x = np.linspace(-1, 1, n)
B = torch.as_tensor(np.polynomial.chebyshev.chebvander(x, D))
# weight for encoding the preference to low-frequency basis
exp_factor = torch.arange(D + 1).float()
basis_weight = tau**exp_factor
return B * basis_weight
def _rbf_matrix(n, sigma=1.0):
# square matrix [n, n]
x = torch.linspace(-1, 1, n)
B = torch.empty(n, n)
for d in range(n):
B[:, d] = torch.exp(-(x - x[d])**2 / sigma)
return B
def _haar_matrix(n, tau=tau):
# square matrix [n, n]
def _is_power_of_two(x):
return (x & (x - 1)) == 0
# allow only size n to be the power of 2
assert _is_power_of_two(n), "n is required to be the power of 2"
def _get_haar_matrix(n):
if n > 2:
h = _get_haar_matrix(n // 2)
else:
return torch.Tensor([[1, 1], [1, -1]])
def _kron(A, B):
return torch.einsum("ab,cd->acbd", A, B).view(
A.size(0) * B.size(0),
A.size(1) * B.size(1))
# calculate upper haar part
h_n = _kron(h, torch.Tensor([[1], [1]]))
# calculate lower haar part
h_i = torch.sqrt(torch.Tensor([n / 2])) * _kron(
torch.eye(len(h)), torch.Tensor([[1], [-1]]))
# combine both parts
h = torch.cat((h_n, h_i), dim=1)
return h
B = _get_haar_matrix(n) / torch.sqrt(torch.Tensor([n]))
# weight for encoding the preference to low-frequency basis
exp_factor = torch.ceil(torch.log2(torch.arange(n).float() + 1))
basis_weight = tau**exp_factor
return B * basis_weight
if basis_type == "poly":
B = _polyvander_matrix(output_size, input_size - 1)
elif basis_type == "cheb":
B = _chebvander_matrix(output_size, input_size - 1)
elif basis_type == "rbf":
assert input_size == output_size
B = _rbf_matrix(input_size, sigma=sigma)
elif basis_type == "haar":
assert input_size == output_size
B = _haar_matrix(input_size)
# assign the constructed transformation matrix and set it to be non-trainable
self._B.weight.requires_grad = False
self._B.weight.copy_(B)
def forward(self, inputs):
return self._B(inputs)
@property
def weight(self):
return self._B.weight
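# Minimal usage sketch (sizes are assumptions): with the RBF basis the decoding
# matrix is square, so input_size must equal output_size; the basis weights are
# frozen (requires_grad=False) and act as a fixed linear decoder.
#   layer = FixedDecodingLayer(input_size=16, output_size=16, basis_type="rbf")
#   y = layer(torch.randn(8, 16))              # -> shape [8, 16]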
@gin.configurable
class FC(nn.Module):
def __init__(self,
input_size,
output_size,
activation=identity,
use_bias=True,
kernel_initializer=None,
kernel_init_gain=1.0,
bias_init_value=0.0):
"""A fully connected layer that's also responsible for activation and
customized weights initialization. An auto gain calculation might depend
on the activation following the linear layer. Suggest using this wrapper
module instead of ``nn.Linear`` if you really care about weight std after
init.
Args:
input_size (int): input size
output_size (int): output size
activation (torch.nn.functional):
use_bias (bool): whether use bias
kernel_initializer (Callable): initializer for the FC layer kernel.
If none is provided a ``variance_scaling_initializer`` with gain as
``kernel_init_gain`` will be used.
kernel_init_gain (float): a scaling factor (gain) applied to
the std of kernel init distribution. It will be ignored if
``kernel_initializer`` is not None.
bias_init_value (float): a constant
"""
# get the argument list with vals
self._kwargs = copy.deepcopy(locals())
self._kwargs.pop('self')
self._kwargs.pop('__class__')
super(FC, self).__init__()
self._activation = activation
self._linear = nn.Linear(input_size, output_size, bias=use_bias)
self._kernel_initializer = kernel_initializer
self._kernel_init_gain = kernel_init_gain
self._bias_init_value = bias_init_value
self._use_bias = use_bias
self.reset_parameters()
def reset_parameters(self):
if self._kernel_initializer is None:
variance_scaling_init(
self._linear.weight.data,
gain=self._kernel_init_gain,
nonlinearity=self._activation)
else:
self._kernel_initializer(self._linear.weight.data)
if self._use_bias:
nn.init.constant_(self._linear.bias.data, self._bias_init_value)
def forward(self, inputs):
return self._activation(self._linear(inputs))
@property
def weight(self):
return self._linear.weight
@property
def bias(self):
return self._linear.bias
def make_parallel(self, n):
"""Create a ``ParallelFC`` using ``n`` replicas of ``self``.
The initialized layer parameters will be different.
"""
return ParallelFC(n=n, **self._kwargs)
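# Minimal usage sketch (sizes and activation are assumptions):
#   fc = FC(input_size=128, output_size=64, activation=torch.relu_)
#   y = fc(torch.randn(32, 128))               # -> shape [32, 64]
#   pfc = fc.make_parallel(n=8)                # 8 independently initialized replicas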
@gin.configurable
class ParallelFC(nn.Module):
def __init__(self,
input_size,
output_size,
n,
activation=identity,
use_bias=True,
kernel_initializer=None,
kernel_init_gain=1.0,
bias_init_value=0.0):
"""Parallel FC layer.
It is equivalent to ``n`` separate FC layers with the same
``input_size`` and ``output_size``.
Args:
input_size (int): input size
output_size (int): output size
n (int): n independent ``FC`` layers
activation (torch.nn.functional):
use_bias (bool): whether use bias
kernel_initializer (Callable): initializer for the FC layer kernel.
If none is provided a ``variance_scaling_initializer`` with gain
as ``kernel_init_gain`` will be used.
kernel_init_gain (float): a scaling factor (gain) applied to
the std of kernel init distribution. It will be ignored if
``kernel_initializer`` is not None.
bias_init_value (float): a constant
"""
super().__init__()
self._activation = activation
self._weight = nn.Parameter(torch.Tensor(n, output_size, input_size))
if use_bias:
self._bias = nn.Parameter(torch.Tensor(n, output_size))
else:
self._bias = None
for i in range(n):
if kernel_initializer is None:
variance_scaling_init(
self._weight.data[i],
gain=kernel_init_gain,
nonlinearity=self._activation)
else:
kernel_initializer(self._weight.data[i])
if use_bias:
nn.init.constant_(self._bias.data, bias_init_value)
def forward(self, inputs):
"""Forward
Args:
inputs (torch.Tensor): with shape ``[B, n, input_size]`` or ``[B, input_size]``
Returns:
torch.Tensor with shape ``[B, n, output_size]``
"""
n, k, l = self._weight.shape
if inputs.ndim == 2:
assert inputs.shape[1] == l, (
"inputs has wrong shape %s. Expecting (B, %d)" % (inputs.shape,
l))
inputs = inputs.unsqueeze(0).expand(n, *inputs.shape)
elif inputs.ndim == 3:
assert (inputs.shape[1] == n | |
ValueError("Define dictionary parameter: hierarchy")
for identifier, dictionary in hierarchy.items():
# - check:
try: dictionary["path"] = self.file_path.path + dictionary["path"]
except: raise ValueError("Invalid hierarchy item [{} : {}]. Specify the [path].".format(identifier, "?"))
try: dictionary["permission"]
except KeyError: dictionary["permission"] = None
try: dictionary["owner"]
except KeyError: dictionary["owner"] = None
try: dictionary["group"]
except KeyError: dictionary["group"] = None
try: dictionary["directory"]
except KeyError: dictionary["directory"] = False
try: dictionary["sudo"]
except KeyError: dictionary["sudo"] = False
try: dictionary["default_data"]
except KeyError: dictionary["default_data"] = None
try: dictionary["default"]
except KeyError: dictionary["default"] = None
try: dictionary["recursive"]
except KeyError: dictionary["recursive"] = False
# - directory:
if dictionary["directory"]:
file_path = Formats.FilePath(dictionary["path"])
if file_path.exists(sudo=dictionary["sudo"]) == False:
file_path.create(
directory=True,
permission=dictionary["permission"],
group=dictionary["group"],
owner=dictionary["owner"],
sudo=dictionary["sudo"],)
else:
file_path.permission.permission = file_path.permission.get()
_owner_,_group_ = file_path.ownership.get()
file_path.ownership.group = _group_
file_path.ownership.owner = _owner_
#if 'back_up_requests/requests' in file_path.path:
# print("file: {}, owner: {}, group: {}, permission: {}".format(file_path.path, file_path.ownership.owner, file_path.ownership.group, file_path.permission.permission))
checkPermissionOwnership(file_path, dictionary, silent=silent, recursive=dictionary["recursive"])
# - file:
elif dictionary["default_data"] != None:
file = Files.File(path=dictionary["path"])
if file.file_path.exists(sudo=dictionary["sudo"]) == False:
file.file_path.create(
data=dictionary["default_data"],
permission=dictionary["permission"],
group=dictionary["group"],
owner=dictionary["owner"],
sudo=dictionary["sudo"])
else:
file.file_path.permission.permission = file.file_path.permission.get()
_owner_,_group_ = file.file_path.ownership.get()
file.file_path.ownership.group = _group_
file.file_path.ownership.owner = _owner_
checkPermissionOwnership(file.file_path, dictionary, silent=silent)
# - dictionary:
elif dictionary["default"] != None:
file = Files.Dictionary(path=dictionary["path"])
if file.file_path.exists(sudo=dictionary["sudo"]) == False:
file.save(dictionary["default"])
file.file_path.permission.check(
permission=dictionary["permission"],
sudo=dictionary["sudo"])
file.file_path.ownership.check(
group=dictionary["group"],
owner=dictionary["owner"],
sudo=dictionary["sudo"])
else:
file.file_path.permission.permission = file.file_path.permission.get()
_owner_,_group_ = file.file_path.ownership.get()
file.file_path.ownership.group = _group_
file.file_path.ownership.owner = _owner_
checkPermissionOwnership(file.file_path, dictionary, silent=silent)
file.check(default=dictionary["default"], save=True)
else:
raise ValueError("Invalid hierarchy item [{} : {}]. Either [directory] must be enabled, or [default_data / default] must be specified.".format(identifier, dictionary["path"]))
#
# load & save sub paths.
def load(self, path=None, format=str, default=None, sudo=False):
return Files.load(path=self.fullpath(path), format=format, sudo=sudo)
def save(self, path=None, data=None, format=str, sudo=False):
return Files.save(path=self.fullpath(path), data=data, format=format, sudo=sudo)
# returnable functions.
def paths(self,
# get recursively (bool).
recursive=False,
# get files only (bool).
files_only=False,
# get dirs only (bool).
dirs_only=False,
# also get empty dirs (bool).
empty_dirs=True,
# the banned full paths (list).
banned=[],
# the banned names (list).
banned_names=[".DS_Store"],
# the banned base names (list).
banned_basenames=["__pycache__"],
# the allowed extensions (list).
extensions=["*"],
# the path (leave None to use self.path) (str, FilePath).
path=None,
):
if dirs_only and files_only: raise ValueError("Both parameters dirs_only & files_only are True.")
if path == None: path = self.file_path.path
path = str(path)
if not Files.exists(path): return []
if isinstance(extensions, str): extensions = [extensions]
if len(banned) > 0:
l_banned = []
for i in banned:
l_banned.append(gfp.clean(f"{path}/{i}"))
banned = l_banned
paths = []
if recursive:
# does only work with recursive.
for root, dirs, files in os.walk(path):
if not dirs_only:
for name in files:
if name not in banned_names and ("*" in extensions or gfp.extension(name=name) in extensions ):
l_path = gfp.clean(path=f"{root}/{name}")
l_banned = False
for i in banned_basenames:
if f"/{i}/" in l_path: l_banned = True ; break
if l_path not in banned and not l_banned and l_path+"/" not in banned:
paths.append(l_path)
if not files_only:
for name in dirs:
if name not in banned_names and (dirs_only or "*" in extensions or "dir" in extensions ):
l_path = gfp.clean(path=f"{root}/{name}/")
l_banned = False
for i in banned_basenames:
if f"/{i}/" in l_path: l_banned = True ; break
if l_path not in banned and not l_banned and l_path+"/" not in banned:
paths.append(l_path)
if recursive:
paths += self.paths(recursive=recursive, path=l_path, dirs_only=dirs_only, files_only=files_only, banned=banned, banned_names=banned_names, empty_dirs=empty_dirs)
else:
for name in os.listdir(path):
l_path = gfp.clean(path=f"{path}/{name}")
if not dirs_only and not Files.directory(l_path):
if name not in banned_names and ("*" in extensions or gfp.extension(name=name) in extensions ):
l_banned = False
for i in banned_basenames:
if f"/{i}/" in l_path: l_banned = True ; break
if l_path not in banned and not l_banned and l_path+"/" not in banned:
paths.append(l_path)
if not files_only and Files.directory(l_path):
l_path += "/"
if name not in banned_names and (dirs_only or "*" in extensions or "dir" in extensions ):
l_banned = False
for i in banned_basenames:
if f"/{i}/" in l_path: l_banned = True ; break
if l_path not in banned and not l_banned and l_path+"/" not in banned:
paths.append(l_path)
return paths
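# Usage sketch (arguments are illustrative):
#   directory.paths(recursive=True, files_only=True, extensions=["py"])
#   -> ["/.../module.py", "/.../tests/test_module.py", ...]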
def names(self,
# get recursively (bool).
recursive=False,
# get files only (bool).
files_only=False,
# get dirs only (bool).
dirs_only=False,
# also get empty dirs (bool).
empty_dirs=True,
# remove the extension names (bool).
remove_extensions=False,
# the banned full paths (list).
banned=[],
# the banned names (list).
banned_names=[".DS_Store"],
# the banned base names (list).
banned_basenames=["__pycache__"],
# the allowed extensions (list).
extensions=["*"],
# the path (leave None to use self.path) (str, FilePath).
path=None,
):
names = []
for _path_ in self.paths(dirs_only=dirs_only, files_only=files_only, empty_dirs=empty_dirs, recursive=recursive, path=path, banned=banned, banned_names=banned_names, extensions=extensions):
if remove_extensions:
name = gfp.name(path=_path_)
names.append(name[:-len(gfp.extension(name=name))])
else:
names.append(gfp.name(path=_path_))
return names
def oldest(self):
files = []
for i in os.listdir(self.file_path.path):
if i not in [".DS_Store"]:
path = f'{self.file_path.path}/{i}'.replace("//",'/')
files.append(path)
if len(files) == 0: return False
return min(files, key=os.path.getctime) # oldest is min (this is not a code error)
def newest(self):
files = []
for i in os.listdir(self.file_path.path):
if i not in [".DS_Store"]:
path = f'{self.file_path.path}/{i}'.replace("//",'/')
files.append(path)
if len(files) == 0: return False
return max(files, key=os.path.getctime) # newest is max (this is not a code error)
def random(self):
files = []
for i in os.listdir(self.file_path.path):
if i not in [".DS_Store"]:
path = f'{self.file_path.path}/{i}'.replace("//",'/')
files.append(path)
if len(files) == 0: return False
return files[random.randrange(0, len(files))]
def generate(self, length=24, type="/"):
path, paths = None, self.paths()
for x in range(1000):
path = self.join(utils.generate.shell_string(length=length), type)
if path not in paths:
break
if path == None: __error__("Failed to generate a new random path inside directory [{}].".format(self.file_path.path))
return path
def structured_join(self, name, type="", structure="alphabetical", create_base=False, sudo=False, owner=None, group=None, permission=None):
if type not in ["/", ""]:
type = "."+type
if structure == "alphabetical":
alphabetical = None
try: alphabetical = name[0].upper()
except: alphabetical = "SPECIAL"
if str(alphabetical) not in ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Z","0","1","2","3","4","5","6","7","8","9"]: aplhabetical = "SPECIAL"
base = self.file_path.path + "/" + alphabetical + "/"
if create_base and os.path.exists(base) == False:
self.create(path=base, sudo=sudo, owner=owner, group=group, permission=permission)
alph_dir = base + name + type
return alph_dir
else: raise ValueError("Invalid usage, parameter structure [{}], valid options: {}".format(structure, ["alphabetical"]))
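# Usage sketch (names are illustrative): files are grouped under an alphabetical
# sub directory derived from the first character of the name.
#   directory.structured_join("alice", type="json", create_base=True)
#   -> "<directory path>/A/alice.json"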
def contains(self, name=None, type="/", recursive=False):
return self.join(name, type) in self.paths(recursive=recursive)
#
def subpath(self, fullpath):
return self.fp.clean(path=fullpath.replace(self.fp.path, ""), remove_double_slash=True)
def fullpath(self, subpath):
return self.fp.clean(path=f"{self.fp.path}/{subpath}", remove_double_slash=True)
# set the icon.
def set_icon(self,
# the path to the .png / .jpg icon.
icon=None,
# the directory path (leave None to use self.fp.path).
path=None,
):
if icon == None: raise Exceptions.InvalidUsage("Define parameter: icon.")
if path == None: path = self.fp.path
if OS in ["osx", "macos"]:
utils.__execute_script__(f"""
#!/bin/bash
# settings.
icon="{icon}"
dest="{path}"
# check inputs
if [ ! -f $icon ]; then
echo "ERROR: File $1 does not exists"
exit 1
elif [[ ! $icon =~ .*\.(png|PNG|jpg|JPG) ]]; then
echo "ERROR: Icon must be a .png|.jpg file"
exit 1
elif [ -f $dest ]; then
folder=false
elif [ -d $dest ]; then
folder=true
else
echo 'ERROR: File|Folder destination does not exists'
exit 1
fi
# create icns icon
sips -i $icon > /dev/null
DeRez -only icns $icon > /tmp/tmpicns.rsrc
# set icon
if [ "$folder" = true ]; then
Rez -append /tmp/tmpicns.rsrc -o $dest$'/Icon\r'
SetFile -a C $dest
SetFile -a V $dest$'/Icon\r'
else
Rez -append /tmp/tmpicns.rsrc -o $dest
SetFile -a C $dest
fi
# clean up
rm /tmp/tmpicns.rsrc
exit 0
""")
else:
raise OSError("Unsupported operating system.")
# index the content.
def index(self,
# the wanted options.
metrics=[],
options=["size", "mtime", "content", "name", "basename", "extension", "mount", "directory"],
# optional path (leave None to use self.path).
path=None,
):
def process(path):
info = {}
if "mtime" in metrics:
info["mtime"] = gfp.mtime(path=path, format="seconds")
if "size" in metrics:
info["size"] = gfp.size(path=path, format=int)
directory = None
if "directory" in metcics:
directory = info["directory"] = Files.directory(str(path))
if "content" in metrics:
if directory == None: raise Exceptions.InvalidUsage("Metric [directory] is required when obtaining metric [content].")
if not directory:
info["content"] = Files.load(path)
else:
info["content"] = None
if "mount" in metrics:
info["mount"] = os.path.ismount(str(path))
if "name" in metrics:
info["name"] = gfp.name(path=path)
if "extension" in metrics:
info["name"] = gfp.extension(path=path)
if "basename" in metrics:
info["basename"] = gfp.basename(path=path)
return info
#
if path == None: path = self.path
if metrics == []:
raise Exceptions.InvalidUsage(f'No metrics are specified, metric options: [{Array(options).string(joiner=" ")}].')
for i in metrics:
if i not in options:
raise Exceptions.InvalidUsage(f'Metric [{i}] is not a valid metric option, options: [{Array(options).string(joiner=" ")}].')
indexed, dir, ids = Dictionary(path=False, dictionary={}), Files.Directory(path=path), []
for _path_ in dir.paths(recursive=True, files_only=True, banned=[gfp.clean(f"{path}/Icon\r")], banned_names=[".DS_Store", "__pycache__"]):
if _path_ not in ids and "/__pycache__/" not in _path_ and "/.DS_Store" not in _path_:
indexed[_path_] = process(_path_)
ids.append(_path_)
for _path_ in dir.paths(recursive=True, dirs_only=True, banned=[gfp.clean(f"{path}/Icon\r")], banned_names=[".DS_Store", "__pycache__"]):
if _path_ not in ids and "/__pycache__/" not in _path_ and "/.DS_Store" not in _path_:
indexed[_path_] = process(_path_)
ids.append(_path_)
return indexed.sort(alphabetical=True)
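# Usage sketch (metrics and values are illustrative): index all nested files and
# directories and collect their size and modification time.
#   directory.index(metrics=["size", "mtime"])
#   -> {"/.../file.txt": {"size": 512, "mtime": 1589000000.0}, ...}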
# open for desktop.
def open(self, path=None, sudo=False):
if path == None: path = self.fp.path
if sudo: sudo = "sudo "
else: sudo = ""
if OS in ["macos"]:
os.system(f"{sudo}open {path}")
elif OS in ["linux"]:
os.system(f"{sudo}nautulis {path}")
else: raise Exceptions.InvalidOperatingSystem(f"Unsupported operating system [{OS}].")
# return references of each file that includes one of the matches.
def find(self, matches:list, path=None, recursive=False, log_level=0):
if path == None: path = self.path
gfp = Formats.FilePath("")
c, references = 0, {}
for string in matches:
if not os.path.exists(path):
raise ValueError(f"Path {path} does not exist.")
elif not Files.directory(path):
raise ValueError(f"Path {path} is not a directory.")
for i_path in self.paths(recursive=recursive, files_only=True, banned_names=[".DS_Store", ".git"], path=path):
data = None
try:
data = Files.load(i_path)
except:
try:
data = f"{Files.load(i_path, format=bytes)}"
except: data = None
if data != None and string in data:
if log_level >= 0:
print("")
print(f"{i_path}:")
lines, linecount = data.split("\n"), 0
for _ in lines:
if string in lines[linecount]:
try: before = lines[linecount-1]
except: before = None
try: after = lines[linecount+1]
except: after = None
if log_level >= 0:
if before != None: print(" * "+before)
print(" * "+lines[linecount])
if after != None: print(" * "+after)
references[i_path] = lines[linecount]
linecount += 1
c += 1
if log_level >= 0 and c > 0: print("")
return references
# replace str within all files.
def replace(self, replacements:list, path=None, recursive=False, log_level=0):
if path == None: path = self.path
gfp = Formats.FilePath("")
c, updates = 0, []
for from_, to in replacements:
if not os.path.exists(path):
raise ValueError(f"Path {path} does not exist.")
elif not Files.directory(path):
raise ValueError(f"Path {path} is not a directory.")
for path in self.paths(recursive=recursive, | |
"""A library to control a RobertSonics WavTrigger through a serial port
"""
from __future__ import absolute_import, division, print_function
try:
from os import errno
except ImportError:
import errno
import serial
import struct
__version__ = '0.1.2'
__author__ = '<NAME>'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 <NAME>'
#Constants for the commands a wavtrigger understands
# Reading data back from a WavTrigger
# Firmware version
_WT_GET_VERSION = bytearray([0xF0,0xAA,0x05,0x01,0x55])
# Number of polyphonic voices and number of tracks on sd-card
_WT_GET_SYS_INFO = bytearray([0xF0,0xAA,0x05,0x02,0x55])
# List of currently playing tracks
_WT_GET_STATUS = bytearray([0xF0,0xAA,0x05,0x07,0x55])
# Timeout when waiting for the data from the Get-Status command
_WT_GET_STATUS_TIMEOUT = 0.25
# Playing individual tracks
_WT_TRACK_SOLO = bytearray([0xF0,0xAA,0x08,0x03,0x00,0x00,0x00,0x55])
_WT_TRACK_PLAY = bytearray([0xF0,0xAA,0x08,0x03,0x01,0x00,0x00,0x55])
_WT_TRACK_PAUSE = bytearray([0xF0,0xAA,0x08,0x03,0x02,0x00,0x00,0x55])
_WT_TRACK_RESUME = bytearray([0xF0,0xAA,0x08,0x03,0x03,0x00,0x00,0x55])
_WT_TRACK_STOP = bytearray([0xF0,0xAA,0x08,0x03,0x04,0x00,0x00,0x55])
_WT_TRACK_LOOP_ON = bytearray([0xF0,0xAA,0x08,0x03,0x05,0x00,0x00,0x55])
_WT_TRACK_LOOP_OFF = bytearray([0xF0,0xAA,0x08,0x03,0x06,0x00,0x00,0x55])
_WT_TRACK_LOAD = bytearray([0xF0,0xAA,0x08,0x03,0x07,0x00,0x00,0x55])
# Stopping and resuming several tracks at once
_WT_STOP_ALL = bytearray([0xF0,0xAA,0x05,0x04,0x55])
_WT_RESUME_ALL = bytearray([0xF0,0xAA,0x05,0x0B,0x55])
# Mixer settings and fader
_WT_VOLUME = bytearray([0xF0,0xAA,0x07,0x05,0x00,0x00,0x55])
_WT_TRACK_VOLUME = bytearray([0xF0,0xAA,0x09,0x08,0x00,0x00,0x00,0x00,0x55])
_WT_FADE = bytearray([0xF0,0xAA,0x0C,0x0A,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x55])
# Pitch bending
_WT_SAMPLERATE = bytearray([0xF0,0xAA,0x07,0x0C,0x00,0x00,0x55])
# Switching the Power amp on or off (not implemented!)
_WT_AMP_POWER = bytearray([0xF0,0xAA,0x06,0x09,0x00,0x55])
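# All frames above follow the same layout: 0xF0 0xAA <length> <command>
# <payload bytes ...> 0x55. The zeroed payload positions are filled in by the
# methods below (track numbers, gains, fade times) right before the frame is
# written to the serial port; the exact byte packing lives in the private
# helpers (e.g. _intToLsb, _setTrackForCommand), which are not shown here.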
class WavTrigger(object):
"""A controller for a RobertSonics WavTrigger
"""
def __init__(self,device, baud=57600, timeout=5.0):
"""Open a serial port to the device and read the
hardware version and info from the WavTrigger.
:param device: The serial port where the WavTrigger is listening.
:type device: str
:param baud: The baudrate to be used on the port. The value must match
the baudrate set in the init file of the WavTrigger. The default
value (57600) seems to be fast enough for all purposes
:type baud: int
:param timeout: A timeout for reading and writing on the port.
The default (5.0 seconds) is plenty. If this limit is reached
you can be quite sure to have lost the connection.
:type timeout: float
"""
self._wt=serial.Serial(port=device, baudrate=baud)
self._wt.timeout=timeout
if self._wt.isOpen():
self._version=self._getVersion()
self._voices,self._tracks=self._getSysInfo()
def close(self):
"""Closes the port to the WavTrigger. Does not stop playing tracks.
"""
self._wt.close()
def isOpen(self):
"""Test if a serial connection to the WavTrigger is established.
:returns: bool -- True if the device is open, False otherwise
"""
return self._wt.isOpen()
@property
def version(self):
"""Get the version string of the WavTrigger firmeware
:returns: str -- A string with the firmware version that runs on the WavTrigger
"""
return self._version
@property
def voices(self):
"""Get the number of polyphonic voices the WavTrigger can play simultanously.
:returns: int -- The number of voices that can be played simultanously
"""
return self._voices
@property
def tracks(self):
"""Get the number of tracks stored on SD-Card of the WavTrigger.
:returns: int -- The total number of tracks the WavTrigger found on the SD-Card.
"""
return self._tracks
def play(self,track):
"""Play a track
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
t=self._setTrackForCommand(_WT_TRACK_PLAY,track)
self._wt.write(t)
def solo(self,track):
"""Play a track solo. Stops all currently playing tracks
and starts the solo track.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
t=self._setTrackForCommand(_WT_TRACK_SOLO,track)
self._wt.write(t)
def stop(self,track):
"""Stop a playing track.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
t=self._setTrackForCommand(_WT_TRACK_STOP,track)
self._wt.write(t)
def pause(self,track):
"""Pause a track. Stops a playing track until
'resume' is called for the track.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
t=self._setTrackForCommand(_WT_TRACK_PAUSE,track)
self._wt.write(t)
def resume(self,track):
"""Resume playing a track that has been paused previously.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
t=self._setTrackForCommand(_WT_TRACK_RESUME,track)
self._wt.write(t)
def load(self,track):
"""Load a track into the memory of the WavTrigger and pause it.
The track can than be played using the :meth:`resume` or :meth:`resumeAll` commands
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
t=self._setTrackForCommand(_WT_TRACK_LOAD,track)
self._wt.write(t)
def loop(self,track):
"""Set loop flag for a track. When the track is started it is played
in a loop until it is stopped. But stopping it does not clear the loop flag.
If the track is started again, it will still loop. Use :meth:`unLoop` to clear
the loop flag
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
self._wt.write(self._setTrackForCommand(_WT_TRACK_LOOP_ON,track))
def unLoop(self,track):
"""Clear the loop flag for a track. see :meth:`loop`
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
"""
if self._isValidTrackNumber(track):
self._wt.write(self._setTrackForCommand(_WT_TRACK_LOOP_OFF,track))
def stopAll(self):
"""Stop all playing tracks.
"""
self._wt.write(_WT_STOP_ALL)
def resumeAll(self):
"""Restart all resumed tracks.
"""
self._wt.write(_WT_RESUME_ALL)
def masterGain(self,gain):
"""
Sets the gain for the WavTrigger output.
:param gain: Gain for the WavTrigger output.
The valid range for the gain argument is -70..+10
:type gain: int
"""
if gain<-70 or gain>10:
raise ValueError('Gain argument range is from -70 to +10')
g=_WT_VOLUME
g[4],g[5]=self._intToLsb(gain)
self._wt.write(g)
def trackGain(self, track, gain):
""" Set the gain for a specific track.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
:param gain: Gain for the WavTrigger output.
The valid range for the gain argument is -70..+10
:type gain: int
"""
if gain<-70 or gain>10:
raise ValueError('Gain argument range is from -70 to +10')
g=_WT_TRACK_VOLUME
g[4],g[5]=self._intToLsb(track)
g[6],g[7]=self._intToLsb(gain)
self._wt.write(g)
def masterVolume(self,volume):
"""Set the volume for the WavTrigger output. This method never
amplifies the signal as the :meth:`masterGain` does when called
with gain values > 0. This prevents distortion in the output signal.
Also most people are used to volume ranges from zero to 100 rather than
a gain value in dB.
:param volume: Volume for the WavTrigger output.
The valid range for the volume argument is 0..100
:type volume: int
"""
vol=_WT_VOLUME
vol[4],vol[5]=self._intToLsb(self._volumeToDb(volume))
self._wt.write(vol)
def trackVolume(self,track,volume):
"""Set the volume for a track. This method never
amplifies the track signal as the :meth:`trackGain` does when called
with gain values > 0. This prevents distortion in the output signal.
Also most people are used to volume ranges from zero to 100 rather than
a gain value in dB.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
:param volume: Volume for the track.
The valid range for the volume argument is 0..100
:type volume: int
"""
tvol=_WT_TRACK_VOLUME
tvol[4],tvol[5]=self._intToLsb(track)
tvol[6],tvol[7]=self._intToLsb(self._volumeToDb(volume))
self._wt.write(tvol)
def pitch(self,offset):
"""Set an offset for the samplerate that the WavTrigger uses.
A negative offset lowers the tone, a positive offset raises the tone
value.
:param offset: Offset to the samplerate.
The range of valid offset argument values is -32767..+32767
:type offset: int
"""
if offset > 32767:
offset = 32767
if offset < -32767:
offset = -32767
pitch=_WT_SAMPLERATE
pitch[4],pitch[5]=self._intToLsb(offset)
self._wt.write(pitch)
def fade(self,track,volume,time):
"""Fade the track volume from the current volume level to
a lower or higher volume
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
:param volume: The target volume for the track.
The valid range for the volume argument is 0..100
:type volume: int
:param time: The time in milliseconds for the fade from the current
to the target level
:type time: int
"""
f=_WT_FADE
f[4],f[5]=self._intToLsb(track)
f[6],f[7]=self._intToLsb(self._volumeToDb(volume))
f[8],f[9]=self._intToLsb(time)
f[10]=0x00
self._wt.write(f)
def fadeOut(self,track, time):
"""Fade the track volume from the current volume level to zero,
then stop the track.
:param track: Number of the track.
The range of valid tracknumbers is 1..999
:type track: int
:param time: The time in milliseconds for the fade out from the current
to silence
:type time: int
"""
f=_WT_FADE
f[4],f[5]=self._intToLsb(track)
f[6],f[7]=self._intToLsb(self._volumeToDb(0))
f[8],f[9]=self._intToLsb(time)
f[10]=0x01
self._wt.write(f)
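# Minimal usage sketch (device path is an assumption):
#   wt = WavTrigger('/dev/ttyUSB0')
#   wt.masterVolume(80)       # 0..100, mapped internally to a dB gain
#   wt.play(12)               # start track 12
#   wt.fadeOut(12, 2000)      # fade track 12 to silence over 2 s, then stop it
#   wt.close()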
def playing(self):
"""
Get a list of the currently playing tracks on the WavTrigger.
:returns: list -- A list with the track numbers currently playing.
If no tracks are playing the empty list is returned.
If there is a problem reading the return value from | |
= {mention_b[ID]}
try:
mention_b[self.INCOMPATIBLES].add(mention_a[ID])
except KeyError:
mention_b[self.INCOMPATIBLES] = {mention_a[ID]}
def _merge(self, entity_a, entity_b):
""" Merge two entities into new one.
:param entity_a: a entity to merge
:param entity_b: a entity to merge
"""
# Add the new mentions to first cluster
entity = list(sorted(
entity_a + entity_b, key=lambda x: x[SPAN],))
incompatibles = set()
for mention in entity:
incompatibles.update(mention.get(self.INCOMPATIBLES, set()))
idx = entity[0][SPAN]
for mention in entity:
mention["entity"] = (idx, entity)
mention[self.INCOMPATIBLES] = incompatibles
return idx, entity
@staticmethod
def entity_representative_mention(entity):
""" Get the most representative mention of the entity.
:param entity: The entity of which representative mention is fetched.
"""
for mention in entity:
if mention.get(MENTION) == PROPER_MENTION:
return mention
for mention in entity:
if mention.get(MENTION) == NOMINAL_MENTION:
return mention
for mention in entity:
if mention.get(MENTION) == PRONOUN_MENTION:
return mention
return entity[0]
def entity_property(self, entity, property_name):
""" Get a combined property of the values of all mentions of the entity
:param property_name: The name of the property to fetch.
:param entity: The entity of which property is fetched.
"""
combined_property = set(
(mention.get(property_name, UNKNOWN) for mention in entity))
if len(combined_property) > 1:
combined_property = combined_property.difference(
self.UNKNOWN_VALUES)
if len(combined_property) == 0:
combined_property.add(UNKNOWN)
return combined_property
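# Illustrative behaviour (values assumed): if the mentions of an entity carry
# the GENDER values {"male", UNKNOWN}, the unknown members are discarded and the
# combined property is {"male"}; if every mention is unknown, {UNKNOWN} is kept.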
@staticmethod
def entity_ne(entity):
""" Get a combined NE of the values of all mentions of the entity.
Other and no NER tags are cleared. If no NE tag is found, an empty set is
returned.
:param entity: The entity of which NE is fetched.
"""
combined_property = set(
(mention.get(NER, None) for mention in entity))
combined_property = list(filter(
lambda x: ner_tags.mention_ner(x), combined_property))
if len(combined_property) == 0:
return set()
return set(combined_property)
def narrative_you(self, mention):
"""The mention is second person(YOU) or the narrator(PER0) in an article.
:param mention: The mention to check.
"""
return \
mention[self.graph_builder.doc_type] == \
self.graph_builder.doc_article and\
mention.get(SPEAKER, False) == "PER0" and \
mention.get(PERSON) == SECOND_PERSON
@staticmethod
def is_pronoun(mention):
""" The mentions is a pronoun mention?
:param mention: The mention to check.
"""
return (mention.get(MENTION) == PRONOUN_MENTION) or pronouns.all(mention[FORM])
@staticmethod
def is_undefined(mention):
""" The mentions is an undefined mention?
:param mention: The mention to check.
"""
return mention[STARTED_BY_INDEFINITE_PRONOUN] or mention[STARTED_BY_INDEFINITE_ARTICLE]
@staticmethod
def is_location(mention):
""" The mentions is a location?
:param mention: The mention to check.
"""
return ner_tags.location(mention.get(NER))
def agree_attributes(self, entity, candidate_entity):
""" All attributes are compatible. Its mean the attributes of each are
a subset one of the another.
:param entity: Entity of the mention
:param candidate_entity: Entity of the candidate
:return: True or False
"""
candidate_gender = self.entity_property(candidate_entity, GENDER)
entity_gender = self.entity_property(entity, GENDER)
if not (self.UNKNOWN_VALUES.intersection(entity_gender) or
self.UNKNOWN_VALUES.intersection(candidate_gender)):
if candidate_gender.difference(entity_gender) \
and entity_gender.difference(candidate_gender):
self.logger.debug(
"Gender disagree %s %s",
entity_gender, candidate_gender)
return False
candidate_number = self.entity_property(candidate_entity, NUMBER)
entity_number = self.entity_property(entity, NUMBER)
if not(self.UNKNOWN_VALUES.intersection(entity_number) or
self.UNKNOWN_VALUES.intersection(candidate_number)):
if candidate_number.difference(entity_number) \
and entity_number.difference(candidate_number):
self.logger.debug(
"Number disagree %s %s",
entity_number, candidate_number)
return False
candidate_animacy = self.entity_property(candidate_entity, ANIMACY)
entity_animacy = self.entity_property(entity, ANIMACY)
if not(self.UNKNOWN_VALUES.intersection(entity_animacy) or
self.UNKNOWN_VALUES.intersection(candidate_animacy)):
if candidate_animacy.difference(entity_animacy) \
and entity_animacy.difference(candidate_animacy):
self.logger.debug(
"Animacy disagree %s %s",
entity_animacy, candidate_animacy)
return False
candidate_ner = self.entity_ne(candidate_entity)
entity_ner = self.entity_ne(entity)
if not(entity_ner is None or candidate_ner is None):
if candidate_ner.difference(entity_ner) and \
entity_ner.difference(candidate_ner):
self.logger.debug(
"NER disagree %s %s",
entity_ner, candidate_ner)
return False
return True
def subject_object(self, entity_a, entity_b):
""" Check if entities are linked by any subject-object relation.
:param entity_a: An entity to check
:param entity_b: An entity to check
:return: True or False
"""
if entity_a[0]["doc_type"] != "article":
return False
for mention_a in entity_a:
for mention_b in entity_b:
if self.graph_builder.sentence_distance(
mention_a, mention_b) > 0:
continue
if mention_a.get("subject", False) and \
mention_b.get("object", False) and \
mention_a["subject"] == mention_b["object"]:
return True
if mention_b.get("subject", False) and \
mention_a.get("object", False) and \
mention_b["subject"] == mention_a["object"]:
return True
pass
return False
def i_within_i(self, mention_a, mention_b):
""" Check if the mention and candidate are in a i-within-i
construction.
:param mention_a: a mention
:param mention_b: another mention
"""
if not self.graph_builder.same_sentence(mention_a, mention_b):
return False
# Aren't appositive
if mention_a.get(APPOSITIVE, False) and mention_b.get(APPOSITIVE, False):
return False
# Aren't Relative pronouns
if rules.is_relative_pronoun(self.graph_builder, mention_b, mention_a) or \
rules.is_relative_pronoun(self.graph_builder, mention_a, mention_b):
return False
# One is included in the other
if self.graph_builder.is_inside(mention_a[SPAN], mention_b[SPAN]) \
or self.graph_builder.is_inside(
mention_b[SPAN], mention_a[SPAN]):
return True
return False
def relaxed_form_word(self, mention):
""" Return the words of the mention without the words after the head
word.
:param mention: The mention where the words are extracted.
:return: a list of words.
"""
mention_words = self.graph_builder.get_words(mention)
mention_head = self.graph_builder.get_head_word(mention)
head = False
for index, word in enumerate(mention_words):
word_pos = word[POS]
if word[ID] == mention_head[ID]:
head = True
if head and pos_tags.relative_pronoun(word_pos):
return [word for word in mention_words[:index]]
# TODO CHANGE TO CLAUSE CONNECTORS
if head and word[FORM] == ",":
return [word for word in mention_words[:index]]
return [word for word in mention_words]
def relaxed_form(self, mention):
""" Return the form of the mention without the words after the head
word. The form is lowered and all words are space separated.
:param mention: The mention where the words are extracted.
:return: a string of word forms separated by spaces.
"""
return " ".join(word[FORM] for word in self.relaxed_form_word(mention=mention)).lower()
def same_speaker(self, mention_a, mention_b):
""" Check if mention refer to the same speaker.
:param mention_a: a mention
:param mention_b: another mention
:return type: Bool
"""
speaker_a = mention_a.get(SPEAKER, False)
speaker_b = mention_b.get(SPEAKER, False)
if not(speaker_a and speaker_b):
return False
if speaker_a == speaker_b:
return True
# Two speakers are the same string
if type(speaker_a) == str and\
type(speaker_b) == str and \
speaker_a == speaker_b:
return True
# Speaker A is B head word
if self._check_speaker(speaker_a, mention_b):
return True
# Speaker B is A head word
if self._check_speaker(speaker_b, mention_a):
return True
return False
def _check_speaker(self, speaker, mention):
""" Is the mention a form of the speaker.
:param speaker:
:param mention:
:return:
"""
# the speaker may be a string or another mention
if not (type(speaker) is str):
speaker = speaker[FORM]
mention_head_form = self.graph_builder.get_head_word(mention)[FORM]
if mention_head_form == speaker:
return True
for speaker_token in speaker.split():
if speaker_token == mention_head_form:
return True
return False
def are_speaker_speech(self, speaker, speech):
""" Tho mention are in a speaker speech relation?
:param speaker: The mention that is a speaker
:param speech: The mention that is inside a speech.
:return: True or False
"""
speech_speaker = speech.get(SPEAKER, False)
# TODO check this Only heads??
if type(speech_speaker) is dict:
speaker_words_ids = [
word[ID]
for word in self.graph_builder.get_words(speaker)]
return speech_speaker[ID] in speaker_words_ids
else:
speaker_head_word = rules.get_head_word_form(self.graph_builder, speaker)\
.lower()
for word in speech_speaker.split(" "):
if word.lower() == speaker_head_word:
return True
return False
def incompatible_discourse(self, entity_a, entity_b):
""" Check if two entities have any incompatible mentions between them.
:param entity_a: An entity
:param entity_b: Another entity
:return: Return True if the entities are incompatible.
"""
for mention_a in entity_a:
doc_type = entity_b[0][self.graph_builder.doc_type]
mention_a_person = mention_a.get(PERSON)
for mention_b in entity_b:
mention_b_person = mention_b.get(PERSON)
if (self.are_speaker_speech(
speaker=mention_a, speech=mention_b) or
self.are_speaker_speech(
speaker=mention_b, speech=mention_a)
) and not (
mention_a_person == FIRST_PERSON and
mention_b_person == FIRST_PERSON):
return True
if doc_type == self.graph_builder.doc_article:
continue
distance = abs(mention_a[UTTERANCE] - mention_b[UTTERANCE])
if distance == 1 and \
not self.same_speaker(mention_a, mention_b):
if mention_a_person != mention_b_person:
if mention_b_person == FIRST_PERSON:
return True
if mention_b_person == SECOND_PERSON:
return True
return False
def check_gold(self, mention, candidate):
""" Check if the link is in the gold Standard.
        :param mention: The mention whose link we want to check.
        :param candidate: The candidate of the link.
        :return: True if mention and candidate share a gold entity, False otherwise.
"""
clusters_m = set(m['gold_entity'] for m in self.graph_builder.get_gold_mention_by_span(mention[SPAN]))
clusters_c = set(c['gold_entity'] for c in self.graph_builder.get_gold_mention_by_span(candidate[SPAN]))
return bool(clusters_c and clusters_m and clusters_c.intersection(clusters_m))
def log_mention(self, mention):
""" The function that log the mention and all useful info for this sieve
coreference resolution
:param mention: The mention to show
"""
self.logger.debug("MENTION -%s- %s", mention[FORM], mention[SPAN])
def log_candidate(self, candidate):
""" The function that show the candidate of a link and all the relevant
info for the linking process.
:param candidate:
"""
self.logger.debug("CANDIDATE -%s- %s", candidate[FORM], candidate[SPAN])
def context(self, mention_entity, mention, candidate_entity, candidate):
""" Return a Human readable and sieve specific | |
import numpy as np
import pandas as pd
class Scenario(object):
"""
Model scenario parameters
"""
def __init__(self, *initial_data, **kwargs):
# Set default values
# 16/4/2020 Adjust parameters so that day starts with FTE arrival
# Proportion high priority
self.high_priority = 0.25
# Work arrival
self.samples_per_day = 18600
# List of sample arrival times (hours from start of day)
self.delivery_schedule_name = 'Single'
self.basic_batch_size = 93
# Day and run parameters
# 16/4/2020 Model is designed to run primarily in single days
self.day_duration = 1440
self.run_days = 2
self.warm_up_days = 2
# Breaks for people (high priority job, but does not interrupt work)
# Times from start of FTE day (6am)
self.tea_break_times = [2*60, 16*60, 18*60]
self.meal_break_times = [6.5*60, 14*60, 22*60]
# Spread start of break for people randomly after set start times
self.break_start_spread = 60
# break duration is a uniform distribution between min and max
self.tea_break_duration = [25, 35]
self.meal_break_duration = [45, 60]
# Audit parameters
self.audit_interval = 15
# Resource numbers
self.resource_numbers = {
'human_sample_preprocess_1': 10,
'human_sample_preprocess_2': 10,
'human_sample_preprocess_3': 0,
'human_sample_receipt_1': 1,
'human_sample_receipt_2': 1,
'human_sample_receipt_3': 1,
'human_sample_prep_1': 29,
'human_sample_prep_2': 29,
'human_sample_prep_3': 4,
'human_rna_prep_1': 4,
'human_rna_prep_2': 4,
'human_rna_prep_3': 3,
'human_pcr_1': 4,
'human_pcr_2': 4,
'human_pcr_3': 2,
'human_data_analysis': 10,
'sample_heat_incubator': 8,
'beckman_rna_extraction': 11,
'pcr_plate_stamper': 4,
'pcr_plate_reader': 8,
'sample_prep_automation': 0,
'transfer': 1
}
self.workstation_capacity = {
'workstation_0': 9999,
'workstation_1a': 9,
'workstation_1b_man': 9,
'workstation_1b_auto': 0,
'workstation_1c': 8,
'workstation_2': 11,
'workstation_3': 4,
'workstation_4': 8,
'workstation_5': 999,
'workstation_6': 10,
'transfer': 99
}
# Resource available hours (use hours)
self.resource_shift_hours = {
'human_sample_preprocess_1': (0.00, 9.00),
'human_sample_preprocess_2': (9.01, 18.00),
'human_sample_preprocess_3': (17.0, 24.00),
'human_sample_receipt_1': (0.00, 9.00),
'human_sample_receipt_2': (9.01, 18.0),
'human_sample_receipt_3': (17.0, 24.0),
'human_sample_prep_1': (0.00, 9.00),
'human_sample_prep_2': (9.01, 18.0),
'human_sample_prep_3': (17, 24.0),
'human_rna_prep_1': (0.00, 9.00),
'human_rna_prep_2': (9.01, 18.0),
'human_rna_prep_3': (17.0, 24.0),
'human_pcr_1': (0.0, 9.00),
'human_pcr_2': (9.01, 18.0),
'human_pcr_3': (17.0, 24.0),
'human_data_analysis': (0.0, 24.0),
'sample_heat_incubator': (0.0, 24.0),
'beckman_rna_extraction': (0.0, 24.0),
'pcr_plate_stamper': (0.0, 24.0),
'pcr_plate_reader': (0.0, 24.0),
'sample_prep_automation': (0.0, 24.0),
'transfer': (0.0, 24.0),
'dummy': (0.0, 24.0)
}
# Resource unavailability on any whole day due to breakdown
self.resource_breakdown_unavailability = {
'human_sample_preprocess_1': 0.1,
'human_sample_preprocess_2': 0.1,
'human_sample_preprocess_3': 0.1,
'human_sample_receipt_1': 0.1,
'human_sample_receipt_2': 0.1,
'human_sample_receipt_3': 0.1,
'human_sample_prep_1': 0.1,
'human_sample_prep_2': 0.1,
'human_rna_prep_1': 0,
'human_rna_prep_2': 0,
'human_pcr_1': 0,
'human_pcr_2': 0,
'human_data_analysis': 0,
'sample_heat_incubator': 0,
'beckman_rna_extraction': 0.04,
'pcr_plate_stamper': 0.08,
'pcr_plate_reader': 0.02,
'sample_prep_automation': 0.04,
'transfer': 0,
'dummy': 0
}
# FTE resources (these will take breaks!)
self.fte_resources = [
'human_sample_preprocess_1',
'human_sample_preprocess_2',
'human_sample_preprocess_3',
'human_sample_receipt_1',
'human_sample_receipt_2',
'human_sample_prep_1',
'human_sample_prep_2',
'human_pcr_1',
'human_pcr_2',
'human_rna_prep_1',
'human_rna_prep_2',
'human_data_analysis',
'transfer'
]
# Process duration. Tuple of fixed time, time per entity, and time per
# item in entity. Multi-step automated processes have three sets of
# times (set up, automated, clean down)
self.process_duration = {
'batch_input': ([0, 0, 0],),
'sample_preprocess': ([10, 0, 0],),
'sample_receipt': ([22*1.3, 0, 0],),
'sample_prep_manual': ([22*1.3, 0, 0],),
'sample_prep_auto': ([2.5, 0, 0], [10, 0, 0], [2.5, 0, 0]),
'sample_heat': ([4, 0, 0], [20, 0, 0], [1, 0, 0]),
'pcr_prep': ([5, 0, 0], [3, 0, 0], [5, 0, 0]),
'pcr': ([6.5, 0, 0], [90, 0, 0], [1, 0, 0]),
'rna_extraction': ([5, 0, 0], [114, 0, 0], [2.5, 0, 0]),
'data_analysis': ([10, 0, 0],),
'transfer_1': ([6, 0, 0],)
}
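        # Illustrative reading of the tuples above (a sketch, not original
        # code; times appear to be minutes, given day_duration = 1440):
        # each [fixed, per_entity, per_item] triple is a linear duration model,
        # and multi-phase entries list (set up, automated run, clean down):
        #
        #     setup, auto, clean = self.process_duration['rna_extraction']
        #     # setup[0] = 5, auto[0] = 114, clean[0] = 2.5 (fixed times only,
        #     # since the per-entity and per-item coefficients are all zero)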
# Allow manual sample prep (automated prep will be chosen first if free)
self.allow_manual_sample_prep = True
# Batch sizing for stages (collate for job then re-split)
self.heat_batch_size = 3
self.rna_extraction_batch_size = 3
self.transfer_1_batch_size = 4
# Add a triangular distribution of extra time per process
        # Average extra time will be 1/4 of this
        # (e.g. 0.25 = 6.25% added length on average; 0.76 ~= 19%)
self.additional_time_manual = 0.76
self.additional_time_auto = 0.10
# Range of times new jobs may start
self.process_start_hours = {
'sample_preprocess': (0.0, 24.0),
'sample_receipt': (0.0, 24.0),
'sample_heat': (0.0, 24.0),
'sample_prep': (0.0, 24.0),
'rna_extraction': (0.0, 24.0),
'pcr_prep': (0.0, 24.0),
'pcr': (0.0, 24.0),
'data_analysis': (0.0, 24.0),
'transfer_1': (0.0, 24.0)
}
        # Process priorities (lower number -> higher priority)
self.process_priorities = {
'sample_preprocess': 110,
'sample_receipt': 100,
'sample_prep_manual': 90,
'sample_prep_auto': 80,
'sample_heat': 70,
'rna_extraction': 60,
'pcr_prep': 50,
'pcr': 40,
'data_analysis': 30,
'transfer_1': 20
}
# Process check intervals (if not 1 minute)
# Must be integer multiple of 1
self.process_intervals = {
'transfer_1': 10
}
        # Process resources = tuple of different resources needed, each given
        # as a list of alternatives. Remember to put a comma after a single
        # list to maintain the tuple format! A tuple of two or more elements
        # will require one resource from each tuple element.
self.process_resources = {
'sample_preprocess': {
'process_type': 'manual',
'human_list': (['human_sample_preprocess_1',
'human_sample_preprocess_2',
'human_sample_preprocess_3'],
['tracker_all_jobs_fte'],
['tracker_sample_preprocess_jobs'],
['tracker_sample_preprocess_fte']),
'machine_list': ([],)},
'sample_receipt': {
'process_type': 'manual',
'human_list': (['human_sample_receipt_1',
'human_sample_receipt_2',
'human_sample_receipt_3',
'human_sample_prep_1',
'human_sample_prep_2',
'human_sample_prep_3'],
['human_sample_receipt_1',
'human_sample_receipt_2',
'human_sample_receipt_3',
'human_sample_prep_1',
'human_sample_prep_2',
'human_sample_prep_3'],
['tracker_sample_receipt_fte'],
['tracker_all_jobs_fte'],
['tracker_sample_receipt_jobs']),
'machine_list': ([],)},
'sample_prep_manual': {
'process_type': 'manual',
'human_list': (['human_sample_prep_1',
'human_sample_prep_2',
'human_sample_prep_3'],
['human_sample_prep_1',
'human_sample_prep_2',
'human_sample_prep_3'],
['tracker_all_jobs_fte'],
['tracker_sample_prep_jobs'],
['tracker_sample_prep_fte']),
'machine_list': ([],)},
'sample_prep_auto': {
'process_type': 'auto',
'human_list': (['human_sample_prep_1',
'human_sample_prep_2',
'human_sample_prep_3'],
['human_sample_prep_1',
'human_sample_prep_2',
'human_sample_prep_3'],
['tracker_all_jobs_fte'],
['tracker_sample_prep_jobs'],
['tracker_sample_prep_fte']),
'machine_list': (['sample_prep_automation'],
['tracker_sample_prep_jobs'])},
'sample_heat': {
'process_type': 'auto',
'human_list': (['human_sample_prep_1',
'human_sample_prep_2',
'human_sample_prep_3'],
['tracker_all_jobs_fte'],
['tracker_heat_fte']),
'machine_list': (['sample_heat_incubator'],
['tracker_heat_jobs'])},
'rna_extraction': {
'process_type': 'auto',
'human_list': (['human_rna_prep_1',
'human_rna_prep_2',
'human_rna_prep_3'],
['tracker_all_jobs_fte'],
['tracker_rna_prep_fte']),
'machine_list': (['beckman_rna_extraction'],
['tracker_rna_prep_jobs'])},
'pcr_prep': {
'process_type': 'auto',
'human_list': (['human_pcr_1',
'human_pcr_2',
'human_pcr_3'],
['tracker_all_jobs_fte'],
['tracker_pcr_prep_fte']),
'machine_list': (['pcr_plate_stamper'],
['tracker_pcr_prep_jobs'])},
'pcr': {
'process_type': 'auto',
'human_list': (['human_pcr_1',
'human_pcr_2',
'human_pcr_3'],
['tracker_all_jobs_fte'],
['tracker_pcr_fte']),
'machine_list': (['pcr_plate_reader'],
['tracker_pcr_jobs'])},
'data_analysis': {
'process_type': 'manual',
'human_list': (['human_data_analysis'],
['tracker_all_jobs_fte'],
['tracker_data_analysis_jobs'],
['tracker_data_analysis_fte']),
'machine_list': ([],)},
'transfer_1': {
'process_type': 'manual',
'human_list': (['transfer'],
['tracker_all_jobs_fte'],
['tracker_transfer_fte'],
['tracker_transfer_jobs']),
'machine_list': ([],)},
}
# Workstation (used to limit work in progress)
self.process_workstations = {
'sample_preprocess': ['workstation_6'],
'batch_input': ['workstation_0'],
'sample_receipt': ['workstation_1a'],
'sample_prep_manual': ['workstation_1b_man'],
'sample_prep_auto': ['workstation_1b_auto'],
'sample_heat': ['workstation_1c'],
'rna_extraction': ['workstation_2'],
'pcr_prep': ['workstation_3'],
'pcr': ['workstation_4'],
'data_analysis': ['workstation_5'],
'transfer_1': ['transfer']
}
        # kanban groups: [start process, end process, max samples]
        # (current sample counts are tracked separately in kanban_group_counts)
self.kanban_groups = {
0: ['sample_receipt', 'pcr', 99999999]
}
# Overwrite default values
for dictionary in initial_data:
for key in dictionary:
setattr(self, key, dictionary[key])
for key in kwargs:
setattr(self, key, kwargs[key])
# Load delivery schedule, normalise to 1, and add to parameters
df = pd.read_csv('deliveries.csv', index_col='Hour')
for col in list(df):
total = df[col].sum()
values = df[col] / total
df[col] = values
self.delivery_schedule = df
# Calculations
# Add dummy resources
self.resource_numbers['dummy'] = 9999
self.resource_shift_hours['dummy'] = (0, 24)
self.resource_breakdown_unavailability['dummy'] = 0
# Set arrival batch size and round to nearest basic batch size
self.delivery_times = list(range(24))
self.delivery_batch_sizes = (
self.delivery_schedule[self.delivery_schedule_name] * self.samples_per_day)
self.delivery_batch_sizes = (
np.round(self.delivery_batch_sizes / self.basic_batch_size, 0) *
self.basic_batch_size)
self.delivery_batch_sizes = list(self.delivery_batch_sizes.values)
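        # Worked example of the rounding above (a sketch; the actual split
        # depends on deliveries.csv): if one delivery hour carried half of the
        # 18600 daily samples, its raw size would be 9300 and rounding to the
        # basic batch size gives round(9300 / 93) * 93 = 100 * 93 = 9300.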
# Set warm up and run length
self.audit_warm_up = self.day_duration * self.warm_up_days
self.run_length = self.run_days * self.day_duration + self.audit_warm_up
# Sort priority dictionary by value
self.process_priorities = {key: value for key, value in sorted(
self.process_priorities.items(), key=lambda item: item[1])}
        # Set up kanban group counts and dictionaries for start/end processes
self.kanban_group_counts = dict()
self.kanban_group_max = dict()
self.kanban_start = dict()
self.kanban_end = dict()
# Set up dictionaries based on process
for key in self.process_duration.keys():
self.kanban_start[key] = []
self.kanban_end[key] = []
# Update dictionaries if kanban groups exist
if len(self.kanban_groups) > 0:
# Add process start and ends to dictionaries
for key, value in self.kanban_groups.items():
self.kanban_start[value[0]].append(key)
self.kanban_end[value[1]].append(key)
# Set up kanban group counts
for key, value in self.kanban_groups.items():
self.kanban_group_counts[key] = 0
self.kanban_group_max[key] = value[2]
# Add tracker resources
tracker_resource_numbers = {
'tracker_all_jobs_fte': 1000,
'tracker_data_analysis_fte': 1000,
'tracker_data_analysis_jobs': 1000,
'tracker_heat_fte': 1000,
'tracker_heat_jobs': 1000,
'tracker_pcr_prep_fte': 1000,
'tracker_pcr_prep_jobs': 1000,
'tracker_pcr_fte': 1000,
'tracker_pcr_jobs': 1000,
'tracker_rna_prep_fte': 1000,
'tracker_rna_prep_jobs': 1000,
'tracker_sample_preprocess_jobs': 1000,
'tracker_sample_preprocess_fte': 1000,
'tracker_sample_prep_jobs': 1000,
'tracker_sample_prep_fte': 1000,
'tracker_sample_receipt_fte': 1000,
'tracker_sample_receipt_jobs': 1000,
'tracker_transfer_fte': 1000,
'tracker_transfer_jobs': 1000
}
self.resource_numbers.update(tracker_resource_numbers)
tracker_shifts = {
'tracker_all_jobs_fte': (0.0, 24.0),
'tracker_data_analysis_fte': (0.0, 24.0),
'tracker_data_analysis_jobs': (0.0, 24.0),
'tracker_heat_fte': (0.0, 24.0),
'tracker_heat_jobs': (0.0, 24.0),
'tracker_pcr_prep_fte': (0.0, 24.0),
'tracker_pcr_prep_jobs': (0.0, 24.0),
'tracker_pcr_fte': (0.0, 24.0),
'tracker_pcr_jobs': (0.0, 24.0),
'tracker_rna_prep_fte': (0.0, 24.0),
'tracker_rna_prep_jobs': (0.0, 24.0),
'tracker_sample_preprocess_jobs': (0.0, 24.0),
'tracker_sample_preprocess_fte': (0.0, 24.0),
'tracker_sample_prep_fte': (0.0, 24.0),
'tracker_sample_prep_jobs': (0.0, 24.0),
'tracker_sample_receipt_fte': (0.0, 24.0),
'tracker_sample_receipt_jobs': (0.0, 24.0),
'tracker_transfer_fte': (0.0, 24.0),
'tracker_transfer_jobs': (0.0, 24.0)
}
self.resource_shift_hours.update(tracker_shifts)
tracker_unavailability = {
'tracker_all_jobs_fte': 0,
'tracker_data_analysis_jobs': 0,
'tracker_data_analysis_fte': 0,
'tracker_heat_fte': 0,
'tracker_heat_jobs': 0,
'tracker_pcr_prep_fte': 0,
'tracker_pcr_prep_jobs': 0,
'tracker_pcr_fte': 0,
'tracker_pcr_jobs': 0,
'tracker_rna_prep_fte': 0,
'tracker_rna_prep_jobs': 0,
'tracker_sample_preprocess_jobs': 0,
'tracker_sample_preprocess_fte': 0,
'tracker_sample_prep_fte': 0,
'tracker_sample_prep_jobs': 0,
'tracker_sample_receipt_fte': 0,
'tracker_sample_receipt_jobs': 0,
'tracker_transfer_fte': 0,
            'tracker_transfer_jobs':
# -*- coding:utf-8 -*-
#######################################################################
# Copyright (C) 2016 <NAME> (<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from utils import barrier_warning
import numpy as np
from numpy import log, exp, sqrt
from scipy import stats
from typing import Tuple
class BSMOptionValuation:
"""
    Valuation of European options (calls and puts) in the Black-Scholes-Merton model (incl. dividend)
Attributes
==========
S0: float
initial stock/index level
K: float
strike price
T: float
time to maturity (in year fractions)
r: float
constant risk-free short rate
assume flat term structure
sigma: float
volatility factor in diffusion term
div_yield: float
dividend_yield, in percentage %, default = 0.0%
"""
def __init__(self, S0: float, K: float, T: float, r: float, sigma: float, div_yield: float = 0.0):
assert sigma >= 0, 'volatility cannot be less than zero'
assert S0 >= 0, 'initial stock price cannot be less than zero'
assert T >= 0, 'time to maturity cannot be less than zero'
assert div_yield >= 0, 'dividend yield cannot be less than zero'
self.S0 = float(S0)
self.K = float(K)
self.T = float(T)
self.r = float(r)
self.sigma = float(sigma)
self.div_yield = float(div_yield)
self._d1, self._d2 = self._calculate_d1_d2()
self._d3 = None
self._d4 = None
self._d5 = None
self._d6 = None
self._d7 = None
self._d8 = None
def _calculate_d1_d2(self):
d1 = ((log(self.S0 / self.K) + (self.r - self.div_yield + 0.5 * self.sigma ** 2) * self.T) / (
self.sigma * sqrt(self.T)))
d2 = d1 - self.sigma * sqrt(self.T)
return d1, d2
def call_value(self, observed_put_price: float = None) -> float:
"""
:return: call option value
"""
if observed_put_price is None:
call_value = (self.S0 * exp(-self.div_yield * self.T) * stats.norm.cdf(self._d1, 0.0, 1.0) - self.K * exp(
-self.r * self.T) * stats.norm.cdf(self._d2, 0.0, 1.0))
else:
call_value = observed_put_price + exp(-self.div_yield * self.T) * self.S0 - exp(-self.r * self.T) * self.K
return call_value
def delta(self) -> Tuple[float, float]:
"""
Delta measures the change in the option price for a $1 change in the stock price
:return: delta of the option
"""
delta_call = exp(- self.div_yield * self.T) * stats.norm.cdf(self._d1, 0.0, 1.0)
delta_put = -exp(- self.div_yield * self.T) * stats.norm.cdf(-self._d1, 0.0, 1.0)
return delta_call, delta_put
def gamma(self) -> float:
"""
Gamma measures the change in delta when the stock price changes
:return: gamma of the option
"""
gamma = exp(-self.div_yield * self.T) * stats.norm.pdf(self._d1) / (self.S0 * self.sigma * sqrt(self.T))
return gamma
def theta(self) -> Tuple[float, float]:
"""
        Theta measures the change in the option price with respect to calendar time (t),
holding fixed time to expiration (T).
If time to expiration is measured in years, theta will be the annualized change in the option value.
To obtain a per-day theta, divide by 252.
:return: theta of the option
"""
part1 = self.div_yield * self.S0 * exp(-self.div_yield * self.T) * stats.norm.cdf(self._d1)
        # theta_call = q*S0*e^(-qT)*N(d1) - r*K*e^(-rT)*N(d2) - K*e^(-rT)*n(d2)*sigma/(2*sqrt(T))
        part2 = self.r * self.K * exp(-self.r * self.T) * stats.norm.cdf(self._d2)
part3 = (self.K * exp(-self.r * self.T) * stats.norm.pdf(self._d2) * self.sigma) / (2 * sqrt(self.T))
theta_call = part1 - part2 - part3
theta_put = theta_call + self.r * self.K * exp(-self.r * self.T) - self.div_yield * self.S0 * exp(
-self.div_yield * self.T)
return theta_call, theta_put
def vega(self) -> float:
"""
Vega measures the change in the option price when volatility changes. Some writers also
use the terms lambda or kappa to refer to this measure:
It is common to report vega as the change in the option price per percentage point change
in the volatility. This requires dividing the vega formula above by 100.
:return: vega of option
"""
vega = self.S0 * exp(-self.div_yield * self.T) * stats.norm.pdf(self._d1, 0.0, 1.0) * sqrt(self.T)
return vega
def rho(self) -> Tuple[float, float]:
"""
Rho is the partial derivative of the option price with respect to the interest rate.
These expressions for rho assume a change in r of 1.0. We are typically interested in
evaluating the effect of a change of 0.01 (100 basis points) or 0.0001 (1 basis point). To
report rho as a change per percentage point in the interest rate, divide this measure by 100.
To interpret it as a change per basis point, divide by 10,000.
:return: call_rho, put_rho
"""
call_rho = self.T * self.K * exp(-self.r * self.T) * stats.norm.cdf(self._d2)
put_rho = -self.T * self.K * exp(-self.r * self.T) * stats.norm.cdf(-self._d2)
return call_rho, put_rho
def psi(self) -> Tuple[float, float]:
"""
Psi is the partial derivative of the option price with respect to the continuous dividend yield:
To interpret psi as a price change per percentage point change in the dividend yield, divide
by 100.
:return: call_psi, put_psi
"""
call_psi = - self.T * self.S0 * exp(-self.div_yield * self.T) * stats.norm.cdf(self._d1)
put_psi = self.T * self.S0 * exp(-self.div_yield * self.T) * stats.norm.cdf(-self._d1)
return call_psi, put_psi
def implied_vol(self, observed_call_price: float, num_iterations: int = 1000, tolerance: float = 1e-4) -> float:
"""
Newton-Raphson iterative approach, assuming black_scholes_merton model
:param observed_call_price: call price from the market
:param num_iterations: no. of iteration
:param tolerance: allows to specify the tolerance level
:return: implied volatility given the observed option price
"""
sigma_old = self.sigma
for _ in range(num_iterations):
self._d1, self._d2 = self._calculate_d1_d2()
_cal_val = self.call_value()
option_price_diff = _cal_val - observed_call_price
_vega = self.vega()
sigma_new = self.sigma - option_price_diff / (_vega + 1e-10)
if abs(sigma_new - self.sigma) <= tolerance:
break
self.sigma = sigma_new
implied_vol = self.sigma
# restore back the status
self.sigma = sigma_old
self._d1, self._d2 = self._calculate_d1_d2()
return implied_vol
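    # Illustrative round trip (a sketch, not original code): price a call with
    # a known sigma, then recover that sigma from the observed price.
    #
    #     bsm = BSMOptionValuation(S0=100, K=100, T=1.0, r=0.05, sigma=0.2)
    #     observed = bsm.call_value()
    #     bsm.implied_vol(observed_call_price=observed)  # ~0.2 within tolerance
    #
    # The instance's sigma and d1/d2 are restored afterwards, so it can keep
    # being used with its original volatility.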
def put_value(self, observed_call_price: float = None) -> float:
"""
Use put call parity (incl. continuous dividend) to calculate the put option value
:return: put option value
"""
if observed_call_price is None:
put_value = self.call_value() + exp(-self.r * self.T) * self.K - exp(-self.div_yield * self.T) * self.S0
else:
put_value = observed_call_price + exp(-self.r * self.T) * self.K - exp(-self.div_yield * self.T) * self.S0
return put_value
def lookback_BSM(self, option_type: str, max_share_price: float, min_share_price: float) -> float:
"""
A European lookback call at maturity pays St - min(St).
A European lookback put at maturity pays max(St) - St.
min(St) is the minimum price over the life of the option
max(St) is the maximum price over the life of the option
<NAME>: Derivatives Markets (3rd. edition)
Chapter 23: Exotic Option II
Formula 23.47 (Exercise)
:param option_type: call, put
:param max_share_price: maximum share price
:param min_share_price: minimum share price
:return: value of lookback option
"""
assert option_type == "call" or option_type == "put"
if option_type == "call":
self.w = 1
self.s_bar = float(min_share_price)
elif option_type == "put":
self.w = -1
self.s_bar = float(max_share_price)
self._d5 = (log(self.K / self.s_bar) + (self.r - self.div_yield + 0.5 * (self.sigma ** 2)) * self.T) / (
self.sigma * sqrt(self.T))
self._d6 = self._d5 - self.sigma * sqrt(self.T)
self._d7 = (log(self.s_bar / self.K) + (self.r - self.div_yield + 0.5 * (self.sigma ** 2)) * self.T) / (
self.sigma * sqrt(self.T))
self._d8 = self._d7 - self.sigma * sqrt(self.T)
# Lookback option pricing
self.lb_first_part = self.w * self.K * exp(-self.div_yield * self.T) * (
stats.norm.cdf(self.w * self._d5) - (self.sigma ** 2) * stats.norm.cdf(-self.w * self._d5) / (
2 * (self.r - self.div_yield)))
self.lb_second_part = self.w * self.s_bar * exp(-self.r * self.T) * (stats.norm.cdf(self.w * self._d6) - (
(self.sigma ** 2) / (2 * (self.r - self.div_yield)) * (self.K / self.s_bar) ** (
1 - 2 * (self.r - self.div_yield) / (self.sigma ** 2))) * stats.norm.cdf(self.w * self._d8))
return self.lb_first_part - self.lb_second_part
def merton_jump_diffusion(self, option_type: str, avg_num_jumps: float, jump_size_mean: float,
jump_size_std: float) -> float:
"""
Merton closed-form solution for European options with underlying asset jumps
assuming jump size follows a log-normal distribution: ln(jump_size) ~ N(jump_size_mean, jump_size_std).
        Note: the closed form is reliable only for a certain range of parameters;
        it works properly only for sigma_j smaller than a level that depends on lam.
see: https://github.com/cantaro86/Financial-Models-Numerical-Methods/blob/master/3.1%20Merton%20jump-diffusion%2C%20PIDE%20method.ipynb
Parameters
----------
option_type: (str) call or put
        avg_num_jumps: (float) expected number of jumps in T; can be fractional
jump_size_mean: (float) ln(jump_size) ~ N(jump_size_mean, jump_size_std)
        jump_size_std: (float) ln(jump_size) ~
self.data_finished_fut:
try:
await self.data_finished_fut
logger.debug(
"Websocket task finished. Closing the connection."
)
except asyncio.CancelledError:
# Cancelled error is called when data phase is cancelled
# if an error occurred or the client closed the connection
logger.debug(
"Websocket handler cancelled. Closing the connection."
)
# Cancel the keepalive ping task.
if self.keepalive_ping_task:
self.keepalive_ping_task.cancel()
self.keepalive_ping_task = None
# Half-close the TCP connection if possible (when there's no TLS).
if (
self.io_proto
and self.io_proto.transport
and self.io_proto.transport.can_write_eof()
):
logger.debug("Websocket half-closing TCP connection")
self.io_proto.transport.write_eof()
if self.connection_lost_waiter:
if await self.wait_for_connection_lost(timeout=0):
return
except asyncio.CancelledError:
...
finally:
# The try/finally ensures that the transport never remains open,
# even if this coroutine is cancelled (for example).
if (not self.io_proto) or (not self.io_proto.transport):
# we were never open, or done. Can't do any finalization.
return
elif (
self.connection_lost_waiter
and self.connection_lost_waiter.done()
):
# connection confirmed closed already, proceed to abort waiter
...
elif self.io_proto.transport.is_closing():
# Connection is already closing (due to half-close above)
# proceed to abort waiter
...
else:
self.io_proto.transport.close()
if not self.connection_lost_waiter:
# Our connection monitor task isn't running.
try:
await asyncio.sleep(self.close_timeout)
except asyncio.CancelledError:
...
if self.io_proto and self.io_proto.transport:
self.io_proto.transport.abort()
else:
if await self.wait_for_connection_lost(
timeout=self.close_timeout
):
# Connection aborted before the timeout expired.
return
error_logger.warning(
"Timeout waiting for TCP connection to close. Aborting"
)
if self.io_proto and self.io_proto.transport:
self.io_proto.transport.abort()
def abort_pings(self) -> None:
"""
Raise ConnectionClosed in pending keepalive pings.
They'll never receive a pong once the connection is closed.
"""
if self.connection.state is not CLOSED:
raise ServerError(
"Webscoket about_pings should only be called "
"after connection state is changed to CLOSED"
)
for ping in self.pings.values():
ping.set_exception(ConnectionClosedError(None, None))
# If the exception is never retrieved, it will be logged when ping
# is garbage-collected. This is confusing for users.
# Given that ping is done (with an exception), canceling it does
# nothing, but it prevents logging the exception.
ping.cancel()
async def close(self, code: int = 1000, reason: str = "") -> None:
"""
Perform the closing handshake.
This is a websocket-protocol level close.
:meth:`close` waits for the other end to complete the handshake and
for the TCP connection to terminate.
:meth:`close` is idempotent: it doesn't do anything once the
connection is closed.
:param code: WebSocket close code
:param reason: WebSocket close reason
"""
if code == 1006:
self.fail_connection(code, reason)
return
async with self.conn_mutex:
if self.connection.state is OPEN:
self.connection.send_close(code, reason)
data_to_send = self.connection.data_to_send()
await self.send_data(data_to_send)
async def recv(self, timeout: Optional[float] = None) -> Optional[Data]:
"""
Receive the next message.
Return a :class:`str` for a text frame and :class:`bytes` for a binary
frame.
When the end of the message stream is reached, :meth:`recv` raises
:exc:`~websockets.exceptions.ConnectionClosed`. Specifically, it
raises :exc:`~websockets.exceptions.ConnectionClosedOK` after a normal
connection closure and
:exc:`~websockets.exceptions.ConnectionClosedError` after a protocol
error or a network failure.
If ``timeout`` is ``None``, block until a message is received. Else,
if no message is received within ``timeout`` seconds, return ``None``.
Set ``timeout`` to ``0`` to check if a message was already received.
:raises ~websockets.exceptions.ConnectionClosed: when the
connection is closed
:raises asyncio.CancelledError: if the websocket closes while waiting
:raises ServerError: if two tasks call :meth:`recv` or
:meth:`recv_streaming` concurrently
"""
if self.recv_lock.locked():
raise ServerError(
"cannot call recv while another task is "
"already waiting for the next message"
)
await self.recv_lock.acquire()
if self.connection.state is CLOSED:
self.recv_lock.release()
raise WebsocketClosed(
"Cannot receive from websocket interface after it is closed."
)
try:
self.recv_cancel = asyncio.Future()
done, pending = await asyncio.wait(
(self.recv_cancel, self.assembler.get(timeout)),
return_when=asyncio.FIRST_COMPLETED,
)
done_task = next(iter(done))
if done_task is self.recv_cancel:
# recv was cancelled
for p in pending:
p.cancel()
raise asyncio.CancelledError()
else:
self.recv_cancel.cancel()
return done_task.result()
finally:
self.recv_cancel = None
self.recv_lock.release()
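    # Illustrative usage (a sketch; assumes a connected websocket instance ws):
    #
    #     msg = await ws.recv(timeout=1.0)
    #     if msg is None:
    #         ...  # nothing arrived within the timeout
    #
    # timeout=0 only polls for an already-received message, and timeout=None
    # blocks until a message arrives or the connection is closed/cancelled.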
async def recv_burst(self, max_recv=256) -> Sequence[Data]:
"""
Receive the messages which have arrived since last checking.
Return a :class:`list` containing :class:`str` for a text frame
and :class:`bytes` for a binary frame.
When the end of the message stream is reached, :meth:`recv_burst`
raises :exc:`~websockets.exceptions.ConnectionClosed`. Specifically,
it raises :exc:`~websockets.exceptions.ConnectionClosedOK` after a
normal connection closure and
:exc:`~websockets.exceptions.ConnectionClosedError` after a protocol
error or a network failure.
:raises ~websockets.exceptions.ConnectionClosed: when the
connection is closed
:raises ServerError: if two tasks call :meth:`recv_burst` or
:meth:`recv_streaming` concurrently
"""
if self.recv_lock.locked():
raise ServerError(
"cannot call recv_burst while another task is already waiting "
"for the next message"
)
await self.recv_lock.acquire()
if self.connection.state is CLOSED:
self.recv_lock.release()
raise WebsocketClosed(
"Cannot receive from websocket interface after it is closed."
)
messages = []
try:
# Prevent pausing the transport when we're
# receiving a burst of messages
self.can_pause = False
self.recv_cancel = asyncio.Future()
while True:
done, pending = await asyncio.wait(
(self.recv_cancel, self.assembler.get(timeout=0)),
return_when=asyncio.FIRST_COMPLETED,
)
done_task = next(iter(done))
if done_task is self.recv_cancel:
# recv_burst was cancelled
for p in pending:
p.cancel()
raise asyncio.CancelledError()
m = done_task.result()
if m is None:
# None left in the burst. This is good!
break
messages.append(m)
if len(messages) >= max_recv:
# Too much data in the pipe. Hit our burst limit.
break
# Allow an eventloop iteration for the
# next message to pass into the Assembler
await asyncio.sleep(0)
self.recv_cancel.cancel()
finally:
self.recv_cancel = None
self.can_pause = True
self.recv_lock.release()
return messages
async def recv_streaming(self) -> AsyncIterator[Data]:
"""
Receive the next message frame by frame.
Return an iterator of :class:`str` for a text frame and :class:`bytes`
for a binary frame. The iterator should be exhausted, or else the
connection will become unusable.
With the exception of the return value, :meth:`recv_streaming` behaves
like :meth:`recv`.
"""
if self.recv_lock.locked():
raise ServerError(
"Cannot call recv_streaming while another task "
"is already waiting for the next message"
)
await self.recv_lock.acquire()
if self.connection.state is CLOSED:
self.recv_lock.release()
raise WebsocketClosed(
"Cannot receive from websocket interface after it is closed."
)
try:
cancelled = False
self.recv_cancel = asyncio.Future()
self.can_pause = False
async for m in self.assembler.get_iter():
if self.recv_cancel.done():
cancelled = True
break
yield m
if cancelled:
raise asyncio.CancelledError()
finally:
self.can_pause = True
self.recv_cancel = None
self.recv_lock.release()
async def send(self, message: Union[Data, Iterable[Data]]) -> None:
"""
Send a message.
A string (:class:`str`) is sent as a `Text frame`_. A bytestring or
bytes-like object (:class:`bytes`, :class:`bytearray`, or
:class:`memoryview`) is sent as a `Binary frame`_.
.. _Text frame: https://tools.ietf.org/html/rfc6455#section-5.6
.. _Binary frame: https://tools.ietf.org/html/rfc6455#section-5.6
:meth:`send` also accepts an iterable of strings, bytestrings, or
bytes-like objects. In that case the message is fragmented. Each item
is treated as a message fragment and sent in its own frame. All items
must be of the same type, or else :meth:`send` will raise a
:exc:`TypeError` and the connection will be closed.
:meth:`send` rejects dict-like objects because this is often an error.
If you wish to send the keys of a dict-like object as fragments, call
its :meth:`~dict.keys` method and pass the result to :meth:`send`.
:raises TypeError: for unsupported inputs
"""
async with self.conn_mutex:
if self.connection.state in (CLOSED, CLOSING):
raise WebsocketClosed(
"Cannot write to websocket interface after it is closed."
)
if (not self.data_finished_fut) or self.data_finished_fut.done():
raise ServerError(
"Cannot write to websocket interface after it is finished."
)
# Unfragmented message -- this case must be handled first because
# strings and bytes-like objects are iterable.
if isinstance(message, str):
self.connection.send_text(message.encode("utf-8"))
await self.send_data(self.connection.data_to_send())
elif isinstance(message, (bytes, bytearray, memoryview)):
self.connection.send_binary(message)
await self.send_data(self.connection.data_to_send())
elif isinstance(message, Mapping):
# Catch a common mistake -- passing a dict to send().
raise TypeError("data is a dict-like object")
elif isinstance(message, Iterable):
# Fragmented message -- regular iterator.
raise NotImplementedError(
"Fragmented websocket messages are not supported."
)
else:
raise TypeError("Websocket data must be bytes, str.")
async def ping(self, data: Optional[Data] = None) -> asyncio.Future:
"""
Send a ping.
Return an :class:`~asyncio.Future` that will be resolved when the
corresponding pong is received. You can ignore it if you don't intend
to wait.
A ping may serve as a keepalive or as a check that the remote endpoint
received all messages up to this point::
            pong_event = await ws.ping()
await pong_event # only if you want to wait for the pong
By default, the ping contains four random bytes. This payload may be
        overridden with the
# Copyright(c) 2017-2021 CloudNetEngine. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines keywords for virtual switch operations."""
import os
import re
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from resources.libraries.python.constants import Constants
from resources.libraries.python.ssh import exec_cmd, kill_process
from resources.libraries.python.vif import TapInterface
__all__ = [
u"VirtualSwitch",
u"OvsDpdk",
u"OvsNative",
u"Uplink",
u"TunnelPort",
]
class Uplink():
"""Contains uplink configuration. """
def __init__(self, pci_addr):
self.pci_addr = pci_addr
self.name = None # Uplink's name is populated during start_vswitch
self.ofp = None # uplink's ofp is populated when it's added to a bridge
self.n_queue_pair = None
self.n_rxq_desc = None
self.n_txq_desc = None
class TunnelPort():
"""Contains tunnel port configuration. """
def __init__(self, name, rip, vni, ofp):
self.name = name
self.rip = rip
self.vni = vni
self.ofp = ofp
class IDAllocator():
"""Allocator for managing IDs. """
def __init__(self, name, min_id, max_id):
self.name = name
self.ids = list(range(min_id, max_id+1))
def get(self):
"""Get an ID from the allocator.
        :returns: An ID.
        :rtype: int
"""
get_id = self.ids.pop(0)
return get_id
def put(self, put_id):
"""Put an ID back to the allocator.
        :param put_id: ID to put back.
        :type put_id: int
"""
self.ids.append(put_id)
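# Illustrative usage of IDAllocator (a sketch, not original code):
#
#     ofp_ids = IDAllocator("example", 10, 12)
#     a = ofp_ids.get()   # -> 10
#     b = ofp_ids.get()   # -> 11
#     ofp_ids.put(a)      # 10 goes to the back of the pool
#
# IDs are handed out in ascending order first; recycled IDs are reused only
# after the remaining fresh IDs have been consumed.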
class Bridge():
"""Contains bridge configuration. """
def __init__(self, name):
self.name = name
self.vifs = list()
self.vnis = dict()
self.uplinks = list()
self.tnl_ports = list()
self.with_md = False
self.ofp_ids_uplink = IDAllocator(f"{name} uplink", Constants.OFP_UPLINK_BASE,
Constants.OFP_VHOST_BASE)
self.ofp_ids_vif = IDAllocator(f"{name} vif", Constants.OFP_VHOST_BASE,
Constants.OFP_TUNNEL_BASE)
self.ofp_ids_tunnel = IDAllocator(f"{name} tunnel", Constants.OFP_TUNNEL_BASE,
Constants.OFP_TUNNEL_BASE+100)
def _bridge_add_vni(br, vif):
if not vif.vni in br.vnis:
br.vnis[vif.vni] = list()
br.vnis[vif.vni].append(vif)
def _bridge_add_vif(br, vif):
br.vifs.append(vif)
_bridge_add_vni(br, vif)
class VirtualSwitch():
"""Defines basic methods and attirbutes of a virtual switch."""
def __init__(self, ssh_info, uplinks_spec, tep_addr, ovs_bin_dir, dpdk_devbind_dir):
self.bridges = list()
self.ssh_info = ssh_info
self.tep_addr = tep_addr
# Sorted uplink interface based on interface pseudo name in config file
self.uplinks = list()
self.bound_uplinks = list()
iface_keys = sorted(uplinks_spec.keys())
for iface in iface_keys:
iface_spec = uplinks_spec[iface]
uplink = Uplink(iface_spec['pci_address'])
if iface_spec.get("n_queue_pair"):
uplink.n_queue_pair = iface_spec.get("n_queue_pair")
if iface_spec.get("n_rxq_desc"):
uplink.n_rxq_desc = iface_spec.get("n_rxq_desc")
if iface_spec.get("n_txq_desc"):
uplink.n_txq_desc = iface_spec.get("n_txq_desc")
self.uplinks.append(uplink)
self._ovs_bin_dir = ovs_bin_dir
self._dpdk_devbind_full_cmd = os.path.join(dpdk_devbind_dir, "dpdk-devbind.py")
def execute(self, cmd, timeout=30):
"""Execute an OVS command.
:param cmd: OVS command.
:param timeout: Timeout value in seconds.
:type cmd: str
:type timeout: int
:returns: ret_code, stdout, stderr
:rtype: tuple(int, str, str)
"""
ret_code, stdout, stderr = \
exec_cmd(self.ssh_info, f"{self._ovs_bin_dir}/{cmd}", timeout, sudo=True)
logger.trace(stdout)
if ret_code is None or int(ret_code) != 0:
raise RuntimeError(f"Execute OVS cmd failed on {self.ssh_info['host']} : {cmd}")
return (ret_code, stdout, stderr)
def execute_batch(self, cmds, timeout=30):
"""Execute a batch of OVS commands.
:param cmds: OVS commands.
:param timeout: Timeout value in seconds.
:type cmds: list(str)
:type timeout: int
"""
for cmd in cmds:
self.execute(cmd, timeout)
def execute_host(self, cmd, timeout=30, exp_fail=False):
"""Execute a command on a host which the vswitch resides.
:param cmd: Command.
:param timeout: Timeout value in seconds.
:param exp_fail: Expect the command failure or success. Default: False.
None means don't care about the command result.
:type cmd: str
:type timeout: int
:type exp_fail: bool
:returns: ret_code, stdout, stderr
:rtype: tuple(int, str, str)
"""
ret_code, stdout, stderr = \
exec_cmd(self.ssh_info, cmd, timeout, sudo=True)
logger.trace(stdout)
if ret_code is None or int(ret_code) != 0:
# 'None' for exp_fail means don't care the result
if exp_fail is not None and not exp_fail:
raise RuntimeError(f"Execute host cmd failed on {self.ssh_info['host']} : {cmd}")
return (ret_code, stdout, stderr)
def execute_host_batch(self, cmds, timeout=30):
"""Execute a batch of commands on a host which the vswitch resides.
:param cmds: Commands.
:param timeout: Timeout value in seconds.
:type cmds: list(str)
:type timeout: int
"""
for cmd in cmds:
self.execute_host(cmd, timeout)
def kill_process(self, proc_name):
"""Kill a process on a host which the vswitch resides.
:param proc_name: Process name.
:type proc_name: str
"""
kill_process(self.ssh_info, proc_name)
def get_bridge(self, br_name):
"""Get a Bridge object by a bridge name.
:param br_name: Bridge name.
:type br_name: str
:returns: Bridge object.
:rtype: Bridge obj
"""
for br in self.bridges:
if br.name == br_name:
return br
return None
def _create_bridge_impl(self, br):
"""Get a Bridge object by a bridge name.
:param br_name: Bridge name.
:type br_name: str
:returns: Bridge object.
:rtype: Bridge obj
"""
def create_bridge(self, br_name):
"""Create an OVS bridge.
:param br_name: Bridge name.
:type br_name: str
:returns: Bridge object.
:rtype: Bridge obj
"""
# There might be stale tap interface left on the host if the previous
# run is not gracefully exit, just try to delete it.
self.execute_host(f"ip link del {br_name}", exp_fail=None)
br = Bridge(br_name)
self._create_bridge_impl(br)
self.bridges.append(br)
tap = TapInterface(br_name, Constants.OFP_LOCAL)
_bridge_add_vif(br, tap)
return br
def delete_bridge(self, br_name):
"""Delete an OVS bridge.
:param br_name: Bridge name.
        :type br_name: str
"""
self.execute('ovs-vsctl del-br {}'.format(br_name))
br = self.get_bridge(br_name)
self.bridges.remove(br)
def refresh_bridge_vnis(self):
"""Refresh vni -> vif maps of all bridges on the virtual switch. """
for br in self.bridges:
br.vnis = dict()
for vif in br.vifs:
_bridge_add_vni(br, vif)
def create_tunnel_port(self, br_name, tnl_type, rip, vni):
"""Create a tunnel port on a bridge.
:param br_name: Bridge name.
:param tnl_type: Tunnel type.
:param rip: Tunnel remote ip.
:param vni: Virtual network identifier.
:type br_name: str
:type tnl_type: str
:type rip: IPv4Address or IPv6Address or 'flow'
:type vni: int or 'flow'
:returns: Tunnel port.
:rtype: TunnelPort obj
"""
br = self.get_bridge(br_name)
tnl_ofp = br.ofp_ids_tunnel.get()
tnl_name = '{0}{1}'.format(tnl_type, tnl_ofp)
self.execute(f"ovs-vsctl add-port {br_name} {tnl_name} "
f"-- set Interface {tnl_name} type={tnl_type} "
f"options:remote_ip={rip} "
f"options:key={vni} ofport_request={tnl_ofp} ")
tnl_port = TunnelPort(tnl_name, rip, vni, tnl_ofp)
br = self.get_bridge(br_name)
br.tnl_ports.append(tnl_port)
return tnl_port
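    # Illustrative usage (a sketch; the bridge name and remote IP are
    # hypothetical):
    #
    #     tnl = vswitch.create_tunnel_port('br-int', 'vxlan', '192.168.0.2', 100)
    #     # issues: ovs-vsctl add-port br-int vxlan<ofp> -- set Interface ...
    #     vswitch.delete_tunnel_port('br-int', tnl)
    #
    # The port name is the tunnel type plus the allocated ofport, and the
    # ofport is returned to the bridge's allocator on deletion.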
def delete_tunnel_port(self, br_name, tnl_port):
"""Delete a tunnel port from a bridge.
:param br_name: Bridge name.
:param tnl_port: Tunnel port.
:type br_name: str
:type tnl_port: TunnelPort obj
"""
br = self.get_bridge(br_name)
self.delete_interface(br_name, tnl_port.name)
br.ofp_ids_tunnel.put(tnl_port.ofp)
br.tnl_ports.remove(tnl_port)
def _create_vhost_user_interface_impl(self, br_name, vif):
"""Create a vhost user interface on a bridge.
:param br_name: Bridge name.
:param vif: Virtual interface.
:type br_name: str
:type vif: VirtualInterface obj
"""
def _create_uplink_interface_impl(self, br_name, uplink):
"""Create an uplink interface on a bridge.
:param node: Node to create interface on.
:param br_name: Bridge name.
:param if_name: Interface name.
:type node: dict
:type br_name: str
:type if_name: str
"""
def delete_interface(self, br_name, if_name):
"""Delete an interface on CNE vSwitch.
:param node: Node to create interface on.
:param br_name: Bridge name.
:param if_name: Interface name.
:type node: dict
:type br_name: str
:type if_name: str
:return: Operation status.
:rtype: int
"""
self.execute(f"ovs-vsctl del-port {br_name} {if_name}")
def set_port_vlan(self, if_name, vlan_id):
"""Delete an interface on CNE vSwitch.
:param node: Node to create interface on.
:param br_name: Bridge name.
:param if_name: Interface name.
:type node: dict
:type br_name: str
:type if_name: str
:return: Operation status.
:rtype: int
"""
self.execute(f"ovs-vsctl set port {if_name} tag={vlan_id}")
def set_vlan_limit(self, limit):
"""Set VLAN limit of a virtual switch.
:param limit: VLAN limitation.
:type limit: int
"""
self.execute(f"ovs-vsctl set Open_vSwitch . other_config:vlan-limit={limit}")
def set_uplink_mtu(self, mtu=1600):
"""Set uplink MTU.
:param mtu: Request MTU.
:type mtu: int
"""
for br in self.bridges:
for uplink in br.uplinks:
self.execute(f"ovs-vsctl set Interface {uplink.name} mtu_request={mtu}")
def create_vhost_user_interface(self, br_name, vif):
"""Create a vhost user interface on a bridge.
:param node: Node to create interface on.
:param br_name: Bridge name.
:param if_name: Interface name.
:type node: dict
:type br_name: str
:type vif: dict
"""
self._create_vhost_user_interface_impl(br_name, vif)
br = self.get_bridge(br_name)
_bridge_add_vif(br, vif)
def _create_uplink_bond_impl(self, br):
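        """Backend-specific hook for creating an uplink bond on a bridge
        (appears intended to be overridden by the concrete vswitch classes).
        :param br: Bridge object whose uplinks should be bonded.
        :type br: Bridge obj
        """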
pass
def create_uplink_bridge(self, br_name, bond=False):
"""Create an OVS bridge with uplinks.
It will also bind tep network configuration on the bridge local port.
:param br_name: Bridge name.
:param bond: Configure bond or not.
:type br_name: str
:type bond: bool
"""
br = self.create_bridge(br_name)
ipv4_str = self.tep_addr.ipv4_str_with_prefix()
ipv6_str = self.tep_addr.ipv6_str_with_prefix()
cmds = [f"ip -4 addr add {ipv4_str} dev {br_name}",
f"ip -6 addr add {ipv6_str} dev {br_name}",
f"ip link set {br_name} up",
f"ip -4 route flush {self.tep_addr.ipv4_network}",
f"ip -6 route flush | |
<reponame>klokan/googlecrisismap<gh_stars>1-10
#!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distrib-
# uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# specific language governing permissions and limitations under the License.
"""Displays a card containing a list of nearby features for a given topic."""
import cgi
import datetime
import json
import logging
import math
import re
import urllib
import urlparse
import base_handler
import cache
import config
import kmlify
import maproot
import model
import utils
from google.appengine.api import urlfetch
from google.appengine.ext import ndb # just for GeoPt
# A cache of Feature list representing points from XML, keyed by
# [url, map_id, map_version_id, layer_id]
XML_FEATURES_CACHE = cache.Cache('card_features.xml', 300)
# Fetched strings of Google Places API JSON results, keyed by request URL.
JSON_PLACES_API_CACHE = cache.Cache('card.places', 300)
# Lists of Feature objects, keyed by [map_id, map_version_id, topic_id,
# geolocation_rounded_to_10m, radius, max_count].
FILTERED_FEATURES_CACHE = cache.Cache('card.filtered_features', 60)
# Key: [map_id, map_version_id, topic_id, geolocation_rounded_to_10m, radius].
# Value: 3-tuple of (latest_answers, answer_times, report_dicts) where
# - latest_answers is a dictionary {qid: latest_answer_to_that_question}
# - answer_times is a dictionary {qid: effective_time_of_latest_answer}
# - report_dicts contains the last REPORTS_PER_FEATURE reports, as a list
# of dicts [{qid: answer, '_effective': time, '_id': report_id}]
REPORT_CACHE = cache.Cache('card.reports', 15)
# Number of crowd reports to cache and return per feature.
REPORTS_PER_FEATURE = 5
MAX_ANSWER_AGE = datetime.timedelta(days=7) # ignore answers older than 7 days
GOOGLE_SPREADSHEET_CSV_URL = (
'https://docs.google.com/spreadsheet/pub?key=$key&output=csv')
DEGREES = 3.14159265358979/180
DEADLINE = 10
PLACES_API_SEARCH_URL = (
'https://maps.googleapis.com/maps/api/place/nearbysearch/json?')
PLACES_API_DETAILS_URL = (
'https://maps.googleapis.com/maps/api/place/details/json?')
def RoundGeoPt(point):
return '%.4f,%.4f' % (point.lat, point.lon) # 10-m resolution
class Feature(object):
"""A feature (map item) from a source data layer."""
def __init__(self, name, description_html, location, layer_id=None,
layer_type=None, gplace_id=None, html_attrs=None):
self.name = name
self.layer_id = layer_id
self.location = location # should be an ndb.GeoPt
self.description_html = description_html
self.html_attrs = html_attrs or []
self.layer_type = layer_type
self.gplace_id = gplace_id # Google Places place_id
self.distance = None
self.status_color = None
self.answer_text = ''
self.answer_time = ''
self.answer_source = ''
self.answers = {}
self.reports = []
def __lt__(self, other):
return self.distance < other.distance
def __eq__(self, other):
return self.__dict__ == other.__dict__
distance_km = property(lambda self: self.distance and self.distance/1000.0)
distance_mi = property(lambda self: self.distance and self.distance/1609.344)
def EarthDistance(a, b):
"""Great circle distance in metres between two points on the Earth."""
lat1, lon1 = a.lat*DEGREES, a.lon*DEGREES
lat2, lon2 = b.lat*DEGREES, b.lon*DEGREES
dlon = lon2 - lon1
atan2, cos, sin, sqrt = math.atan2, math.cos, math.sin, math.sqrt
y = sqrt(pow(cos(lat2)*sin(dlon), 2) +
pow(cos(lat1)*sin(lat2) - sin(lat1)*cos(lat2)*cos(dlon), 2))
x = sin(lat1)*sin(lat2) + cos(lat1)*cos(lat2)*cos(dlon)
return 6378000*atan2(y, x)
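# Quick sanity check (a sketch, not original code): one degree of longitude at
# the equator is about 6378000 * DEGREES ~= 111319 m, and indeed
#
#     EarthDistance(ndb.GeoPt(0, 0), ndb.GeoPt(0, 1))   # ~111319 metres
#
# matches the expected great-circle distance on a sphere of that radius.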
def GetText(element):
return (element is not None) and element.text or ''
def GetFeaturesFromXml(xml_content, layer_id=None):
"""Extracts a list of Feature objects from KML, GeoRSS, or Atom content."""
root = kmlify.ParseXml(xml_content)
for element in root.getiterator():
element.tag = element.tag.split('}')[-1] # remove XML namespaces
features = []
for item in (root.findall('.//Placemark') +
root.findall('.//entry') + root.findall('.//item')):
location = GetLocationFromXmlItem(item)
if not location:
continue
texts = {child.tag: GetText(child) for child in item.getchildren()}
# For now strip description of all the html tags to prevent XSS
# vulnerabilities except some basic text formatting tags
# TODO(user): sanitization should move closer to render time
# (revisit this once iframed version goes away) - b/17374443
description_html = (texts.get('description') or
texts.get('content') or
texts.get('summary') or '')
description_escaped = utils.StripHtmlTags(
description_html, tag_whitelist=['b', 'u', 'i', 'br'])
features.append(Feature(texts.get('title') or texts.get('name'),
description_escaped, location, layer_id))
return features
def GetLocationFromXmlItem(item):
lat = lon = ''
try:
if item.find('.//coordinates') is not None:
lon, lat = GetText(item.find('.//coordinates')).split(',')[:2]
if item.find('.//point') is not None:
lat, lon = GetText(item.find('.//point')).split()[:2]
location = ndb.GeoPt(float(lat), float(lon))
return location
except ValueError:
return None
def GetKmlUrl(root_url, layer):
"""Forms the URL that gets the KML for a given KML-powered layer."""
layer_type = layer.get('type')
if layer_type not in [maproot.LayerType.KML,
maproot.LayerType.GEORSS,
maproot.LayerType.GOOGLE_SPREADSHEET,
maproot.LayerType.GEOJSON,
maproot.LayerType.CSV]:
return None
source = (layer.get('source', {}).values() or [{}])[0]
url = source.get('url')
if layer_type in [maproot.LayerType.KML, maproot.LayerType.GEORSS]:
return url or None
if layer_type == maproot.LayerType.GOOGLE_SPREADSHEET:
match = re.search(r'spreadsheet/.*[?&]key=(\w+)', url)
url = match and GOOGLE_SPREADSHEET_CSV_URL.replace('$key', match.group(1))
# See http://goto.google.com/kmlify for details on kmlify's query params.
if url:
params = [('url', url)]
if layer_type == maproot.LayerType.GEOJSON:
params += [('type', 'geojson')]
else:
lat, lon = source.get('latitude_field'), source.get('longitude_field')
if not (lat and lon):
return None
params += [('type', 'csv'),
('loc', lat == lon and lat or lat + ',' + lon),
('icon', source.get('icon_url_template')),
('color', source.get('color_template')),
('hotspot', source.get('hotspot_template'))]
params += [('name', source.get('title_template')),
('desc', source.get('description_template')),
('cond', source.get('condition0')),
('cond', source.get('condition1')),
('cond', source.get('condition2'))]
return (root_url + '/.kmlify?' +
urllib.urlencode([(k, v) for k, v in params if v]))
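# Illustrative shape of the result (a sketch; the field names are
# hypothetical): for a CSV layer whose source has url=U, latitude_field='lat'
# and longitude_field='lon', the returned URL looks like
#
#     root_url + '/.kmlify?url=U&type=csv&loc=lat%2Clon&name=...&desc=...'
#
# with empty template parameters dropped, while plain KML/GeoRSS layers simply
# return their source URL unchanged.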
def GetGeoPt(place):
"""Returns a geo location of a given place.
Args:
place: Google Places API place
Returns:
GeoPt corresponding to the place location
"""
location = place['geometry']['location']
return ndb.GeoPt(location['lat'], location['lng'])
def GetFeaturesFromPlacesLayer(layer, location):
"""Builds a list of Feature objects for the Places layer near given location.
Args:
layer: Places layer that defines the criteria for places query
location: db.GeoPt around which to retrieve places
Returns:
A list of Feature objects representing Google Places.
"""
# Fetch JSON from the Places API nearby search
places_layer = layer.get('source').get('google_places')
request_params = [
('location', location),
('rankby', 'distance'),
('keyword', places_layer.get('keyword')),
('name', places_layer.get('name')),
('types', places_layer.get('types'))]
place_results = GetPlacesApiResults(PLACES_API_SEARCH_URL, request_params,
'results')
# Convert Places API results to Feature objects
features = []
for place in place_results:
# Delay building description_html until after features list was trimmed.
# Otherwise, we'd be doing wasteful calls to Places API
# to get address/phone number that will never get displayed.
features.append(Feature(place['name'], None, GetGeoPt(place),
layer.get('id'), layer_type=layer.get('type'),
gplace_id=place['place_id']))
return features
def GetGooglePlaceDetails(place_id):
return GetPlacesApiResults(PLACES_API_DETAILS_URL, [('placeid', place_id)])
def GetGooglePlaceDescriptionHtml(place_details):
# TODO(user): build a shorter address format (will require i18n)
result = place_details.get('result')
return ('<div>%s</div><div>%s</div>' %
(result.get('formatted_address', ''),
result.get('formatted_phone_number', '')))
def GetGooglePlaceHtmlAttributions(place_details):
return place_details.get('html_attributions', [])
def GetPlacesApiResults(base_url, request_params, result_key_name=None):
"""Fetches results from Places API given base_url and request params.
Args:
base_url: URL prefix to use before the request params
request_params: An array of key and value pairs for the request
result_key_name: Name of the results field in the Places API response
or None if the whole response should be returned
Returns:
Value for the result_key_name in the Places API response or all of the
response if result_key_name is None
"""
google_api_server_key = config.Get('google_api_server_key')
if not google_api_server_key:
raise base_handler.Error(
500, 'google_api_server_key is not set in the config')
request_params += [('key', google_api_server_key)]
url = base_url + urllib.urlencode([(k, v) for k, v in request_params if v])
# Call Places API if cache doesn't have a corresponding entry for the url
response = JSON_PLACES_API_CACHE.Get(
url, lambda: urlfetch.fetch(url=url, deadline=DEADLINE))
# Parse JSON results
response_content = json.loads(response.content)
status = response_content.get('status')
if status != 'OK' and status != 'ZERO_RESULTS':
# Something went wrong with the request, log the error
logging.error('Places API request [%s] failed with error %s', url, status)
return []
return (response_content.get(result_key_name) if result_key_name
else response_content)
def GetTopic(root, topic_id):
return {topic['id']: topic for topic in root['topics']}.get(topic_id)
def GetLayer(root, layer_id):
return {layer['id']: layer for layer in root['layers']}.get(layer_id)
def GetFeatures(map_root, map_version_id, topic_id, request, location_center):
"""Gets a list of Feature objects for a given topic.
Args:
map_root: A dictionary with all the topics and layers information
map_version_id: ID of the map version
topic_id: ID of the crowd report topic; features are retrieved from the
layers associated with this topic
request: Original card request
location_center: db.GeoPt around which to retrieve features. So far only
Places layer uses this to narrow results according to the
distance from this location. All other layers ignore this for now
and just return all features. Note that Places layer doesn't have a set
radius around location_center, it just tries to find features
as close as possible to location_center.
Returns:
A list of Feature objects associated with layers of a given topic in a given
map.
"""
topic = GetTopic(map_root, topic_id) or {}
features = []
for layer_id in topic.get('layer_ids', []):
layer = GetLayer(map_root, layer_id)
if layer.get('type') == maproot.LayerType.GOOGLE_PLACES:
features += GetFeaturesFromPlacesLayer(layer, location_center)
else:
url = GetKmlUrl(request.root_url, layer or {})
if url:
try:
def GetXmlFeatures():
content = kmlify.FetchData(url, request.host)
return GetFeaturesFromXml(content, layer_id)
features += XML_FEATURES_CACHE.Get(
          [url,
<gh_stars>0
"""
Classes that represent a collection of points/structures that will define a labelmap or similar for image analysis purposes.
Currently the parent object is GeometryTopologyData, that can contain objects of type Point and/or BoundingBox.
The structure of the object is defined in the GeometryTopologyData.xsd schema.
Created on Apr 6, 2015
@author: <NAME>
"""
import xml.etree.ElementTree as et
import os
import platform
import time
class GeometryTopologyData:
# Coordinate System Constants
UNKNOWN = 0
IJK = 1
RAS = 2
LPS = 3
__num_dimensions__ = 0
@property
def num_dimensions(self):
""" Number of dimensions (generally 3)"""
if self.__num_dimensions__ == 0:
# Try to get the number of dimensions from the first point or bounding box
if len(self.points) > 0:
self.__num_dimensions__ = len(self.points[0].coordinate)
elif len(self.bounding_boxes) > 0:
self.__num_dimensions__ = len(self.bounding_boxes[0].start)
return self.__num_dimensions__
@num_dimensions.setter
def num_dimensions(self, value):
self.__num_dimensions__ = value
@property
def lps_to_ijk_transformation_matrix_array(self):
""" LPS_IJK transformation matrix in a numpy format
"""
if self.lps_to_ijk_transformation_matrix is None:
return None
if self.__lps_to_ijk_transformation_matrix_array__ is None:
import numpy as np
self.__lps_to_ijk_transformation_matrix_array__ = np.array(self.lps_to_ijk_transformation_matrix, dtype=float)  # plain float: np.float is removed in recent numpy
return self.__lps_to_ijk_transformation_matrix_array__
def __init__(self):
self.__num_dimensions__ = 0
self.coordinate_system = self.UNKNOWN
self.lps_to_ijk_transformation_matrix = None # Transformation matrix to go from LPS to IJK (in the shape of a 4x4 list)
self.__lps_to_ijk_transformation_matrix_array__ = None # Same matrix in a numpy array
self.points = [] # List of Point objects
self.bounding_boxes = [] # List of BoundingBox objects
self.id_seed = 0 # Seed. The structures added with "add_point", etc. will have an id = id_seed + 1
def add_point(self, point, fill_auto_fields=True, timestamp=None):
""" Add a new Point to the structure
:param point: Point object
:param fill_auto_fields: fill automatically UserName, MachineName, etc.
:param timestamp: optional timestamp to be set in the object
"""
self.points.append(point)
if fill_auto_fields:
self.fill_auto_fields(point)
if timestamp:
point.timestamp = timestamp
def add_bounding_box(self, bounding_box, fill_auto_fields=True, timestamp=None):
""" Add a new BoundingBox to the structure
:param bounding_box: BoundingBox object
:param fill_auto_fields: fill automatically UserName, MachineName, etc.
:param timestamp: optional timestamp to be set in the object
"""
self.bounding_boxes.append(bounding_box)
if fill_auto_fields:
self.fill_auto_fields(bounding_box)
if timestamp:
bounding_box.timestamp = timestamp
def fill_auto_fields(self, structure):
""" Fill "auto" fields like timestamp, username, etc, unless there is already a specified value
The id will be id_seed + 1
@param structure: object whose fields will be filled
"""
if structure.__id__ == 0:
structure.__id__ = self.id_seed + 1
self.id_seed += 1
if not structure.timestamp:
structure.timestamp = GeometryTopologyData.get_timestamp()
if not structure.user_name:
structure.user_name = os.path.split(os.path.expanduser('~'))[-1]
if not structure.machine_name:
structure.machine_name = platform.node()
@staticmethod
def get_timestamp():
""" Get a timestamp of the current date in the preferred format
@return:
"""
return time.strftime('%Y-%m-%d %H:%M:%S')
def to_xml(self):
""" Generate the XML string representation of this object.
It doesn't use any special python module to keep compatibility with Slicer """
output = '<?xml version="1.0" encoding="utf8"?><GeometryTopologyData>'
if self.num_dimensions != 0:
output += ('<NumDimensions>%i</NumDimensions>' % self.num_dimensions)
output += ('<CoordinateSystem>%s</CoordinateSystem>' % self.__coordinate_system_to_str__(self.coordinate_system))
if self.lps_to_ijk_transformation_matrix is not None:
output += self.__write_transformation_matrix__(self.lps_to_ijk_transformation_matrix)
# Concatenate points
points = "".join(map(lambda i:i.to_xml(), self.points))
# Concatenate bounding boxes
bounding_boxes = "".join(map(lambda i:i.to_xml(), self.bounding_boxes))
return output + points + bounding_boxes + "</GeometryTopologyData>"
def to_xml_file(self, xml_file_path):
""" Save this object to an xml file
:param: xml_file_path: file path
"""
s = self.to_xml()
with open(xml_file_path, "w+b") as f:
f.write(s)
@staticmethod
def from_xml_file(xml_file_path):
""" Get a GeometryTopologyObject from a file
@param xml_file_path: file path
@return: GeometryTopologyData object
"""
with open(xml_file_path, 'r+b') as f:
xml = f.read()
return GeometryTopologyData.from_xml(xml)
@staticmethod
def from_xml(xml):
""" Build a GeometryTopologyData object from a xml string.
All the coordinates will be float.
remark: Use the ElementTree instead of lxml module to be compatible with Slicer
:param xml: xml string
:return: new GeometryTopologyData object
"""
root = et.fromstring(xml)
geometry_topology = GeometryTopologyData()
# NumDimensions
s = root.find("NumDimensions")
if s is not None:
geometry_topology.__num_dimensions__ = int(s.text)
# Coordinate System
s = root.find("CoordinateSystem")
if s is not None:
geometry_topology.coordinate_system = geometry_topology.__coordinate_system_from_str__(s.text)
geometry_topology.lps_to_ijk_transformation_matrix = geometry_topology.__read_transformation_matrix__(root)
seed = 0
# Points
for xml_point_node in root.findall("Point"):
point = Point.from_xml_node(xml_point_node)
geometry_topology.add_point(point, fill_auto_fields=False)
if point.id > seed:
seed = point.id
# BoundingBoxes
for xml_bb_node in root.findall("BoundingBox"):
bb = BoundingBox.from_xml_node(xml_bb_node)
geometry_topology.add_bounding_box(bb, fill_auto_fields=False)
if bb.id > seed:
seed = bb.id
# Set the new seed so that every point (or bounding box) added with "add_point" has a bigger id
geometry_topology.id_seed = seed
return geometry_topology
def get_hashtable(self):
""" Return a "hashtable" that will be a dictionary of hash:structure for every point or
bounding box present in the structure
@return:
"""
hash = {}
for p in self.points:
hash[p.get_hash()] = p
for bb in self.bounding_boxes:
hash[bb.get_hash()] = bb
return hash
@staticmethod
def __to_xml_vector__(array, format_="%f"):
""" Get the xml representation of a vector of coordinates (<value>elem1</value>, <value>elem2</value>...)
:param array: vector of values
:return: xml representation of the vector (<value>elem1</value>, <value>elem2</value>...)
"""
output = ''
for i in array:
output = ("%s<value>" + format_ + "</value>") % (output, i)
return output
@staticmethod
def __coordinate_system_from_str__(value_str):
""" Get one of the possible coordinate systems allowed from its string representation
:param value_str: "IJK", "RAS", "LPS"...
:return: one the allowed coordinates systems
"""
if value_str is not None:
if value_str == "IJK": return GeometryTopologyData.IJK
elif value_str == "RAS": return GeometryTopologyData.RAS
elif value_str == "LPS": return GeometryTopologyData.LPS
else: return GeometryTopologyData.UNKNOWN
else:
return GeometryTopologyData.UNKNOWN
@staticmethod
def __coordinate_system_to_str__(value_int):
""" Get the string representation of one of the coordinates systems
:param value_int: GeometryTopologyData.IJK, GeometryTopologyData.RAS, GeometryTopologyData.LPS...
:return: string representing the coordinate system ("IJK", "RAS", "LPS"...)
"""
if value_int == GeometryTopologyData.IJK: return "IJK"
elif value_int == GeometryTopologyData.RAS: return "RAS"
elif value_int == GeometryTopologyData.LPS: return "LPS"
return "UNKNOWN"
def __read_transformation_matrix__(self, root_xml):
""" Read a 16 elems vector in the xml and return a 4x4 list (or None if node not found)
:param root_xml: xml root node
:return: 4x4 list or None
"""
# Try to find the node first
node = root_xml.find("LPStoIJKTransformationMatrix")
if node is None:
return None
m = []
temp = []
for coord in node.findall("value"):
temp.append(float(coord.text))
# Convert to a 4x4 list
for i in range (4):
m.append([temp[i*4], temp[i*4+1], temp[i*4+2], temp[i*4+3]])
return m
def __write_transformation_matrix__(self, matrix):
""" Generate an xml text for a 4x4 transformation matrix
:param matrix: 4x4 list
:return: xml string (LPStoIJKTransformationMatrix complete node)
"""
# Flatten the list
s = ""
for item in (item for sublist in matrix for item in sublist):
s += ("<value>%f</value>" % item)
return "<LPStoIJKTransformationMatrix>%s</LPStoIJKTransformationMatrix>" % s
class Structure(object):
def __init__(self, chest_region, chest_type, feature_type, description=None, format_="%f",
timestamp=None, user_name=None, machine_name=None):
"""
:param chest_region: chestRegion Id
:param chest_type: chestType Id
:param feature_type: feature type Id (artifacts and others)
:param description: optional description of the content the element
:param format_: Default format to print the xml output coordinate values (also acceptable: %i for integers or customized)
:param timestamp: datetime in format "YYYY/MM/dd HH:mm:ss"
:param user_name: logged username
:param machine_name: name of the current machine
"""
self.__id__ = 0
self.chest_region = chest_region
self.chest_type = chest_type
self.feature_type = feature_type
self.description = description
self.format = format_
self.timestamp = timestamp
self.user_name = user_name
self.machine_name = machine_name
@property
def id(self):
return self.__id__
def get_hash(self):
""" Get a unique identifier for this structure (string encoding all the fields)
@return:
"""
return "%03d_%03d_%03d" % (self.chest_region, self.chest_type, self.feature_type)
@staticmethod
def from_xml_node(xml_node):
""" Return a new instance of a Point object from xml "Point" element
:param xml_node: xml Point element coming from a "find" instruction
:return: new instance of the structure
"""
id = int(xml_node.find("Id").text)
chest_region = int(xml_node.find("ChestRegion").text)
chest_type = int(xml_node.find("ChestType").text)
featureNode = xml_node.find("ImageFeature")
if featureNode is None:
feature_type = 0
else:
feature_type = int(featureNode.text)
# Description
desc = xml_node.find("Description")
if desc is not None:
desc = desc.text
# Timestamp and user info
timestamp = xml_node.find("Timestamp")
if timestamp is not None:
timestamp = timestamp.text
user_name = xml_node.find("Username")
if user_name is not None:
user_name = user_name.text
machine_name = xml_node.find("MachineName")
if machine_name is not None:
machine_name = machine_name.text
structure = Structure(chest_region, chest_type, feature_type, description=desc, timestamp=timestamp,
user_name=user_name, machine_name=machine_name)
structure.__id__ = id
return structure
def to_xml(self):
""" Get the xml string representation of the structure that can be appended to a concrete structure (Point,
BoundingBox, etc)
:return: xml string representation of the point
"""
description
target_variant = row['target_variant']
mutation_type = row['mutation_type']
is_cds = row['is_cds']
aa_name = 'N/A'
ref_state = row['ref_state']
alt_state = row['alt_state']
aa_start = row['aa_start']
aa_end = row['aa_end']
if mutation_type == 'snp':
mutation_key = "{}_{}".format(mutation_type,row['unalign_variant_start'])
if state == 'alt':
dna_name = "{}{}{}".format(ref_variant,row['unalign_variant_start'],target_variant)
else:
dna_name = "{}{}{}".format(ref_variant, row['unalign_variant_start'], ref_variant)
if is_cds:
aa_name = "{}{}{}".format(ref_state,aa_start,ref_state)
if state == 'alt':
aa_name = "{}{}{}".format(ref_state,aa_start,alt_state)
elif mutation_type == 'del':
mutation_key = "{}_{}_{}".format(mutation_type,row['unalign_variant_start'],row['unalign_variant_end'])
dna_name = "{}_{}_{}_{}".format(mutation_type,row['unalign_variant_start'],row['unalign_variant_end'],ref_variant )
if state == 'alt':
dna_name = "{}_{}_{}".format(mutation_type, row['unalign_variant_start'],
row['unalign_variant_end'])
if is_cds:
aa_name = "{}_{}_{}_{}".format(mutation_type, aa_start, aa_end, ref_state)
if state == 'alt':
aa_name = "{}_{}_{}".format(mutation_type, aa_start, aa_end)
else:
mutation_key = "{}_{}_{}".format(mutation_type, row['unalign_variant_start'],row['unalign_variant_end'])
if state == 'alt':
dna_name = "{}_{}_{}_{}".format(mutation_type,row['unalign_variant_start'],row['unalign_variant_end'],target_variant )
else:
dna_name = "{}_{}_{}".format(mutation_type, row['unalign_variant_start'],
row['unalign_variant_end'])
if is_cds:
if state == 'alt':
aa_name = "{}_{}_{}_{}".format(mutation_type, aa_start, aa_end, alt_state)
else:
aa_name = "{}_{}_{}".format(mutation_type, aa_start, aa_end)
row['mutation_key'] = mutation_key
row['dna_name'] = dna_name
row['aa_name'] = aa_name
return scheme
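# --- Hedged illustration (not part of the original file) ---
# Standalone mirror of the SNP branch above, showing the naming convention the loop
# produces (e.g. ref 'A' at unaligned position 241 mutating to 'G' -> key 'snp_241',
# alt dna_name 'A241G', ref dna_name 'A241A'). Hypothetical helper, for clarity only.
def _example_snp_names(ref_base, pos, alt_base):
    mutation_key = "{}_{}".format('snp', pos)
    ref_dna_name = "{}{}{}".format(ref_base, pos, ref_base)
    alt_dna_name = "{}{}{}".format(ref_base, pos, alt_base)
    return mutation_key, ref_dna_name, alt_dna_name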
def identify_shared_kmers(genotype_mapping,scheme,min_thresh=0.5,max_thresh=1):
'''
Identify kmers shared by each genotype: a kmer is "shared" when the fraction of that
genotype's samples containing it falls within [min_thresh, max_thresh].
:param genotype_mapping: dict of sample_id -> genotype
:type genotype_mapping: dict
:param scheme: nested dict of mutation_key -> state -> list of kmer entries
:type scheme: dict
:param min_thresh: minimum fraction of a genotype's samples that must contain the kmer
:type min_thresh: float
:param max_thresh: maximum fraction of a genotype's samples that may contain the kmer
:type max_thresh: float
:return: dict of genotype -> list of kmer uids meeting the thresholds
:rtype: dict
'''
all_seq_ids = list(genotype_mapping.keys())
#Get counts of each genotype
genotype_sample_counts = {}
for sample_id in genotype_mapping:
genotype = genotype_mapping[sample_id]
if not genotype in genotype_sample_counts:
genotype_sample_counts[genotype] = 0
genotype_sample_counts[genotype]+=1
genotype_shared_kmers = {}
for genotype in genotype_sample_counts:
genotype_shared_kmers[genotype] = []
#Process each kmer and determine if it meets the thresholds
for mutation_key in scheme:
for state in scheme[mutation_key]:
for kmer_entry in scheme[mutation_key][state]:
uid = kmer_entry['key']
seq_ids = kmer_entry['seq_ids']
genotypes = []
for sample_id in seq_ids:
genotype = genotype_mapping[sample_id]
genotypes.append(genotype)
gCounts = Counter(genotypes)
for genotype in gCounts:
count = gCounts[genotype]
perc = count / genotype_sample_counts[genotype]
if perc >= min_thresh and perc <= max_thresh:
genotype_shared_kmers[genotype].append(uid)
return genotype_shared_kmers
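# --- Hedged usage sketch (not part of the original file) ---
# Illustrates the shapes identify_shared_kmers expects: genotype_mapping maps sample
# ids to genotype labels, and each scheme entry lists the samples (seq_ids) carrying
# a kmer uid ('key'). All values below are synthetic.
def _example_identify_shared_kmers():
    genotype_mapping = {'s1': 'A', 's2': 'A', 's3': 'B'}
    scheme = {
        'snp_100': {
            'ref': [{'key': 0, 'seq_ids': ['s3']}],
            'alt': [{'key': 1, 'seq_ids': ['s1', 's2']}],
        }
    }
    # kmer uid 1 occurs in 2/2 genotype 'A' samples -> reported as shared for 'A'
    return identify_shared_kmers(genotype_mapping, scheme, min_thresh=0.5, max_thresh=1)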
def identify_shared_mutations(genotype_mapping,scheme,min_thresh=0.5,max_thresh=1):
'''
Identify mutations shared by each genotype: a mutation (dna_name) is "shared" when the
fraction of that genotype's samples carrying it falls within [min_thresh, max_thresh].
:param genotype_mapping: dict of sample_id -> genotype
:type genotype_mapping: dict
:param scheme: nested dict of mutation_key -> state -> list of kmer/mutation rows
:type scheme: dict
:param min_thresh: minimum fraction of a genotype's samples that must carry the mutation
:type min_thresh: float
:param max_thresh: maximum fraction of a genotype's samples that may carry the mutation
:type max_thresh: float
:return: dict of genotype -> list of dna_name mutations meeting the thresholds
:rtype: dict
'''
#Get counts of each genotype
genotype_sample_counts = {}
shared_mutations = {}
for sample_id in genotype_mapping:
genotype = genotype_mapping[sample_id]
if not genotype in genotype_sample_counts:
genotype_sample_counts[genotype] = 0
shared_mutations[genotype] = []
genotype_sample_counts[genotype]+=1
#get counts of genotypes by mutation
mutation_geno_counts = {}
for mutation_key in scheme:
for state in scheme[mutation_key]:
for row in scheme[mutation_key][state]:
dna_name = row['dna_name']
if not dna_name in mutation_geno_counts:
mutation_geno_counts[dna_name] = {}
seq_ids = row['seq_ids']
for sid in seq_ids:
genotype = genotype_mapping[sid]
if not genotype in mutation_geno_counts[dna_name]:
mutation_geno_counts[dna_name][genotype]= 0
mutation_geno_counts[dna_name][genotype] += 1
for mutation in mutation_geno_counts:
for genotype in mutation_geno_counts[mutation]:
perc = mutation_geno_counts[mutation][genotype] / genotype_sample_counts[genotype]
if perc >= min_thresh and perc <= max_thresh:
shared_mutations[genotype].append(mutation)
return shared_mutations
def qa_genotype_kmers(genotype_mapping,kmer_geno_assoc,uid_map):
report = {}
# Get counts of each genotype
genotype_sample_counts = {}
for sample_id in genotype_mapping:
genotype = genotype_mapping[sample_id]
if not genotype in genotype_sample_counts:
genotype_sample_counts[genotype] = 0
report[genotype] = {
'name': genotype,
'num_members': 0,
'num_shared_kmers': 0,
'num_diagnostic_kmers': 0,
'shared_kmers': [],
'diagnostic_kmers': [],
'qc_message': ''
}
genotype_sample_counts[genotype] += 1
report[genotype]['num_members'] += 1
for uid in kmer_geno_assoc:
shared_genotypes = kmer_geno_assoc[uid]['shared']
for genotype in shared_genotypes:
n = uid_map[uid]
report[genotype]['shared_kmers'].append(n)
diagnostic_genotypes = kmer_geno_assoc[uid]['diagnostic']
for genotype in diagnostic_genotypes:
n = uid_map[uid]
report[genotype]['diagnostic_kmers'].append(n)
for genotype in report:
report[genotype]['num_shared_kmers'] = len(report[genotype]['shared_kmers'])
report[genotype]['num_diagnostic_kmers'] = len(report[genotype]['diagnostic_kmers'])
report[genotype]['shared_kmers'].sort()
report[genotype]['shared_kmers'] = ';'.join(report[genotype]['shared_kmers'] )
report[genotype]['diagnostic_kmers'].sort()
report[genotype]['diagnostic_kmers'] = ';'.join(report[genotype]['diagnostic_kmers'])
qc_message = []
if report[genotype]['num_shared_kmers'] == 0:
qc_message.append( 'FAIL: No shared kmers' )
if report[genotype]['num_diagnostic_kmers'] == 0:
qc_message.append( 'WARNING: No diagnostic kmers' )
report[genotype]['qc_message'] = ';'.join(qc_message)
return report
def qa_genotype_mutations(genotype_mapping,mutation_geno_association):
report = {}
# Get counts of each genotype
genotype_sample_counts = {}
for sample_id in genotype_mapping:
genotype = genotype_mapping[sample_id]
if not genotype in genotype_sample_counts:
genotype_sample_counts[genotype] = 0
report[genotype] = {
'name': genotype,
'num_members': 0,
'num_shared_mutations': 0,
'num_diagnostic_mutations': 0,
'shared_mutations': [],
'diagnostic_mutations': [],
'qc_message': ''
}
genotype_sample_counts[genotype] += 1
report[genotype]['num_members'] += 1
for mkey in mutation_geno_association:
shared_genotypes = mutation_geno_association[mkey]['shared']
for genotype in shared_genotypes:
report[genotype]['shared_mutations'].append(mkey)
diagnostic_genotypes = mutation_geno_association[mkey]['diagnostic']
for genotype in diagnostic_genotypes:
report[genotype]['diagnostic_mutations'].append(mkey)
for genotype in report:
report[genotype]['num_shared_mutations'] = len(report[genotype]['shared_mutations'])
report[genotype]['num_diagnostic_mutations'] = len(report[genotype]['diagnostic_mutations'])
report[genotype]['shared_mutations'].sort()
report[genotype]['shared_mutations'] = ';'.join(report[genotype]['shared_mutations'] )
report[genotype]['diagnostic_mutations'].sort()
report[genotype]['diagnostic_mutations'] = ';'.join(report[genotype]['diagnostic_mutations'])
qc_message = []
if report[genotype]['num_shared_mutations'] == 0:
qc_message.append( 'FAIL: No shared mutations' )
if report[genotype]['num_diagnostic_mutations'] == 0:
qc_message.append( 'WARNING: No diagnostic mutations' )
report[genotype]['qc_message'] = ';'.join(qc_message)
return report
def print_scheme(scheme,file,header):
"""
Takes the kmer dictionary and writes it to a file
:param scheme: dict of kmer info
:param file: file path
:return:
"""
fh = open(file,'w')
fh.write("\t".join(header) + "\n")
for mutation_key in scheme:
for state in scheme[mutation_key]:
for data in scheme[mutation_key][state]:
row = []
for field in header:
if field in data:
row.append(data[field])
else:
row.append('')
fh.write("{}\n".format("\t".join([str(x) for x in row])))
fh.close()
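# --- Hedged usage sketch (not part of the original file) ---
# print_scheme only emits the fields named in `header` (missing fields become empty
# columns). The header below is a small illustrative subset; real scheme rows carry
# many more fields.
def _example_print_scheme(scheme, out_path):
    header = ['key', 'mutation_key', 'dna_name', 'aa_name']
    print_scheme(scheme, out_path, header)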
def run():
#get arguments
cmd_args = parse_args()
logger = init_console_logger(2)
#input parameters
input_msa = cmd_args.input_msa
input_meta = cmd_args.input_meta
prefix = cmd_args.prefix
ref_id = cmd_args.ref_id
ref_gbk = cmd_args.ref_gbk
outdir = cmd_args.outdir
min_len = cmd_args.min_len
max_len = cmd_args.max_len
max_ambig = cmd_args.max_ambig
min_complexity = cmd_args.min_complexity
max_missing = cmd_args.max_missing
n_threads = cmd_args.n_threads
impute_min_frac = cmd_args.iFrac
# initialize analysis directory
if not os.path.isdir(outdir):
logger.info("Creating analysis results directory {}".format(outdir))
os.mkdir(outdir, 0o755)
else:
logger.info("Results directory {} already exits, will overwrite any results files here".format(outdir))
#output files
scheme_file = os.path.join(outdir,"{}-scheme.txt".format(prefix))
genotypes_mut_file = os.path.join(outdir,"{}-mutation.associations.txt".format(prefix))
genotypes_kmers_file = os.path.join(outdir, "{}-kmers.associations.txt".format(prefix))
genotype_dendrogram = os.path.join(outdir, "{}-dendropgram.html".format(prefix))
#Get the Gene features from the reference sequence
ref_features = parse_reference_sequence(ref_gbk)
#Read the input MSA and calculate the consensus sequence
logger.info("Reading input alignment {}".format(input_msa))
input_alignment = read_fasta(input_msa)
if ref_id not in input_alignment:
logger.error("Reference id specified '{}' is not found in alignment, make sure it is present and retry".format(ref_id))
sys.exit()
if cmd_args.impute:
logger.info("Imputing ambiguous bases enabled with fraction of {}".format(impute_min_frac))
input_alignment = impute_ambiguous_bases(input_alignment, ref_id, min_frac=impute_min_frac)
else:
logger.info("Imputing ambiguous bases not enabled")
min_members = len(input_alignment) - len(input_alignment)*max_missing
logger.info("Found {} sequences in msa".format(len(input_alignment)))
consensus_bases = calc_consensus(input_alignment)
consensus_seq = generate_consensus_seq(consensus_bases)
#read the metadata associations
logger.info("Reading genotype associations from {}".format(input_meta))
metadata_df = read_tsv(input_meta)
logger.info("Found {} lines in {}".format(len(metadata_df),input_meta))
metadata_df['genotype'] = metadata_df['genotype'].astype(str)
genotype_mapping = get_genotype_mapping(metadata_df)
#Remove pseudo sequences generated by parsityper if applicable
logger.info("Filtering samples from genotype map which are not also in the alignment")
missing_samples = list(set(list(genotype_mapping.keys())) - set(list(input_alignment.keys())) )
samples_to_mask = ['pseudo_A','pseudo_T','pseudo_C','pseudo_G','consensus'] + missing_samples
logger.info("Masking {} samples".format(len(missing_samples)))
for sample in samples_to_mask:
if sample in genotype_mapping:
logger.info("Removing sample {} from analysis due to no sequence in alignment".format(sample))
del(genotype_mapping[sample])
#Identify variable positions within the alignment
logger.info("Scanning alignment for SNPs")
snp_positions = find_snp_positions(consensus_seq)
logger.info("Found {} variable sites".format(len(snp_positions)))
sequence_deletions = {}
logger.info("Scanning alignment for indels")
for seq_id in input_alignment:
sequence_deletions[seq_id] = find_internal_gaps(input_alignment[seq_id])
variant_positions = []
for pos in snp_positions:
variant_positions.append([pos,pos])
unique_indels = {}
for sid in sequence_deletions:
for i in sequence_deletions[sid]:
unique_indels[i] = ''
logger.info("Found {} potential indels".format(len(unique_indels)))
for indel in unique_indels:
(start,end) = indel.split(':')
variant_positions.append([int(start),int(end)])
logger.info("Creating scheme based on identified SNPs and indels")
scheme = construct_scheme(variant_positions, ref_id, input_alignment, min_len, max_len,n_threads)
logger.info("Performing QA on selected k-mers")
scheme = qa_scheme(scheme, input_alignment, ref_id, genotype_mapping,min_len=min_len, max_len=max_len,min_complexity=min_complexity)
logger.info("Adding gene annotations")
scheme = add_gene_inference(scheme, ref_id, ref_features, input_alignment)
#remove samples where genotype mapping info is missing
valid_samples = set(list(genotype_mapping.keys()))
unique_index = 0
for mutation_key in scheme:
for state in scheme[mutation_key]:
for record in scheme[mutation_key][state]:
record['seq_ids'] = list( set(record['seq_ids']) & valid_samples )
record['key'] = unique_index
unique_index+=1
scheme = format_scheme_human_readable(scheme, ref_id, input_alignment)
#get the kmer profile for each sample
logger.info("Building genotype kmer profiles")
kmer_profile = build_kmer_profiles(list(genotype_mapping.keys()),scheme)
# create a plot of sample similarity for a multi-sample run
if len(kmer_profile ) > 1:
logger.info("Plotting Sample dendrogram")
labels = []
for sample in kmer_profile:
if sample in genotype_mapping:
genotype = genotype_mapping[sample]
else:
genotype = 'n/a'
labels.append("{} | {}".format(sample,genotype))
d = dendrogram_visualization()
kmer_content_profile_df = pd.DataFrame.from_dict(kmer_profile,orient='index')
d.build_dendrogram(labels, kmer_content_profile_df,
genotype_dendrogram)
#identify genotype shared kmers
logger.info("Identifying shared kmers by genotype")
shared_kmers = identify_shared_kmers(genotype_mapping,scheme,min_thresh=0.5,max_thresh=1)
positive_kmers = identify_shared_kmers(genotype_mapping,scheme,min_thresh=0.95,max_thresh=1)
partial_positive_kmers = identify_shared_kmers(genotype_mapping, scheme, min_thresh=0.01, max_thresh=0.94999)
#identify shared mutations
shared_mutations = identify_shared_mutations(genotype_mapping, scheme, min_thresh=0.5, max_thresh=1)
positive_mutations = identify_shared_mutations(genotype_mapping, scheme, min_thresh=0.95,max_thresh=1)
partial_positive_mutations = identify_shared_mutations(genotype_mapping, scheme, min_thresh=0.01, max_thresh=0.94999)
#reorder datastructure to add diagnostic information to the scheme
kmer_geno_assoc = {}
for genotype in shared_kmers:
for uid in shared_kmers[genotype]:
if not uid in kmer_geno_assoc:
kmer_geno_assoc[uid] = {'positive':[],'partial':[],'shared':[],'diagnostic':[]}
kmer_geno_assoc[uid]['shared'].append(genotype)
for uid in positive_kmers[genotype]:
if not uid in kmer_geno_assoc:
kmer_geno_assoc[uid] = {'positive':[],'partial':[],'shared':[],'diagnostic':[]}
kmer_geno_assoc[uid]['positive'].append(genotype)
for uid in partial_positive_kmers[genotype]:
if not uid in kmer_geno_assoc:
kmer_geno_assoc[uid] = {'positive':[],'partial':[],'shared':[],'diagnostic':[]}
kmer_geno_assoc[uid]['partial'].append(genotype)
uid_index = {}
#Add positive and partial groupings to scheme
for mutation_key in scheme:
for state in scheme[mutation_key]:
for row in scheme[mutation_key][state]:
uid = row['key']
uid_index[uid]
self.silver_name,
"typeclass": "typeclasses.dropped_silver.Dropped_Silver",
"aliases": ["money", "coins"],
"desc": "Silver sovereigns have the likeness of a mighty lion engraved upon their surface on one side and the Tower of Corinth on the other.",
"location": caller.location,
"coins_value": self.silver_amount_int}
if self.silver_amount:
if caller.db.silver_carried < self.silver_amount_int:
caller.msg("You aren't carrying that much silver.")
return
else:
string = "You drop {:,} silver.".format(self.silver_amount_int)
caller.msg(string)
string = "{} drops {:,} silver.".format(caller.name, self.silver_amount_int)
caller.location.msg_contents(string, exclude=caller)
caller.db.silver_carried = caller.db.silver_carried - self.silver_amount_int
new_object = spawn(dropped_silver, location=caller.location)
return
obj = caller.search(self.args, location=caller,
nofound_string="You aren't carrying %s." % self.args,
multimatch_string="You carry more than one %s:" % self.args)
if not obj:
caller.msg("Drop what?")
return
if obj in caller.db.wielding:
caller.msg("You have to stop wielding it before you can drop it.")
return
if obj in caller.db.clothes_objects:
caller.msg("You have to remove it before you can drop it!")
return
if obj in caller.db.girdle:
caller.msg("You have to detach it from your girdle before you can drop it.")
return
if obj.db.birth_time:
if obj.db.birth_time + obj.db.duration < time.time():
caller.msg("%s crumbles to dust when you touch it! Just how old was that thing?" % str.capitalize(str(obj.name)))
obj.delete()
return
obj.move_to(caller.location, quiet=True)
caller.msg("You drop %s." % (obj.name,))
caller.location.msg_contents("%s drops %s." %
(caller.name, obj.name),
exclude=caller)
# Call the object script's at_drop() method.
obj.at_drop(caller)
class CmdGive(COMMAND_DEFAULT_CLASS):
"""
Give an item or some silver to someone.
Usage:
give <item> to <target>
give <#> silver to <target>
"""
key = "give"
aliases = ["lend"]
locks = "cmd:all()"
arg_regex = r"\s|$"
def parse(self):
"Not-so-trivial parser!"
raw = self.args
args = raw.strip()
switches = []
silver_amount = False
if args and len(args) > 1 and args[0] == "/":
# we have a switch, or a set of switches. These end with a space.
switches = args[1:].split(None, 1)
if len(switches) > 1:
switches, args = switches
switches = switches.split('/')
else:
args = ""
switches = switches[0].split('/')
arglist = [arg.strip() for arg in args.split()]
# check for arg1, arg2, ... = argA, argB, ... constructs
lhs, rhs = args, None
lhslist, rhslist = [arg.strip() for arg in args.split(',')], []
if args and ' silver to ' in args:
lhs, rhs = [arg.strip() for arg in args.split(' to ', 1)]
lhslist = [arg.strip() for arg in lhs.split(',')]
rhslist = [arg.strip() for arg in rhs.split(',')]
silver_amount = 10
elif args and ' to ' in args:
lhs, rhs = [arg.strip() for arg in args.split(' to ', 1)]
lhslist = [arg.strip() for arg in lhs.split(',')]
rhslist = [arg.strip() for arg in rhs.split(',')]
# save to object properties:
self.raw = raw
self.switches = switches
self.args = args.strip()
self.arglist = arglist
self.lhs = lhs
self.lhslist = lhslist
self.rhs = rhs
self.rhslist = rhslist
self.silver_amount = silver_amount
def func(self):
"""Implement give"""
caller = self.caller
if self.silver_amount:
if not self.args or not self.rhs:
caller.msg("Usage: give <item> to <target>")
return
target = caller.search(self.rhs)
if not target:
caller.msg("Give how much silver to who?")
return
if target == caller:
caller.msg("You decide not to give away your silver after all.")
return
# Start extra silver parsing here!
lhs1 = self.lhs[:-7]
lhs2 = "silver"
silver_amount = int(lhs1)
if caller.db.silver_carried < silver_amount:
caller.msg("You are not carrying that much silver.")
return
elif not utils.inherits_from(target, "typeclasses.characters.Character"):
caller.msg("You can't give silver to that.")
return
else:
#string = "You are carrying {:,} silver sovereigns.".format(caller.db.silver_carried)
string = "You give {:,} silver to {}.".format(silver_amount, target.name)
caller.msg(string)
string = "{} gives you {:,} silver sovereigns.".format(caller.name, silver_amount)
target.msg(string)
emit_string = ("%s gives %s some silver sovereigns." % (caller.name, target.name))
caller.location.msg_contents(emit_string, exclude=(caller, target), from_obj=caller)
caller.db.silver_carried = caller.db.silver_carried - silver_amount
target.db.silver_carried = target.db.silver_carried + silver_amount
return
if not self.args or not self.rhs:
caller.msg("Usage: give <item> to <target>")
return
to_give = caller.search(self.lhs, location=caller,
nofound_string="You aren't carrying %s." % self.lhs,
multimatch_string="You carry more than one %s:" % self.lhs)
target = caller.search(self.rhs)
if not (to_give and target):
caller.msg("Give what to who?")
return
if target == caller:
caller.msg("You keep %s to yourself." % to_give.key)
return
if not to_give.location == caller:
caller.msg("You are not holding %s." % to_give.key)
return
if to_give in caller.db.wielding:
caller.msg("You have to stop wielding it before you can give it away.")
return
if to_give in caller.db.clothes_objects:
caller.msg("You have to remove it before you can give it away!")
return
if to_give in caller.db.girdle:
caller.msg("You have to detach it from your girdle before you can give it away.")
return
if utils.inherits_from(target, "typeclasses.npcs.Combat_Merchant_Mob"):
caller.msg("%s hands %s back to you.\n|c%s says, \"I trade in %s. If you have any you'd like to |wsell|c to me, I'm interested.\"" % (target.name, to_give, target.name, target.db.trade_item))
return
if not utils.inherits_from(target, "typeclasses.characters.Character"):
caller.msg("You can't give %s to that." % to_give)
return
# give object
caller.msg("You give %s to %s." % (to_give.key, target.key))
to_give.move_to(target, quiet=True)
target.msg("%s gives you %s." % (caller.key, to_give.key))
emit_string = ("%s gives %s %s." % (caller.name, target.name, to_give.key))
caller.location.msg_contents(emit_string, exclude=(caller, target), from_obj=caller)
# Call the object script's at_give() method.
to_give.at_give(caller, target)
class CmdDesc(COMMAND_DEFAULT_CLASS):
"""
describe yourself
Usage:
desc <description>
Add a description to yourself. This
will be visible to people when they
look at you.
"""
key = "desc"
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"""add the description"""
if not self.args:
self.caller.msg("You must add a description.")
return
self.caller.db.desc = self.args.strip()
self.caller.msg("You set your description.")
class CmdSay(COMMAND_DEFAULT_CLASS):
"""
Speak as your character.
Usage:
say message
\"Great!
"""
key = "say"
aliases = ['"', "'"]
locks = "cmd:all()"
def func(self):
"""Run the say command"""
caller = self.caller
if not self.args:
caller.msg("Say what?")
return
#WHN: Cleaning up the presentation.
speech = self.args
speech = speech.lstrip()
speech = speech[0].upper() + speech[1:]
speech_type = "say"
if not speech.endswith(".") and not speech.endswith("!") and not speech.endswith("?"):
speech = speech + "."
if speech.endswith("!"):
speech_type = "exclaim"
elif speech.endswith("?"):
speech_type = "ask"
# calling the speech hook on the location
speech = caller.location.at_say(caller, speech)
# Feedback for the object doing the talking.
caller.msg('|cYou %s, "%s|c"|n' % (speech_type, speech))
# Build the string to emit to neighbors.
emit_string = '|c%s %ss, "%s|c"|n' % (caller.name, speech_type, speech)
caller.location.msg_contents(emit_string, exclude=caller, from_obj=caller)
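# --- Hedged illustration (not part of the original file) ---
# Standalone mirror of the normalization CmdSay.func applies before echoing speech
# (capitalize, ensure terminal punctuation, classify say/exclaim/ask). It omits the
# room's at_say hook and is for clarity only.
def _example_normalize_speech(raw):
    speech = raw.lstrip()
    speech = speech[0].upper() + speech[1:]
    if not speech.endswith((".", "!", "?")):
        speech = speech + "."
    speech_type = "say"
    if speech.endswith("!"):
        speech_type = "exclaim"
    elif speech.endswith("?"):
        speech_type = "ask"
    return speech_type, speech  # e.g. ('ask', 'Really?') for input 'really?'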
class CmdWhisper(COMMAND_DEFAULT_CLASS):
"""
Speak privately as your character to another
Usage:
whisper <player> = <message>
Talk privately to those in your current location, without
others being informed.
"""
key = "whisper"
locks = "cmd:all()"
def func(self):
"""Run the whisper command"""
caller = self.caller
if not self.lhs or not self.rhs:
caller.msg("Usage: whisper <player> = <message>")
return
receiver = caller.search(self.lhs)
if not receiver:
return
if caller == receiver:
caller.msg("You can't whisper to yourself.")
return
speech = self.rhs
speech = speech.lstrip()
speech = speech[0].upper() + speech[1:]
if not speech.endswith(".") and not speech.endswith("!") and not speech.endswith("?"):
speech = speech + "."
# Feedback for the object doing the talking.
caller.msg('You whisper to %s, "%s|n"' % (receiver.key, speech))
# Build the string to emit to receiver.
emit_string = '%s whispers, "%s|n"' % (caller.name, speech)
receiver.msg(text=(emit_string, {"type": "whisper"}), from_obj=caller)
class CmdPose(COMMAND_DEFAULT_CLASS):
"""
strike a pose
Usage:
pose <pose text>
pose's <pose text>
Example:
pose is standing by the wall, smiling.
-> others will see:
Tom is standing by the wall, smiling.
Describe an action being taken. The pose text will
automatically begin with your name.
"""
key = "pose"
aliases = [":", "emote"]
locks = "cmd:all()"
def parse(self):
"""
Custom parse the cases where the emote
starts with some special letter, such
as 's, at which we don't want to separate
the caller's name and the emote with a
space.
"""
args = self.args
if args and not args[0] in ["'", ",", ":"]:
args = " %s" % args.strip()
self.args = args
def func(self):
"""Hook function"""
if not self.args:
msg = "What do you want to do?"
self.caller.msg(msg)
else:
msg = "%s%s" % (self.caller.name, self.args)
self.caller.location.msg_contents(text=(msg, {"type": "pose"}),
from_obj=self.caller)
class CmdAccess(COMMAND_DEFAULT_CLASS):
"""
show your current game access
Usage:
access
This command shows you the permission hierarchy and
which permission groups you are a member of.
"""
key = "access"
aliases = ["groups", "hierarchy"]
locks = "cmd:pperm(PlayerHelpers)"
arg_regex = r"$"
def func(self):
"""Load the permission groups"""
caller = self.caller
hierarchy_full = settings.PERMISSION_HIERARCHY
string = "\n|wPermission | |
import csv
import json
import pickle
from difflib import SequenceMatcher as SeqMat
import django_rq
import pandas as pd
import tablib
from django import db
from django.conf import settings
from django.core.management import call_command
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django_tables2.views import RequestConfig
from simple_salesforce import Salesforce
from dedupper.forms import UploadFileForm
from dedupper.tables import SFContactTable, RepContactTable
from dedupper.utils import *
tablib.formats.json.json = json
sf_prog =rep_prog=progress_num = 0
keys = list()
store=name_sort=address_sort=email_sort=crd_sort=phone_sort=average_sort=key_sort=True
db_job=rep_df = None
UPLOAD_JOB_ID = '79243664'
DUPLIFY_JOB_ID = '36647924'
dedupe_q = django_rq.get_queue('high', autocommit=True, is_async=True)
def display(request):
print('render datatables from dataframes')
reps_df = get_contacts('reps')
new_flag = reps_df.Id == ''
manual_flag = reps_df.Id == 'manual'
news = reps_df[new_flag]
manuals = reps_df[manual_flag]
dupes = reps_df[~(new_flag | manual_flag)]  # pandas masks need ~ and | rather than not/or
print(f'news: {len(news)}\nmanuals: {len(manuals)}\ndupes: {len(dupes)}')
print(f'news: {type(news)}\nmanuals: {type(manuals)}\ndupes: {type(dupes)}')
# now I have
return render(request, 'dedupper/data-table.html')
def closest(request):
#function gets the SFContactTable for each of the closest matches
if request.method == 'GET':
id = request.GET.get('id')
close_id1 = request.GET.get('close1')
close_id2 = request.GET.get('close2')
close_id3 = request.GET.get('close3')
html_table1=html_table2=html_table3=''
#check if id exists, if so: render html table of the contact
if close_id1 != '':
table1 = SFContactTable( sfcontact.objects.filter(pk=close_id1))
html_table1 = table1.as_html(request)
if close_id2 != '':
table2 = SFContactTable( sfcontact.objects.filter(pk=close_id2))
html_table2 = table2.as_html(request)
if close_id3 != '':
table3 = SFContactTable( sfcontact.objects.filter(pk=close_id3))
html_table3 = table3.as_html(request)
return JsonResponse({ 'table1': html_table1, 'table2':html_table2, 'table3': html_table3}, safe=False)
# return JsonResponse({'rep-table': 'tits'})
def turn_table(request):
#create a new RepContactTable
#function that returns a table of contact types sorted by a user input field
if request.method == 'GET':
type = request.GET.get('type')
sort = request.GET.get('sorting')
#switch statement on sort field
if sort =='name':
#booleans for each sorting field to toggle between ascending and descending
if globals()['name_sort']:
globals()['name_sort'] = False
table = RepContactTable(repContact.objects.filter(type=type).order_by('lastName', 'firstName'))
else:
globals()['name_sort'] = True
table = RepContactTable(repContact.objects.filter(type=type).order_by('-lastName', '-firstName'))
elif sort =='email':
if globals()['email_sort']:
globals()['email_sort'] = False
table = RepContactTable(repContact.objects.filter(type=type).order_by('workEmail'))
else:
globals()['email_sort'] = True
table = RepContactTable(repContact.objects.filter(type=type).order_by('-workEmail'))
elif sort =='phone':
if globals()['phone_sort']:
globals()['phone_sort'] = False
table = RepContactTable(repContact.objects.filter(type=type).order_by('Phone'))
else:
globals()['phone_sort'] = True
table = RepContactTable(repContact.objects.filter(type=type).order_by('-Phone'))
elif sort =='address':
if globals()['address_sort']:
globals()['address_sort'] = False
table = RepContactTable(repContact.objects.filter(type=type).order_by('mailingStateProvince', 'mailingCity'))
else:
globals()['address_sort'] = True
table = RepContactTable(repContact.objects.filter(type=type).order_by('-mailingStateProvince', '-mailingCity'))
elif sort =='average':
if globals()['average_sort']:
globals()['average_sort'] = False
table = RepContactTable(repContact.objects.filter(type=type).order_by('average'))
else:
globals()['average_sort'] = True
table = RepContactTable(repContact.objects.filter(type=type).order_by('-average'))
elif sort =='keySortedBy':
if globals()['key_sort']:
globals()['key_sort'] = False
table = RepContactTable(repContact.objects.filter(type=type).order_by('keySortedBy'))
else:
globals()['key_sort'] = True
table = RepContactTable(repContact.objects.filter(type=type).order_by('-keySortedBy'))
elif sort =='CRD':
if globals()['crd_sort']:
globals()['crd_sort'] = False
table = RepContactTable(repContact.objects.filter(type=type).order_by('CRD'))
else:
globals()['crd_sort'] = True
table = RepContactTable(repContact.objects.filter(type=type).order_by('-CRD'))
else:
table = RepContactTable(repContact.objects.filter(type=type))
config = RequestConfig(request, paginate={'per_page': 250})
config.configure(table)
print('sending table')
return JsonResponse({ 'table': table.as_html(request) }, safe=False)
def download(request,type):
#csv headers
fields = ('id','CRD', 'First', 'Last', 'Street', 'City',
'State', 'Zip', 'Phone', 'Home Phone', 'Mobile Phone',
'Other Phone', 'Work Email', 'Personal Email', 'Other Email', 'Match Score', 'Key' )
#flag to remove contactIDs from new records
no_id = None
#name of uploaded rep list
if 'repCSV_name' in request.session:
repCSV_name = request.session['repCSV_name'].replace('.csv','')
#name the csv
if(type == "Duplicate"):
filename = f'filename="{repCSV_name} (Duplicates).csv"'
elif(type == "NewRecord"):
filename = f'filename= "{repCSV_name} (New Records).csv"'
type = 'New Record'
no_id = '.'
elif(type == "ManualCheck"):
filename = f'filename="{repCSV_name} (Manual Checks).csv"'
type = 'Manual Check'
else:
filename = f'filename="{repCSV_name} (Undecided Records).csv"'
rep_resource = RepContactResource()
users = repContact.objects.filter(type = type)
dataset = rep_resource.export(users)
db_df = pd.read_json(dataset.json)
#parse the misc field back into their respective fields
misc_df = db_df['misc'].astype(str).str.split('-!-', expand=True)
db_df=db_df.drop('misc', axis=1)
f = list(db_df[['average', 'keySortedBy', 'closest1_contactID']])
with open(settings.REP_CSV, 'rb') as file:
pd_rep_csv = pickle.load(file)
print('pickle load reps')
fields = f + list(pd_rep_csv)
fields[fields.index('closest1_contactID')] = 'ContactID'
frames=[db_df[['average', 'keySortedBy', 'closest1_contactID']], misc_df]
export = pd.concat(frames, axis=1)
export.columns = fields
if no_id:
del export['ContactID']
fields.remove('ContactID')
export.replace('nan', '', inplace=True)
dataset.csv = export.to_csv(index=False)
#create response
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; '+filename
#create csv writer for the response and write the
writer = csv.writer(response)
writer.writerow(fields)
for line in dataset:
writer.writerow(line)
return response
def download_times(request,type):
if(type == "DD"):
filename = 'filename="Dedup Times.csv"'
times = dedupTime.objects.all().values_list()
export_headers = [i.name for i in dedupTime._meta.local_concrete_fields]
elif(type == "D"):
filename = 'filename="Duplify Times.csv"'
times = duplifyTime.objects.all().values_list()
export_headers = [i.name for i in duplifyTime._meta.local_concrete_fields]
elif(type == "A"):
if 'repCSV_name' in request.session:
repCSV_name = request.session['repCSV_name']
else:
repCSV_name = 'a Rep list'
if 'sfCSV_name' in request.session:
sfCSV_name = request.session['sfCSV_name']
else:
sfCSV_name = 'a Salesforce Channel'
total_reps = repContact.objects.all().count()
total_sf = sfcontact.objects.all().count()
total_dups = repContact.objects.filter(type='Duplicate').count()
percent_dups = round((total_dups/total_reps)*100,1)
total_news = repContact.objects.filter(type='New Record').count()
percent_news = round((total_news/total_reps)*100,1)
time_hours = round(((duplifyTime.objects.get(pk=1).seconds/60)/60),2)
audit_info = []
audit_info.append([f"Duplify Audit of {repCSV_name} duped against {sfCSV_name}"])
audit_info.append([""])
audit_info.append([f"Number of Records in Rep List: {total_reps} \t Number of Records in {sfCSV_name[2:]}: " +
f"{total_sf}"])
audit_info.append([f"Number of Duplicate Records in the Rep List: {total_dups}({percent_dups}%)"])
audit_info.append([f"Number of New Records in the Rep List: {total_news}({percent_news}%)"])
audit_info.append([f"Time: {time_hours} hours"])
audit_info.append([""])
audit_info.append(["Thank you for using Duplify!"])
filename = 'filename="Audit.txt"'
response = HttpResponse(content_type='text/text')
response['Content-Disposition'] = 'attachment; ' + filename
writer = csv.writer(response, delimiter='\n')
writer.writerows(audit_info)
return response
else:
filename = 'filename="Upload Times.csv"'
times = uploadTime.objects.all().values_list()
export_headers = [i.name for i in uploadTime._meta.local_concrete_fields]
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; '+filename
writer = csv.writer(response)
writer.writerow(export_headers)
for time in times:
writer.writerow(time)
return response
def flush_db(request):
call_command('flush', interactive=False)
return redirect('/map')
def import_csv(request):
global db_job, progress_num
channel = request.GET.get('channel') # sf channel to pull from db
rep_header_map = request.GET.get('rep_map') # the JSON of csv headers mapped to db fields
rep_header_map = json.loads(rep_header_map) # JSON -> dict()
print(rep_header_map)
request.session['prog_num']= progress.objects.all().count()
request.session['sf_channel'] = f'the {channel} channel' # for printing
request.session['fields'] = list(rep_header_map.values())
request.session['channel'] = channel
request.session['map'] = rep_header_map
request.session['misc'] = list(rep_header_map.keys())
# the csv headers are stored to be used for exporting
# get_channel queries the channel and loads the rep list and sf contacts
# newest = dedupe_q.enqueue(get_channel, db_data, job_id=UPLOAD_JOB_ID, timeout='1h', result_ttl='1h')
# request.session['rq_job'] = UPLOAD_JOB_ID
return JsonResponse({'msg': 'success!'}, safe=False)
def index(request):
return render(request, 'dedupper/login.html')
def upload_page(request):
return render(request, 'dedupper/rep_list_upload.html')
def key_gen(request):
if 'fields' in request.session:
fields = request.session['fields']
else:
fields = ['ERROR: Reset and Upload Reps']
return render(request, 'dedupper/key_generator.html', {'keys': fields})
def login(request):
u = request.GET.get('username')
p = request.GET.get('password')
try:
sf = Salesforce(password='<PASSWORD>', username='<EMAIL>', security_token='<PASSWORD>')
msg= 'success'
#store u & p in session, create function called login_check that makes sure a username is in the session
# else, redirect to /
except:
msg = 'failure'
return JsonResponse({'msg': msg}, safe=False)
def map(request):
return render(request, 'dedupper/field_mapping.html',
)
def merge(request, id):
obj = repContact.objects.values().get(id=id)
ids = [obj['closest1_contactID'], obj['closest2_contactID'], obj['closest3_contactID']]
objs = sfcontact.objects.values().filter(ContactID__in=ids)
fields = [i.name for i in repContact._meta.local_fields]
mergers = list()
for i in range(len(objs)):
if objs[i]['ContactID'] == obj['closest1_contactID']:
del objs[i]['closest_rep_id'], objs[i]['dupFlag'], objs[i]['ContactID']
mergers.insert(0, list(objs[i].values()))
elif objs[i]['ContactID'] == obj['closest2_contactID']:
del objs[i]['closest_rep_id'], objs[i]['dupFlag'], objs[i]['ContactID']
mergers.insert(len(mergers), list(objs[i].values()))
else:
del objs[i]['closest_rep_id'], objs[i]['dupFlag'], objs[i]['ContactID']
mergers.insert(-1, list(objs[i].values()))
del obj['closest1_id'], obj['closest2_id'], obj['closest3_id'], obj['closest1_contactID'], obj['closest2_contactID'], obj['closest3_contactID'], obj['type'], obj['dupFlag'], obj['average']
obj = list(obj.values())
if len(mergers) == 3:
obj_map = {i:j for i,j in zip(fields, list(zip(obj,mergers[0],mergers[1],mergers[2])) ) }
elif len(mergers) == 2:
obj_map = {i:j for i,j in zip(fields, list(zip(obj,mergers[0],mergers[1])) ) }
elif len(mergers) == 1:
obj_map = {i:j for i,j in zip(fields, list(zip(obj,mergers[0])) ) }
return render(request, 'dedupper/sort_view.html', {'objs' : obj_map})
def dup_progress(request):
if request.method == 'GET':
print(f'ENTER dup_progress: {progress.objects.latest().completed_keys}')
if progress.objects.latest().completed_keys > 0:
return JsonResponse({'done': 0.1, 'esti': 10}, safe=False)
else:
print('DEDUPING DONE')
return JsonResponse({'done': 1000, 'esti': 10}, safe=False)
def resort(request):
if request.method == 'GET':
print('resorting')
set_sorting_algorithm(int(request.GET.get('upper_thres')), int(request.GET.get('lower_thres')))
return JsonResponse({'msg': 'success!'}, safe=False)
def contact_sort(request):
if request.method == 'GET':
data = request.GET.getlist("data[]")
rep = repContact.objects.get(pk=data[1])
if data[0] == 'Duplicate':
print(f"old order: {rep.closest1}, {rep.closest2}, {rep.closest3}")
rep.type = 'Duplicate'
if rep.closest3 and int(data[2]) == rep.closest3.id:
print('moving 3rd closest to 1st')
rep.closest1, rep.closest2, rep.closest3 = rep.closest3, rep.closest1, rep.closest2
rep.closest1_contactID, rep.closest2_contactID, rep.closest3_contactID = rep.closest3_contactID, rep.closest1_contactID, rep.closest2_contactID
elif rep.closest2 and int(data[2]) == rep.closest2.id:
print('moving 2nd closest to 1st')
rep.closest1, rep.closest2 = rep.closest2, rep.closest1
rep.closest1_contactID, rep.closest2_contactID =rep.closest2_contactID, rep.closest1_contactID
print(f"new order: {rep.closest1}, {rep.closest2}, {rep.closest3}")
elif data[0] == 'New Record':
rep.type = 'New Record'
rep.closest1_contactID = ''
rep.save()
return JsonResponse({'msg': 'success!'}, safe=False)
def run(request):
global keys
if request.method == 'GET':
db.connections.close_all()
keylist = request.GET.get('keys')
#channel = request.GET.get('channel')
keylist = keylist.split("~")
partslist = [i.split('-') for i in keylist[:-1]]
keys=partslist
p = progress.objects.latest()
p.total_keys = len(partslist)
request.session['indicator'] = len(partslist)
p.save()
db.connections.close_all()
data= {
'channel': request.session['channel'],
'map': request.session['map'],
'keys' : partslist,
'reps': pd.read_pickle(settings.REP_CSV),
}
newest = dedupe_q.enqueue(key_generator, data, job_id=DUPLIFY_JOB_ID, timeout='1h', result_ttl='1h')
return JsonResponse({'msg': 'success!'}, safe=False)
def upload(request):
global export_headers, keys, pd_df
print('uploading file')
form = UploadFileForm(request.POST, request.FILES)
repCSV
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates C++ source files from a mojom.Module."""
import datetime
import mojom
import mojom_generator
import mojom_pack
import os
import re
import sys
from string import Template
# mojom_cpp_generator provides a way to generate c++ code from a mojom.Module.
# cpp = mojom_cpp_generator.CPPGenerator(module)
# cpp.GenerateFiles("/tmp/g")
class Lines(object):
"""Helper class to maintain list of template expanded lines."""
def __init__(self, template, indent = None):
self.template = template
self.indent = indent
self.lines = []
def Add(self, map = {}, **substitutions):
if len(substitutions) > 0:
map = map.copy()
map.update(substitutions)
self.lines.append(self.template.substitute(map))
def __repr__(self):
if self.indent is not None:
prefix = "".ljust(self.indent, ' ')
repr = '\n'.join(self.lines)
self.lines = map(lambda l: prefix + l, repr.splitlines())
return '\n'.join(self.lines)
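# --- Hedged usage sketch (not part of the original generator) ---
# Shows how the Lines helper accumulates template expansions; the template text and
# substitutions are illustrative only.
def _ExampleLinesUsage():
  acc = Lines(Template("  $TYPE ${NAME}_;"), indent=2)
  acc.Add(TYPE="int32_t", NAME="width")
  acc.Add(TYPE="int32_t", NAME="height")
  # repr() prefixes each expanded line with the requested indent.
  return repr(acc)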
class CPPGenerator(mojom_generator.Generator):
struct_serialization_compute_template = \
Template(" +\n mojo::internal::ComputeSizeOf($NAME->$FIELD())")
struct_serialization_clone_template = Template(
" clone->set_$FIELD(mojo::internal::Clone($NAME->$FIELD(), buf));")
struct_serialization_handle_release_template = Template(
" mojo::internal::ResetIfNonNull($NAME->$FIELD());")
struct_serialization_encode_template = Template(
" Encode(&$NAME->${FIELD}_, handles);")
struct_serialization_encode_handle_template = Template(
" EncodeHandle(&$NAME->${FIELD}_, handles);")
struct_serialization_decode_template = Template(
" if (!Decode(&$NAME->${FIELD}_, message))\n"
" return false;")
struct_serialization_decode_handle_template = Template(
" if (!DecodeHandle(&$NAME->${FIELD}_, &message->handles))\n"
" return false;")
struct_destructor_body_template = Template(
"(void) mojo::MakePassable(data->$FIELD()).Pass();")
close_handles_template = Template(
" mojo::internal::CloseHandles($NAME->${FIELD}_.ptr);")
param_set_template = Template(
" params->set_$NAME($NAME);")
param_handle_set_template = Template(
" params->set_$NAME($NAME.release());")
param_struct_set_template = Template(
" params->set_$NAME(\n"
" mojo::internal::Clone(mojo::internal::Unwrap($NAME),\n"
" builder.buffer()));")
param_struct_compute_template = Template(
" payload_size += mojo::internal::ComputeSizeOf(\n"
" mojo::internal::Unwrap($NAME));")
field_template = Template(" $TYPE ${FIELD}_;")
bool_field_template = Template(" uint8_t ${FIELD}_ : 1;")
handle_field_template = Template(" mutable $TYPE ${FIELD}_;")
setter_template = \
Template(" void set_$FIELD($TYPE $FIELD) { ${FIELD}_ = $FIELD; }")
ptr_setter_template = \
Template(" void set_$FIELD($TYPE $FIELD) { ${FIELD}_.ptr = $FIELD; }")
getter_template = \
Template(" $TYPE $FIELD() const { return ${FIELD}_; }")
ptr_getter_template = \
Template(" const $TYPE $FIELD() const { return ${FIELD}_.ptr; }")
handle_getter_template = \
Template(" $TYPE* $FIELD() const { return &${FIELD}_; }")
enum_field_template = Template(" $NAME$EQUALS$VALUE,")
wrapper_setter_template = \
Template(" void set_$FIELD($TYPE $FIELD) { " \
"data_->set_$FIELD($FIELD); }")
wrapper_obj_setter_template = \
Template(" void set_$FIELD($TYPE $FIELD) { " \
"data_->set_$FIELD(mojo::internal::Unwrap($FIELD)); }")
wrapper_handle_setter_template = \
Template(" void set_$FIELD($TYPE $FIELD) { " \
"data_->set_$FIELD($FIELD.release()); }")
wrapper_getter_template = \
Template(" $TYPE $FIELD() const { return data_->$FIELD(); }")
wrapper_obj_getter_template = \
Template(" const $TYPE $FIELD() const { " \
"return mojo::internal::Wrap(data_->$FIELD()); }")
wrapper_handle_getter_template = \
Template(" $TYPE $FIELD() const { " \
"return mojo::MakePassable(data_->$FIELD()); }")
pad_template = Template(" uint8_t _pad${COUNT}_[$PAD];")
templates = {}
HEADER_SIZE = 8
kind_to_type = {
mojom.BOOL: "bool",
mojom.INT8: "int8_t",
mojom.UINT8: "uint8_t",
mojom.INT16: "int16_t",
mojom.UINT16: "uint16_t",
mojom.INT32: "int32_t",
mojom.UINT32: "uint32_t",
mojom.FLOAT: "float",
mojom.HANDLE: "mojo::Handle",
mojom.MSGPIPE: "mojo::MessagePipeHandle",
mojom.INT64: "int64_t",
mojom.UINT64: "uint64_t",
mojom.DOUBLE: "double",
}
@classmethod
def GetTemplate(cls, template_name):
if template_name not in cls.templates:
dir = os.path.dirname(__file__)
filename = os.path.join(dir, 'cpp_templates', template_name)
filename = filename.replace('.h', '.h-template')
filename = filename.replace('.cc', '.cc-template')
with open(filename, 'r') as file:
template = Template(file.read())
cls.templates[template_name] = template
return cls.templates[template_name]
@classmethod
def GetType(cls, kind):
if isinstance(kind, mojom.Struct):
return "%s_Data*" % kind.name
if isinstance(kind, mojom.Array):
return "mojo::internal::Array_Data<%s>*" % cls.GetType(kind.kind)
if kind.spec == 's':
return "mojo::internal::String_Data*"
return cls.kind_to_type[kind]
@classmethod
def GetConstType(cls, kind):
if isinstance(kind, mojom.Struct):
return "const %s_Data*" % kind.name
if isinstance(kind, mojom.Array):
return "const mojo::internal::Array_Data<%s>*" % \
cls.GetConstType(kind.kind)
if kind.spec == 's':
return "const mojo::internal::String_Data*"
return cls.kind_to_type[kind]
@classmethod
def GetGetterLine(cls, field):
subs = {'FIELD': field.name, 'TYPE': cls.GetType(field.kind)}
if mojom_generator.IsObjectKind(field.kind):
return cls.ptr_getter_template.substitute(subs)
elif mojom_generator.IsHandleKind(field.kind):
return cls.handle_getter_template.substitute(subs)
else:
return cls.getter_template.substitute(subs)
@classmethod
def GetSetterLine(cls, field):
subs = {'FIELD': field.name, 'TYPE': cls.GetType(field.kind)}
if mojom_generator.IsObjectKind(field.kind):
return cls.ptr_setter_template.substitute(subs)
else:
return cls.setter_template.substitute(subs)
@classmethod
def GetWrapperType(cls, kind):
if isinstance(kind, mojom.Struct):
return "%s" % kind.name
if isinstance(kind, mojom.Array):
return "mojo::Array<%s >" % cls.GetWrapperType(kind.kind)
if kind.spec == 's':
return "mojo::String"
if kind.spec == 'h':
return "mojo::Passable<mojo::Handle>"
if kind.spec == 'h:m':
return "mojo::Passable<mojo::MessagePipeHandle>"
return cls.kind_to_type[kind]
@classmethod
def GetConstWrapperType(cls, kind):
if isinstance(kind, mojom.Struct):
return "const %s&" % kind.name
if isinstance(kind, mojom.Array):
return "const mojo::Array<%s >&" % cls.GetWrapperType(kind.kind)
if kind.spec == 's':
return "const mojo::String&"
if kind.spec == 'h':
return "mojo::ScopedHandle"
if kind.spec == 'h:m':
return "mojo::ScopedMessagePipeHandle"
return cls.kind_to_type[kind]
@classmethod
def GetWrapperGetterLine(cls, field):
subs = {'FIELD': field.name, 'TYPE': cls.GetWrapperType(field.kind)}
if mojom_generator.IsObjectKind(field.kind):
return cls.wrapper_obj_getter_template.substitute(subs)
elif mojom_generator.IsHandleKind(field.kind):
return cls.wrapper_handle_getter_template.substitute(subs)
else:
return cls.wrapper_getter_template.substitute(subs)
@classmethod
def GetWrapperSetterLine(cls, field):
subs = {'FIELD': field.name, 'TYPE': cls.GetConstWrapperType(field.kind)}
if mojom_generator.IsObjectKind(field.kind):
return cls.wrapper_obj_setter_template.substitute(subs)
elif mojom_generator.IsHandleKind(field.kind):
return cls.wrapper_handle_setter_template.substitute(subs)
else:
return cls.wrapper_setter_template.substitute(subs)
@classmethod
def GetFieldLine(cls, field):
kind = field.kind
if kind.spec == 'b':
return cls.bool_field_template.substitute(FIELD=field.name)
if mojom_generator.IsHandleKind(kind):
return cls.handle_field_template.substitute(FIELD=field.name,
TYPE=cls.kind_to_type[kind])
itype = None
if isinstance(kind, mojom.Struct):
itype = "mojo::internal::StructPointer<%s_Data>" % kind.name
elif isinstance(kind, mojom.Array):
itype = "mojo::internal::ArrayPointer<%s>" % cls.GetType(kind.kind)
elif kind.spec == 's':
itype = "mojo::internal::StringPointer"
else:
itype = cls.kind_to_type[kind]
return cls.field_template.substitute(FIELD=field.name, TYPE=itype)
@classmethod
def GetParamLine(cls, name, kind):
line = None
if mojom_generator.IsObjectKind(kind):
line = "mojo::internal::Wrap(params->%s())" % (name)
elif mojom_generator.IsHandleKind(kind):
line = "mojo::MakePassable(params->%s()).Pass()" % (name)
else:
line = "params->%s()" % name
return line
@classmethod
def GetCaseLine(cls, interface, method):
params = map(
lambda param: cls.GetParamLine(
param.name,
param.kind),
method.parameters)
method_call = "%s(%s);" % (method.name, ", ".join(params))
return cls.GetTemplate("interface_stub_case").substitute(
CLASS = interface.name,
METHOD = method.name,
        METHOD_CALL = method_call)
@classmethod
def GetSerializedFields(cls, ps):
fields = []
for pf in ps.packed_fields:
if mojom_generator.IsObjectKind(pf.field.kind):
fields.append(pf.field)
return fields
@classmethod
def GetHandleFields(cls, ps):
fields = []
for pf in ps.packed_fields:
if mojom_generator.IsHandleKind(pf.field.kind):
fields.append(pf.field)
return fields
@classmethod
def IsStructWithHandles(cls, struct):
for field in struct.fields:
if mojom_generator.IsHandleKind(field.kind):
return True
return False
@classmethod
def GetStructsWithHandles(cls, structs):
result = []
for struct in structs:
if cls.IsStructWithHandles(struct):
result.append(struct)
return result
def GetHeaderGuard(self, name):
return "MOJO_GENERATED_BINDINGS_%s_%s_H_" % \
(self.module.name.upper(), name.upper())
def GetHeaderFile(self, *components):
components = map(mojom_generator.CamelToUnderscores, components)
component_string = '_'.join(components)
return os.path.join(self.header_dir, "%s.h" % component_string)
def WriteTemplateToFile(self, template_name, **substitutions):
template = self.GetTemplate(template_name)
filename = template_name.replace("module", self.module.name)
substitutions['YEAR'] = datetime.date.today().year
substitutions['NAMESPACE'] = self.module.namespace
if self.output_dir is None:
file = sys.stdout
else:
file = open(os.path.join(self.output_dir, filename), "w+")
try:
file.write(template.substitute(substitutions))
finally:
if self.output_dir is not None:
file.close()
def GetStructDeclaration(self, name, ps, template, subs = {}):
fields = []
setters = []
getters = []
pad_count = 0
num_fields = len(ps.packed_fields)
for i in xrange(num_fields):
pf = ps.packed_fields[i]
field = pf.field
fields.append(self.GetFieldLine(field))
if i < (num_fields - 1):
next_pf = ps.packed_fields[i+1]
pad = next_pf.offset - (pf.offset + pf.size)
if pad > 0:
fields.append(self.pad_template.substitute(COUNT=pad_count, PAD=pad))
pad_count += 1
setters.append(self.GetSetterLine(field))
getters.append(self.GetGetterLine(field))
if num_fields > 0:
last_field = ps.packed_fields[num_fields - 1]
offset = last_field.offset + last_field.size
pad = mojom_pack.GetPad(offset, 8)
if pad > 0:
fields.append(self.pad_template.substitute(COUNT=pad_count, PAD=pad))
pad_count += 1
subs.update(
CLASS = name + '_Data',
WRAPPER = name,
SETTERS = '\n'.join(setters),
GETTERS = '\n'.join(getters),
FIELDS = '\n'.join(fields),
SIZE = ps.GetTotalSize() + self.HEADER_SIZE)
return template.substitute(subs)
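  # Editor's sketch (assumption, not part of the original generator): the padding
  # arithmetic above reduces to next_pf.offset - (pf.offset + pf.size). For example,
  # an int8 field packed at offset 0 followed by an int32 field at offset 4 yields
  # pad == 4 - (0 + 1) == 3, emitted as "uint8_t _pad0_[3];" so the int32 stays
  # aligned; a final pad from mojom_pack.GetPad(offset, 8) rounds the body up to a
  # multiple of 8 bytes before HEADER_SIZE is added.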
def GetStructSerialization(
self, class_name, param_name, ps, template, indent = None):
struct = ps.struct
closes = Lines(self.close_handles_template, indent)
encodes = Lines(self.struct_serialization_encode_template, indent)
encode_handles = \
Lines(self.struct_serialization_encode_handle_template, indent)
decodes = Lines(self.struct_serialization_decode_template, indent)
decode_handles = \
Lines(self.struct_serialization_decode_handle_template, indent)
fields = self.GetSerializedFields(ps)
handle_fields = self.GetHandleFields(ps)
for field in fields:
substitutions = {'NAME': param_name, 'FIELD': field.name}
encodes.Add(substitutions)
decodes.Add(substitutions)
closes.Add(substitutions)
for field in handle_fields:
substitutions = {'NAME': param_name, 'FIELD': field.name}
encode_handles.Add(substitutions)
decode_handles.Add(substitutions)
return template.substitute(
CLASS = \
"%s::internal::%s" % (self.module.namespace, class_name + '_Data'),
NAME = param_name,
CLOSES = closes,
ENCODES = encodes,
DECODES = decodes,
ENCODE_HANDLES = encode_handles,
DECODE_HANDLES = decode_handles)
def GetStructClassDeclarations(self):
struct_decls = map(
lambda s: self.GetStructDeclaration(
s.name,
mojom_pack.PackedStruct(s),
self.GetTemplate("struct_declaration"),
{}),
self.module.structs)
return '\n'.join(struct_decls)
def GetEnumFields(self, enum):
fields = Lines(self.enum_field_template)
for field in enum.fields:
if field.value:
fields.Add(NAME=field.name, EQUALS = " = ", VALUE = field.value)
else:
fields.Add(NAME=field.name, EQUALS = "", VALUE = "")
return fields
def GetEnumDeclarations(self):
decls = Lines(self.GetTemplate("enum_declaration"))
for enum in self.module.enums:
decls.Add(NAME = enum.name, ENUM_FIELDS = self.GetEnumFields(enum))
return decls
def GetWrapperDeclaration(self, name, ps, template, subs = {}):
setters = []
getters = []
num_fields = len(ps.packed_fields)
for i in xrange(num_fields):
field = ps.packed_fields[i].field
setters.append(self.GetWrapperSetterLine(field))
getters.append(self.GetWrapperGetterLine(field))
subs.update(
CLASS = name,
SETTERS = '\n'.join(setters),
GETTERS = '\n'.join(getters))
return template.substitute(subs)
def GetWrapperClassDeclarations(self):
wrapper_decls = map(
lambda s: self.GetWrapperDeclaration(
s.name,
mojom_pack.PackedStruct(s),
self.GetTemplate("wrapper_class_declaration"),
{}),
self.module.structs)
return '\n'.join(wrapper_decls)
def GetWrapperForwardDeclarations(self):
wrapper_fwds = map(lambda s: "class " + s.name + ";", self.module.structs)
return '\n'.join(wrapper_fwds)
def GetInterfaceClassDeclaration(self, interface, template, method_postfix):
methods = []
for method in interface.methods:
params = []
for param in method.parameters:
params.append("%s %s" %
(self.GetConstWrapperType(param.kind), param.name))
methods.append(
" virtual void %s(%s) %s;" %
(method.name, ", ".join(params), method_postfix))
return template.substitute(
CLASS=interface.name,
PEER=interface.peer,
METHODS='\n'.join(methods))
def | |
dataset = [
{
"tweet_id": 1346604540049690624,
"text": "@LLinWood @realDonaldTrump @VP @Mike_Pence We are about to witness a legislative attempt at a coup d'etat.",
"timestamp": "2021-01-05T23:48:35.707000+00:00",
"user_id": 1241662805713059840,
"user_verified": False,
"user_name": "Chris 20",
"hash_tags": [],
"followers": 1805,
"following": 2320,
"tweets_by_user": 9071,
"is_quoting": None,
"is_reply_to": None,
"is_retweeting": 1346554035688644608,
},
{
"tweet_id": 1346604540045324288,
"text": "something to try this year starting NOW! get an empty jar and every Friday add a note with a good thing that happened that week so by next New Year\u2019s Eve you can read them and look back on those good days!",
"timestamp": "2021-01-05T23:48:35.706000+00:00",
"user_id": 1004773858350518272,
"user_verified": False,
"user_name": "Johnny",
"hash_tags": [],
"followers": 52,
"following": 49,
"tweets_by_user": 7314,
"is_quoting": None,
"is_reply_to": None,
"is_retweeting": 1346603365195599872,
},
{
"tweet_id": 1346604540112453632,
"text": "If you own more than 0.01 #btc you're a millionaire!!!\nA satoshi millionaire \ud83d\ude0e",
"timestamp": "2021-01-05T23:48:35.722000+00:00",
"user_id": 1013259590,
"user_verified": False,
"user_name": "\u20b3lessandro Grunwald \ud83d\ude80\ud83c\udf11",
"hash_tags": ["btc"],
"followers": 26,
"following": 52,
"tweets_by_user": 279,
"is_quoting": None,
"is_reply_to": None,
"is_retweeting": None,
},
{
"tweet_id": 1346604540058021889,
"text": "I will be speaking at the SAVE AMERICA RALLY tomorrow on the Ellipse at 11AM Eastern. Arrive early \u2014 doors open at 7AM Eastern. BIG CROWDS! https://t.co/k4blXESc0c",
"timestamp": "2021-01-05T23:48:35.709000+00:00",
"user_id": 733756682,
"user_verified": False,
"user_name": "President-Elect \u2600\ufe0fHapified\u2600\ufe0f",
"hash_tags": [],
"followers": 4247,
"following": 5000,
"tweets_by_user": 22866,
"is_quoting": None,
"is_reply_to": None,
"is_retweeting": 1346588064026685443,
},
{
"tweet_id": 1346604540007751680,
"text": "Pa. GOP senators refused to seat a Democrat and removed Lt. Gov. Fetterman from presiding https://t.co/ucX1m1cC6E",
"timestamp": "2021-01-05T23:48:35.697000+00:00",
"user_id": 789124945393569792,
"user_verified": False,
"user_name": "Ecoxx",
"hash_tags": [],
"followers": 111,
"following": 98,
"tweets_by_user": 22162,
"is_quoting": None,
"is_reply_to": None,
"is_retweeting": None,
},
{
"tweet_id": 1346604540070686722,
"text": "tell me now \ni love you \n well everybody loves me https://t.co/ktIxfGsSlo",
"timestamp": "2021-01-05T23:48:35.712000+00:00",
"user_id": 1020819041273950208,
"user_verified": False,
"user_name": "carola",
"hash_tags": [],
"followers": 2302,
"following": 2365,
"tweets_by_user": 4920,
"is_quoting": None,
"is_reply_to": None,
"is_retweeting": 1346544949614026761,
},
{
"tweet_id": 1346604539902791680,
"text": "Come on, GEORGIA! @charliekirk11 and all of us are counting on you. Read Charlie's quote below. Only YOU can make a difference for America. STAY IN LINE to vote, and God love you; we do.",
"timestamp": "2021-01-05T23:48:35.672000+00:00",
"user_id": 98479091,
"user_verified": False,
"user_name": "Tiffany Cat",
"hash_tags": [],
"followers": 292,
"following": 822,
"tweets_by_user": 3156,
"is_quoting": 1346597239716683776,
"is_reply_to": None,
"is_retweeting": None,
},
{
"tweet_id": 1346604540066459650,
"text": "@korysdaphne I am exactly the opposite. \u2620\nThese two are my least favorite, while I love Infinity War and the first Avengers.",
"timestamp": "2021-01-05T23:48:35.711000+00:00",
"user_id": 3131714800,
"user_verified": False,
"user_name": "\ud835\udc81\u0334\ud835\udc68\u0334\ud835\udc70\u0334\ud835\udc72\u0334\ud835\udc76\u0334",
"hash_tags": [],
"followers": 229,
"following": 906,
"tweets_by_user": 43851,
"is_quoting": None,
"is_reply_to": 1346603311978455041,
"is_retweeting": None,
},
{
"tweet_id": 1346604540062281730,
"text": "@startrekcbs Nog getting a vessel named after him is just the sweetest thing.",
"timestamp": "2021-01-05T23:48:35.710000+00:00",
"user_id": 1152336213543706629,
"user_verified": False,
"user_name": "<NAME>.",
"hash_tags": [],
"followers": 5,
"following": 234,
"tweets_by_user": 6581,
"is_quoting": None,
"is_reply_to": 1346562112580067328,
"is_retweeting": None,
},
{
"tweet_id": 1346604539927908352,
"text": "@thekoolklj i played it for a bit when it came out and then i got bored of it, like i do with pretty much all mobiel games (also u can just dm me instead of asking in a tweet)",
"timestamp": "2021-01-05T23:48:35.678000+00:00",
"user_id": 1171926157014364160,
"user_verified": False,
"user_name": "Adelaide \u2728",
"hash_tags": [],
"followers": 221,
"following": 241,
"tweets_by_user": 11526,
"is_quoting": None,
"is_reply_to": 1346578258431766528,
"is_retweeting": None,
},
{
"tweet_id": 1346604540091637760,
"text": "Never allow love to be spoiled by making it about possession, control or a desire to try and fulfill a sense of your own lack of completeness. \n\nAllow it to be an incentive to rise to the highest state of being. Use whatever arises to bring you back to the source of love itself.",
"timestamp": "2021-01-05T23:48:35.717000+00:00",
"user_id": 2147860407,
"user_verified": False,
"user_name": "\ud83e\udda5\ud<PASSWORD>",
"hash_tags": [],
"followers": 123,
"following": 328,
"tweets_by_user": 4737,
"is_quoting": None,
"is_reply_to": None,
"is_retweeting": 1346457239821852677,
},
{
"tweet_id": 1346604544042676230,
"text": "@IanAMartin Well! I never!",
"timestamp": "2021-01-05T23:48:36.659000+00:00",
"user_id": 10722592,
"user_verified": False,
"user_name": "Herne",
"hash_tags": [],
"followers": 794,
"following": 460,
"tweets_by_user": 133513,
"is_quoting": None,
"is_reply_to": 1346603771845959683,
"is_retweeting": None,
},
{
"tweet_id": 1346604544021688320,
"text": "@ifeeltheneed_ @Talisker111 @DavidScullion @sugarkane_12 With that response, I\u2019m clearly out my depth here Boris!! Lol",
"timestamp": "2021-01-05T23:48:36.654000+00:00",
"user_id": 1264557464239964161,
"user_verified": False,
"user_name": "Ian Thomson",
"hash_tags": [],
"followers": 175,
"following": 292,
"tweets_by_user": 1282,
"is_quoting": None,
"is_reply_to": 1346603365489373184,
"is_retweeting": None,
},
{
"tweet_id": 1346604544009101312,
"text": "@UnofficialCiel am i supposed to understand or- \ud83d\ude00",
"timestamp": "2021-01-05T23:48:36.651000+00:00",
"user_id": 1225233272420130816,
"user_verified": False,
"user_name": "butler nezuko || is ciel's fave",
"hash_tags": [],
"followers": 607,
"following": 771,
"tweets_by_user": 2520,
"is_quoting": None,
"is_reply_to": 1346603386456555521,
"is_retweeting": None,
},
{
"tweet_id": 1346604544046882817,
"text": "Antifa is a Terrorist Organization, stay out of Washington. Law enforcement is watching you very closely! @DeptofDefense @TheJusticeDept @DHSgov @DHS_Wolf @SecBernhardt @SecretService @FBI",
"timestamp": "2021-01-05T23:48:36.660000+00:00",
"user_id": 1325430409413451778,
"user_verified": False,
"user_name": "Lumpenproletar",
"hash_tags": [],
"followers": 3,
"following": 29,
"tweets_by_user": 299,
"is_quoting": None,
"is_reply_to": None,
"is_retweeting": 1346583537256976385,
},
{
"tweet_id": 1346604543983767552,
"text": "Make sure when blinks drag bts you clapback and defend them not just watching it\ud83e\udd70",
"timestamp": "2021-01-05T23:48:36.645000+00:00",
"user_id": 1255772393672331265,
"user_verified": False,
"user_name": "<NAME>",
"hash_tags": [],
"followers": 323,
"following": 426,
"tweets_by_user": 10035,
"is_quoting": 1346559529601945605,
"is_reply_to": None,
"is_retweeting": None,
},
{
"tweet_id": 1346604543916691457,
"text": "@jaredlholt @mnxmoosi He needs to get some proper boots. I mean skinheads used to use steal-caps - these guys are amateurs cosplayers. Nice hat.",
"timestamp": "2021-01-05T23:48:36.629000+00:00",
"user_id": 14977459,
"user_verified": False,
"user_name": "antipattern",
"hash_tags": [],
"followers": 367,
"following": 351,
"tweets_by_user": 14779,
"is_quoting": None,
"is_reply_to": 1346575085369520132,
"is_retweeting": None,
},
{
"tweet_id": 1346604544042688515,
"text": "Can you ever accept that you\u2019ve made a mistake or that you were wrong? When you do recognize it, do you still stand your ground? If you do, you probably have toxic traits. You like to pick on people\u2019s faults too. You need to work on yourself & your heart where it all starts!",
"timestamp": "2021-01-05T23:48:36.659000+00:00",
"user_id": 1187800623519019010,
"user_verified": False,
"user_name": "Adey<NAME>",
"hash_tags": [],
"followers": 86,
"following": 72,
"tweets_by_user": 412,
"is_quoting": None,
"is_reply_to": None,
"is_retweeting": 1346466795843014658,
},
{
"tweet_id": 1346604544093020162,
"text": "I came out the house looking crazy to get food now I actually got to get out \ud83d\ude12",
"timestamp": "2021-01-05T23:48:36.671000+00:00",
"user_id": 545914508,
"user_verified": False,
"user_name": "Mamacita",
"hash_tags": [],
"followers": 482,
"following": 1600,
"tweets_by_user": 36277,
"is_quoting": None,
"is_reply_to": None,
"is_retweeting": None,
},
{
"tweet_id": 1346604544017510402,
"text": 'This idea that art is meant to "push you out of your comfort zone" is subversive bullshit. Real art is meant to inspire you, awe you, and give you a glimpse of something more transcendent than the mundane world. "Art" that offers nothing but degenerate shock value is not art.',
"timestamp": "2021-01-05T23:48:36.653000+00:00",
"user_id": 1932684434,
"user_verified": False,
"user_name": "Another person",
"hash_tags": [],
"followers": 152,
"following": 92,
"tweets_by_user": 116989,
"is_quoting": None,
"is_reply_to": None,
"is_retweeting": 1346374437755518976,
},
{
"tweet_id": 1346604544042569728,
"text": "I know i called you bro but im kinda in love with you",
"timestamp": "2021-01-05T23:48:36.659000+00:00",
"user_id": 1279709685222797313,
"user_verified": False,
"user_name": "lildickkgirll",
"hash_tags": [],
"followers": 113,
"following": 84,
"tweets_by_user": 3635,
"is_quoting": None,
"is_reply_to": None,
"is_retweeting": 1346527857556787203,
},
{
"tweet_id": 1346604544050999296,
"text": "@gyucentric @PRODSOOB1N No u cute :(",
"timestamp": "2021-01-05T23:48:36.661000+00:00",
"user_id": 1436231089,
"user_verified": False,
"user_name": "brian\u2077",
"hash_tags": [],
"followers": 15033,
"following": 2115,
"tweets_by_user": 93121,
"is_quoting": None,
"is_reply_to": 1346604093419245568,
"is_retweeting": None,
},
{
"tweet_id": 1346604544093024256,
"text": "@bethwatt27 Am I able to like it myself?",
"timestamp": "2021-01-05T23:48:36.671000+00:00",
"user_id": 1244681714330406913,
"user_verified": False,
"user_name": "Slaps",
"hash_tags": [],
"followers": 238,
"following": 225,
"tweets_by_user": 1425,
"is_quoting": None,
"is_reply_to": 1346604157684297731,
"is_retweeting": None,
},
{
"tweet_id": 1346604544051060739,
"text": "The fact that I cut my hair by myself is funny how nice it turned out",
"timestamp": "2021-01-05T23:48:36.661000+00:00",
"user_id": 3344404108,
"user_verified": False,
"user_name": "K\u00fcr",
"hash_tags": [],
"followers": 8268,
"following": 4758,
"tweets_by_user": 19515,
"is_quoting": None,
"is_reply_to": 1346603824245563394,
"is_retweeting": None,
},
{
"tweet_id": 1346604543996383232,
"text": "Re-upping this funded PhD posting as the review of applications will commence soon. People from outside of Canada are welcome to apply (NSERC eligibility is handy but not critical). Email me if you have questions.\n\nWork here: Live here: https://t.co/qJWla39Tta",
"timestamp": "2021-01-05T23:48:36.648000+00:00",
"user_id": 951523130513633280,
"user_verified": False,
"user_name": "<NAME>oney",
"hash_tags": [],
"followers": 248,
"following": 795,
"tweets_by_user": 412,
"is_quoting": 1334108049833943040,
"is_reply_to": None,
"is_retweeting": 1346099565540216832,
},
{
"tweet_id": 1346604544113987585,
"text": "@80SKEEF i just wish | |
m.x4547 - m.x6291 - m.x6491 - m.x6611 == 0)
m.c4080 = Constraint(expr= m.x4148 - m.x4548 - m.x6291 - m.x6491 - m.x6611 == 0)
m.c4081 = Constraint(expr= m.x4149 - m.x4549 - m.x6291 - m.x6491 - m.x6611 == 0)
m.c4082 = Constraint(expr= m.x4150 - m.x4550 - m.x6291 - m.x6491 - m.x6611 == 0)
m.c4083 = Constraint(expr= m.x4151 - m.x4551 - m.x6291 - m.x6491 - m.x6611 == 0)
m.c4084 = Constraint(expr= m.x4152 - m.x4552 - m.x6291 - m.x6491 - m.x6611 == 0)
m.c4085 = Constraint(expr= m.x4153 - m.x4553 - m.x6291 - m.x6491 - m.x6611 == 0)
m.c4086 = Constraint(expr= m.x4154 - m.x4554 - m.x6291 - m.x6491 - m.x6611 == 0)
m.c4087 = Constraint(expr= m.x4155 - m.x4555 - m.x6291 - m.x6491 - m.x6611 == 0)
m.c4088 = Constraint(expr= m.x4156 - m.x4556 - m.x6291 - m.x6491 - m.x6611 == 0)
m.c4089 = Constraint(expr= m.x4157 - m.x4557 - m.x6292 - m.x6492 - m.x6612 == 0)
m.c4090 = Constraint(expr= m.x4158 - m.x4558 - m.x6292 - m.x6492 - m.x6612 == 0)
m.c4091 = Constraint(expr= m.x4159 - m.x4559 - m.x6292 - m.x6492 - m.x6612 == 0)
m.c4092 = Constraint(expr= m.x4160 - m.x4560 - m.x6292 - m.x6492 - m.x6612 == 0)
m.c4093 = Constraint(expr= m.x4161 - m.x4561 - m.x6292 - m.x6492 - m.x6612 == 0)
m.c4094 = Constraint(expr= m.x4162 - m.x4562 - m.x6292 - m.x6492 - m.x6612 == 0)
m.c4095 = Constraint(expr= m.x4163 - m.x4563 - m.x6292 - m.x6492 - m.x6612 == 0)
m.c4096 = Constraint(expr= m.x4164 - m.x4564 - m.x6292 - m.x6492 - m.x6612 == 0)
m.c4097 = Constraint(expr= m.x4165 - m.x4565 - m.x6292 - m.x6492 - m.x6612 == 0)
m.c4098 = Constraint(expr= m.x4166 - m.x4566 - m.x6292 - m.x6492 - m.x6612 == 0)
m.c4099 = Constraint(expr= m.x4167 - m.x4567 - m.x6293 - m.x6493 - m.x6613 == 0)
m.c4100 = Constraint(expr= m.x4168 - m.x4568 - m.x6293 - m.x6493 - m.x6613 == 0)
m.c4101 = Constraint(expr= m.x4169 - m.x4569 - m.x6293 - m.x6493 - m.x6613 == 0)
m.c4102 = Constraint(expr= m.x4170 - m.x4570 - m.x6293 - m.x6493 - m.x6613 == 0)
m.c4103 = Constraint(expr= m.x4171 - m.x4571 - m.x6293 - m.x6493 - m.x6613 == 0)
m.c4104 = Constraint(expr= m.x4172 - m.x4572 - m.x6293 - m.x6493 - m.x6613 == 0)
m.c4105 = Constraint(expr= m.x4173 - m.x4573 - m.x6293 - m.x6493 - m.x6613 == 0)
m.c4106 = Constraint(expr= m.x4174 - m.x4574 - m.x6293 - m.x6493 - m.x6613 == 0)
m.c4107 = Constraint(expr= m.x4175 - m.x4575 - m.x6293 - m.x6493 - m.x6613 == 0)
m.c4108 = Constraint(expr= m.x4176 - m.x4576 - m.x6293 - m.x6493 - m.x6613 == 0)
m.c4109 = Constraint(expr= m.x4177 - m.x4577 - m.x6294 - m.x6494 - m.x6614 == 0)
m.c4110 = Constraint(expr= m.x4178 - m.x4578 - m.x6294 - m.x6494 - m.x6614 == 0)
m.c4111 = Constraint(expr= m.x4179 - m.x4579 - m.x6294 - m.x6494 - m.x6614 == 0)
m.c4112 = Constraint(expr= m.x4180 - m.x4580 - m.x6294 - m.x6494 - m.x6614 == 0)
m.c4113 = Constraint(expr= m.x4181 - m.x4581 - m.x6294 - m.x6494 - m.x6614 == 0)
m.c4114 = Constraint(expr= m.x4182 - m.x4582 - m.x6294 - m.x6494 - m.x6614 == 0)
m.c4115 = Constraint(expr= m.x4183 - m.x4583 - m.x6294 - m.x6494 - m.x6614 == 0)
m.c4116 = Constraint(expr= m.x4184 - m.x4584 - m.x6294 - m.x6494 - m.x6614 == 0)
m.c4117 = Constraint(expr= m.x4185 - m.x4585 - m.x6294 - m.x6494 - m.x6614 == 0)
m.c4118 = Constraint(expr= m.x4186 - m.x4586 - m.x6294 - m.x6494 - m.x6614 == 0)
m.c4119 = Constraint(expr= m.x4187 - m.x4587 - m.x6295 - m.x6495 - m.x6615 == 0)
m.c4120 = Constraint(expr= m.x4188 - m.x4588 - m.x6295 - m.x6495 - m.x6615 == 0)
m.c4121 = Constraint(expr= m.x4189 - m.x4589 - m.x6295 - m.x6495 - m.x6615 == 0)
m.c4122 = Constraint(expr= m.x4190 - m.x4590 - m.x6295 - m.x6495 - m.x6615 == 0)
m.c4123 = Constraint(expr= m.x4191 - m.x4591 - m.x6295 - m.x6495 - m.x6615 == 0)
m.c4124 = Constraint(expr= m.x4192 - m.x4592 - m.x6295 - m.x6495 - m.x6615 == 0)
m.c4125 = Constraint(expr= m.x4193 - m.x4593 - m.x6295 - m.x6495 - m.x6615 == 0)
m.c4126 = Constraint(expr= m.x4194 - m.x4594 - m.x6295 - m.x6495 - m.x6615 == 0)
m.c4127 = Constraint(expr= m.x4195 - m.x4595 - m.x6295 - m.x6495 - m.x6615 == 0)
m.c4128 = Constraint(expr= m.x4196 - m.x4596 - m.x6295 - m.x6495 - m.x6615 == 0)
m.c4129 = Constraint(expr= m.x4197 - m.x4597 - m.x6296 - m.x6496 - m.x6616 == 0)
m.c4130 = Constraint(expr= m.x4198 - m.x4598 - m.x6296 - m.x6496 - m.x6616 == 0)
m.c4131 = Constraint(expr= m.x4199 - m.x4599 - m.x6296 - m.x6496 - m.x6616 == 0)
m.c4132 = Constraint(expr= m.x4200 - m.x4600 - m.x6296 - m.x6496 - m.x6616 == 0)
m.c4133 = Constraint(expr= m.x4201 - m.x4601 - m.x6296 - m.x6496 - m.x6616 == 0)
m.c4134 = Constraint(expr= m.x4202 - m.x4602 - m.x6296 - m.x6496 - m.x6616 == 0)
m.c4135 = Constraint(expr= m.x4203 - m.x4603 - m.x6296 - m.x6496 - m.x6616 == 0)
m.c4136 = Constraint(expr= m.x4204 - m.x4604 - m.x6296 - m.x6496 - m.x6616 == 0)
m.c4137 = Constraint(expr= m.x4205 - m.x4605 - m.x6296 - m.x6496 - m.x6616 == 0)
m.c4138 = Constraint(expr= m.x4206 - m.x4606 - m.x6296 - m.x6496 - m.x6616 == 0)
m.c4139 = Constraint(expr= m.x4207 - m.x4607 - m.x6297 - m.x6497 - m.x6617 == 0)
m.c4140 = Constraint(expr= m.x4208 - m.x4608 - m.x6297 - m.x6497 - m.x6617 == 0)
m.c4141 = Constraint(expr= m.x4209 - m.x4609 - m.x6297 - m.x6497 - m.x6617 == 0)
m.c4142 = Constraint(expr= m.x4210 - m.x4610 - m.x6297 - m.x6497 - m.x6617 == 0)
m.c4143 = Constraint(expr= m.x4211 - m.x4611 - m.x6297 - m.x6497 - m.x6617 == 0)
m.c4144 = Constraint(expr= m.x4212 - m.x4612 - m.x6297 - m.x6497 - m.x6617 == 0)
m.c4145 = Constraint(expr= m.x4213 - m.x4613 - m.x6297 - m.x6497 - m.x6617 == 0)
m.c4146 = Constraint(expr= m.x4214 - m.x4614 - m.x6297 - m.x6497 - m.x6617 == 0)
m.c4147 = Constraint(expr= m.x4215 - m.x4615 - m.x6297 - m.x6497 - m.x6617 == 0)
m.c4148 = Constraint(expr= m.x4216 - m.x4616 - m.x6297 - m.x6497 - m.x6617 == 0)
m.c4149 = Constraint(expr= m.b6778 + m.b6898 + m.b7018 <= 1)
m.c4150 = Constraint(expr= m.b6779 + m.b6899 + m.b7019 <= 1)
m.c4151 = Constraint(expr= m.b6780 + m.b6900 + m.b7020 <= 1)
m.c4152 = Constraint(expr= m.b6781 + m.b6901 + m.b7021 <= 1)
m.c4153 = Constraint(expr= m.b6782 + m.b6902 + m.b7022 <= 1)
m.c4154 = Constraint(expr= m.b6783 + m.b6903 + m.b7023 <= 1)
m.c4155 = Constraint(expr= m.b6784 + m.b6904 + m.b7024 <= 1)
m.c4156 = Constraint(expr= m.b6785 + m.b6905 + m.b7025 <= 1)
m.c4157 = Constraint(expr= m.b6786 + m.b6906 + m.b7026 <= 1)
m.c4158 = Constraint(expr= m.b6787 + m.b6907 + m.b7027 <= 1)
m.c4159 = Constraint(expr= m.b6788 + m.b6908 + m.b7028 <= 1)
m.c4160 = Constraint(expr= m.b6789 + m.b6909 + m.b7029 <= 1)
m.c4161 = Constraint(expr= m.b6790 + m.b6910 + m.b7030 <= 1)
m.c4162 = Constraint(expr= m.b6791 + m.b6911 + m.b7031 <= 1)
m.c4163 = Constraint(expr= m.b6792 + m.b6912 + m.b7032 <= 1)
m.c4164 = Constraint(expr= m.b6793 + m.b6913 + m.b7033 <= 1)
m.c4165 = Constraint(expr= m.b6794 + m.b6914 + m.b7034 <= 1)
m.c4166 = Constraint(expr= m.b6795 + m.b6915 + m.b7035 <= 1)
m.c4167 = Constraint(expr= m.b6796 + m.b6916 + m.b7036 <= 1)
m.c4168 = Constraint(expr= m.b6797 + m.b6917 + m.b7037 <= 1)
m.c4169 = Constraint(expr= m.b6798 + m.b6918 + m.b7038 <= 1)
m.c4170 = Constraint(expr= m.b6799 + m.b6919 + m.b7039 <= 1)
m.c4171 = Constraint(expr= m.b6800 + m.b6920 + m.b7040 <= 1)
m.c4172 = Constraint(expr= m.b6801 + m.b6921 + m.b7041 <= 1)
m.c4173 = Constraint(expr= m.b6802 + m.b6922 + m.b7042 <= 1)
m.c4174 = Constraint(expr= m.b6803 + m.b6923 + m.b7043 <= 1)
m.c4175 = Constraint(expr= m.b6804 + m.b6924 + m.b7044 <= 1)
m.c4176 = Constraint(expr= m.b6805 + m.b6925 + m.b7045 <= 1)
m.c4177 = Constraint(expr= m.b6806 + m.b6926 + m.b7046 <= 1)
m.c4178 = Constraint(expr= m.b6807 + m.b6927 + m.b7047 <= 1)
m.c4179 = Constraint(expr= m.b6808 + m.b6928 + m.b7048 <= 1)
m.c4180 = Constraint(expr= m.b6809 + m.b6929 + m.b7049 <= 1)
m.c4181 = Constraint(expr= m.b6810 + m.b6930 + m.b7050 <= 1)
m.c4182 = Constraint(expr= m.b6811 + m.b6931 + m.b7051 <= 1)
m.c4183 = Constraint(expr= m.b6812 + m.b6932 + m.b7052 <= 1)
m.c4184 = Constraint(expr= m.b6813 + m.b6933 + m.b7053 <= 1)
m.c4185 = Constraint(expr= m.b6814 + m.b6934 + m.b7054 <= 1)
m.c4186 = Constraint(expr= m.b6815 + m.b6935 + m.b7055 <= 1)
m.c4187 = Constraint(expr= m.b6816 + m.b6936 + m.b7056 <= 1)
m.c4188 = Constraint(expr= m.b6817 + m.b6937 + m.b7057 <= 1)
m.c4189 = Constraint(expr= m.x6138 - 1.85*m.x6298 - 1.45*m.x6299 == 0)
m.c4190 = Constraint(expr= m.x6141 - 1.85*m.x6300 | |
# src/swiftpath/swiftpath.py
"""A module for interacting with **Openstack Swift** using the standard
:mod:`pathlib.Path` interface.
"""
import atexit
import base64
import contextlib
import datetime
import io
import logging
import os
import pathlib
import posix
import re
import sys
import tempfile
import urllib.parse
from pathlib import PurePath
from typing import (
IO,
Any,
AnyStr,
Dict,
Generator,
Iterable,
List,
Optional,
Protocol,
Type,
TypeVar,
Union,
)
import attr
from requests.exceptions import StreamConsumedError
try:
import keystoneauth1
import keystoneauth1.exceptions.catalog
import keystoneauth1.session
import keystoneauth1.identity
import swiftclient.client
import swiftclient.exceptions
except ImportError:
keystoneauth1 = None
swiftclient = None
try:
import filelock
except ImportError:
filelock = None
IOTYPES = Union[Type["SwiftKeyReadableFileObject"], Type["SwiftKeyWritableFileObject"]]
TStrTypes = TypeVar("TStrTypes", str, bytes)
# See https://stackoverflow.com/a/8571649 for explanation
BASE64_RE = re.compile(b"^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$")
_SUPPORTED_OPEN_MODES = {"r", "br", "rb", "tr", "rt", "w", "wb", "bw", "wt", "tw"}
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
def log(message, level="info"):
getattr(logger, level.lower())(message)
class AttrProto(Protocol):
def __init__(self, *args: Any, **kwargs: Any) -> None:
...
def fromisoformat(dt: str) -> datetime.datetime:
result: datetime.datetime
try:
result = datetime.datetime.fromisoformat(dt) # type: ignore
except AttributeError:
result = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%f")
return result
@attr.s(frozen=True)
class ObjectPath(AttrProto):
#: The name of the container
container = attr.ib(type=str)
#: The optional path to the target object
key = attr.ib(type=Optional[str])
def __str__(self):
if not self.key:
return f"/{self.container}/"
return f"/{self.container}/{self.key}"
def as_path(self) -> "SwiftPath":
return SwiftPath(str(self))
@classmethod
def from_path(cls, path: "SwiftPath") -> "ObjectPath":
container = str(path.container) if path.container else None
if not container:
if path.root == str(path):
container = ""
else:
raise ValueError(
f"Absolute path required to parse container, got {path!s}"
)
container = container.strip(path._flavour.sep)
key = str(path.key) if path.key else None
if key is not None:
key = key.lstrip(path._flavour.sep)
return cls(container=container, key=key)
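# Editor's usage sketch (assumption, not part of the original module): illustrates
# how ObjectPath is expected to round-trip an absolute SwiftPath. SwiftPath is
# defined further down in this file; the container and key below are hypothetical.
def _objectpath_usage_sketch() -> "ObjectPath":
    path = SwiftPath("/my-container/some/key")  # hypothetical path
    parsed = ObjectPath.from_path(path)
    # Expected (assumed) result: parsed.container == "my-container",
    # parsed.key == "some/key", and str(parsed) == "/my-container/some/key".
    return parsed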
class _Backend:
def __init__(
self,
username: Optional[str] = None,
project: Optional[str] = None,
password: Optional[str] = None,
auth_url: Optional[str] = None,
domain: Optional[str] = None,
object_storage_url: Optional[str] = None,
project_id: Optional[str] = None,
user_id: Optional[str] = None,
project_domain: Optional[str] = None,
region: Optional[str] = None,
) -> None:
swift_credentials = {
"user_domain_name": domain
or os.environ.get("OS_USER_DOMAIN_NAME", "default"),
"project_domain_name": project_domain
or os.environ.get("OS_PROJECT_DOMAIN_NAME", "default"),
"password": password or os.environ.get("OS_PASSWORD"),
}
os_options = {}
user_id = user_id or os.environ.get("OS_USER_ID", None)
username = username or os.environ.get("OS_USERNAME", None)
project = project or os.environ.get(
"OS_PROJECT_NAME", os.environ.get("OS_TENANT_NAME")
)
if not auth_url:
auth_url = os.environ.get(
"OS_AUTH_URL", os.environ.get("OS_AUTHENTICATION_URL")
)
object_storage_url = object_storage_url or os.environ.get("OS_STORAGE_URL")
region = region or os.environ.get("OS_REGION_NAME")
project_id = project_id or os.environ.get("OS_PROJECT_ID")
if username:
swift_credentials["username"] = username
elif user_id:
swift_credentials["user_id"] = user_id
if project:
swift_credentials["project_name"] = project
os_options["project_name"] = project
if object_storage_url:
os_options["object_storage_url"] = object_storage_url
if region:
os_options["region_name"] = region
if project_id:
os_options["project_id"] = project_id
if auth_url:
swift_credentials["auth_url"] = auth_url
self.os_options = os_options
self.auth = keystoneauth1.identity.v3.Password(**swift_credentials)
self.swift = self._get_connection()
def _get_session(self) -> keystoneauth1.session.Session:
return keystoneauth1.session.Session(auth=self.auth)
def _get_connection(self) -> swiftclient.client.Connection:
return swiftclient.client.Connection(
session=self._get_session(), os_options=self.os_options
)
@contextlib.contextmanager
def connection(self) -> Generator[swiftclient.client.Connection, None, None]:
with contextlib.closing(self._get_connection()) as swift_conn:
yield swift_conn
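# Editor's note (summary of the constructor above, not an exhaustive OpenStack
# reference): with no explicit arguments, _Backend falls back to the usual OS_*
# environment variables -- OS_USERNAME or OS_USER_ID, OS_PASSWORD, OS_AUTH_URL
# (or OS_AUTHENTICATION_URL), OS_PROJECT_NAME (or OS_TENANT_NAME),
# OS_USER_DOMAIN_NAME and OS_PROJECT_DOMAIN_NAME (both defaulting to "default"),
# and optionally OS_STORAGE_URL, OS_REGION_NAME and OS_PROJECT_ID.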
class _SwiftFlavour(pathlib._PosixFlavour): # type: ignore
is_supported = bool(keystoneauth1)
def make_uri(self, path):
uri = super().make_uri(path)
return uri.replace("file:///", "swift://")
class _SwiftScandir:
def __init__(self, *, swift_accessor, path):
self._swift_accessor = swift_accessor
self._path = path
def __enter__(self):
return self
def __exit__(self, exc_typ, exc_val, exc_tb):
return
def __iter__(self):
try:
parsed_path = ObjectPath.from_path(self._path)
except ValueError:
parsed_path = None
        if not parsed_path or not parsed_path.container:
path_prefix = "/"
else:
path_prefix = f"/{parsed_path.container}/"
with self._swift_accessor.Backend.connection() as conn:
if not parsed_path or not parsed_path.container:
_, containers = conn.get_account()
for container in containers:
yield SwiftDirEntry(container["name"], is_dir=True)
return
path = parsed_path.key if parsed_path.key else ""
if path and not path.endswith(self._path._flavour.sep):
path = f"{path}{self._path._flavour.sep}"
headers, paths = conn.get_container(
parsed_path.container, prefix=path, delimiter=self._path._flavour.sep
)
for p in paths:
if "subdir" in p:
sub_path = type(self._path)(f"{path_prefix}{p['subdir']}")
name = str(sub_path.relative_to(self._path))
yield SwiftDirEntry(name, is_dir=True)
else:
is_symlink = p.get("content_type", "") == "application/symlink"
sub_path = type(self._path)(f"{path_prefix}{p['name']}")
name = str(sub_path.relative_to(self._path))
yield SwiftDirEntry(
name,
is_dir=False,
size=p["bytes"],
last_modified=p["last_modified"],
is_symlink=is_symlink,
)
class _SwiftAccessor:
Backend: _Backend = _Backend()
@staticmethod
def stat(target: "SwiftPath") -> "StatResult":
parsed_path = ObjectPath.from_path(target)
with _SwiftAccessor.Backend.connection() as conn:
headers = {}
try:
headers = conn.head_object(parsed_path.container, parsed_path.key)
except swiftclient.exceptions.ClientException:
try:
result = conn.get_container(
parsed_path.container, prefix=parsed_path.key
)
except (swiftclient.exceptions.ClientException, TypeError):
raise FileNotFoundError(str(target))
else:
if result is not None:
headers, _ = result
if "x-object-meta-mtime" in headers:
last_modified = float(headers["x-object-meta-mtime"])
elif "x-timestamp" in headers:
try:
last_modified = fromisoformat(headers["x-timestamp"]).timestamp()
except ValueError:
last_modified = float(headers["x-timestamp"])
else:
last_modified = 0
return StatResult(
size=headers["content-length"], last_modified=last_modified,
)
@staticmethod
def lstat(target: "SwiftPath") -> None:
raise NotImplementedError("lstat() not available on this system")
@staticmethod
def open(
path, *, mode="r", buffering=-1, encoding=None, errors=None, newline=None
) -> IO:
file_object: IOTYPES = (
SwiftKeyReadableFileObject if "r" in mode else SwiftKeyWritableFileObject
)
result = file_object(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return result
@staticmethod
def listdir(target: "SwiftPath") -> List[str]:
results: List[str] = []
parsed_path = ObjectPath.from_path(target)
target_path = parsed_path.key
paths: List[Dict[str, str]] = []
if target_path and not target_path.endswith(target._flavour.sep):
target_path = f"{target_path}{target._flavour.sep}"
with _SwiftAccessor.Backend.connection() as conn:
if not parsed_path.container:
acct_results = conn.get_account()
if acct_results is not None:
_, paths = acct_results
for container in paths:
results.append(container["name"])
else:
try:
container_results = conn.get_container(
parsed_path.container,
prefix=target_path,
delimiter=target._flavour.sep,
)
except swiftclient.exceptions.ClientException:
raise FileNotFoundError(str(target))
else:
if container_results is not None:
_, paths = container_results
for p in paths:
if "subdir" in p:
results.append(str(p["subdir"]).strip(target._flavour.sep))
else:
results.append(str(p["name"]).strip(target._flavour.sep))
results = [os.path.basename(str(r)) for r in results]
return results
@staticmethod
def scandir(path: "SwiftPath") -> _SwiftScandir:
return _SwiftScandir(swift_accessor=_SwiftAccessor, path=path)
@staticmethod
def chmod(target: "SwiftPath") -> None:
raise NotImplementedError("chmod() is not available on this platform")
def lchmod(self, pathobj: "SwiftPath", mode: int) -> None:
raise NotImplementedError("lchmod() not available on this system")
@staticmethod
def mkdir(path: "SwiftPath", exist_ok: bool = False, parents: bool = False) -> None:
"""Create the provided directory.
This operation is a no-op on swift.
"""
parsed_path = ObjectPath.from_path(path)
if path.exists() or path.joinpath(".swiftkeep").exists():
if not exist_ok:
raise FileExistsError(str(path))
if path.key:
path.joinpath(".swiftkeep").touch()
return None
with _SwiftAccessor.Backend.connection() as conn:
try:
conn.put_container(parsed_path.container)
except swiftclient.exceptions.ClientException:
raise FileExistsError(parsed_path.container)
return None
@staticmethod
def unlink(path: "SwiftPath", missing_ok: bool) -> None:
parsed_path = ObjectPath.from_path(path)
with _SwiftAccessor.Backend.connection() as conn:
try:
conn.delete_object(parsed_path.container, parsed_path.key)
except swiftclient.exceptions.ClientException:
if not missing_ok:
raise FileNotFoundError(str(path))
return None
@staticmethod
def link_to(
src: "SwiftPath",
link_name: Union[str, "SwiftPath"],
*,
src_dir_fd: Optional[int] = None,
dst_dir_fd: Optional[int] = None,
follow_symlinks: bool = True,
) -> None:
if not isinstance(link_name, SwiftPath):
target_path = SwiftPath(str(link_name))
else:
target_path = link_name
parsed_path = ObjectPath.from_path(src)
if not target_path.is_absolute():
target_path = SwiftPath(f"/{parsed_path.container!s}/{target_path!s}")
with _SwiftAccessor.Backend.connection() as conn:
conn.copy_object(parsed_path.container, parsed_path.key, str(target_path))
@staticmethod
def rmdir(path: "SwiftPath", *args: Any, **kwargs: Any) -> None:
# force = kwargs.pop("force", False)
# if not force:
# contents = list(path.iterdir(include_swiftkeep=True, recurse=True))
# for p in contents:
# p.unlink()
# # else:
# # raise OSError(
# # "Object container directories are auto-destroyed when they are emptied"
# # )
# # if contents and all(p.name == ".swiftkeep" for p in contents):
# # for p in contents:
# # if p.name == ".swiftkeep":
# # p.unlink()
# return
with _SwiftAccessor.Backend.connection() as conn:
try:
for item in path.iterdir():
if item.is_dir():
item.rmdir()
else:
parsed_path = ObjectPath.from_path(item)
conn.delete_object(parsed_path.container, parsed_path.key)
except FileNotFoundError:
return None
return None
@staticmethod
def rename(path: "SwiftPath", target: Union[pathlib.PurePath, str]) -> None:
caller_name = "[_SwiftAccessor.rename]"
if not isinstance(target, SwiftPath):
target_path = SwiftPath(str(target))
else:
target_path = target
parsed_path = ObjectPath.from_path(path)
if not target_path.is_absolute():
target_path = SwiftPath(f"/{parsed_path.container!s}/{target!s}")
log(
f"{caller_name} Added {path.container!s} to target: {target!s}",
level="debug",
)
with _SwiftAccessor.Backend.connection() as conn:
if path.is_dir():
for entry in path.iterdir():
sub_target = target_path.joinpath(entry.relative_to(path))
entry.rename(sub_target)
path.rmdir()
else:
parsed_path = ObjectPath.from_path(path)
container = parsed_path.container
key = parsed_path.key
log(
f"{caller_name} Renaming key: {key} from {container!s} to {target_path!s}",
level="debug",
)
conn.copy_object(container, key, str(target_path))
path.unlink()
@staticmethod
def replace(path: "SwiftPath", target: "SwiftPath") -> None:
return _SwiftAccessor.rename(path, target)
@staticmethod
def symlink(
a: "SwiftPath",
b: "SwiftPath",
target_is_directory: bool = False,
src_account: Optional[str] = None,
) -> None:
if not a.exists():
raise FileNotFoundError(a)
if b.exists():
raise FileExistsError(b)
with _SwiftAccessor.Backend.connection() as conn:
parsed_dest = ObjectPath.from_path(b)
headers = {
"X-Symlink-Target": str(a),
}
if src_account is not None:
headers["X-Symlink-Target-Account"] = src_account
conn.put_object(
parsed_dest.container,
parsed_dest.key,
b"",
content_length=0,
content_type="application/symlink",
headers=headers,
)
@staticmethod
def utime(target: "SwiftPath") -> None:
if not target.exists():
raise FileNotFoundError(str(target))
parsed_path = ObjectPath.from_path(target)
with _SwiftAccessor.Backend.connection() as conn:
conn.post_object(
parsed_path.container,
parsed_path.key,
{"x-timestamp": str(datetime.datetime.now().timestamp())},
)
# Helper for resolve()
def readlink(self, path: "SwiftPath") -> "SwiftPath":
return path
@property
def backend(self):
return self.Backend
_swift_flavour = _SwiftFlavour()
class PureSwiftPath(pathlib.PurePath):
"""Swift PurePath implementation for Openstack."""
_flavour = _swift_flavour
__slots__ = ()
@classmethod
def _parse_uri(cls, uri: str) -> urllib.parse.ParseResult:
result = urllib.parse.urlparse(uri)
# swift://container/path puts container in 'netloc'
# we want | |
#!/usr/bin/env python
#
# Copyright (c) 2018 SAP SE
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import argparse
import logging
import re
import time
from datetime import datetime
from http.server import BaseHTTPRequestHandler
from threading import Lock
import requests
from prometheus_client import Counter, Gauge
from sqlalchemy import Table, and_, select, update
from manilananny import ManilaNanny, is_utcts_recent, response, update_records
log = logging.getLogger('nanny-manila-share-sync')
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
ONEGB = 1073741824
TASK_SHARE_SIZE = '1'
TASK_MISSING_VOLUME = '2'
TASK_OFFLINE_VOLUME = '3'
TASK_ORPHAN_VOLUME = '4'
class MyHandler(BaseHTTPRequestHandler):
''' http server handler '''
def do_GET(self):
if self.path == '/orphan_volumes':
status_code, header, data = self.server.get_orphan_volumes()
elif self.path == '/offline_volumes':
status_code, header, data = self.server.get_offline_volumes()
elif self.path == '/missing_volume_shares':
status_code, header, data = self.server.get_missing_volume_shares()
else:
status_code, header, data = self.server.undefined_route(self.path)
self.send_response(status_code)
self.send_header(*header)
self.end_headers()
self.wfile.write(data.encode('utf-8'))
class ManilaShareSyncNanny(ManilaNanny):
def __init__(self, config_file, prom_host, interval, tasks, dry_run_tasks, prom_port, http_port, handler):
super(ManilaShareSyncNanny, self).__init__(config_file, interval,
prom_port=prom_port, http_port=http_port, handler=handler)
self.prom_host = prom_host + "/api/v1/query"
self.MANILA_NANNY_SHARE_SYNC_FAILURE = Counter('manila_nanny_share_sync_failure', '')
self.MANILA_SYNC_SHARE_SIZE_COUNTER = Counter('manila_nanny_sync_share_size',
'manila nanny sync share size')
self.MANILA_RESET_SHARE_ERROR_COUNTER = Counter('manila_nanny_reset_share_error',
'manila nanny reset share status to error')
self.manila_missing_volume_shares_gauge = Gauge('manila_nanny_share_missing_volume',
'Manila Share missing backend volume',
['share_id', 'instance_id', 'share_name', 'share_status'])
self.manila_orphan_volumes_gauge = Gauge('manila_nanny_orphan_volumes',
'Orphan backend volumes of Manila service',
['share_id', 'share_status', 'filer', 'vserver', 'volume'])
self.manila_offline_volumes_gauge = Gauge('manila_nanny_offline_volumes',
'Offline volumes of Manila service',
['share_id', 'share_status', 'filer', 'vserver', 'volume'])
self._tasks = tasks
self._dry_run_tasks = dry_run_tasks
if not any(tasks.values()):
raise Exception('All tasks are disabled')
self.orphan_volumes_lock = Lock()
self.orphan_volumes = {}
self.missing_volumes_lock = Lock()
self.missing_volumes = {}
self.offline_volumes_lock = Lock()
self.offline_volumes = {}
def _run(self):
# Need to recreate manila client each run, because of session timeout
# self.renew_manila_client()
# fetch data
try:
if self._tasks[TASK_SHARE_SIZE] or self._tasks[TASK_MISSING_VOLUME]\
or self._tasks[TASK_ORPHAN_VOLUME]:
_share_list = self._query_shares()
_volume_list = self._get_netapp_volumes()
_shares, _orphan_volumes = self._merge_share_and_volumes(_share_list, _volume_list)
if self._tasks[TASK_OFFLINE_VOLUME]:
_offline_volume_list = self._get_netapp_volumes('offline')
except Exception as e:
log.warning(e)
self.MANILA_NANNY_SHARE_SYNC_FAILURE.inc()
return
if self._tasks[TASK_SHARE_SIZE]:
dry_run = self._dry_run_tasks[TASK_SHARE_SIZE]
self.sync_share_size(_shares, dry_run)
if self._tasks[TASK_MISSING_VOLUME]:
dry_run = self._dry_run_tasks[TASK_MISSING_VOLUME]
self.process_missing_volume(_shares, dry_run)
if self._tasks[TASK_ORPHAN_VOLUME]:
dry_run = self._dry_run_tasks[TASK_ORPHAN_VOLUME]
self.process_orphan_volumes(_orphan_volumes, dry_run)
if self._tasks[TASK_OFFLINE_VOLUME]:
dry_run = self._dry_run_tasks[TASK_OFFLINE_VOLUME]
self.process_offline_volumes(_offline_volume_list, dry_run)
def sync_share_size(self, shares, dry_run=True):
""" Backend volume exists, but share size does not match """
msg = "share %s: share size != netapp volume size (%d != %d)"
msg_dry_run = "Dry run: " + msg
for (share_id, _), share in shares.items():
if 'volume' not in share:
continue
size, vsize = share['size'], share['volume']['size']
            # a reported size of 0 usually means the volume is offline; skip such volumes
if vsize == 0:
continue
if share['updated_at'] is not None:
if is_utcts_recent(share['updated_at'], 3600):
continue
if size != vsize:
if dry_run:
log.info(msg_dry_run, share_id, size, vsize)
else:
log.info(msg, share_id, size, vsize)
self.set_share_size(share_id, vsize)
self.MANILA_SYNC_SHARE_SIZE_COUNTER.inc()
def process_missing_volume(self, shares, dry_run=True):
""" Set share state to error when backend volume is missing
Ignore shares that are created/updated within 6 hours.
"""
missing_volumes = {}
for (share_id, instance_id), share in shares.items():
if 'volume' not in share:
# check if shares are created/updated recently
if is_utcts_recent(share['updated_at'] or share['created_at'], 6*3600):
continue
share_name = share['name']
share_status = share['status']
msg = f'ManilaShareMissingVolume: share={share_id}, '\
f'instance={instance_id}, status={share_status}'
if not dry_run:
if share_status == 'available':
self._reset_share_state(share_id, 'error')
share_status = 'error'
msg = f'ManilaShareMissingVolume: Set share {share_id} to error'
else:
msg = 'Dry run: ' + msg
log.info(msg)
self.manila_missing_volume_shares_gauge.labels(
share_id=share_id,
instance_id=instance_id,
share_name=share_name,
share_status=share_status,
).set(1)
missing_volumes[(share_id, instance_id)] = {
'share_id': share_id,
'instance_id': instance_id,
'share_name': share_name,
'share_status': share_status,
}
for (share_id, instance_id) in self.missing_volumes:
s = self.missing_volumes[(share_id, instance_id)]
share_name, share_status = s['share_name'], s['share_status']
if (share_id, instance_id) not in shares:
self.manila_missing_volume_shares_gauge.remove(share_id, instance_id, share_name,
share_status)
with self.missing_volumes_lock:
self.missing_volumes = update_records(self.missing_volumes, missing_volumes)
def process_offline_volumes(self, offline_volume_list, dry_run=True):
""" offline volume
@params offline_volumes:
List[Volume]
Volume: Dict[Keys['volume', 'vserver', 'filer'], Any]
"""
_offline_volumes = {}
for vol in offline_volume_list:
if vol['volume'].startswith('share'):
instance_id = vol['volume'][6:].replace('_', '-')
_offline_volumes[instance_id] = vol
# find associated share for offline volumes
_shares = self._query_shares_by_instance_ids(list(_offline_volumes.keys()))
for s in _shares:
instance_id = s['instance_id']
if instance_id in _offline_volumes:
_offline_volumes[instance_id].update({'share': s})
# ignore the shares that are updated/deleted recently
_offline_volume_keys = list(_offline_volumes.keys())
for vol_key, vol in _offline_volumes.items():
share = vol.get('share')
if share is not None:
if share['deleted_at'] or share['updated_at']:
if is_utcts_recent(share['deleted_at'] or share['updated_at'], 6*3600):
_offline_volume_keys.remove(vol_key)
# process remaining volume
offline_volumes = {}
for vol_key in _offline_volume_keys:
vol = _offline_volumes[vol_key]
name, filer, vserver = vol['volume'], vol['filer'], vol['vserver']
share = vol.get('share')
if share is not None:
share_id, status = share['share_id'], share['status']
else:
share_id, status = '', ''
self.manila_offline_volumes_gauge.labels(
share_id=share_id,
share_status=status,
volume=name,
vserver=vserver,
filer=filer,
).set(1)
offline_volumes[name] = {
'volume': name,
'filer': filer,
'vserver': vserver,
'share_id': share_id,
'status': status,
}
for volname, vol in self.offline_volumes.items():
if volname not in offline_volumes:
self.manila_offline_volumes_gauge.remove(vol['share_id'], vol['status'],
                                                         vol['filer'], vol['vserver'], vol['volume'])
with self.offline_volumes_lock:
self.offline_volumes = update_records(self.offline_volumes, offline_volumes)
def process_orphan_volumes(self, volumes, dry_run=True):
""" orphan volumes
Check if the corresponding manila shares are deleted recently (hard coded as 6 hours).
@params volumes: Dict[(FilerName, InstanceId), Volume]
"""
        # the volume key (extracted from the volume name) is the manila share instance id
vol_keys = list(volumes.keys())
# Shares: List[Share])
# Share.Keys: share_id, instance_id, deleted_at, status
shares = self._query_shares_by_instance_ids([instance_id for (_, instance_id) in vol_keys])
# merge share into volume
r = re.compile('^manila-share-netapp-(?P<filer>.+)@(?P=filer)#.*')
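        # Editor's example (hypothetical host string): a share instance host such as
        # "manila-share-netapp-filer01@filer01#aggr1" matches with filer == "filer01";
        # hosts that do not follow this pattern are skipped by the `continue` below.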
for s in shares:
m = r.match(s['host'])
if m:
filer = m.group('filer')
else:
continue
if (filer, s['instance_id']) in volumes:
volumes[(filer, s['instance_id'])].update({'share': s})
# loop over vol
for (filer, instance_id), vol in volumes.items():
# double check if the manila shares are deleted recently
if 'share' in vol:
share = vol['share']
deleted_at = share.get('deleted_at', None)
if deleted_at is not None:
if (datetime.utcnow() - deleted_at).total_seconds() < 6*3600:
vol_keys.remove((filer, instance_id))
orphan_volumes = {}
for vol_key in vol_keys:
vol = volumes[vol_key]
volume, vserver, filer = vol['volume'], vol['vserver'], vol['filer']
if 'share' in vol:
share_id = vol['share']['share_id']
share_deleted = vol['share']['deleted']
share_deleted_at = vol['share']['deleted_at']
instance_id = vol['share']['instance_id']
instance_status = vol['share']['status']
else:
share_id, share_deleted, share_deleted_at, instance_id, instance_status = None, None, None, None, ''
self.manila_orphan_volumes_gauge.labels(
share_id=share_id,
share_status=instance_status,
filer=filer,
vserver=vserver,
volume=volume,
).set(1)
orphan_volumes[vol_key] = {
'filer': filer,
'vserver': vserver,
'volume': volume,
'share_id': share_id,
'share_deleted': share_deleted,
'share_deleted_at': share_deleted_at,
'instance_id': instance_id,
'instance_status': instance_status,
}
for k, vol in self.orphan_volumes.items():
if k not in orphan_volumes:
self.manila_orphan_volumes_gauge.remove(vol['share_id'], vol['instance_status'],
vol['filer'], vol['vserver'], vol['volume'])
with self.orphan_volumes_lock:
self.orphan_volumes = update_records(self.orphan_volumes, orphan_volumes)
def _get_netapp_volumes(self, status='online'):
""" get netapp volumes from prometheus metrics
return [<vol>, <vol>, ...]
"""
def _merge_dicts(dict_a, dict_b):
dict_a.update(dict_b)
return dict_a
def _filter_labels(vol):
return {
'volume': vol['volume'],
'vserver': vol['vserver'],
'filer': vol['filer'],
}
if status == 'online':
query = "netapp_volume_total_bytes{app='netapp-capacity-exporter-manila'} + "\
"netapp_volume_snapshot_reserved_bytes"
results = self._fetch_prom_metrics(query)
return [
_merge_dicts(_filter_labels(vol['metric']),
{'size': int(vol['value'][1]) / ONEGB})
for vol in results
]
if status == 'offline':
query = "netapp_volume_state{app='netapp-capacity-exporter-manila'}==3"
results = self._fetch_prom_metrics(query)
return [
_filter_labels(vol['metric']) for vol in results
]
def _fetch_prom_metrics(self, query):
try:
r = requests.get(self.prom_host, params={'query': query, 'time': time.time()})
except Exception as e:
raise type(e)(f'_fetch_prom_metrics(query=\"{query}\"): {e}')
if r.status_code != 200:
return None
return r.json()['data']['result']
def _query_shares_by_instance_ids(self, instance_ids):
"""
@return List[Share]
Share: Dict[Keys['share_id', 'instance_id', 'created_at', 'updated_at', 'deleted_at',
'deleted', 'status', 'host'], Any]
"""
shares_t = Table('shares', self.db_metadata, autoload=True)
instances_t = Table('share_instances', self.db_metadata, autoload=True)
q = select([shares_t.c.id.label('share_id'),
shares_t.c.created_at,
shares_t.c.updated_at,
shares_t.c.deleted_at,
shares_t.c.deleted,
instances_t.c.status,
instances_t.c.id.label('instance_id'),
instances_t.c.host,
])\
.where(shares_t.c.id == instances_t.c.share_id)\
.where(instances_t.c.id.in_(instance_ids))
r = q.execute()
return [dict(zip(r.keys(), x)) for x in r.fetchall()]
def _query_shares(self):
""" Get shares that are not deleted """
shares = Table('shares', self.db_metadata, autoload=True)
instances = Table('share_instances', self.db_metadata, autoload=True)
stmt = select([shares.c.id,
shares.c.display_name,
shares.c.size,
shares.c.created_at,
shares.c.updated_at,
instances.c.id,
instances.c.status,
instances.c.host,
])\
.select_from(
shares.join(instances, shares.c.id == instances.c.share_id))\
.where(shares.c.deleted == 'False')
shares = []
for (sid, name, size, ctime, utime, siid, status, host) in stmt.execute():
shares.append({
'id': sid,
'name': name,
'size': size,
'created_at': ctime,
'updated_at': utime,
'instance_id': siid,
'status': status,
'host': host,
})
return shares
def _merge_share_and_volumes(self, shares, volumes):
""" Merge shares and volumes by share id and volume name
Assuming the volume name is `share_[share_instance_id]`. Update the share object
with the volume fields ("filer", "vserver", "volume", "volume_size").
Args:
shares: List[]
volumes: List[]
Return:
(shares, volumes): | |
class AbstractStemmer:
pass
class PorterStemmer(AbstractStemmer):
"""
    A Porter stemmer implementation.
    Tutorial: https://medium.com/analytics-vidhya/building-a-stemmer-492e9a128e84
"""
consonants = "bcdfghjklmnpqrstwxz"
special_case = "y"
vowels = "aeiou"
    def _divide_into_groups(self, word):
        """
        Divides a word into maximal runs of consonants and vowels,
        e.g. tree -> ['tr', 'ee'], cat -> ['c', 'a', 't']
        """
        groups = []
        preceding = ""
        for letter in word.lower():
            if preceding == "":
                preceding = letter
            elif self._compare_same_class(preceding, letter):
                preceding += letter
            else:
                groups.append(preceding)
                preceding = letter
        if preceding:
            groups.append(preceding)
        return groups
    def _compare_same_class(self, letter1, letter2):
        if letter1 in self.consonants and letter2 in self.consonants:
            return True
        if letter1 in self.vowels and letter2 in self.vowels:
            return True
        return False
def _determine_class(self,letter):
if letter[0] in self.consonants:
return 'C'
return 'V'
    def _encode_word(self, word):
        divided_list = self._divide_into_groups(word)
        classified = [self._determine_class(group) for group in divided_list]
        return classified
def _determine_m(self, word):
"""
[C] VC * {m} [V]
ie. m is the numbers of VC.
"""
class_list = self._encode_word(word)
if len(class_list) < 2:
return 0
if class_list[0] == 'C': # if first elment is C remove it
class_list = class_list[1:]
if class_list[-1] == 'V': # if last elment is V remove it
class_list = class_list[:len(class_list) - 1]
m = len(class_list) // 2 if (len(class_list) / 2) >= 1 else 0
return m
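    # Editor's worked examples (hand-computed from the code above, not from the
    # original tutorial): "tree" encodes to [C, V] giving m == 0, "trouble" to
    # [C, V, C, V] giving m == 1, and "troubles" to [C, V, C, V, C] giving m == 2.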
def _check_endswith(self, stem, letters):
"""
        *L, *S, *Z --> stem ends with one of the given letters, e.g. "lsz"
"""
for letter in letters:
if stem.endswith(letter):
return True
return False
def _check_vowel(self, stem):
"""
*v* --> stem contains a vowel in between
"""
for letter in stem:
if letter in self.vowels:
return True
return False
def _check_double_consonant(self, stem):
"""
        *d --> stem ends with a double (repeated) consonant, e.g. -TT, -SS.
        """
        if len(stem) < 2:
            return False
        if stem[-1] == stem[-2] and stem[-1] in self.consonants:
return True
return False
def _check_o(self, stem):
"""
        *o --> stem ends with cvc (consonant, vowel, consonant) where the final
        consonant is not w, x or y.
        """
        if len(stem) < 3:
            return False
        if (stem[-3] in self.consonants and stem[-2] in self.vowels
                and stem[-1] in self.consonants and stem[-1] not in "wxy"):
            return True
return False
#PorterStemming Starts
def _porter_step_1(self, word):
"""
Deals with plurals and past participles
"""
stem = word
step2b = False
#step1a
# SSES -> SS caresses -> caress
# IES -> I ponies -> poni
# ties -> ti
# SS -> SS caress -> caress
        # S -> epsilon cats -> cat
if stem.endswith('sses'):
stem = stem[:-2]
elif stem.endswith('ies'):
stem = stem[:-2]
elif not stem.endswith('ss') and stem.endswith('s'):
stem = stem[:-1]
# Step 1b,
# (m>0) EED -> EE feed -> feed
# agreed -> agree
# (*v*) ED -> plastered -> plaster
# bled -> bled
# (*v*) ING -> motoring -> motor
# sing -> sing
        # if sub-step 2 or 3 of 1b applies, another (sub)sub step has to be done
        # -- this step will add letters.
# Why? To be able to generalize better in further steps.
if len(stem) > 4:
if stem.endswith('eed') and self._determine_m(stem) > 0:
stem = stem[:-1]
elif stem.endswith('ed'):
stem = stem[:-2]
if not self._check_vowel(stem):
stem = word
else:
step2b = True
elif stem.endswith('ing'):
stem = stem[:-3]
if not self._check_vowel(stem):
stem = word
else:
step2b = True
# Step 1b,2
# (if step 1b is true)
# AT -> ATE conflat(ed) -> conflate
# BL -> BLE troubl(ed) -> trouble
# IZ -> IZE siz(ed) -> size
# (*d and not (*L or *S or *Z))
# -> single letter
# hopp(ing) -> hop
# tann(ed) -> tan
# fall(ing) -> fall
# hiss(ing) -> hiss
# fizz(ed) -> fizz
        # (m=1 and *o) -> E fail(ing) -> fail
# fil(ing) -> file
if step2b:
if stem.endswith('at') or stem.endswith('bl') or stem.endswith('iz'):
stem += "e"
elif self._check_double_consonant(stem) and not self._check_endswith(stem, "lsz"):
stem = stem[:-1]
elif self._determine_m(stem) == 1 and self._check_o(stem):
stem += "e"
# Step 1c
# (*v*) Y -> I happy -> happi
# sky -> sky
if self._check_vowel(stem) and stem.endswith('y'):
stem = stem[:-1] + "i"
return stem
def _porter_step_2(self, stem):
# Step 2
# (m>0) ATIONAL -> ATE relational -> relate
# (m>0) TIONAL -> TION conditional -> condition
# rational -> rational
# (m>0) ENCI -> ENCE valenci -> valence
# (m>0) ANCI -> ANCE hesitanci -> hesitance
# (m>0) IZER -> IZE digitizer -> digitize
# (m>0) ABLI -> ABLE conformabli -> conformable
# (m>0) ALLI -> AL radicalli -> radical
# (m>0) ENTLI -> ENT differentli -> different
# (m>0) ELI -> E vileli - > vile
# (m>0) OUSLI -> OUS analogousli -> analogous
# (m>0) IZATION -> IZE vietnamization -> vietnamize
# (m>0) ATION -> ATE predication -> predicate
# (m>0) ATOR -> ATE operator -> operate
# (m>0) ALISM -> AL feudalism -> feudal
# (m>0) IVENESS -> IVE decisiveness -> decisive
# (m>0) FULNESS -> FUL hopefulness -> hopeful
# (m>0) OUSNESS -> OUS callousness -> callous
# (m>0) ALITI -> AL formaliti -> formal
        # (m>0) IVITI -> IVE sensitiviti -> sensitive
pair_tests = [('ational','ate'), ('tional','tion'), ('enci','ence'), ('anci','ance'), ('izer', 'ize'),
('abli','able'), ('alli','al'), ('entli', 'ent'), ('eli', 'e'), ('ousli', 'ous'), ('ization', 'ize'),
('ation', 'ate'), ('ator', 'ate'), ('alism', 'al'), ('iveness', 'ive'), ('fulness', 'ful'),
                      ('ousness', 'ous'), ('aliti','al'), ('iviti', 'ive'), ('biliti','ble')]
if self._determine_m(stem) > 0:
for termination, substitute in pair_tests:
if stem.endswith(termination):
stem = stem[: -len(termination)] + substitute
break
return stem
def _porter_step_3(self, stem):
# Step 3
# (m>0) ICATE -> IC triplicate -> triplic
# (m>0) ATIVE -> formative -> form
# (m>0) ALIZE -> AL formalize -> formal
# (m>0) ICITI -> IC electriciti -> electric
# (m>0) ICAL -> IC electrical -> electric
# (m>0) FUL -> hopeful -> hope
# (m>0) NESS -> goodness -> good
pair_tests = [('icate','ic'),('ative',''),('alize','al'),('iciti','ic'),('ical','ic'),('ful',''),('ness','')]
if self._determine_m(stem) > 0:
for termination, substitute in pair_tests:
if stem.endswith(termination):
stem = stem[: -len(termination)] + substitute
break
return stem
def _porter_step_4(self, stem):
"""
Remove suffixes
"""
# Step 4
# (m>1) AL -> revival -> reviv
# (m>1) ANCE -> allowance -> allow
# (m>1) ENCE -> inference -> infer
# (m>1) ER -> airliner -> airlin
# (m>1) IC -> gyroscopic -> gyroscop
# (m>1) ABLE -> adjustable -> adjust
# (m>1) IBLE -> defensible -> defens
# (m>1) ANT -> irritant -> irrit
# (m>1) EMENT -> replacement -> replac
# (m>1) MENT -> adjustment -> adjust
# (m>1) ENT -> dependent -> depend
# (m>1 and (*S or *T)) ION -> adoption -> adopt
# (m>1) OU -> homologou -> homolog
# (m>1) ISM -> communism -> commun
# (m>1) ATE -> activate -> activ
# (m>1) ITI -> angulariti -> angular
# (m>1) OUS -> homologous -> homolog
# (m>1) IVE -> effective -> effect
# (m>1) IZE -> bowdlerize -> bowdler
suffixes_1 = ['al','ance','ence','er','ic','able','ible','ant','ement','ment','ent']
special_case = 'ion'
suffixes_2 = ['ou','ism','ate','iti','ous','ive','ize']
if self._determine_m(stem) > 1:
for suffix in suffixes_1:
if stem.endswith(suffix):
return stem[: -len(suffix)]
if stem.endswith(special_case):
temp = stem[: -len(special_case)]
if self._check_endswith(temp, "st"):
return temp
for suffix in suffixes_2:
if stem.endswith(suffix):
return stem[: -len(suffix)]
return stem
def _porter_step_5(self, stem):
# Step 5a
# (m>1) E -> probate -> probat
# rate -> rate
# (m=1 and not *o) E -> cease -> ceas
# Step 5b
# (m > 1 and *d and *L) -> single letter
# controll -> control
# roll -> roll
#step 5a
if self._determine_m(stem) > 1 and stem.endswith('e') and len(stem) > 4:
stem = stem[:-1]
elif self._determine_m(stem) == 1 and not self._check_o(stem) and stem.endswith('e') and len(stem) > 4:
stem = stem[:-1]
#step5b
        if self._determine_m(stem) > 1 and self._check_double_consonant(stem) and stem.endswith('l') and len(stem) > 4:
stem = stem[:-1]
return stem
def stem_now(self, sentence):
"""
input: A string sentence
"""
stem_words = []
for stem in sentence.split():
stem = self._porter_step_1(stem)
stem = self._porter_step_2(stem)
stem = self._porter_step_3(stem)
stem = self._porter_step_4(stem)
stem = self._porter_step_5(stem)
stem_words.append(stem)
return " ".join(stem_words)
p = PorterStemmer()
# print(p.stem_now("This is
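# A minimal usage sketch (illustrative, not part of the original tutorial):
# print(p.stem_now("caresses ponies hopping"))  # expected to print "caress poni hop"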
session tracks the reference to the TeleApi object
sess = dmon.sessions[list(dmon.sessions.keys())[0]]
self.isinstance(sess.getSessItem(None), TeleApi)
# And that data is available from the session helper API
snfo = await dmon.getSessInfo()
self.len(1, snfo)
self.eq(snfo[0].get('items'), {None: 'synapse.tests.test_telepath.TeleApi'})
self.eq(10, await proxy.getFooBar(20, 10))
# check a custom share works
obj = await proxy.customshare()
self.eq(999, await obj.boo(999))
# Ensure the Share object is placed into the
# session for the daemon.
self.len(2, sess.items)
key = [k for k in sess.items.keys() if k][0]
self.isinstance(sess.getSessItem(key), CustomShare)
# make another customshare reference which will be
# tracked by the Sess object
evt = asyncio.Event()
async with await proxy.customshare():
self.len(3, sess.items)
_key = [k for k in sess.items.keys() if k and k != key][0]
_cshare = sess.getSessItem(_key)
self.isinstance(_cshare, CustomShare)
_cshare.onfini(evt.set)
# and that item is removed from the sess on the
# _share fini by the client
self.true(await asyncio.wait_for(evt.wait(), 6))
self.len(2, sess.items)
self.nn(sess.getSessItem(key))
# ensure that the share is represented in the session info via
# the helper APIs as well
snfo = await dmon.getSessInfo()
self.len(1, snfo)
self.eq(snfo[0].get('items'),
{None: 'synapse.tests.test_telepath.TeleApi',
key: 'synapse.tests.test_telepath.CustomShare'})
# and we can still use the first obj we made
ret = await alist(obj.custgenr(3))
self.eq(ret, [0, 1, 2])
# check that a dynamic share works
async with await self.getTestProxy(dmon, 'woke/up') as proxy:
self.eq('up: beep', await proxy.beep())
async def test_telepath_auth(self):
item = TeleAuth()
async with self.getTestDmon() as dmon:
dmon.share('auth', item)
host, port = dmon.addr
url = 'tcp://localhost/auth'
await self.asyncraises(s_exc.AuthDeny, s_telepath.openurl(url, port=port))
url = 'tcp://visi@localhost/auth'
await self.asyncraises(s_exc.AuthDeny, s_telepath.openurl(url, port=port))
url = 'tcp://visi:secretsauce@localhost/auth'
async with await s_telepath.openurl(url, port=port) as proxy:
self.eq(17, await proxy.getFooBar(10, 7))
async def test_telepath_server_badvers(self):
async with self.getTestDmon() as dmon:
dmon.televers = (0, 0)
host, port = await dmon.listen('tcp://127.0.0.1:0/')
await self.asyncraises(s_exc.BadMesgVers, s_telepath.openurl('tcp://127.0.0.1/', port=port))
async def test_alias(self):
item = TeleAware()
name = 'item'
async with self.getTestDmon() as dmon:
host, port = dmon.addr
dmon.share(name, item)
with self.getTestDir() as dirn:
url = f'tcp://{host}:{port}/{name}'
beepbeep_alias = url + '/beepbeep'
aliases = {name: url,
f'{name}/borp': beepbeep_alias}
with self.setSynDir(dirn):
fp = s_common.getSynPath('aliases.yaml')
s_common.yamlsave(aliases, fp)
                    # Non-existent aliases return None
self.none(s_telepath.alias('newp'))
self.none(s_telepath.alias('newp/path'))
# An exact match wins
self.eq(s_telepath.alias(name), url)
self.eq(s_telepath.alias(f'{name}/borp'), beepbeep_alias)
# Dynamic aliases are valid.
self.eq(s_telepath.alias(f'{name}/beepbeep'), beepbeep_alias)
async with await s_telepath.openurl(name) as prox:
self.eq(10, await prox.getFooBar(20, 10))
# Check to see that we can connect to an aliased name
# with a dynamic share attached to it.
async with await s_telepath.openurl(f'{name}/bar') as prox:
self.eq('bar: beep', await prox.beep())
async def test_default_name(self):
async with self.getTestDmon() as dmon:
host, port = dmon.addr
dmon.share('*', Foo())
async with await s_telepath.openurl(f'tcp://{host}:{port}/') as prox:
self.eq('hiya', await prox.echo('hiya'))
async def test_url_cell(self):
with self.getTestDir(chdir=True) as dirn:
path = os.path.join(dirn, 'cell')
sockpath = os.path.join(path, 'sock')
async with await s_cell.Cell.anit(path) as cell:
# test a relative cell:// url
async with await s_telepath.openurl('cell://cell') as prox:
self.eq('cell', await prox.getCellType())
# unix path information is available from the session information.
snfo = await cell.dmon.getSessInfo()
self.eq(snfo[0].get('conninfo'),
{'family': 'unix',
'addr': sockpath})
# test an absolute cell:// url
async with await s_telepath.openurl(f'cell://{path}') as prox:
self.eq('cell', await prox.getCellType())
# unix path information is available from the session information.
snfo = await cell.dmon.getSessInfo()
self.eq(snfo[0].get('conninfo'),
{'family': 'unix',
'addr': sockpath})
async def test_ipv6(self):
foo = Foo()
async with self.getTestDmon() as dmon:
dmon.share('foo', foo)
try:
addr = await dmon.listen('tcp://[::1]:0/')
except asyncio.CancelledError:
raise
except OSError:
if os.getenv('CIRCLECI', False):
# Circleci container tests do not support IPV6 (but osx does)
# https://circleci.com/docs/2.0/faq/#can-i-use-ipv6-in-my-tests
self.skip('ipv6 is not supported in circleci')
else:
raise
host, port = addr[0], addr[1]
async with await s_telepath.openurl(f'tcp://{host}/foo',
port=port) as prox:
# Ensure that ipv6 is returned via session info
snfo = await dmon.getSessInfo()
conninfo = snfo[0].get('conninfo')
self.eq(conninfo, {'family': 'tcp',
'ipver': 'ipv6',
'addr': prox.link.sock.getsockname()})
# check a standard return value
self.eq(30, await prox.bar(10, 20))
async def test_telepath_client_redir(self):
class TestRedir(s_telepath.Aware):
def __init__(self, valu, redir=None):
self.valu = valu
self.redir = redir
def getTeleApi(self, link, mesg, path):
if self.redir is not None:
raise s_exc.TeleRedir(url=self.redir)
return self
async def dostuff(self, x):
if self.redir:
raise s_exc.TeleRedir(url=self.redir)
return x + self.valu
dmon0 = await s_daemon.Daemon.anit()
dmon1 = await s_daemon.Daemon.anit()
addr0 = await dmon0.listen('tcp://127.0.0.1:0/')
addr1 = await dmon1.listen('tcp://127.0.0.1:0/')
url0 = f'tcp://127.0.0.1:{addr0[1]}/foo'
url1 = f'tcp://127.0.0.1:{addr1[1]}/foo'
rdir0 = TestRedir(10)
rdir1 = TestRedir(20, redir=url0)
dmon0.share('foo', rdir0)
dmon1.share('foo', rdir1)
async with await s_telepath.Client.anit(url0) as targ:
with self.raises(s_exc.NotReady):
targ.dostuff(100)
await targ.waitready()
proxy = await targ.proxy()
self.eq(proxy._getSynVers(), s_version.version)
# Client implements some base helpers the proxy does
self.eq(targ._getSynVers(), s_version.version)
self.eq(targ._getClasses(),
('synapse.tests.test_telepath.TestRedir',
'synapse.telepath.Aware'))
# client works as a passthrough to the proxy
self.eq(110, await targ.dostuff(100))
# this should get redirected to url0...
async with await s_telepath.Client.anit(url1) as targ:
await targ.waitready()
self.eq(110, await targ.dostuff(100))
# fake out the redirect to connect, then redirect on call...
rdir1.redir = None
async with await s_telepath.Client.anit(url1) as targ:
await targ.waitready()
self.eq(120, await targ.dostuff(100))
rdir1.redir = url0
self.eq(110, await targ.dostuff(100))
await dmon0.fini()
await dmon1.fini()
async def test_telepath_client_failover(self):
class TestFail:
def __init__(self):
self.count = 0
async def dostuff(self, x):
self.count += 1
return x + 10
dmon0 = await s_daemon.Daemon.anit()
dmon1 = await s_daemon.Daemon.anit()
addr0 = await dmon0.listen('tcp://127.0.0.1:0/')
addr1 = await dmon1.listen('tcp://127.0.0.1:0/')
url0 = f'tcp://user:[email protected]:{addr0[1]}/foo'
url1 = f'tcp://127.0.0.1:{addr1[1]}/foo'
fail0 = TestFail()
fail1 = TestFail()
dmon0.share('foo', fail0)
dmon1.share('foo', fail1)
urls = (url0, url1)
async with await s_telepath.Client.anit(urls) as targ:
await targ.waitready()
self.eq(110, await targ.dostuff(100))
self.eq(1, fail0.count)
self.eq(0, fail1.count)
await dmon0.fini()
self.eq(110, await targ.dostuff(100))
self.eq(1, fail0.count)
self.eq(1, fail1.count)
async with await s_telepath.Client.anit(urls) as targ:
with self.getAsyncLoggerStream('synapse.telepath', 'Connect call failed') as stream:
await targ.waitready()
# Verify the password doesn't leak into the log
self.true(await stream.wait(2))
stream.seek(0)
mesgs = stream.read()
self.notin('password', mesgs)
self.eq(110, await targ.dostuff(100))
self.eq(1, fail0.count)
self.eq(2, fail1.count)
await dmon1.fini()
async def test_telepath_poolsize(self):
# While test_telepath_sync_genr_break also touches the link pool,
# it doesn't validate the pool size or automatic link teardown
        # behavior when an extra link is placed into the pool.
foo = Foo()
async with self.getTestDmon() as dmon:
dmon.share('foo', foo)
url = f'tcp://127.0.0.1:{dmon.addr[1]}/foo'
# Validate the Proxy behavior then the client override
prox = await s_telepath.openurl(url) # type: Foo
prox._link_poolsize = 2
# Start with no links
self.len(0, prox.links)
self.eq(await prox.echo(1), 1)
# We now have one link - spin up a generator to grab it
self.len(1, prox.links)
l0 = prox.links[0]
genr = await prox.genr() # type: s_coro.GenrHelp
self.eq(await genr.genr.__anext__(), 10)
# The link is being used by the genr
self.len(0, prox.links)
            # and upon exhaustion, that link is put back
self.eq(await genr.list(), (20, 30))
self.len(1, prox.links)
self.true(prox.links[0] is l0)
# Grab the existing link, then do two more calls
genr0 = await prox.genr() # contains l0
genr1 = await prox.genr()
genr2 = await prox.genr()
self.len(0, prox.links)
# Consume two of the three generators
self.eq(await genr2.list(), (10, 20, 30))
self.len(1, prox.links)
self.eq(await genr1.list(), (10, 20, 30))
self.len(2, prox.links)
            # Exhausting the last generator results in its
            # link not being placed back into the pool
self.eq(await genr0.list(), (10, 20, 30))
self.len(2, prox.links)
links = set(lnk for lnk in prox.links)
self.notin(l0, links)
# And that link l0 has been fini'd
self.true(l0.isfini)
# Tear down a link by hand and place it back
# into the pool - that will fail b/c the link
            # has already been torn down.
l1 = await prox.getPoolLink()
self.len(1, prox.links)
await l1.fini()
await prox._putPoolLink(l1)
self.len(1, prox.links)
# And all our links are torn down on fini
await prox.fini()
self.len(1, prox.links)
for link in prox.links:
self.true(link.isfini)
# The telepath Client passes through this value as a configuration parameter
conf = {'link_poolsize': 2, 'timeout': 2}
async with await s_telepath.Client.anit(url, conf=conf) as client:
await client.waitready()
self.true(client._t_proxy._link_poolsize, 2)
async def test_link_fini_breaking_tasks(self):
foo = Foo()
async with self.getTestDmon() as dmon:
dmon.share('foo', foo)
url = f'tcp://127.0.0.1:{dmon.addr[1]}/foo'
prox = await s_telepath.openurl(url) # type: Foo
# Fire up an async generator which will yield a message then
# wait for a while
async for mesg in prox.sleepg(t=60):
self.eq(mesg, ('init', {}))
break
            # Ensure that tearing down the client promptly tears down
# taskv2init coro due to the link being fini'd by the server.
# It is important that we validate these items BEFORE we
# teardown the proxy, since the previous (<0.1.32) behaviour
| |
# USDA_CoA_Cropland.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
import json
import numpy as np
import pandas as pd
from flowsa.common import *
from flowsa.flowbyfunctions import assign_fips_location_system, sector_disaggregation
def CoA_Cropland_URL_helper(build_url, config, args):
"""This helper function uses the "build_url" input from flowbyactivity.py, which is a base url for coa cropland data
that requires parts of the url text string to be replaced with info specific to the usda nass quickstats API.
This function does not parse the data, only modifies the urls from which data is obtained. """
# initiate url list for coa cropland data
urls = []
# call on state acronyms from common.py (and remove entry for DC)
state_abbrevs = abbrev_us_state
state_abbrevs = {k: v for (k, v) in state_abbrevs.items() if k != "DC"}
# replace "__aggLevel__" in build_url to create three urls
for x in config['agg_levels']:
for y in config['sector_levels']:
# at national level, remove the text string calling for state acronyms
if x == 'NATIONAL':
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__secLevel__", y)
url = url.replace("&state_alpha=__stateAlpha__", "")
if y == "ECONOMICS":
url = url.replace(
"AREA HARVESTED&statisticcat_desc=AREA IN PRODUCTION&statisticcat_desc=TOTAL&statisticcat_desc=AREA BEARING %26 NON-BEARING",
"AREA&statisticcat_desc=AREA OPERATED")
else:
url = url.replace("&commodity_desc=AG LAND&commodity_desc=FARM OPERATIONS", "")
url = url.replace(" ", "%20")
urls.append(url)
else:
# substitute in state acronyms for state and county url calls
for z in state_abbrevs:
url = build_url
url = url.replace("__aggLevel__", x)
url = url.replace("__secLevel__", y)
url = url.replace("__stateAlpha__", z)
if y == "ECONOMICS":
url = url.replace(
"AREA HARVESTED&statisticcat_desc=AREA IN PRODUCTION&statisticcat_desc=TOTAL&statisticcat_desc=AREA BEARING %26 NON-BEARING",
"AREA&statisticcat_desc=AREA OPERATED")
else:
url = url.replace("&commodity_desc=AG LAND&commodity_desc=FARM OPERATIONS", "")
url = url.replace(" ", "%20")
urls.append(url)
return urls
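# Illustrative example (hypothetical build_url fragment, not the real NASS Quickstats URL):
# with build_url = ".../api_GET/?agg_level_desc=__aggLevel__&sector_desc=__secLevel__&state_alpha=__stateAlpha__",
# agg_levels ['STATE'] and sector_levels ['CROPS'], the loop above yields one URL per state, e.g.
# ".../api_GET/?agg_level_desc=STATE&sector_desc=CROPS&state_alpha=AL", with spaces encoded as %20.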
def coa_cropland_call(url, coa_response, args):
cropland_json = json.loads(coa_response.text)
df_cropland = pd.DataFrame(data=cropland_json["data"])
return df_cropland
def coa_cropland_parse(dataframe_list, args):
"""Modify the imported data so it meets the flowbyactivity criteria and only includes data on harvested acreage
(irrigated and total). """
df = pd.concat(dataframe_list, sort=False)
# specify desired data based on domain_desc
df = df[~df['domain_desc'].isin(['ECONOMIC CLASS', 'FARM SALES', 'IRRIGATION STATUS', 'CONCENTRATION',
'ORGANIC STATUS', 'NAICS CLASSIFICATION', 'PRODUCERS'])]
df = df[df['statisticcat_desc'].isin(['AREA HARVESTED', 'AREA IN PRODUCTION', 'AREA BEARING & NON-BEARING',
'AREA', 'AREA OPERATED'])]
    # drop rows that subset data into farm sizes (e.g. 'area harvested: (1,000 to 1,999 acres)')
df = df[~df['domaincat_desc'].str.contains(' ACRES')].reset_index(drop=True)
# drop Descriptions that contain certain phrases, as these data are included in other categories
df = df[~df['short_desc'].str.contains('FRESH MARKET|PROCESSING|ENTIRE CROP|NONE OF CROP|PART OF CROP')]
# drop Descriptions that contain certain phrases - only occur in AG LAND data
df = df[~df['short_desc'].str.contains('INSURANCE|OWNED|RENTED|FAILED|FALLOW|IDLE')].reset_index(drop=True)
# Many crops are listed as their own commodities as well as grouped within a broader category (for example, orange
    # trees are also part of orchards). As this data is not needed, takes up space, and can lead to double counting if
    # included, we want to drop these rows
# subset dataframe into the 5 crop types and land in farms and drop rows
# crop totals: drop all data
# field crops: don't want certain commodities and don't want detailed types of wheat, cotton, or sunflower
df_fc = df[df['group_desc'] == 'FIELD CROPS']
df_fc = df_fc[~df_fc['commodity_desc'].isin(['GRASSES', 'GRASSES & LEGUMES, OTHER', 'LEGUMES', 'HAY', 'HAYLAGE'])]
df_fc = df_fc[~df_fc['class_desc'].str.contains('SPRING|WINTER|TRADITIONAL|OIL|PIMA|UPLAND', regex=True)]
# fruit and tree nuts: only want a few commodities
df_ftn = df[df['group_desc'] == 'FRUIT & TREE NUTS']
df_ftn = df_ftn[df_ftn['commodity_desc'].isin(['BERRY TOTALS', 'ORCHARDS'])]
df_ftn = df_ftn[df_ftn['class_desc'].isin(['ALL CLASSES'])]
# horticulture: only want a few commodities
df_h = df[df['group_desc'] == 'HORTICULTURE']
df_h = df_h[df_h['commodity_desc'].isin(['CUT CHRISTMAS TREES', 'SHORT TERM WOODY CROPS'])]
# vegetables: only want a few commodities
df_v = df[df['group_desc'] == 'VEGETABLES']
df_v = df_v[df_v['commodity_desc'].isin(['VEGETABLE TOTALS'])]
# only want ag land and farm operations in farms & land & assets
df_fla = df[df['group_desc'] == 'FARMS & LAND & ASSETS']
df_fla = df_fla[df_fla['short_desc'].str.contains("AG LAND|FARM OPERATIONS")]
# drop the irrigated acreage in farms (want the irrigated harvested acres)
df_fla = df_fla[((df_fla['domaincat_desc'] == 'AREA CROPLAND, HARVESTED:(ANY)') &
(df_fla['domain_desc'] == 'AREA CROPLAND, HARVESTED ') &
(df_fla['short_desc'] == 'AG LAND, IRRIGATED - ACRES'))]
# concat data frames
df = pd.concat([df_fc, df_ftn, df_h, df_v, df_fla], sort=False).reset_index(drop=True)
# drop unused columns
df = df.drop(columns=['agg_level_desc', 'location_desc', 'state_alpha', 'sector_desc',
'country_code', 'begin_code', 'watershed_code', 'reference_period_desc',
'asd_desc', 'county_name', 'source_desc', 'congr_district_code', 'asd_code',
'week_ending', 'freq_desc', 'load_time', 'zip_5', 'watershed_desc', 'region_desc',
'state_ansi', 'state_name', 'country_name', 'county_ansi', 'end_code', 'group_desc'])
# create FIPS column by combining existing columns
df.loc[df['county_code'] == '', 'county_code'] = '000' # add county fips when missing
df['Location'] = df['state_fips_code'] + df['county_code']
df.loc[df['Location'] == '99000', 'Location'] = US_FIPS # modify national level fips
# address non-NAICS classification data
# use info from other columns to determine flow name
df.loc[:, 'FlowName'] = df['statisticcat_desc'] + ', ' + df['prodn_practice_desc']
df.loc[:, 'FlowName'] = df['FlowName'].str.replace(", ALL PRODUCTION PRACTICES", "", regex=True)
df.loc[:, 'FlowName'] = df['FlowName'].str.replace(", IN THE OPEN", "", regex=True)
# combine column information to create activity information, and create two new columns for activities
df['Activity'] = df['commodity_desc'] + ', ' + df['class_desc'] + ', ' + df['util_practice_desc'] # drop this column later
df['Activity'] = df['Activity'].str.replace(", ALL CLASSES", "", regex=True) # not interested in all data from class_desc
df['Activity'] = df['Activity'].str.replace(", ALL UTILIZATION PRACTICES", "", regex=True) # not interested in all data from class_desc
df['ActivityProducedBy'] = np.where(df["unit_desc"] == 'OPERATIONS', df["Activity"], None)
df['ActivityConsumedBy'] = np.where(df["unit_desc"] == 'ACRES', df["Activity"], None)
# rename columns to match flowbyactivity format
df = df.rename(columns={"Value": "FlowAmount", "unit_desc": "Unit",
"year": "Year", "CV (%)": "Spread",
"short_desc": "Description"})
# drop remaining unused columns
df = df.drop(columns=['Activity', 'class_desc', 'commodity_desc', 'domain_desc', 'state_fips_code', 'county_code',
'statisticcat_desc', 'prodn_practice_desc', 'domaincat_desc', 'util_practice_desc'])
# modify contents of units column
df.loc[df['Unit'] == 'OPERATIONS', 'Unit'] = 'p'
    # modify contents of flowamount column, "(D)" is suppressed data, "(Z)" means less than half the unit is shown
df['FlowAmount'] = df['FlowAmount'].str.strip() # trim whitespace
df.loc[df['FlowAmount'] == "(D)", 'FlowAmount'] = withdrawn_keyword
df.loc[df['FlowAmount'] == "(Z)", 'FlowAmount'] = withdrawn_keyword
df['FlowAmount'] = df['FlowAmount'].str.replace(",", "", regex=True)
# USDA CoA 2017 states that (H) means CV >= 99.95, therefore replacing with 99.95 so can convert column to int
# (L) is a CV of <= 0.05
df['Spread'] = df['Spread'].str.strip() # trim whitespace
df.loc[df['Spread'] == "(H)", 'Spread'] = 99.95
df.loc[df['Spread'] == "(L)", 'Spread'] = 0.05
df.loc[df['Spread'] == "", 'Spread'] = None # for instances where data is missing
df.loc[df['Spread'] == "(D)", 'Spread'] = withdrawn_keyword
# add location system based on year of data
df = assign_fips_location_system(df, args['year'])
# Add hardcoded data
df['Class'] = np.where(df["Unit"] == 'ACRES', "Land", "Other")
df['SourceName'] = "USDA_CoA_Cropland"
df['MeasureofSpread'] = "RSD"
df['DataReliability'] = None
df['DataCollection'] = 2
return df
def coa_irrigated_cropland_fba_cleanup(fba):
"""
When using irrigated cropland, aggregate sectors to cropland and total ag land. Doing this because published values
for irrigated harvested cropland do not include the water use for vegetables, woody crops, berries.
:param fba:
:return:
"""
fba = fba[~fba['ActivityConsumedBy'].isin(['AG LAND', 'AG LAND, CROPLAND, HARVESTED'])]
return fba
def disaggregate_coa_cropland_to_6_digit_naics(fba_w_sector, attr):
"""
Disaggregate usda coa cropland to naics 6
:param fba_w_sector:
:param attr:
:return:
"""
# use ratios of usda 'land in farms' to determine animal use of pasturelands at 6 digit naics
fba_w_sector = disaggregate_pastureland(fba_w_sector, attr)
# use ratios of usda 'harvested cropland' to determine missing 6 digit naics
fba_w_sector = disaggregate_cropland(fba_w_sector, attr)
return fba_w_sector
def disaggregate_pastureland(fba_w_sector, attr):
"""
The USDA CoA Cropland irrigated pastureland data only links to the 3 digit NAICS '112'. This function uses state
level CoA 'Land in Farms' to allocate the county level acreage data to 6 digit NAICS.
:param fba_w_sector: The CoA Cropland dataframe after linked to sectors
:return: The CoA cropland dataframe with disaggregated pastureland data
"""
import flowsa
from flowsa.flowbyfunctions import allocate_by_sector, clean_df, flow_by_activity_fields, \
fba_fill_na_dict
# subset the coa data so only pastureland
p = fba_w_sector.loc[fba_w_sector['Sector'] == '112']
# add temp loc column for state fips
p.loc[:, 'Location_tmp'] = p['Location'].apply(lambda x: str(x[0:2]))
# load usda coa cropland naics
df_f = flowsa.getFlowByActivity(flowclass=['Land'],
years=[attr['allocation_source_year']],
datasource='USDA_CoA_Cropland_NAICS')
df_f = clean_df(df_f, flow_by_activity_fields, fba_fill_na_dict)
# subset to land in farms data
df_f = df_f[df_f['FlowName'] == 'FARM OPERATIONS']
# subset to rows related to pastureland
df_f = df_f.loc[df_f['ActivityConsumedBy'].apply(lambda x: str(x[0:3])) == '112']
    # drop rows with '&'
df_f = df_f[~df_f['ActivityConsumedBy'].str.contains('&')]
# create sector column
df_f.loc[:, 'Sector'] = df_f['ActivityConsumedBy']
    # create
- Chapter 4.4
"""
fsim = np.zeros((ns, nsim))
fsim = fsim.astype(int)
Tpow = matrix_power(T, 100)
fprior = np.zeros((1,T.shape[1]))
fprior[0,:] = Tpow[0,:]
for j in range(nsim):
fsim[0, j] = RandDisc(fprior)
for i in range(1,ns):
fcond = np.zeros((1,T.shape[1]))
fcond[0,:] = T[fsim[i-1,j], :]
fsim[i, j] = RandDisc(fcond)
return fsim
def RadialCorrLength(lmin, lmax, azim, theta):
"""
RADIAL CORR LENGTH
Computes the radial correlation length.
Written by <NAME> (August 2020)
Parameters
----------
lmin : float
Minimum correlation length.
lmax : float
Maximum correlation length.
azim : float
Azimuth.
theta : float
Radial coordinate.
Returns
-------
l : float
Radial correlation length.
References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 3.2
"""
# covariance function
l = np.sqrt((lmin ** 2 * lmax ** 2) / (lmax ** 2 * (np.sin(azim - theta)) ** 2 + lmin ** 2 * (np.cos(azim - theta)) ** 2))
return l
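# Quick check of the formula above (illustrative): when theta == azim the expression reduces to
# l = lmax, and when azim - theta = pi/2 (the perpendicular direction) it reduces to l = lmin.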
def SpatialCovariance1D(h, l, krigtype):
"""
SPATIAL COVARIANCE 1D
Computes the 1D spatial covariance function.
Written by <NAME> (August 2020)
Parameters
----------
l : float
Correlation length.
h : float
Distance.
krigtype : str
Function type ('exp', 'gau', 'sph').
Returns
-------
C : array_like
Covariance.
References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 3.2
"""
# covariance function
if krigtype == 'exp':
C = ExpCov(h, l)
elif krigtype == 'gau':
C = GauCov(h, l)
elif krigtype == 'sph':
C = SphCov(h, l)
else:
print('error')
return C
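# Illustrative usage (assumes ExpCov/GauCov/SphCov are defined elsewhere in this module):
# h = np.abs(np.arange(0, 100) - 50)       # distances from a reference location
# C = SpatialCovariance1D(h, 10.0, 'exp')  # exponential covariance with correlation length 10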
def SpatialCovariance2D(lmin, lmax, azim, theta, h, krigtype):
"""
SPATIAL COVARIANCE 2D
Computes the 2D anisotropic spatial covariance function.
Written by <NAME> (August 2020)
Parameters
----------
lmin : float
Minimum correlation length.
lmax : float
Maximum correlation length.
azim : float
Azimuth.
theta : float
Radial coordinate.
h : float
Distance.
krigtype : str
Function type ('exp', 'gau', 'sph').
Returns
-------
C : array_like
Covariance.
References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 3.2
"""
# covariance function
if krigtype == 'exp':
C = ExpCov(h, RadialCorrLength(lmin, lmax, azim, theta))
elif krigtype == 'gau':
C = GauCov(h, RadialCorrLength(lmin, lmax, azim, theta))
elif krigtype == 'sph':
C = SphCov(h, RadialCorrLength(lmin, lmax, azim, theta))
else:
print('error')
return C
def SeqGaussianSimulation(xcoords, dcoords, dvalues, xmean, xvar, l, krigtype, krig):
"""
SEQ GAUSSIAN SIMULATION
Generates a realization of the random variable conditioned on
the available measurements using Sequential Gaussian Simulation.
Written by <NAME> (August 2020)
Parameters
----------
xcoord : array_like
Coordinates of the location for the estimation (np, ndim).
dcoords : array_like
Coordinates of the measurements (nd, ndim).
dvalues : array_like
Values of the measurements (nd, 1).
xmean : float or array (for local variable mean)
Prior mean.
xvar : float
Prior variance.
l : float
Correlation length.
krigtype : str
Function type ('exp', 'gau', 'sph').
krig : int
Kriging type (0=simple, 1=ordinary).
Returns
-------
sgsim : array_like
Realization.
References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 3.5
"""
# initial parameters
n = xcoords.shape[0]
nd = dcoords.shape[0]
# maximum number of conditioning data
nmax = 12
# Data assignment to the simulation grid (-999 or measurements)
sgsim = -999 * np.ones((n, 1))
for i in range(nd):
ind = np.argmin(np.sum((xcoords - dcoords[i, :]) ** 2, axis=1))
sgsim[ind] = dvalues[i]
# random path of locations
npl = n - nd
nonsimcoords = xcoords[sgsim[:,0] == -999, :]
pathind = np.random.permutation(range(npl))
pathcoords = nonsimcoords[pathind, :]
simval = np.zeros((npl, 1))
# if the xmean is a single value, transform to an array
if type(xmean) == float:
xmean = xmean*np.ones((n, 1))
# sequential simulation
for i in range(npl):
if dcoords.shape[0] < nmax:
dc = dcoords
dz = dvalues
else:
# conditioning data selection
dv = []
dv = np.sqrt(np.sum((dcoords - pathcoords[i, :]) ** 2, axis=1))
ind = np.argsort(dv)
dc = dcoords[ind[0:nmax-1],:]
dz = dvalues[ind[0:nmax-1]]
# kriging
if krig == 0:
krigmean, krigvar = SimpleKriging(pathcoords[i,:], dc, dz, xmean[pathind[i]], xvar, l, krigtype)
else:
krigmean, krigvar = OrdinaryKriging(pathcoords[i,:], dc, dz, xvar, l, krigtype)
# realization
simval[pathind[i],0] = krigmean + np.sqrt(krigvar) * np.random.randn(1)
# Adding simulated value the vector of conditioning data
dcoords = np.vstack((dcoords, pathcoords[i, :]))
dvalues = np.vstack((dvalues, simval[pathind[i]]))
# Assigning the sampled values to the simulation grid
sgsim[sgsim[:,0] == -999, 0] = simval[:,0]
return sgsim
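# A minimal usage sketch (illustrative; SimpleKriging/OrdinaryKriging are assumed to come from this module):
# xcoords = np.arange(100).reshape(-1, 1)       # 1D simulation grid
# dcoords = np.array([[10.0], [50.0], [90.0]])  # conditioning locations
# dvalues = np.array([[0.1], [0.3], [0.2]])     # conditioning values
# sgs = SeqGaussianSimulation(xcoords, dcoords, dvalues, 0.2, 0.01, 15, 'exp', 0)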
def SeqIndicatorSimulation(xcoords, dcoords, dvalues, nf, pprior, l, krigtype):
"""
SEQ INDICATOR SIMULATION
Generates a realization of the discrete random variable conditioned on
the available measurements using Sequential Indicator Simulation.
Written by <NAME> (August 2020)
Parameters
----------
xcoord : array_like
Coordinates of the location for the estimation (np, ndim).
dcoords : array_like
Coordinates of the measurements (nd, ndim).
dvalues : array_like
Values of the measurements (ns, 1).
nf : int
Number of possible outcomes (e.g. number of facies).
pprior : array_like
Prior probability (1, nf).
l : float or array_like
Correlation range, for different range for each facies
(array with nf components).
krigtype : str
Function type ('exp', 'gau', 'sph') for different type for each facies,
(array with nf components).
Returns
-------
sgsim : array_like
Realization.
References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 4.2
"""
# initial parameters
n = xcoords.shape[0]
nd = dcoords.shape[0]
# maximum number of conditioning data
nmax = 12
# Data assignment to the simulation grid (-999 or measurements)
sgsim = -999 * np.ones((n, 1))
for i in range(nd):
ind = np.argmin(np.sum((xcoords - dcoords[i, :]) ** 2, axis=1))
sgsim[ind] = dvalues[i]
# random path of locations
npl = n - nd
nonsimcoords = xcoords[sgsim[:,0] == -999, :]
pathind = np.random.permutation(range(npl))
pathcoords = nonsimcoords[pathind, :]
simval = np.zeros((npl, 1))
# sequential simulation
for i in range(npl):
if dcoords.shape[0] < nmax:
dc = dcoords
dz = dvalues
else:
# conditioning data selection
dv = []
dv = np.sqrt(np.sum((dcoords - pathcoords[i, :]) ** 2, axis=1))
ind = np.argsort(dv)
dc = dcoords[ind[0:nmax-1],:]
dz = dvalues[ind[0:nmax-1]]
dz = dz.astype(int)
ikprob, ikmap = IndicatorKriging(pathcoords[i,:], dc, dz, nf, pprior, l, krigtype)
# realization
simval[pathind[i]] = RandDisc(ikprob)
# Adding simulated value the vector of conditioning data
dcoords = np.vstack((dcoords, pathcoords[i, :]))
dvalues = np.vstack((dvalues, simval[pathind[i]]))
# Assigning the sampled values to the simulation grid
sgsim[sgsim[:,0] == -999, 0] = simval[:,0]
return sgsim
def RandDisc(p):
"""
RANDDISC
Samples a discrete random variable with a given probability mass function.
Written by <NAME> (August 2020)
Parameters
----------
p : array_like
Probabilities.
Returns
-------
index : array_like
Sampled value.
References: Grana, Mukerji, Doyen, 2021, Seismic Reservoir Modeling: Wiley - Chapter 4.4
"""
u = np.random.rand(1)
index = 0
s = p[0,0]
    while ((u > s) and (index < p.shape[1] - 1)):
index = index + 1
s = s + p[0,index]
return index
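# For example (illustrative): RandDisc(np.array([[0.2, 0.5, 0.3]])) returns 0, 1 or 2,
# drawn approximately with probabilities 0.2, 0.5 and 0.3 respectively.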
def NonParametricToUniform(data2transform, reference_variables, gridsize=0.05):
"""
STEPWISE CONDITIONAL TRANSFORMATION (non-par to uniform)
    Transforms non-parametrically distributed variables into uniformly distributed variables following the stepwise conditional transformation approach.
    REFERENCE:
Direct Multivariate Simulation - A stepwise conditional transformation for multivariate geostatistical simulation
de Figueiredo et al., 2020
Written by <NAME> (May 2021)
Parameters
----------
data2transform : array_like
        Non-parametrically distributed variables to be transformed to a uniform distribution; each line is a simulation value and each column is a different variable.
    reference_variables : array_like
        Non-parametrically distributed variables to be used as the reference distribution; each line is a simulation value/point and each column is a different variable.
    gridsize : float
        Grid size for conditioning. Low values may leave too few points to compute the conditional distribution; high values may make the transformation less accurate.
Returns
-------
variable_uniform : array_like
Uniformly distributed variables related to data2transform.
"""
# Treatment to ensure that the method works with inputs arrays of shape (n,) or (n,n_variables)
n_points = data2transform.shape[0]
n_variables = int(data2transform.ravel().shape[0]/data2transform.shape[0])
if n_variables == 1:
data2transform = data2transform.reshape( ( data2transform.shape[0], n_variables) )
reference_variables = reference_variables.reshape( ( reference_variables.shape[0], n_variables) )
# Normalize the input variables
min2norm = reference_variables.min(axis=0)
reference_variables = reference_variables - np.tile(min2norm, (reference_variables.shape[0], 1))
| |
import csv
import glob
import math
import os
import torch
from astropy.io import fits
from six.moves import urllib
is_torchvision_installed = True
try:
import torchvision
except ImportError:
is_torchvision_installed = False
import torch.utils.data
import random
import itertools
import numpy as np
def load_GG2_images2(images):
"""
Normalizes images and upscales them
"""
images = [fits.open(file, memmap=False)[0].data for file in images]
images = [torch.from_numpy(x.byteswap().newbyteorder()) for x in images]
    # Normalize
normalize = [3.5239e+10, 1.5327e+09, 1.8903e+09, 1.2963e+09] #normalizations for 4 images
images = [x.mul(n) for x, n in zip(images, normalize)]
#Upscale 66*66 Images
othersv = torch.stack(images[1:])
upsample = torch.nn.Upsample(size=(200, 200), mode='bilinear', align_corners=True)
others_upsampled = torch.squeeze(upsample(othersv.unsqueeze(0)))
return torch.cat((images[0].unsqueeze(0), others_upsampled),dim=0)
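# Note (added for clarity): assuming the EUC_VIS band is 200x200, the result above is a single
# (4, 200, 200) tensor in which the three 66x66 bands have been bilinearly upsampled to match.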
def load_GG2_imagesTransfer(images):
"""
    Normalizes images and does NOT upscale them; returns only the visible part
"""
images = [fits.open(file, memmap=False)[0].data for file in images]
images = [torch.from_numpy(x.byteswap().newbyteorder()) for x in images]
    # Normalize
normalize = [3.5239e+10, 1.5327e+09, 1.8903e+09, 1.2963e+09] #normalizations for 4 images
images = [x.mul(n) for x, n in zip(images, normalize)]
visible = torch.stack(images[1:])
return visible
def label_tansform_basic(labels):
return (int(labels['n_sources']))*2.0 -1.0
def load_GG2_images(images):
"""
Normalizes images and upscales them
"""
images = [fits.open(file, memmap=False)[0].data for file in images]
images = [torch.from_numpy(x.byteswap().newbyteorder()) for x in images]
    # Normalize
normalize = [3.5239e+10, 1.5327e+09, 1.8903e+09, 1.2963e+09] #normalizations for 4 images
images = [x.mul(n) for x, n in zip(images, normalize)]
return images[0].unsqueeze(0), torch.stack(images[1:])
class GG2(torch.utils.data.Dataset):
url_train = 'http://metcalf1.difa.unibo.it/DATA3/datapack2.0train.tar.gz'
url_train_log = 'http://metcalf1.difa.unibo.it/DATA3/image_catalog2.0train.csv'
def __init__(self, root, data_augmentation=False, transform=load_GG2_imagesTransfer,target_transform = label_tansform_basic):
#Upscale
"""
Initializes the dataset with images and labels using the root path given.
The images are transformed using the transfomation given in the second argument.
"""
self.root = os.path.expanduser(root)
self.files = None
self.data = None
#self.tar = None
self.download()
self.transform = transform
self.target_transform= target_transform
self.data_augmentation=data_augmentation
def __getitem__(self, index):
images = self.files[index]
#files = [self.tar.extractfile(self.tar.getmember(x)) for x in images]
ID = int(images[0].split('-')[-1].split('.')[0])
if self.transform:
#files = self.transform(files)
images = self.transform(images)
if self.data_augmentation:
transform = {1: flip_horizontal , 2: flip_vertical, 3: flip_on_diagonal_that_goes_down, 4: flip_on_diagonal_that_goes_up, 5: identity, 6: rotate_by_90_deg, 7: rotate_by_180_deg, 8: rotate_by_270_deg}
num_possible_transf=len(transform)
which_transformation=np.random.randint(1, high=num_possible_transf+1)
images= transform[which_transformation](images)
labels = self.data[ID]
if self.target_transform:
labels = self.target_transform(labels)
return images, labels
def __len__(self):
return len(self.files)
def get_labels(self):
return torch.tensor( [self.data[i]['n_sources']*2.0-1.0 for i in self.data ])
def download(self):
if not os.path.isdir(self.root):
os.makedirs(self.root)
log_path = os.path.join(self.root, "train.csv")
if not os.path.isfile(log_path):
print("Download log...", flush=True)
data = urllib.request.urlopen(self.url_train_log)
with open(log_path, 'wb') as f:
f.write(data.read())
keys = [
'', 'ID', 'x_crit', 'y_crit',
'source_ID', 'z_source', 'z_lens', 'mag_source',
'ein_area', 'n_crit', 'r_source', 'crit_area',
'n_pix_source', 'source_flux', 'n_pix_lens', 'lens_flux',
'n_source_im', 'mag_eff', 'sb_contrast', 'color_diff',
'n_gal_3', 'n_gal_5', 'n_gal_10', 'halo_mass',
'star_mass', 'mag_lens', 'n_sources'
]
assert len(keys) == 27
with open(log_path, 'rt') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
data = [x for x in reader if len(x) == 27 and not 'ID' in x]
data = [{k: float(x) if x else math.nan for k, x in zip(keys, xs)} for xs in data]
self.data = {x['ID']: x for x in data}
gz_path = os.path.join(self.root, "datapack2.0train.tar.gz")
if not os.path.isfile(gz_path):
print("Download...", flush=True)
data = urllib.request.urlopen(self.url_train)
with open(gz_path, 'wb') as f:
f.write(data.read())
tar_path = os.path.join(self.root, "datapack2.0train.tar")
if not os.path.isfile(tar_path):
print("Decompress...", flush=True)
import gzip
import shutil
with gzip.open(gz_path, 'rb') as f_in:
with open(tar_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
dir_path = os.path.join(self.root, "datapack2.0train")
if not os.path.isdir(dir_path):
print("Extract...", flush=True)
import tarfile
tar = tarfile.open(tar_path)
tar.extractall(dir_path)
tar.close()
# print("Open tar...", flush=True)
# import tarfile
# self.tar = tarfile.open(tar_path)
self.files = list(zip(*(
sorted(glob.glob(os.path.join(dir_path, "Public/{}/*.fits".format(band))))
for band in ("EUC_VIS", "EUC_J", "EUC_Y", "EUC_H")
)))
assert all(len({x.split('-')[-1] for x in fs}) == 1 for fs in self.files)
def inf_shuffle(xs):
while xs:
random.shuffle(xs)
for x in xs:
yield x
class BalancedBatchSampler(torch.utils.data.sampler.Sampler):
def __init__(self, dataset, labels=None):
self.labels = labels
self.dataset = dict()
self.balanced_max = 0
# Save all the indices for all the classes
for idx in range(0, len(dataset)):
label = self._get_label(dataset, idx)
if label not in self.dataset:
self.dataset[label] = list()
self.dataset[label].append(idx)
self.balanced_max = len(self.dataset[label]) \
if len(self.dataset[label]) > self.balanced_max else self.balanced_max
# Oversample the classes with fewer elements than the max
for label in self.dataset:
while len(self.dataset[label]) < self.balanced_max:
self.dataset[label].append(random.choice(self.dataset[label]))
self.keys = list(self.dataset.keys())
self.currentkey = 0
self.indices = [-1]*len(self.keys)
def __iter__(self):
while self.indices[self.currentkey] < self.balanced_max - 1:
self.indices[self.currentkey] += 1
yield self.dataset[self.keys[self.currentkey]][self.indices[self.currentkey]]
self.currentkey = (self.currentkey + 1) % len(self.keys)
self.indices = [-1]*len(self.keys)
def _get_label(self, dataset, idx, labels = None):
if self.labels is not None:
return self.labels[idx].item()
else:
raise Exception("You should pass the tensor of labels to the constructor as second argument")
def __len__(self):
return self.balanced_max*len(self.keys)
class BalancedBatchSampler2(torch.utils.data.sampler.Sampler):
def __init__(self, dataset):
from collections import defaultdict
if hasattr(dataset, 'dataset'):
transform = dataset.dataset.transform
dataset.dataset.transform = None # trick to avoid useless computations
indices = defaultdict(list)
for subset_index, full_data_index in enumerate(dataset.indices):
_, label = dataset.dataset[full_data_index]
indices[label].append(subset_index)
dataset.dataset.transform = transform
else:
transform = dataset.transform
dataset.transform = None # trick to avoid useless computations
indices = defaultdict(list)
for i in range(0, len(dataset)):
_, label = dataset[i]
indices[label].append(i)
dataset.transform = transform
self.indices = list(indices.values())
self.n = max(len(ids) for ids in self.indices) * len(self.indices)
def __iter__(self):
m = 0
for xs in zip(*(inf_shuffle(xs) for xs in self.indices)):
for i in xs: # yield one index of each label
yield i
m += 1
if m >= self.n:
return
def __len__(self):
return self.n
def random_splitY(dataset, lengths):
r"""
Randomly split a dataset into non-overlapping new datasets of given lengths.
Arguments:
dataset (Dataset): Dataset to be split
lengths (sequence): lengths of splits to be produced
"""
if sum(lengths) != len(dataset):
raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
indices = torch.randperm(sum(lengths)).tolist()
return indices, [torch.utils.data.Subset(dataset, indices[offset - length:offset]) for offset, length in
zip(itertools.accumulate(lengths), lengths)]
def accuracy(net, loader,device="cpu"):
r"""
Calculates a net's accuracy for a given testset using its dataloader.
Arguments:
loader (Dataloader): Dataloader for the testset
net (pytorch nn): neuralnet used for predictions
"""
correct = 0.0
total = 0.0
with torch.no_grad():
for data in loader:
images, labels = data[0].to(device), data[1].to(device)
predicted = net(images)
#print(predicted.squeeze())
predicted = torch.sign(predicted)
#print(predicted.squeeze())
#print(labels.squeeze())
total += labels.size(0)
correct += (predicted.squeeze() == labels.squeeze()).long().sum().item()
return correct/total
#----------------------transforms for data augmentation-----------------------
def flip_horizontal(tensor):
return tensor.flip(1)
def flip_vertical(tensor):
return tensor.flip(2)
def rotate_by_90_deg(tensor):
return tensor.transpose(1,2).flip(1)
def rotate_by_270_deg(tensor):
return tensor.transpose(1,2).flip(2)
def rotate_by_180_deg(tensor):
return rotate_by_90_deg(rotate_by_90_deg(tensor))
def identity(tensor):
return tensor
def flip_on_diagonal_that_goes_down(tensor):
return tensor.transpose(1,2)
def flip_on_diagonal_that_goes_up(tensor):
return rotate_by_270_deg(flip_on_diagonal_that_goes_down(rotate_by_90_deg(tensor)))
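# A small sanity check (illustrative): these transforms act on (C, H, W) tensors and, for square
# images, four successive 90-degree rotations should return the original tensor, e.g.
# x = torch.randn(4, 200, 200)
# assert torch.equal(rotate_by_90_deg(rotate_by_90_deg(rotate_by_90_deg(rotate_by_90_deg(x)))), x)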
#-----------------------------------------------------
#----------------------Functions for main-----------------------
def MakingDatasets(datapath,transfer_learning, PathDataset,data_augmentation,batch_sizev,test_batch_size,proportion_traindata):
r"""
Imports test and training datasets and downloads and creates them if necessary.
Arguments:
datapath (string): path to dataset
transfer_learning (boolean): Whether to use transfer learning with freezing or not
PathDataset (string): path for creating or loading the dataset
data_augmentation (boolean): whether or not to use data augmentation
        batch_sizev (int): batch size for the training dataloader
test_batch_size (int): batch size for the test dataloader
proportion_traindata (float): proportion of training data in the whole dataset
"""
if transfer_learning:
transform=load_GG2_imagesTransfer
else:
transform=load_GG2_images2
import os
if os.path.isfile(PathDataset):
if os.stat(PathDataset).st_size > 0:
import pickle
with open(PathDataset, 'rb') as pickle_file:
[full_dataset,trainset,testset] = pickle.load(pickle_file)
full_dataset.transform=transform
trainset.transform=transform
testset.transform=transform
print("Loading datasets...")
else:
full_dataset = GG2(datapath,data_augmentation=False,transform=transform)
# To split the full_dataset
train_size = int(proportion_traindata * len(full_dataset))
test_size = len(full_dataset) - train_size
indices, sets = random_splitY(full_dataset, [train_size, test_size])
[trainset, testset]=sets
import pickle
with open(PathDataset, 'wb') as pickle_file:
pickle.dump([full_dataset,trainset,testset],pickle_file)
print("Creating and pickling datasets...")
# Data augmentation
if data_augmentation:
full_dataset.data_augmentation=True
trainset.data_augmentation=True
testset.data_augmentation=True
print(len(trainset))
# Dataloaders
samplerv= BalancedBatchSampler2(trainset)
samplertest = BalancedBatchSampler2(testset)
trainloader = torch.utils.data.DataLoader(trainset, sampler=samplerv, shuffle=False, batch_size= batch_sizev)
testloader = torch.utils.data.DataLoader(testset, sampler=None, shuffle =True, batch_size= test_batch_size)
ROCloader = torch.utils.data.DataLoader(testset,batch_size=1)
return trainloader, testloader, ROCloader
# Replace all batch normalization layers by instance normalization layers
def convert_batch_to_instance(model):
r"""
    Replace all batch normalization layers by instance normalization layers
Arguments:
Model : The model to which this is applied
"""
import torch.nn as nn
for child_name, child in model.named_children():
if isinstance(child, nn.BatchNorm2d):
num_features= child.num_features
setattr(model, child_name, nn.InstanceNorm2d(num_features=num_features))
else:
convert_batch_to_instance(child)
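# Illustrative usage: the conversion mutates the model in place, e.g.
# convert_batch_to_instance(net)  # net now uses InstanceNorm2d wherever BatchNorm2d appeared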
# For initializing the batch normalization layers
def init_batchnorm(model):
r"""
Reinitialises all batch normalization layers
Arguments:
Model : The model to which this is applied
"""
import torch.nn as nn
for child_name, child in model.named_children():
if isinstance(child, nn.BatchNorm2d):
num_features= child.num_features
setattr(model, child_name, nn.BatchNorm2d(num_features=num_features))
else:
            init_batchnorm(child)
def output(testloader,device,net):
r"""
Plots ROC, calculates AUROC, outputs all predictions and labels for the testset to a local csv file
Arguments:
testloader : The dataloader for the testset
"""
predictions = []
labels = []
for k, testset_partial in enumerate(testloader):
if k | |
"""
@Time : 08.12.2021 22:42
@Author : Subvael
@File : DueTime_test.py
"""
import unittest
from airignis import DueTime
from datetime import datetime, time, date, timedelta
from airignis.exceptions import *
from dateutil import tz
import pytz
class DueTimeTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_due_time_1 = DueTime(-2, 'day')
self.test_due_time_2 = DueTime(0, 'day')
self.test_due_time_3: DueTime = None
self.test_due_time_4 = DueTime(3, 'week', time(0, 0, 0, 0), date(2000, 1, 1))
self.test_due_time_5 = DueTime(1, 'month', time(15, 10, 30, 800, tzinfo=tz.gettz('Europe/Berlin')))
self.test_due_time_6 = DueTime(2, 'year', time(15, 10, 30, 800, tzinfo=tz.gettz('Europe/Berlin')),
date(2018, 12, 18))
def test_read_sufficiently_configured_due_time(self):
self.assertEqual(self.test_due_time_4.date_time, datetime.combine(date(2000, 1, 1),
time(0, 0, 0, 0, tz.tzutc())))
def create_due_time_with_undefined_unit(self):
self.test_due_time_3 = DueTime(1, 'undefined_unit')
def test_raise_if_undefined_unit(self):
self.assertRaises(UndefinedRateUnitError, self.create_due_time_with_undefined_unit)
def print_date_time_property(self):
print(self.test_due_time_2.date_time)
def test_date_time_not_configured(self):
self.assertRaises(DateNotConfiguredError, self.print_date_time_property)
def test_negative_rate_input_handling(self):
self.assertEqual(self.test_due_time_1.rate, 2)
def test_null_rate_input_handling(self):
self.assertEqual(self.test_due_time_2.rate, 1)
def test_print_due_time_object_properly(self):
self.assertEqual(str(self.test_due_time_1), 'DueTime: Every 2 days at [Time: 00:00:00+00:00, Timezone: tzutc()]')
def test_month_days(self):
self.assertEqual(DueTime.month_days(12), 31)
self.assertEqual(DueTime.month_days(4, 2022), 30)
self.assertEqual(DueTime.month_days(6, 2022), 30)
self.assertEqual(DueTime.month_days(1, 1050), 31)
self.assertEqual(DueTime.month_days(9, 1050), 30)
self.assertEqual(DueTime.month_days(2, 1050), 28)
self.assertEqual(DueTime.month_days(2, 1061), 28)
self.assertEqual(DueTime.month_days(5, 1061), 31)
self.assertEqual(DueTime.month_days(8, 1185), 31)
self.assertEqual(DueTime.month_days(11, 1185), 30)
def test_months_diff(self):
self.assertEqual(DueTime.months_diff(datetime(2000, 1, 1), datetime(2010, 1, 1)), 120)
self.assertEqual(DueTime.months_diff(datetime(2000, 1, 1), datetime(2110, 6, 2)), 1325)
self.assertEqual(DueTime.months_diff(datetime(2026, 2, 1), datetime(2026, 1, 12)), 1)
self.assertEqual(DueTime.months_diff(datetime(2000, 1, 1), datetime(2010, 3, 27)), 122)
def test_months_diff_order_insensitivity(self):
self.assertEqual(DueTime.months_diff(datetime(2000, 1, 1), datetime(2010, 5, 1)), 124)
self.assertEqual(DueTime.months_diff(datetime(2010, 5, 1), datetime(2000, 1, 1)), 124)
def test_round_down(self):
self.assertEqual(DueTime.round_down(26.669, 2), 26.66)
self.assertEqual(DueTime.round_down(60, 2), 60)
self.assertEqual(DueTime.round_down(999999999.66552336699859, 13), 999999999.6655233669985)
def call_round_down_with_string_value(self):
DueTime.round_down('cat', 2)
def test_round_down_invalid_value(self):
self.assertRaises(ValueError, self.call_round_down_with_string_value)
def call_round_down_with_float_decimals(self):
DueTime.round_down(24.5563, 2.3)
def test_round_down_invalid_decimals(self):
self.assertRaises(ValueError, self.call_round_down_with_float_decimals)
def test_add_months(self):
self.assertEqual(DueTime.add_months(datetime(2021, 12, 18), 3), datetime(2022, 3, 18))
def test_add_months_time_consistency(self):
self.assertEqual(DueTime.add_months(datetime(2021, 12, 18, 17, 30, 10, 600), 3),
datetime(2022, 3, 18, 17, 30, 10, 600))
def test_add_months_timezone_consistency(self):
self.assertEqual(DueTime.add_months(datetime(2021, 12, 18, 0, 0, 0, 0, tz.gettz("US/Mountain")), 3),
datetime(2022, 3, 18, 0, 0, 0, 0, tz.gettz("US/Mountain")))
def test_add_months_days_robustness(self):
self.assertEqual(DueTime.add_months(datetime(2021, 12, 31, 0, 0, 0, 0, tz.gettz("US/Mountain")), 2),
datetime(2022, 2, 28, 0, 0, 0, 0, tz.gettz("US/Mountain")))
def test_add_months_negative_value(self):
self.assertEqual(DueTime.add_months(datetime(2021, 12, 31, 0, 0, 0, 0, tz.gettz("GMT")), -2),
datetime(2021, 10, 31, 0, 0, 0, 0, tz.gettz("GMT")))
def test_add_months_negative_value_at_jan(self):
self.assertEqual(DueTime.add_months(datetime(2021, 1, 1, 0, 0, 0, 0, tz.gettz("GMT")), -2),
datetime(2020, 11, 1, 0, 0, 0, 0, tz.gettz("GMT")))
def test_next_due_year_not_skipped(self):
now = datetime.now(tz=tz.gettz('Navajo'))
rate = 2
test_due = DueTime(rate, 'year', time(15, 10, 30, 800, tzinfo=tz.gettz('Navajo')),
date(now.year - int(rate/2), 12, 18))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, datetime(now.year - int(rate/2), 12, 18,
15, 10, 30, 800, tzinfo=tz.gettz('Navajo')))
self.assertEqual(test_due.next_due, datetime(now.year + int(rate/2), 12, 18,
15, 10, 30, 800, tzinfo=tz.gettz('Navajo')))
self.assertEqual(test_due.skipped_dues, 0)
def test_next_due_year_skipped_not_due_year(self):
now = datetime.now(tz=tz.gettz('Europe/Berlin'))
rate = 2
test_due = DueTime(rate, 'year', time(15, 10, 30, 800, tzinfo=tz.gettz('Europe/Berlin')),
date(now.year - rate - 1, 12, 18))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, datetime(now.year - rate - 1, 12, 18,
15, 10, 30, 800, tzinfo=tz.gettz('Europe/Berlin')))
self.assertEqual(test_due.next_due, datetime(now.year + 1, 12, 18,
15, 10, 30, 800, tzinfo=tz.gettz('Europe/Berlin')))
self.assertEqual(test_due.skipped_dues, 1)
def test_next_due_year_skipped_due__due_before_date(self):
now = datetime.now(tz=tz.gettz('Europe/Berlin'))
test_date = now - timedelta(hours=1)
rate = 3
test_due = DueTime(rate, 'year', time(test_date.hour, test_date.minute, test_date.second,
test_date.microsecond, tzinfo=tz.gettz('Europe/Berlin')),
date(test_date.year - rate * 2, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, datetime(test_date.year - rate * 2, test_date.month, test_date.day,
test_date.hour, test_date.minute, test_date.second,
test_date.microsecond, tzinfo=tz.gettz('Europe/Berlin')))
self.assertEqual(test_due.next_due, datetime(test_date.year + rate, test_date.month, test_date.day,
test_date.hour, test_date.minute, test_date.second,
test_date.microsecond, tzinfo=tz.gettz('Europe/Berlin')))
self.assertEqual(test_due.skipped_dues, 2)
def test_next_due_year_skipped_due__due_after_date(self):
now = datetime.now(tz=tz.gettz('Europe/Berlin'))
test_date = now + timedelta(hours=1)
rate = 3
test_due = DueTime(rate, 'year', time(test_date.hour, test_date.minute, test_date.second,
test_date.microsecond, tzinfo=tz.gettz('Europe/Berlin')),
date(test_date.year - rate * 2, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, datetime(test_date.year - rate * 2, test_date.month, test_date.day,
test_date.hour, test_date.minute, test_date.second,
test_date.microsecond, tzinfo=tz.gettz('Europe/Berlin')))
self.assertEqual(test_due.next_due, test_date)
self.assertEqual(test_due.skipped_dues, 1)
def test_next_due_month_not_skipped(self):
now = datetime.now(tz=tz.gettz('Japan'))
test_date = now + timedelta(hours=1)
rate = 2
test_date = DueTime.add_months(test_date, -int(rate / 2))
test_due = DueTime(rate, 'month', time(15, 10, 30, 800, tzinfo=tz.gettz('Japan')),
date(test_date.year, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, datetime(test_date.year, test_date.month, test_date.day,
15, 10, 30, 800, tzinfo=tz.gettz('Japan')))
self.assertEqual(test_due.next_due, DueTime.add_months(datetime(test_date.year, test_date.month, test_date.day,
15, 10, 30, 800, tzinfo=tz.gettz('Japan')), rate))
self.assertEqual(test_due.skipped_dues, 0)
def test_next_due_month_skipped__not_due_month(self):
now = datetime.now(tz=tz.tzutc())
test_date = now + timedelta(hours=1)
rate = 3
test_date = DueTime.add_months(test_date, -rate*2 - 1)
test_due = DueTime(rate, 'month', time(15, 10, 30, 800),
date(test_date.year, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, datetime(test_date.year, test_date.month, test_date.day,
15, 10, 30, 800, tzinfo=tz.tzutc()))
self.assertEqual(test_due.next_due, DueTime.add_months(datetime(test_date.year, test_date.month, test_date.day,
15, 10, 30, 800, tzinfo=tz.tzutc()), rate*3))
self.assertEqual(test_due.skipped_dues, 2)
def test_next_due_month_skipped__due_month_before_date(self):
now = datetime.now(tz=tz.tzutc())
test_date = now - timedelta(hours=1)
rate = 3
test_date = DueTime.add_months(test_date, -rate*2)
test_due = DueTime(rate, 'month',
time(test_date.hour, test_date.minute, test_date.second, test_date.microsecond),
date(test_date.year, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, test_date)
self.assertEqual(test_due.next_due, DueTime.add_months(test_date, rate*3))
self.assertEqual(test_due.skipped_dues, 2)
def test_next_due_month_skipped__due_month_after_date(self):
now = datetime.now(tz=tz.tzutc())
test_date = now + timedelta(hours=1)
rate = 3
test_date = DueTime.add_months(test_date, -rate*2)
test_due = DueTime(rate, 'month',
time(test_date.hour, test_date.minute, test_date.second, test_date.microsecond),
date(test_date.year, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, test_date)
self.assertEqual(test_due.next_due, DueTime.add_months(test_date, rate*2))
self.assertEqual(test_due.skipped_dues, 1)
def test_next_due_day_not_skipped(self):
now = datetime.now(tz=tz.gettz('Europe/Berlin'))
rate = 2
test_date = now - timedelta(days=int(rate / 2))
test_due = DueTime(rate, 'day', time(15, 10, 30, 800, tzinfo=tz.gettz('Europe/Berlin')),
date(test_date.year, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, datetime(test_date.year, test_date.month, test_date.day,
15, 10, 30, 800, tzinfo=tz.gettz('Europe/Berlin')))
self.assertEqual(test_due.next_due, datetime(test_date.year, test_date.month, test_date.day, 15, 10,
30, 800, tzinfo=tz.gettz('Europe/Berlin')) + timedelta(days=rate))
self.assertEqual(test_due.skipped_dues, 0)
def test_next_due_day_skipped__not_due_day(self):
now = datetime.now(tz=tz.gettz('Europe/Zurich'))
rate = 3
test_date = now - timedelta(days=int(rate*2 + 1))
test_due = DueTime(rate, 'day', time(15, 10, 30, 800, tzinfo=tz.gettz('Europe/Zurich')),
date(test_date.year, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, datetime(test_date.year, test_date.month, test_date.day,
15, 10, 30, 800, tzinfo=tz.gettz('Europe/Zurich')))
self.assertEqual(test_due.next_due, datetime(test_date.year, test_date.month, test_date.day, 15, 10, 30, 800,
tzinfo=tz.gettz('Europe/Zurich')) + timedelta(days=rate*3))
self.assertEqual(test_due.skipped_dues, 2)
def test_next_due_day_skipped__due_day_before_date(self):
now = datetime.now(tz=tz.gettz('Europe/Berlin'))
rate = 3
test_date = now - timedelta(days=int(rate*2), hours=2)
test_due = DueTime(rate, 'day', time(test_date.hour, test_date.minute, test_date.second, test_date.microsecond,
tzinfo=tz.gettz('Europe/Berlin')),
date(test_date.year, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, test_date)
self.assertEqual(test_due.next_due, test_date + timedelta(days=rate*3))
self.assertEqual(test_due.skipped_dues, 2)
def test_next_due_day_skipped__due_day_after_date(self):
now = datetime.now(tz=tz.gettz('Asia/Hong_Kong'))
rate = 3
test_date = now - timedelta(days=int(rate*2), hours=-2)
test_due = DueTime(rate, 'day', time(test_date.hour, test_date.minute, test_date.second, test_date.microsecond,
tzinfo=tz.gettz('Asia/Hong_Kong')),
date(test_date.year, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, test_date)
self.assertEqual(test_due.next_due, test_date + timedelta(days=rate*2))
self.assertEqual(test_due.skipped_dues, 1)
def test_next_due_hour_not_skipped(self):
now = datetime.now(tz=tz.gettz('Mexico/General'))
rate = 2
test_date = now - timedelta(hours=int(rate / 2))
test_due = DueTime(rate, 'hour', time(test_date.hour, test_date.minute, test_date.second, test_date.microsecond,
tzinfo=tz.gettz('Mexico/General')),
date(test_date.year, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, test_date)
self.assertEqual(test_due.next_due, test_date + timedelta(hours=rate))
self.assertEqual(test_due.skipped_dues, 0)
def test_next_due_hour_skipped__not_due_hour(self):
now = datetime.now(tz=tz.gettz('Asia/Kolkata'))
rate = 2
test_date = now - timedelta(hours=int(rate*2 + 1))
test_due = DueTime(rate, 'hour', time(test_date.hour, test_date.minute, test_date.second, test_date.microsecond,
tzinfo=tz.gettz('Asia/Kolkata')),
date(test_date.year, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, test_date)
self.assertEqual(test_due.next_due, test_date + timedelta(hours=rate*3))
self.assertEqual(test_due.skipped_dues, 2)
def test_next_due_hour_skipped__due_hour_before_date(self):
now = datetime.now(tz=tz.gettz('Asia/Singapore'))
rate = 2
test_date = now - timedelta(hours=int(rate*2))
test_due = DueTime(rate, 'hour', time(test_date.hour, test_date.minute, test_date.second, test_date.microsecond,
tzinfo=tz.gettz('Asia/Singapore')),
date(test_date.year, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, test_date)
self.assertEqual(test_due.next_due, test_date + timedelta(hours=rate*3))
self.assertEqual(test_due.skipped_dues, 2)
def test_next_due_hour_skipped__due_hour_after_date(self):
now = datetime.now(tz=tz.gettz('Europe/Berlin'))
rate = 2
test_date = now - timedelta(hours=int(rate*2))
test_due = DueTime(rate, 'hour', time(test_date.hour, test_date.minute, test_date.second, test_date.microsecond,
tzinfo=tz.gettz('Europe/Berlin')),
date(test_date.year, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
test_due.set_test_mode()
self.assertEqual(test_due.next_due, test_date)
self.assertEqual(test_due.next_due, test_date + timedelta(hours=rate*3))
self.assertEqual(test_due.skipped_dues, 2)
def test_next_due_minute_not_skipped(self):
now = datetime.now(tz=tz.gettz('Africa/Johannesburg'))
rate = 2
test_date = now - timedelta(minutes=int(rate / 2))
test_due = DueTime(rate, 'minute', time(test_date.hour, test_date.minute, test_date.second, test_date.microsecond,
tzinfo=tz.gettz('Africa/Johannesburg')),
date(test_date.year, test_date.month, test_date.day))
# Allowing the object to compute more due times than just the next one
# regreg/atoms/cones.py
from copy import copy
import warnings
from scipy import sparse
import numpy as np
from ..problems.composite import nonsmooth, smooth_conjugate
from ..affine import linear_transform, identity as identity_transform
from ..identity_quadratic import identity_quadratic
from ..smooth import affine_smooth
from ..atoms import _work_out_conjugate, atom, affine_atom
from ..objdoctemplates import objective_doc_templater
from ..doctemplates import (doc_template_user, doc_template_provider)
from .projl1_cython import projl1_epigraph
@objective_doc_templater()
class cone(atom):
"""
A class that defines the API for cone constraints.
"""
objective_template = r'\|%(var)s\|'
objective_vars = {'var': r'\beta',
'shape':'p',
'linear':'D',
'offset':r'\alpha',
'coneklass':'nonnegative',
'dualconeklass':'nonpositive',
'initargs':'(30,)', # args needed to construct the penalty
}
tol = 1.0e-05
def __eq__(self, other):
if self.__class__ == other.__class__:
return self.shape == other.shape
return False
def __copy__(self):
return self.__class__(copy(self.shape),
offset=copy(self.offset),
initial=copy(self.coefs),
quadratic=copy(self.quadratic))
def __repr__(self):
if self.quadratic.iszero:
return "%s(%s, offset=%s)" % \
(self.__class__.__name__,
repr(self.shape),
repr(self.offset))
else:
return "%s(%s, offset=%s, quadratic=%s)" % \
(self.__class__.__name__,
repr(self.shape),
repr(self.offset),
repr(self.quadratic))
@doc_template_user
@doc_template_provider
def get_conjugate(self):
"""
Return the conjugate of an given atom.
>>> import regreg.api as rr
>>> penalty = rr.%(coneklass)s(%(initargs)s)
>>> penalty.get_conjugate() # doctest: +SKIP
%(dualconeklass)s(%(initargs)s, offset=None)
"""
if self.quadratic.coef == 0:
offset, outq = _work_out_conjugate(self.offset,
self.quadratic)
cls = conjugate_cone_pairs[self.__class__]
atom = cls(self.shape,
offset=offset,
quadratic=outq)
else:
atom = smooth_conjugate(self)
self._conjugate = atom
self._conjugate._conjugate = self
return self._conjugate
conjugate = property(get_conjugate)
@doc_template_user
@doc_template_provider
def get_dual(self):
r"""
Return the dual of an atom. This dual is formed by making the
substitution $v=Ax$ where $A$ is the `self.linear_transform`.
>>> import regreg.api as rr
>>> penalty = rr.%(coneklass)s(%(initargs)s)
>>> penalty # doctest: +SKIP
%(coneklass)s(%(initargs)s, offset=None)
>>> penalty.dual # doctest: +SKIP
(<regreg.affine.identity object at 0x...>, %(dualconeklass)s(%(initargs)s, offset=None))
If there is a linear part to the penalty, the linear_transform may not be identity:
>>> D = (np.identity(4) + np.diag(-np.ones(3),1))[:-1]
>>> D
array([[ 1., -1., 0., 0.],
[ 0., 1., -1., 0.],
[ 0., 0., 1., -1.]])
>>> linear_atom = rr.nonnegative.linear(D)
>>> linear_atom # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
affine_cone(nonnegative((3,), offset=None), array([[ 1., -1., 0., 0.],
[ 0., 1., -1., 0.],
[ 0., 0., 1., -1.]]))
>>> linear_atom.dual # doctest: +ELLIPSIS
(<regreg.affine.linear_transform object at 0x...>, nonpositive((3,), offset=None))
"""
return self.linear_transform, self.conjugate
@property
def linear_transform(self):
if not hasattr(self, "_linear_transform"):
self._linear_transform = identity_transform(self.shape)
return self._linear_transform
@doc_template_user
@doc_template_provider
def constraint(self, x):
"""
The constraint
.. math::
%(objective)s
"""
raise NotImplementedError
@doc_template_user
@doc_template_provider
def nonsmooth_objective(self, x, check_feasibility=False):
'''
>>> import regreg.api as rr
>>> cone = rr.nonnegative(4)
>>> cone.nonsmooth_objective([3, 4, 5, 9])
0.0
'''
arg = np.asarray(x)
x_offset = self.apply_offset(x)
if check_feasibility:
v = self.constraint(x_offset)
else:
v = 0
v += self.quadratic.objective(arg, 'func')
return v
@doc_template_user
@doc_template_provider
def proximal(self, quadratic, prox_control=None):
r"""
The proximal operator.
.. math::
v^{\lambda}(x) = \text{argmin}_{v \in \mathbb{R}^{%(shape)s}} \frac{L}{2}
\|x-\alpha - v\|^2_2 + %(objective)s + \langle v, \eta \rangle
where :math:`\alpha` is `self.offset`,
:math:`\eta` is `quadratic.linear_term`.
>>> import regreg.api as rr
>>> cone = rr.nonnegative((4,))
>>> Q = rr.identity_quadratic(1.5, [3, -4, -1, 1], 0, 0)
>>> np.allclose(cone.proximal(Q), [3, 0, 0, 1]) # doctest: +NORMALIZE_WHITESPACE
True
Parameters
----------
quadratic : `regreg.identity_quadratic.identity_quadratic`
A quadratic added to the atom before minimizing.
prox_control : `[None, dict]`
This argument is ignored for seminorms, but otherwise
is passed to `regreg.algorithms.FISTA` if the atom
needs to be solved iteratively.
Returns
-------
Z : `np.ndarray(np.float)`
The proximal map of the implied center of `quadratic`.
"""
offset, totalq = (self.quadratic + quadratic).recenter(self.offset)
if totalq.coef == 0:
raise ValueError('lipschitz + quadratic coef must be positive')
prox_arg = -totalq.linear_term / totalq.coef
eta = self.cone_prox(prox_arg)
if offset is None:
return eta
else:
return eta + offset
@doc_template_user
@doc_template_provider
def cone_prox(self, x):
r"""
Return (unique) minimizer
.. math::
%(var)s^{\lambda}(u) = \text{argmin}_{%(var)s \in \mathbb{R}^%(shape)s}
\frac{1}{2} \|%(var)s-u\|^2_2 + %(objective)s
"""
raise NotImplementedError
# the minus signs below for offset are there because affine transforms SUBTRACT
# their offset rather than add it.
# for atoms, the offset is really the "center"
@classmethod
@doc_template_provider
def linear(cls, linear_operator, diag=False,
offset=None,
quadratic=None):
"""
Composition of a cone constraint and a linear
transform.
"""
if not isinstance(linear_operator, linear_transform):
l = linear_transform(linear_operator, diag=diag)
else:
l = linear_operator
if offset is None:
offset = 0
cone = cls(l.output_shape,
offset=-offset,
quadratic=quadratic)
return affine_cone(cone, l)
@classmethod
@doc_template_provider
def affine(cls, linear_operator, offset, diag=False,
quadratic=None):
"""
Composition of a cone constraint and a linear
transform.
"""
if not isinstance(linear_operator, linear_transform):
l = linear_transform(linear_operator, diag=diag)
else:
l = linear_operator
if offset is None:
offset = 0
cone = cls(l.output_shape,
offset=-offset,
quadratic=quadratic)
return affine_cone(cone, l)
@staticmethod
def check_subgradient(atom, prox_center):
r"""
For a given seminorm, verify the KKT condition for
the problem for the proximal problem
.. math::
\text{minimize}_u \frac{1}{2} \|u-z\|^2_2 + h(z)
where $z$ is the `prox_center` and $h$ is `atom`.
This should return two values that are 0,
one is the inner product of the minimizer and the residual, the
other is just 0.
Parameters
----------
atom : `cone`
A cone instance with a `proximal` method.
prox_center : np.ndarray(np.float)
Center for the proximal map.
Returns
-------
v1, v2 : float
Two values that should be equal if the proximal map is correct.
"""
atom = copy(atom)
atom.quadratic = identity_quadratic(0,0,0,0)
atom.offset = None
q = identity_quadratic(1, prox_center, 0, 0)
U = atom.proximal(q)
return ((prox_center - U) * U).sum(), 0
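# Hedged usage sketch for check_subgradient (illustrative, not part of the
# original module): both returned values should be numerically zero when the
# proximal map is implemented correctly. For the nonnegative cone the prox of
# [3, -4, -1, 1] is [3, 0, 0, 1], so the residual is orthogonal to it:
#
# >>> import numpy as np
# >>> import regreg.api as rr
# >>> z = np.array([3., -4., -1., 1.])
# >>> cone.check_subgradient(rr.nonnegative((4,)), z)  # doctest: +SKIP
# (0.0, 0)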
@objective_doc_templater()
class affine_cone(affine_atom):
def __repr__(self):
return "affine_cone(%s, %s)" % (repr(self.atom),
repr(self.linear_transform.linear_operator))
@objective_doc_templater()
class nonnegative(cone):
"""
The non-negative cone constraint (which is the support
function of the non-positive cone constraint).
"""
objective_template = r"""I^{\infty}(%(var)s \succeq 0)"""
objective_vars = cone.objective_vars.copy()
objective_vars['coneklass'] = 'nonnegative'
objective_vars['dualconeklass'] = 'nonpositive'
@doc_template_user
def constraint(self, x):
tol_lim = np.fabs(x).max() * self.tol
incone = np.all(np.greater_equal(x, -tol_lim))
if incone:
return 0
return np.inf
@doc_template_user
def cone_prox(self, x):
return np.maximum(x, 0)
@doc_template_user
def proximal(self, quadratic, prox_control=None):
return cone.proximal(self, quadratic, prox_control)
@doc_template_user
def get_conjugate(self):
return cone.get_conjugate(self)
@doc_template_user
def get_dual(self):
return cone.dual(self)
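# Hedged usage sketch (not in the original source): the nonnegative cone's
# prox is the elementwise projection onto the nonnegative orthant, and its
# constraint is 0 on feasible points and inf otherwise.
#
# >>> import numpy as np
# >>> c = nonnegative((4,))
# >>> c.cone_prox(np.array([3., -4., -1., 1.]))  # doctest: +SKIP
# array([ 3.,  0.,  0.,  1.])
# >>> c.constraint(np.array([1., 2., 3., 4.]))  # doctest: +SKIP
# 0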
@objective_doc_templater()
class nonpositive(nonnegative):
"""
The non-positive cone constraint (which is the support
function of the non-negative cone constraint).
"""
objective_template = r"""I^{\infty}(%(var)s \preceq 0)"""
objective_vars = cone.objective_vars.copy()
objective_vars['dualconeklass'] = 'nonnegative'
objective_vars['coneklass'] = 'nonpositive'
@doc_template_user
def constraint(self, x):
tol_lim = np.fabs(x).max() * self.tol
incone = np.all(np.less_equal(x, tol_lim))
if incone:
return 0
return np.inf
@doc_template_user
def cone_prox(self, x):
return np.minimum(x, 0)
@doc_template_user
def proximal(self, quadratic, prox_control=None):
return cone.proximal(self, quadratic, prox_control)
@doc_template_user
def get_conjugate(self):
return cone.get_conjugate(self)
@doc_template_user
def get_dual(self):
return cone.dual(self)
@objective_doc_templater()
class zero(cone):
"""
The zero seminorm, support function of :math:`\{0\}`
"""
objective_template = r"""{\cal Z}(%(var)s)"""
objective_vars = cone.objective_vars.copy()
objective_vars['coneklass'] = 'zero'
objective_vars['dualconeklass'] = 'zero_constraint'
@doc_template_user
def constraint(self, x):
return 0.
@doc_template_user
def cone_prox(self, x):
return np.asarray(x)
@doc_template_user
def proximal(self, quadratic, prox_control=None):
return cone.proximal(self, quadratic, prox_control)
@doc_template_user
def get_conjugate(self):
return cone.get_conjugate(self)
@doc_template_user
def get_dual(self):
return cone.dual(self)
@objective_doc_templater()
class zero_constraint(cone):
"""
The zero constraint, support function of :math:`\mathbb{R}^p`
"""
objective_template = r"""I^{\infty}(%(var)s = 0)"""
objective_vars = cone.objective_vars.copy()
objective_vars['coneklass'] = 'zero_constraint'
objective_vars['dualconeklass'] = 'zero'
@doc_template_user
def constraint(self, x):
if not np.linalg.norm(x) <= self.tol:
return np.inf
return 0.
@doc_template_user
def cone_prox(self, x):
return np.zeros(np.asarray(x).shape)
@doc_template_user
def proximal(self, quadratic, prox_control=None):
return cone.proximal(self, quadratic, prox_control)
@doc_template_user
def get_conjugate(self):
return cone.get_conjugate(self)
@doc_template_user
def get_dual(self):
return cone.dual(self)
@objective_doc_templater()
class l2_epigraph(cone):
"""
The l2_epigraph constraint.
"""
objective_template = r"""I^{\infty}(\|%(var)s[:-1]\|_2 \leq %(var)s[-1])"""
objective_vars = cone.objective_vars.copy()
objective_vars['coneklass'] = 'l2_epigraph'
objective_vars['dualconeklass'] = 'l2_epigraph_polar'
@doc_template_user
def constraint(self, x):
incone = np.linalg.norm(x[:-1]) <= (1 + self.tol) * x[-1]
if incone:
return 0
return np.inf
@doc_template_user
def cone_prox(self, x):
norm = x[-1]
coef = x[:-1]
norm_coef = np.linalg.norm(coef)
thold = (norm_coef - norm) / 2.
result = np.zeros_like(x)
result[:-1] = coef / norm_coef * max(norm_coef - thold, 0)
result[-1] = max(norm + thold, 0)
return result
@doc_template_user
def proximal(self, quadratic, prox_control=None):
return cone.proximal(self, quadratic, prox_control)
@doc_template_user
def get_conjugate(self):
return cone.get_conjugate(self)
@doc_template_user
def get_dual(self):
return cone.dual(self)
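# Hedged worked example (added for illustration): cone_prox above is the
# Euclidean projection onto the second-order cone {(x, t): ||x||_2 <= t}.
# Projecting ([3, 4], 0): norm_coef = 5, thold = 2.5, so the result is
# ([1.5, 2.0], 2.5), which satisfies ||[1.5, 2.0]||_2 = 2.5 = t exactly.
#
# >>> import numpy as np
# >>> epi = l2_epigraph((3,))
# >>> epi.cone_prox(np.array([3., 4., 0.]))  # doctest: +SKIP
# array([ 1.5,  2. ,  2.5])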
@objective_doc_templater()
class l2_epigraph_polar(cone):
"""
The polar of the l2_epigraph constraint, which is the negative of the
l2 epigraph.
"""
objective_template = r"""I^{\infty}(\|%(var)s[:-1]\|_2 \in -%(var)s[-1])"""
objective_vars = cone.objective_vars.copy()
objective_vars['dualconeklass'] = 'l2_epigraph'
objective_vars['coneklass'] = 'l2_epigraph_polar'
@doc_template_user
def constraint(self, x):
incone = np.linalg.norm(x[:-1]) <= (1 + self.tol) * (-x[-1])
if incone:
return 0
return np.inf
@doc_template_user
def cone_prox(self, arg):
arg = -arg
norm = arg[-1]
coef = arg[:-1]
norm_coef = np.linalg.norm(coef)
thold = (norm_coef - norm) / 2.
result = np.zeros_like(arg)
result[:-1] = coef / norm_coef * max(norm_coef - thold, 0)
# ilona-asa/SpamDetect -- tutorial.py
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.datasets import load_iris
import os
import email.parser
import email.utils
import re
from email.parser import Parser
# from email.Utils import parseaddr
from email.Header import decode_header
iris = load_iris()
# store the feature matrix (X) and response vector (y)
X = iris.data
y = iris.target
# ## Part 3: Reading a text-based dataset into pandas
# In[38]:
atom_rfc2822 = r"[a-zA-Z0-9_!#\$\%&'*+/=?\^`{}~|\-]+"
atom_posfix_restricted = r"[a-zA-Z0-9_#\$&'*+/=?\^`{}~|\-]+" # without '!' and '%'
atom = atom_rfc2822
dot_atom = atom + r"(?:\." + atom + ")*"
quoted = r'"(?:\\[^\r\n]|[^\\"])*"'
local = "(?:" + dot_atom + "|" + quoted + ")"
domain_lit = r"\[(?:\\\S|[\x21-\x5a\x5e-\x7e])*\]"
domain = "(?:" + dot_atom + "|" + domain_lit + ")"
addr_spec = local + "\@" + domain
email_address_re = re.compile('^' + addr_spec + '$')
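# Hedged usage sketch (not in the original script): the compiled pattern
# accepts RFC 2822-style addr-spec strings and rejects malformed input.
#
# assert email_address_re.match('user.name@example.com')
# assert email_address_re.match('not an address') is None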
def getmailaddresses(msg, name):
"""retrieve From:, To: and Cc: addresses"""
addrs = email.utils.getaddresses(msg.get_all(name, []))
for i, (name, addr) in enumerate(addrs):
if not name and addr:
# only one string! Is it the address or is it the name?
# use the same for both and see later
name = addr
try:
# address must be ascii only
addr = addr.encode('ascii')
except UnicodeError:
addr = ''
else:
# address must match address regex
if not email_address_re.match(addr):
addr = ''
addrs[i] = (getmailheader(name), addr)
return addrs
def getmailheader(header_text, default="ascii"):
"""Decode header_text if needed"""
try:
headers = decode_header(header_text)
except email.Errors.HeaderParseError:
# this can already happen in email.base64mime.decode();
# instead return a sanitized ascii string
return header_text.encode('ascii', 'replace').decode('ascii')
else:
for i, (text, charset) in enumerate(headers):
try:
headers[i] = unicode(text, charset or default, errors='replace')
except LookupError:
# if the charset is unknown, force default
headers[i] = unicode(text, default, errors='replace')
return u"".join(headers)
# read file into pandas using a relative path
# path = 'data/sms.tsv'
# path = 'data/0006.2003-12-18.GP.spam.txt'
parser = Parser()
rootdir = '/root/Desktop/Machine_Learning/Project-SpamDetection/'
# rootdir = '/root/Desktop/Machine_Learning/Project-SpamDetection/'
listtexts = []
labels = []
for subdirs, dir, files in os.walk(rootdir):
for file in files:
path = os.path.join(subdirs, file)
if '.idea' in path:
continue
elif 'py' in path:
continue
else:
f = open(path, 'r').read()
msg = email.message_from_string(f)
subject = getmailheader(msg.get('Subject', ''))
# print(subject)
from_ = getmailaddresses(msg, 'from')
from_ = ('', '') if not from_ else from_[0]
print(from_)
if msg.is_multipart():
for payload in msg.get_payload():
Text = str(payload.get_payload())
Text = re.sub(r'[^\x00-\x7F]+',' ', Text)
else:
Text = str(msg.get_payload())
Text = re.sub(r'[^\x00-\x7F]+', ' ', Text)
cleanbr = re.compile('<br>|<BR>')
cleanr = re.compile('<.*?>')
# cleannline = re.compile('\n')
Text = re.sub('\s+', ' ', Text)
# Text = Text.translate(" ", '\t\n ')
Text = re.sub(cleanbr, ' ', Text)
Text = re.sub(cleanr, '', Text)
'''email = f.read()
em = email.splitlines()
Text = ""
flag = 0
for e in em:
if 'X-FileName:' in e:
flag = 1
continue
if flag == 1:
Text = Text + e'''
listtexts.append(Text)
if 'BG' in path or 'GP' in path or 'SH' in path:
labels.append('Spam')
else:
labels.append('Not Spam')
print listtexts
# print labels
# sms = pd.read_table(path, header=None, names=['label', 'message'])
# In[ ]:
# alternative: read file into pandas from a URL
# url = 'https://raw.githubusercontent.com/justmarkham/pycon-2016-tutorial/master/data/sms.tsv'
# sms = pd.read_table(url, header=None, names=['label', 'message'])
# In[39]:
# examine the shape
# sms.shape
# email
# In[41]:
# print ("test")
# print (em)
# In[33]:
# type(em)
# In[34]:
vect = CountVectorizer(stop_words='english')
# learn the 'vocabulary' of the training data (occurs in-place)
vect.fit(listtexts)
# In[35]:
# examine the fitted vocabulary
vect.get_feature_names()
# In[36]:
# transform training data into a 'document-term matrix'
simple_train_dtm = vect.transform(listtexts)
simple_train_dtm
# In[37]:
# convert sparse matrix to a dense matrix
simple_train_dtm.toarray()
# In[ ]:
# examine the first 10 rows
# sms.head(10)
# In[ ]:
# examine the class distribution
# sms.label.value_counts()
# In[ ]:
# convert label to a numerical variable
# sms['label_num'] = sms.label.map({'ham':0, 'spam':1})
# In[ ]:
# check that the conversion worked
# sms.head(10)
# In[ ]:
# how to define X and y (from the iris data) for use with a MODEL
# X = iris.data
# y = iris.target
# print(X.shape)
# print(y.shape)
# In[ ]:
# how to define X and y (from the SMS data) for use with COUNTVECTORIZER
# X = sms.message
# y = sms.label_num
# print(X.shape)
# print(y.shape)
# In[ ]:
# split X and y into training and testing sets
'''
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
# ## Part 4: Vectorizing our dataset
# In[ ]:
# instantiate the vectorizer
vect = CountVectorizer()
# In[ ]:
# learn training data vocabulary, then use it to create a document-term matrix
vect.fit(X_train)
X_train_dtm = vect.transform(X_train)
# In[ ]:
# equivalently: combine fit and transform into a single step
X_train_dtm = vect.fit_transform(X_train)
# In[ ]:
# examine the document-term matrix
X_train_dtm
# In[ ]:
# transform testing data (using fitted vocabulary) into a document-term matrix
X_test_dtm = vect.transform(X_test)
X_test_dtm
# ## Part 5: Building and evaluating a model
#
# We will use [multinomial Naive Bayes](http://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html):
#
# > The multinomial Naive Bayes classifier is suitable for classification with **discrete features** (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work.
# In[ ]:
# import and instantiate a Multinomial Naive Bayes model
from sklearn.naive_bayes import MultinomialNB
nb = MultinomialNB()
# In[ ]:
# train the model using X_train_dtm (timing it with an IPython "magic command")
get_ipython().magic(u'time nb.fit(X_train_dtm, y_train)')
# In[ ]:
# make class predictions for X_test_dtm
y_pred_class = nb.predict(X_test_dtm)
# In[ ]:
# calculate accuracy of class predictions
from sklearn import metrics
metrics.accuracy_score(y_test, y_pred_class)
# In[ ]:
# print the confusion matrix
metrics.confusion_matrix(y_test, y_pred_class)
# In[ ]:
# print message text for the false positives (ham incorrectly classified as spam)
# In[ ]:
# print message text for the false negatives (spam incorrectly classified as ham)
# In[ ]:
# example false negative
X_test[3132]
# In[ ]:
# calculate predicted probabilities for X_test_dtm (poorly calibrated)
y_pred_prob = nb.predict_proba(X_test_dtm)[:, 1]
y_pred_prob
# In[ ]:
# calculate AUC
metrics.roc_auc_score(y_test, y_pred_prob)
# ## Part 6: Comparing models
#
# We will compare multinomial Naive Bayes with [logistic regression](http://scikit-learn.org/stable/modules/linear_model.html#logistic-regression):
#
# > Logistic regression, despite its name, is a **linear model for classification** rather than regression. Logistic regression is also known in the literature as logit regression, maximum-entropy classification (MaxEnt) or the log-linear classifier. In this model, the probabilities describing the possible outcomes of a single trial are modeled using a logistic function.
# In[ ]:
# import and instantiate a logistic regression model
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
# In[ ]:
# train the model using X_train_dtm
get_ipython().magic(u'time logreg.fit(X_train_dtm, y_train)')
# In[ ]:
# make class predictions for X_test_dtm
y_pred_class = logreg.predict(X_test_dtm)
# In[ ]:
# calculate predicted probabilities for X_test_dtm (well calibrated)
y_pred_prob = logreg.predict_proba(X_test_dtm)[:, 1]
y_pred_prob
# In[ ]:
# calculate accuracy
metrics.accuracy_score(y_test, y_pred_class)
# In[ ]:
# calculate AUC
metrics.roc_auc_score(y_test, y_pred_prob)
# ## Part 7: Examining a model for further insight
#
# We will examine our **trained Naive Bayes model** to calculate the approximate **"spamminess" of each token**.
# In[ ]:
# store the vocabulary of X_train
X_train_tokens = vect.get_feature_names()
len(X_train_tokens)
# In[ ]:
# examine the first 50 tokens
print(X_train_tokens[0:50])
# In[ ]:
# examine the last 50 tokens
print(X_train_tokens[-50:])
# In[ ]:
# Naive Bayes counts the number of times each token appears in each class
nb.feature_count_
# In[ ]:
# rows represent classes, columns represent tokens
nb.feature_count_.shape
# In[ ]:
# number of times each token appears across all HAM messages
ham_token_count = nb.feature_count_[0, :]
ham_token_count
# In[ ]:
# number of times each token appears across all SPAM messages
spam_token_count = nb.feature_count_[1, :]
spam_token_count
# In[ ]:
# create a DataFrame of tokens with their separate ham and spam counts
tokens = pd.DataFrame({'token':X_train_tokens, 'ham':ham_token_count, 'spam':spam_token_count}).set_index('token')
tokens.head()
# In[ ]:
# examine 5 random DataFrame rows
tokens.sample(5, random_state=6)
# In[ ]:
# Naive Bayes counts the number of observations in each class
nb.class_count_
# Before we can calculate the "spamminess" of each token, we need to avoid **dividing by zero** and account for the **class imbalance**.
# In[ ]:
# add 1 to ham and spam counts to avoid dividing by 0
tokens['ham'] = tokens.ham + 1
tokens['spam'] = tokens.spam + 1
tokens.sample(5, random_state=6)
# In[ ]:
# convert the ham and spam counts into frequencies
tokens['ham'] = tokens.ham / nb.class_count_[0]
tokens['spam'] = tokens.spam / nb.class_count_[1]
tokens.sample(5, random_state=6)
# In[ ]:
# calculate the ratio of spam-to-ham for each token
tokens['spam_ratio'] = tokens.spam / tokens.ham
tokens.sample(5, random_state=6)
# In[ ]:
# examine the DataFrame sorted by spam_ratio
# note: use sort() instead of sort_values() for pandas 0.16.2 and earlier
tokens.sort_values('spam_ratio', ascending=False)
# In[ ]:
# look up the spam_ratio for a given token
tokens.loc['dating', 'spam_ratio']
# ## Part 8: Practicing this workflow on another dataset
#
# Please open the **`exercise.ipynb`** notebook (or the **`exercise.py`** script).
# ## Part 9: Tuning the vectorizer (discussion)
#
# Thus far, we have been using the default parameters of [CountVectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html):
# In[ ]:
# show default parameters for CountVectorizer
vect
# However, the vectorizer is worth tuning, just like a model is worth tuning! Here are a few parameters that you might want to tune:
#
# - **stop_words:** (see the hedged tuning sketch below)
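# A hedged tuning sketch (added for illustration; the parameter values below
# are arbitrary examples, not recommendations from the original tutorial):
#
# vect = CountVectorizer(stop_words='english',  # drop common English words
#                        ngram_range=(1, 2),    # count unigrams and bigrams
#                        max_df=0.5,            # ignore overly frequent terms
#                        min_df=2)              # ignore terms seen in < 2 docs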
# credits to slimakoi for amino.py base
import json
import base64
import requests
from uuid import UUID
from os import urandom
from time import timezone
from typing import BinaryIO, Union
from binascii import hexlify
from time import time as timestamp
from json_minify import json_minify
from . import client
from .utils import exceptions, headers, device, entities, signature
from torpy.http.requests import TorRequests
device = device.deviceGenerator()
headers.sid = client.Client().sid
class VCHeaders:
def __init__(self, data = None):
vc_headers = {
"Accept-Language": "en-US",
"Content-Type": "application/json",
"User-Agent": "Amino/45725 CFNetwork/1126 Darwin/19.5.0", # Closest server (this one for me)
"Host": "rt.applovin.com",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "Keep-Alive",
"Accept": "*/*"
}
if data: vc_headers["Content-Length"] = str(len(data))
self.vc_headers = vc_headers
class SubClient(client.Client):
def __init__(self, comId: str = None, aminoId: str = None, *, profile: entities.UserProfile):
client.Client.__init__(self)
self.vc_connect = False
if comId is not None:
self.comId = comId
self.community: entities.Community = self.get_community_info(comId)
if aminoId is not None:
self.comId = client.Client().search_community(aminoId).comId[0]
self.community: entities.Community = client.Client().get_community_info(self.comId)
if comId is None and aminoId is None: raise exceptions.NoCommunity()
try: self.profile: entities.UserProfile = self.get_user_info(userId=profile.userId)
except AttributeError: raise exceptions.FailedLogin()
except exceptions.UserUnavailable: pass
def parse_headers(self, data: str = None):
if data is not None:
return headers.ApisHeaders(deviceId=self.device_id, data=data).headers
else:
return headers.ApisHeaders(deviceId=self.device_id).headers
def check_in(self, tz: int = -timezone // 1000):
data = json.dumps({
"timezone": tz,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/x{self.comId}/s/check-in", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def lottery(self, tz: int = -timezone // 1000):
data = json.dumps({
"timezone": tz,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/x{self.comId}/s/check-in/lottery", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return entities.LotteryLog(json.loads(response.text)["lotteryLog"]).LotteryLog
def edit_profile(self, nickname: str = None, content: str = None, icon: BinaryIO = None, chatRequestPrivilege: str = None, imageList: list = None, captionList: list = None, backgroundImage: str = None, backgroundColor: str = None, titles: list = None, colors: list = None, defaultBubbleId: str = None):
mediaList = []
data = {"timestamp": int(timestamp() * 1000)}
if captionList is not None:
for image, caption in zip(imageList, captionList):
mediaList.append([100, self.upload_media(image, "image"), caption])
else:
if imageList is not None:
for image in imageList:
mediaList.append([100, self.upload_media(image, "image"), None])
if imageList is not None or captionList is not None:
data["mediaList"] = mediaList
if nickname: data["nickname"] = nickname
if icon: data["icon"] = self.upload_media(icon, "image")
if content: data["content"] = content
if chatRequestPrivilege: data["extensions"] = {"privilegeOfChatInviteRequest": chatRequestPrivilege}
if backgroundImage: data["extensions"] = {"style": {"backgroundMediaList": [[100, backgroundImage, None, None, None]]}}
if backgroundColor: data["extensions"] = {"style": {"backgroundColor": backgroundColor}}
if defaultBubbleId: data["extensions"] = {"defaultBubbleId": defaultBubbleId}
if titles or colors:
tlt = []
for titles, colors in zip(titles, colors):
tlt.append({"title": titles, "color": colors})
data["extensions"] = {"customTitles": tlt}
data = json.dumps(data)
response = requests.post(f"{self.api}/x{self.comId}/s/user-profile/{self.profile.userId}", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def like_blog(self, blogId: Union[str, list] = None, wikiId: str = None):
"""
Like a Blog, Multiple Blogs or a Wiki.
**Parameters**
- **blogId** : ID of the Blog or List of IDs of the Blogs. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if blogId:
if isinstance(blogId, str):
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = requests.post(f"{self.api}/x{self.comId}/s/blog/{blogId}/vote?cv=1.2", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
elif isinstance(blogId, list):
data["targetIdList"] = blogId
data = json.dumps(data)
response = requests.post(f"{self.api}/x{self.comId}/s/feed/vote", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.WrongType
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.api}/x{self. comId}/s/item/{wikiId}/vote?cv=1.2", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
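# Hedged usage sketch (illustrative only; the community ID and blog IDs are
# placeholders, and the profile is assumed to come from a logged-in Client):
#
# sub = SubClient(comId='1234567', profile=logged_in_client.profile)
# sub.like_blog(blogId='some-blog-id')              # like a single blog
# sub.like_blog(blogId=['blog-id-1', 'blog-id-2'])  # like several blogs at once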
def send_active_obj(self, startTime: int = None, endTime: int = None, optInAdsFlags: int = 2147483647, tz: int = -timezone // 1000, timers: list = None, timestamp: int = int(timestamp() * 1000)):
data = {"userActiveTimeChunkList": [{"start": startTime, "end": endTime}], "timestamp": timestamp, "optInAdsFlags": optInAdsFlags, "timezone": tz}
if timers: data["userActiveTimeChunkList"] = timers
data = json_minify(json.dumps(data))
with TorRequests() as client:
with client.get_session() as session:
response = requests.post(f"{self.api}/x{self.comId}/s/community/stats/user-active-time", headers=headers.ApisHeaders(data=data, deviceId=self.device_id).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def activity_status(self, status: str):
if "on" in status.lower(): status = 1
elif "off" in status.lower(): status = 2
else: raise exceptions.WrongType(status)
data = json.dumps({
"onlineStatus": status,
"duration": 86400,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/x{self.comId}/s/user-profile/{self.profile.userId}/online-status", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def watch_ad(self):
response = requests.post(f"{self.api}/g/s/wallet/ads/video/start", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def start_chat(self, userId: Union[str, list], message: str, title: str = None, content: str = None, isGlobal: bool = False, publishToGlobal: bool = False):
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType(type(userId))
data = {
"title": title,
"inviteeUids": userIds,
"initialMessageContent": message,
"content": content,
"timestamp": int(timestamp() * 1000)
}
if isGlobal is True: data["type"] = 2; data["eventSource"] = "GlobalComposeMenu"
else: data["type"] = 0
if publishToGlobal is True: data["publishToGlobal"] = 1
else: data["publishToGlobal"] = 0
data = json.dumps(data)
response = requests.post(f"{self.api}/x{self.comId}/s/chat/thread", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def invite_to_chat(self, userId: Union[str, list], chatId: str):
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType(type(userId))
data = json.dumps({
"uids": userIds,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/x{self.comId}/s/chat/thread/{chatId}/member/invite", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def send_coins(self, coins: int, blogId: str = None, chatId: str = None, objectId: str = None, transactionId: str = None):
url = None
if transactionId is None: transactionId = str(UUID(hexlify(urandom(16)).decode('ascii')))
data = {
"coins": coins,
"tippingContext": {"transactionId": transactionId},
"timestamp": int(timestamp() * 1000)
}
if blogId is not None: url = f"{self.api}/x{self.comId}/s/blog/{blogId}/tipping"
if chatId is not None: url = f"{self.api}/x{self.comId}/s/chat/thread/{chatId}/tipping"
if objectId is not None:
data["objectId"] = objectId
data["objectType"] = 2
url = f"{self.api}/x{self.comId}/s/tipping"
if url is None: raise exceptions.SpecifyType()
data = json.dumps(data)
response = requests.post(url, headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def thank_tip(self, chatId: str, userId: str):
response = requests.post(f"{self.api}/x{self.comId}/s/chat/thread/{chatId}/tipping/tipped-users/{userId}/thank", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def follow(self, userId: Union[str, list]):
"""
Follow an User or Multiple Users.
**Parameters**
- **userId** : ID of the User or List of IDs of the Users.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str):
response = requests.post(f"{self.api}/x{self.comId}/s/user-profile/{userId}/member", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif isinstance(userId, list):
data = json.dumps({"targetUidList": userId, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/x{self.comId}/s/user-profile/{self.profile.userId}/joined", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.WrongType(type(userId))
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unfollow(self, userId: str):
"""
Unfollow an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.delete(f"{self.api}/x{self.comId}/s/user-profile/{self.profile.userId}/joined/{userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def send_message(self, chatId: str, message: str = None, messageType: int = 0, file: BinaryIO = None, fileType: str = None, replyTo: str = None, mentionUserIds: list = None, stickerId: str = None, embedId: str = None, embedType: int = None, embedLink: str = None, embedTitle: str = None, embedContent: str = None, embedImage: BinaryIO = None):
"""
Send a Message to a Chat.
**Parameters**
- **message** : Message to be sent
- **chatId** : ID of the Chat.
- **file** : File to be sent.
- **fileType** : Type of the file.
- ``audio``, ``image``, ``gif``
- **messageType** : Type of the Message.
- **mentionUserIds** : List of User IDS to mention. '@' needed in the Message.
- **replyTo** : Message ID to reply to.
- **stickerId** : Sticker ID to be sent.
- **embedTitle** : Title of the Embed.
- **embedContent** : Content of the Embed.
- **embedLink** : Link of the Embed.
- **embedImage** : Image of the Embed.
- **embedId** : ID of the Embed.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
# ezequieljsosa/sndg-bio
"""
https://ask.pathwaytools.com/question/17972/access-pathologic-via-api/
So I defined organism-params.dat and genetic-elements.dat in a folder called dir_with_files, included the annotation file test.gbk and started pathway-tools: pwt -patho dir_with_files
organism-params.dat
ID test
NAME <NAME>
STORAGE File
NCBI-TAXON-ID 244566
DOMAIN TAX-2
genetic-elements.dat
ID TEST-CHROM-1
TYPE :CHRSM
CIRCULAR? Y
ANNOT-FILE test.gbk
export PROXY=proxy.fcen.uba.ar:8080
pathway-tools -no-cel-overview -no-web-cel-overview -patho /data/organismos/LHelv/annotation/pathways
"""
import logging
from tqdm import tqdm
from glob import glob
from goatools.obo_parser import GODag
import Bio.SeqIO as bpio
from Bio.Seq import Seq
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio.SeqRecord import SeqRecord
from SNDG import execute, mkdir
# from Bio.Alphabet import NucleotideAlphabet
from SNDG.BioMongo.Model.SeqCollection import SeqCollection
_log = logging.getLogger(__name__)
class PathwayTools:
def __init__(self, workdir, go_db="/data/databases/go/go.obo", assembly_level="CHRSM"):
"""
:param workdir:
:param assembly_level: "CHRSM", "PLASMID", "MT" (mitochondrial chromosome),"PT" (chloroplast chromosome), or "CONTIG"
"""
self.workdir = workdir
self.assembly_level = assembly_level
self.gb_file = workdir + "/pwtools.gb"
self.go_dag = GODag(go_db, load_obsolete=True)
self.default_mappings = {
"CDS": {
"type": "CDS",
"qualifiers": {
"gene_symbol": "gene",
"locus_tag": "locus_tag",
"description": "product",
"Note": "note",
"EC": ("EC_number", lambda x: x.split(":")[1] if len(x.split(":")) > 1 else x),
"EC_number": "EC_number",
"x": "product_comment",
"x": "gene_comment",
"x": "pseudo",
"x": "alt_name",
"x": "db_xref",
"GO": {"go_component": self.process_go_cc,
"go_function": self.process_go_mf,
"go_process": self.process_go_bp}
}},
"gene.mRNA": {
"type": "CDS",
"qualifiers": {
"gene_symbol": "gene",
"locus_tag": "locus_tag",
"description": "product",
"Note": "note",
"EC": ("EC_number", lambda x: x.split(":")[1] if len(x.split(":")) > 1 else x),
"EC_number": "EC_number",
"x": "product_comment",
"x": "gene_comment",
"x": "pseudo",
"x": "alt_name",
"x": "db_xref",
"GO": {"go_component": self.process_go_cc,
"go_function": self.process_go_mf,
"go_process": self.process_go_bp}
}}
}
def process_go_db(self, db, go_code):
go = self.go_dag[go_code]
if go.namespace == db and go.level > 0:
return "|".join([go.name.strip().replace("\n", ""), go_code.split(":")[1], "1", "ISS"])
else:
return None
def process_go_cc(self, go_code):
"""
GO term name, GO term ID, citation PubMed ID, and evidence code, separated by vertical bars. Example:
antimicrobial humoral response|0019730|16163390|IMP
http://geneontology.org/page/guide-go-evidence-codes -->
Inferred from Sequence or structural Similarity (ISS) = default
"""
return self.process_go_db("cellular_component", go_code)
def process_go_mf(self, go_code):
return self.process_go_db("molecular_function", go_code)
def process_go_bp(self, go_code):
return self.process_go_db("biological_process", go_code)
def create_genetic_elements(self):
"""
;;==========================================================================
;; Sample genetic-elements.dat file, created 2/13/2001 by <NAME>.
;; PathoLogic v. 1.8
;;
;; This file contains a set of records, one for each genetic element
;; (chromosome, plasmid, etc.) or contig in the organism. Each genetic
;; element can be described either with a single annotation file or as a set
;; of contigs (but not both). Contigs are each described with a single
;; annotation file. Records are ended by a single line containing only the
;; string "//". Each record contains a set of attribute-value pairs, one
;; per line, separated by the tab character. Lines beginning with ; are
;; comment lines. Valid attributes are:
;;
;; ID -- Required. Unique identifier for the genetic element. The identifer
;; must be unique with respect to all other identifiers used in the
;; PGDB, such as gene and protein identifiers. The ID should start
;; with an alphabetic character, and contain no spaces or special
;; characters. Case insensitive.
;; NAME -- Optional. Descriptive name for the genetic element
;; TYPE -- Optional. One of :CHRSM, :PLASMID, :MT (mitochondrial chromosome),
;; :PT (chloroplast chromosome), or :CONTIG. Defaults to :CHRSM.
;; CIRCULAR? -- Required (except for contigs). Is the genetic element
;; circular? Valid options are Y or N.
;; CODON-TABLE -- Optional, defaults to the codon table specified in the
;; organism.dat file, or to the standard codon-table if not
;; supplied. This should be a number between 1 and 15.
;; ANNOT-FILE -- Required, unless the CONTIG attribute is supplied. Filename
;; for the annotations file for this genetic element. The file
;; must be in either pathologic format (.pf) or genbank
;; format (.gbk). If the file is in the orgcyc/version/input/
;; directory, then only the filename is required, otherwise a
;; full pathname must be supplied.
;; SEQ-FILE -- Optional. Filename for the sequence file for this
;; genetic element. The file must be in FASTA format
;; (.fna or .fsa). If the file is in the orgcyc/version/input/
;; directory, then only the filename is required, otherwise a full
;; pathname must be supplied.
;; CONTIG -- Optional (for all types other than :CONTIG. This field is not
;; permitted in records of type :CONTIG.) ID for a contig that
;; forms part of this genetic element. A genetic element may have
;; multiple CONTIG lines. This field should not be supplied if the
;; ANNOT-FILE attribute is supplied.
;;
;; Note that the "//" is required after the final genetic element, also.
;;===========================================================================
ID TEST-CHROM-1
NAME Chromosome 1
TYPE :CHRSM
CIRCULAR? N
ANNOT-FILE chrom1.pf
SEQ-FILE chrom1.fsa
//
ID TEST-CHROM-2
NAME Chromosome 2
CIRCULAR? N
ANNOT-FILE /mydata/chrom2.gbk
SEQ-FILE /mydata/chrom2.fna
//
ID TEST-CHROM-3
NAME Chromosome 3
TYPE :CHRSM
CIRCULAR? N
CONTIG CONTIG-1
CONTIG CONTIG-2
//
ID TEST-MIT-CHROM
NAME Mitochondrial chromosome
TYPE :MT
CIRCULAR? Y
CODON-TABLE 2
ANNOT-FILE mit-chrom.pf
//
ID CONTIG-1
NAME Contig 1 of Chromosome 3
TYPE :CONTIG
ANNOT-FILE chrom3-contig1.pf
SEQ-FILE chrom3-contig1.fsa
//
ID CONTIG-2
NAME Contig 2 of Chromosome 3
TYPE :CONTIG
ANNOT-FILE chrom3-contig2.pf
SEQ-FILE chrom3-contig2.fsa
//
"""
mkdir(self.workdir + "/")
with open(self.workdir + "/genetic-elements.dat", "w") as genetic_elements:
for seq_record in bpio.parse(self.gb_file, "genbank"):
new_gb = open(self.workdir + "/%s.gbk" % seq_record.id, "w")
bpio.write(seq_record, new_gb, "genbank")
genetic_elements.write("ID\t" + seq_record.id + "\n")
genetic_elements.write("TYPE\t:" + self.assembly_level + "\n")
genetic_elements.write("CIRCULAR?\tN" + "\n")
genetic_elements.write("ANNOT-FILE\t" + seq_record.id + ".gbk" + "\n")
genetic_elements.write("//" + "\n")
def create_pseudo_scaffold(self, gbks_dir, outdir):
pos = 0
features = []
seq = ""
for gb in tqdm(glob(gbks_dir + "/*.gb") + glob(gbks_dir + "/*.gbk")):
for c in bpio.parse(gb, "gb"):
for f in c.features:
start = f.location.start + pos
end = f.location.end + pos
feature = SeqFeature(type=f.type, qualifiers=f.qualifiers,
location=FeatureLocation(start=start, end=end, strand=f.strand))
features.append(feature)
pos += len(c.seq)
seq += "NNNNN" + str(c.seq)
seqrecord = SeqRecord(id="pseudo", name="", description="", features=features,
seq=Seq(seq),annotations={"molecule_type":"DNA"}) #alphabet = NucleotideAlphabet()
with open(outdir + "/genome.gbk", "w") as h:
bpio.write(seqrecord, h, "gb")
with open(outdir + "/genetic-elements.dat", "w") as genetic_elements:
genetic_elements.write("ID\t" + seqrecord.id + "\n")
genetic_elements.write("TYPE\t:CONTIG\n")
genetic_elements.write("CIRCULAR?\tN" + "\n")
genetic_elements.write("ANNOT-FILE\t" + outdir + "/genome.gbk\n")
genetic_elements.write("//" + "\n")
def create_organism_params(self, name, organism, tax, domain):
"""
:param name: Name of the collection in pwtools
:param organism: Description of the organism
:param tax: taxid
:param domain: "TAX-2" (Bacteria), "TAX-2759" (Eukaryota), and "TAX-2157" (Archaea).
:return:
"""
template = """ID\t{name}
NAME\t{organism}
STORAGE\tFile
NCBI-TAXON-ID\t{tax}
DOMAIN\t{domain}"""
with open(self.workdir + "organism-params.dat", "w") as h:
h.write(template.format(name=name, organism=organism, tax=tax, domain=domain))
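# Hedged usage sketch (paths, names and taxonomy values are placeholders;
# the order matters because create_genetic_elements() reads the GenBank file
# written by create_genebank()):
#
# pt = PathwayTools("/tmp/pwtools_run/")
# pt.create_organism_params(name="test", organism="Some organism",
#                           tax=244566, domain="TAX-2")
# pt.create_genebank(bpio.parse("/path/to/annotation.gb", "gb"))
# pt.create_genetic_elements()
# pt.execute()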
def create_genebank(self, contig_iterator, mappings=None):
if not mappings:
mappings = self.default_mappings
def dbfeature2seqfeature(org_feature):
seqf = SeqFeature(
FeatureLocation(org_feature.location.start, org_feature.location.end, org_feature.location.strand),
org_feature.type, id=org_feature.id)
self.map_atributes(mappings, org_feature, seqf)
return seqf
def process_contig(contig):
record = SeqRecord(id=contig.id, seq=Seq(str(contig.seq))) # , alphabet=Alphabet.DNAAlphabet()
for f in contig.features:
if f.type.lower() in ["gene","cds"]:
seqfeature = dbfeature2seqfeature(f)
elif f.type.lower() in ["rrna", "trna", "ncrna"]:
note = f.qualifiers["Note"] if "Note" in f.qualifiers else ""
desc = f.qualifiers["description"] if "description" in f.qualifiers else ""
seqfeature = SeqFeature(f.location,
f.type.replace("ncrna", "misc_RNA"),
id=f.id, qualifiers={"locus_tag": f.id,
"note": note
, "description": desc
, "gene": note
, "alt name": desc
, "product": desc})
elif f.type.lower() in ["contig", "exon", "cdsvi"]:
seqfeature = None
else:
_log.warning("unknow feature " + f.type)
seqfeature = None
if seqfeature:
record.features.append(seqfeature)
return record
with open(self.gb_file, "w") as h:
with tqdm(contig_iterator) as pbar:
for contig in pbar:
pbar.set_description(contig.id)
if contig.features:
new_contig = process_contig(contig)
new_contig.annotations["molecule_type"] = "DNA"
if new_contig.features:
try:
bpio.write(new_contig, h, "gb")
except:
raise
def execute(self, pwtools_path="pathway-tools", proxy="PROXY=proxy.fcen.uba.ar:8080"):
"""
:param pwtools_path: complete path to pathway-tools binary. by default assumes that it is in the PATH
"""
cmd = self.cmd_pwtools(pwtools_path, proxy)
execute(cmd)
def cmd_pwtools(self, pwtools_path, proxy):
return proxy + ' ' + pwtools_path + ' -no-cel-overview -no-web-cel-overview -patho ' + self.workdir
def copy_qualifiers(self, mapping, f_in, f_out):
for k, v in mapping.items():
if k in f_in.qualifiers:
if isinstance(v, dict):
for key, fn in v.items():
value = [y for y in [fn(x) for x in f_in.qualifiers[k]] if y]
if value:
f_out.qualifiers[key] = value
elif isinstance(v, tuple):
dst = v[0]
value = [y for y in [v[1](x) for x in f_in.qualifiers[k]] if y]
if value:
f_out.qualifiers[dst] = value
else:
dst = v
if f_in.qualifiers and (k in f_in.qualifiers):
value = [x for x in f_in.qualifiers[k]]
if value:
f_out.qualifiers[dst] = value
def map_atributes(self, mapping, f_in, f_out):
"""
"gene.mRna" : {
"type" : "CDS",
"qualifiers": {
"gene_symbol" : "gene",
"locus tag": "locus_tag",
"x" : "db xref",
"GO" : ("go_component", process_go_cc ),
"""
for ftype, type_map in mapping.items():
types = ftype.split(".")
if f_in.type != types[0]:
continue#,(ftype,f_in,f_out)
f_in1 = f_in
if len(types) > 1 and hasattr(f_in, "sub_features") and f_in.sub_features:
for subtype in types[1:]:
f_in1 = [x for x in f_in1.sub_features if x.type == subtype]
if f_in1:
f_in1 = f_in1[0]
else:
print (f_in)
continue
if f_in1:
for k, v in type_map.items():
if k == "qualifiers":
self.copy_qualifiers(v, f_in1, f_out)
else:
setattr(f_out, k, getattr(f_in, v) if v.startswith("$") else v)
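# A hypothetical example of the `mapping` structure consumed by map_atributes,
# following the shape sketched in the docstring above (the names are illustrative
# and not taken from self.default_mappings):
#
#   example_mappings = {
#       "gene.mRna": {
#           "type": "CDS",
#           "qualifiers": {
#               "gene_symbol": "gene",                         # plain rename: source -> destination
#               "GO": ("go_component", lambda go: go or None), # (destination, transform) tuple
#               "Dbxref": {"db_xref": lambda x: x},            # dict of destination -> transform
#           },
#       },
#   }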
if __name__ == "__main__":
"""
Example from GenBank
python SNDG/Network/PathwayTools.py -o /tmp/pepe2 -n "Saureus" -desc "Saureus" -ann /home/eze/Downloads/ncbi_BIA_1.gff3 -s /home/eze/Downloads/ncbi_BIA_1.gbf -dn TAX-2 -t 158879
Example from Mongo
python SNDG/Network/PathwayTools.py -o /tmp
import os
import time
from rlib import jit
from rlib.string_stream import encode_to_bytes
from rlib.exit import Exit
from rlib.osext import path_split
from rlib import rgc
from som.vm.symbols import symbol_for, sym_false, sym_true, sym_nil
from som.vmobjects.array import Array
from som.vmobjects.block_bc import block_evaluation_primitive
from som.vmobjects.clazz import Class
from som.vmobjects.object_without_fields import ObjectWithoutFields
from som.vmobjects.object_with_layout import Object
from som.vmobjects.string import String
from som.vm.globals import nilObject, trueObject, falseObject
from som.vm.shell import Shell
from som.compiler.sourcecode_compiler import (
compile_class_from_file,
compile_class_from_string,
)
class Assoc(object):
_immutable_fields_ = ["global_name", "value?"]
def __init__(self, global_name, value):
self.global_name = global_name
self.value = value
def __str__(self):
return "(%s => %s)" % (self.global_name, self.value)
class Universe(object):
_immutable_fields_ = [
"object_class",
"class_class",
"metaclass_class",
"nil_class",
"integer_class",
"integer_layout?",
"array_class",
"array_layout?",
"method_class",
"method_layout?",
"symbol_class",
"symbol_layout?",
"primitive_class",
"primitive_layout?",
"system_class",
"block_class",
"block_classes[*]",
"block_layouts?[*]",
"string_class",
"string_layout?",
"double_class",
"double_layout?",
"_globals",
"start_time",
"_object_system_initialized",
]
def __init__(self, avoid_exit=False):
self._globals = {}
self.object_class = None
self.class_class = None
self.metaclass_class = None
self.nil_class = None
self.integer_class = None
self.integer_layout = None
self.array_class = None
self.array_layout = None
self.method_class = None
self.method_layout = None
self.symbol_class = None
self.symbol_layout = None
self.primitive_class = None
self.primitive_layout = None
self.system_class = None
self.block_class = None
self.block_classes = None
self.block_layouts = None
self.string_class = None
self.string_layout = None
self.double_class = None
self.double_layout = None
self._last_exit_code = 0
self._avoid_exit = avoid_exit
self._dump_bytecodes = False
self.classpath = None
self.start_time = time.time() # a float of the time in seconds
self._object_system_initialized = False
def reset(self, avoid_exit):
self.__init__(avoid_exit)
def exit(self, error_code):
if self._avoid_exit:
self._last_exit_code = error_code
else:
raise Exit(error_code)
def last_exit_code(self):
return self._last_exit_code
def execute_method(self, class_name, selector):
self._initialize_object_system()
clazz = self.load_class(symbol_for(class_name))
if clazz is None:
raise Exception("Class " + class_name + " could not be loaded.")
# Lookup the invokable on class
invokable = clazz.get_class(self).lookup_invokable(symbol_for(selector))
if invokable is None:
raise Exception("Lookup of " + selector + " failed in class " + class_name)
return invokable.invoke_1(clazz)
def interpret(self, arguments):
# Check for command line switches
arguments = self.handle_arguments(arguments)
# Initialize the known universe
system_object = self._initialize_object_system()
# Start the shell if no filename is given
if len(arguments) == 0:
shell = Shell(self)
return shell.start()
arguments_array = self.new_array_with_strings(arguments)
initialize = self.system_class.lookup_invokable(symbol_for("initialize:"))
return initialize.invoke_2(system_object, arguments_array)
def handle_arguments(self, arguments):
got_classpath = False
remaining_args = []
saw_others = False
i = 0
while i < len(arguments):
if arguments[i] == "-cp" and not saw_others:
if i + 1 >= len(arguments):
self._print_usage_and_exit()
self.setup_classpath(arguments[i + 1])
i += 1 # skip class path
got_classpath = True
elif arguments[i] == "-d" and not saw_others:
self._dump_bytecodes = True
elif arguments[i] in ["-h", "--help", "-?"] and not saw_others:
self._print_usage_and_exit()
elif arguments[i] == "--no-gc" and not saw_others:
rgc.disable()
if rgc.isenabled() == 0:
print("GC successfully disabled.")
else:
print("GC still enabled.")
else:
saw_others = True
remaining_args.append(arguments[i])
i += 1
if not got_classpath:
# Get the default class path of the appropriate size
self.classpath = self._default_classpath()
# check remaining args for class paths, and strip file extension
if remaining_args:
split = self._get_path_class_ext(remaining_args[0])
if split[0] != "": # there was a path
self.classpath.insert(0, split[0])
remaining_args[0] = split[1]
return remaining_args
def setup_classpath(self, class_path):
self.classpath = class_path.split(os.pathsep)
@staticmethod
def _default_classpath():
return ["."]
# take argument of the form "../foo/Test.som" and return
# "../foo", "Test", "som"
@staticmethod
def _get_path_class_ext(path):
return path_split(path)
def _print_usage_and_exit(self):
# Print the usage
std_println("Usage: som [-options] [args...] ")
std_println(" ")
std_println("where options include: ")
std_println(" -cp <directories separated by " + os.pathsep + ">")
std_println(" set search path for application classes")
std_println(" -d enable disassembling")
std_println(" -h print this help")
std_println("")
std_println(" --no-gc disable garbage collection")
# Exit
self.exit(0)
def _initialize_object_system(self):
# Allocate the Metaclass classes
self.metaclass_class = self.new_metaclass_class()
# Allocate the rest of the system classes
self.object_class = self.new_system_class()
self.nil_class = self.new_system_class()
self.class_class = self.new_system_class()
self.array_class = self.new_system_class()
self.array_layout = self.array_class.get_layout_for_instances()
self.symbol_class = self.new_system_class()
self.symbol_layout = self.symbol_class.get_layout_for_instances()
self.method_class = self.new_system_class()
self.method_layout = self.method_class.get_layout_for_instances()
self.integer_class = self.new_system_class()
self.integer_layout = self.integer_class.get_layout_for_instances()
self.primitive_class = self.new_system_class()
self.primitive_layout = self.primitive_class.get_layout_for_instances()
self.string_class = self.new_system_class()
self.string_layout = self.string_class.get_layout_for_instances()
self.double_class = self.new_system_class()
self.double_layout = self.double_class.get_layout_for_instances()
# Setup the class reference for the nil object
nilObject.set_class(self.nil_class)
# Initialize the system classes
self._initialize_system_class(self.object_class, None, "Object")
self._initialize_system_class(self.class_class, self.object_class, "Class")
self._initialize_system_class(
self.metaclass_class, self.class_class, "Metaclass"
)
self._initialize_system_class(self.nil_class, self.object_class, "Nil")
self._initialize_system_class(self.array_class, self.object_class, "Array")
self._initialize_system_class(self.method_class, self.object_class, "Method")
self._initialize_system_class(self.integer_class, self.object_class, "Integer")
self._initialize_system_class(
self.primitive_class, self.object_class, "Primitive"
)
self._initialize_system_class(self.string_class, self.object_class, "String")
self._initialize_system_class(self.symbol_class, self.string_class, "Symbol")
self._initialize_system_class(self.double_class, self.object_class, "Double")
# Load methods and fields into the system classes
self._load_system_class(self.object_class)
self._load_system_class(self.class_class)
self._load_system_class(self.metaclass_class)
self._load_system_class(self.nil_class)
self._load_system_class(self.array_class)
self._load_system_class(self.method_class)
self._load_system_class(self.string_class)
self._load_system_class(self.symbol_class)
self._load_system_class(self.integer_class)
self._load_system_class(self.primitive_class)
self._load_system_class(self.double_class)
# Load the generic block class
self.block_class = self.load_class(symbol_for("Block"))
# Setup the true and false objects
true_class_name = symbol_for("True")
true_class = self.load_class(true_class_name)
true_class.load_primitives(False, self)
trueObject.set_class(true_class)
false_class_name = symbol_for("False")
false_class = self.load_class(false_class_name)
false_class.load_primitives(False, self)
falseObject.set_class(false_class)
# Load the system class and create an instance of it
self.system_class = self.load_class(symbol_for("System"))
system_object = self.new_instance(self.system_class)
# Put special objects and classes into the dictionary of globals
self.set_global(sym_nil, nilObject)
self.set_global(sym_true, trueObject)
self.set_global(sym_false, falseObject)
self.set_global(symbol_for("system"), system_object)
self.set_global(symbol_for("System"), self.system_class)
self.set_global(symbol_for("Block"), self.block_class)
self.set_global(symbol_for("Nil"), self.nil_class)
self.set_global(true_class_name, true_class)
self.set_global(false_class_name, false_class)
self.block_classes = [self.block_class] + [
self._make_block_class(i) for i in [1, 2, 3]
]
self.block_layouts = [c.get_layout_for_instances() for c in self.block_classes]
self._object_system_initialized = True
return system_object
def is_object_system_initialized(self):
return self._object_system_initialized
@staticmethod
def new_array_with_strings(strings):
values = [String(s) for s in strings]
return Array.from_objects(values)
@staticmethod
def new_instance(instance_class):
layout = instance_class.get_layout_for_instances()
num_fields = layout.get_number_of_fields()
if num_fields == 0:
return ObjectWithoutFields(layout)
return Object(layout)
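# Note: classes whose instance layout declares no fields get the lighter
# ObjectWithoutFields representation; everything else gets a field-carrying Object.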
def new_metaclass_class(self):
# Allocate the metaclass classes
class_class = Class(0, None)
result = Class(0, class_class)
# Setup the metaclass hierarchy
result.get_class(self).set_class(result)
return result
def new_system_class(self):
# Allocate the new system class
system_class_class = Class(0, None)
system_class = Class(0, system_class_class)
# Setup the metaclass hierarchy
system_class.get_class(self).set_class(self.metaclass_class)
return system_class
def _initialize_system_class(self, system_class, super_class, name):
# Initialize the superclass hierarchy
if super_class:
system_class.set_super_class(super_class)
system_class.get_class(self).set_super_class(super_class.get_class(self))
else:
system_class.get_class(self).set_super_class(self.class_class)
# Initialize the array of instance fields
system_class.set_instance_fields(Array.from_size(0))
system_class.get_class(self).set_instance_fields(Array.from_size(0))
# Initialize the name of the system class
system_class.set_name(symbol_for(name))
system_class.get_class(self).set_name(symbol_for(name + " class"))
# Insert the system class into the dictionary of globals
self.set_global(system_class.get_name(), system_class)
def get_global(self, name):
# Return the global with the given name if it's in the dictionary of globals
# if not, return None
jit.promote(self)
assoc = self._get_global(name)
if assoc:
return assoc.value
return None
@jit.elidable
def _get_global(self, name):
return self._globals.get(name, None)
def set_global(self, name, value):
self.get_globals_association(name).value = value
@jit.elidable_promote("all")
def has_global(self, name):
return name in self._globals
@jit.elidable_promote("all")
def get_globals_association(self, name):
assoc = self._globals.get(name, None)
if assoc is None:
assoc = Assoc(name, nilObject)
self._globals[name] = assoc
return assoc
def get_globals_association_or_none(self, name):
return self._globals.get(name, None)
def _make_block_class(self, number_of_arguments):
# Compute the name of the block class with the given number of
# arguments
name = symbol_for("Block" + str(number_of_arguments))
# Get the block class for blocks with the given number of arguments
result = self._load_class(name, None)
# Add the appropriate value primitive to the block class
result.add_primitive(block_evaluation_primitive(number_of_arguments), True)
# Insert the block class into the dictionary of globals
self.set_global(name, result)
# Return the loaded block class
return result
def load_class(self, name):
# Check if the requested class is already in the dictionary of globals
result = self.get_global(name)
if result is not None:
return result
# Load the class
result = self._load_class(name, None)
self._load_primitives(result, False)
self.set_global(name, result)
return result
def _load_primitives(self, clazz, is_system_class):
if not clazz:
return
if clazz.needs_primitives() or is_system_class:
clazz.load_primitives(not is_system_class, self)
def _load_system_class(self, system_class):
# Load the system class
result = self._load_class(system_class.get_name(), system_class)
if not result:
error_println(
system_class.get_name().get_embedded_string()
+ " class could not be loaded. It is likely that the"
+ " class path has not been initialized properly."
+ " Please make sure that the '-cp' parameter is given on the command-line."
)
self.exit(200)
self._load_primitives(result, True)
def _load_class(self, name, system_class):
# Try loading the class from all different paths
for cp_entry in self.classpath:
try:
# Load the class from a file and return the loaded class
result = compile_class_from_file(
cp_entry, name.get_embedded_string(), system_class, self
)
if self._dump_bytecodes:
from som.compiler.disassembler import dump
dump(result.get_class(self))
dump(result)
return result
except IOError:
# Continue trying different paths
pass
# The class could not be found.
return None
def load_shell_class(self, stmt):
# Load the class from a stream and return the loaded class
result = compile_class_from_string(stmt, None, self)
if self._dump_bytecodes:
from som.compiler.disassembler import dump
dump(result)
# sdk/python/pulumi_spotinst/azure/elastigroup.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Elastigroup(pulumi.CustomResource):
custom_data: pulumi.Output[str]
desired_capacity: pulumi.Output[float]
"""
The desired number of instances the group should have at any time.
"""
health_check: pulumi.Output[dict]
images: pulumi.Output[list]
integration_kubernetes: pulumi.Output[dict]
integration_multai_runtime: pulumi.Output[dict]
load_balancers: pulumi.Output[list]
login: pulumi.Output[dict]
low_priority_sizes: pulumi.Output[list]
"""
Available Low-Priority sizes.
"""
managed_service_identities: pulumi.Output[list]
max_size: pulumi.Output[float]
"""
The maximum number of instances the group should have at any time.
"""
min_size: pulumi.Output[float]
"""
The minimum number of instances the group should have at any time.
"""
name: pulumi.Output[str]
"""
The name of the managed identity.
"""
network: pulumi.Output[dict]
od_sizes: pulumi.Output[list]
"""
Available On-Demand sizes
"""
product: pulumi.Output[str]
"""
Operation system type. Valid values: `"Linux"`, `"Windows"`.
"""
region: pulumi.Output[str]
"""
The region your Azure group will be created in.
"""
resource_group_name: pulumi.Output[str]
"""
The Resource Group that the user-assigned managed identity resides in.
"""
scaling_down_policies: pulumi.Output[list]
scaling_up_policies: pulumi.Output[list]
scheduled_tasks: pulumi.Output[list]
shutdown_script: pulumi.Output[str]
"""
Shutdown script for the group. Value should be passed as a string encoded at Base64 only.
"""
strategy: pulumi.Output[dict]
"""
Describes the deployment strategy.
* `draining_timeout` (`float`) - Time (seconds) to allow the instance to be drained from incoming TCP connections and detached from MLB before terminating it during a scale-down operation.
* `lowPriorityPercentage` (`float`) - Percentage of Low Priority instances to maintain. Required if `od_count` is not specified.
* `odCount` (`float`) - Number of On-Demand instances to maintain. Required if low_priority_percentage is not specified.
"""
update_policy: pulumi.Output[dict]
user_data: pulumi.Output[str]
"""
Base64-encoded MIME user data to make available to the instances.
"""
def __init__(__self__, resource_name, opts=None, custom_data=None, desired_capacity=None, health_check=None, images=None, integration_kubernetes=None, integration_multai_runtime=None, load_balancers=None, login=None, low_priority_sizes=None, managed_service_identities=None, max_size=None, min_size=None, name=None, network=None, od_sizes=None, product=None, region=None, resource_group_name=None, scaling_down_policies=None, scaling_up_policies=None, scheduled_tasks=None, shutdown_script=None, strategy=None, update_policy=None, user_data=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Spotinst elastigroup Azure resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[float] desired_capacity: The desired number of instances the group should have at any time.
:param pulumi.Input[list] low_priority_sizes: Available Low-Priority sizes.
:param pulumi.Input[float] max_size: The maximum number of instances the group should have at any time.
:param pulumi.Input[float] min_size: The minimum number of instances the group should have at any time.
:param pulumi.Input[str] name: The name of the managed identity.
:param pulumi.Input[list] od_sizes: Available On-Demand sizes
:param pulumi.Input[str] product: Operation system type. Valid values: `"Linux"`, `"Windows"`.
:param pulumi.Input[str] region: The region your Azure group will be created in.
:param pulumi.Input[str] resource_group_name: The Resource Group that the user-assigned managed identity resides in.
:param pulumi.Input[str] shutdown_script: Shutdown script for the group. Value should be passed as a string encoded at Base64 only.
:param pulumi.Input[dict] strategy: Describes the deployment strategy.
:param pulumi.Input[str] user_data: Base64-encoded MIME user data to make available to the instances.
The **health_check** object supports the following:
* `autoHealing` (`pulumi.Input[bool]`)
* `gracePeriod` (`pulumi.Input[float]`)
* `health_check_type` (`pulumi.Input[str]`)
The **images** object supports the following:
* `customs` (`pulumi.Input[list]`)
* `imageName` (`pulumi.Input[str]`)
* `resource_group_name` (`pulumi.Input[str]`) - The Resource Group that the user-assigned managed identity resides in.
* `marketplaces` (`pulumi.Input[list]`)
* `offer` (`pulumi.Input[str]`)
* `publisher` (`pulumi.Input[str]`)
* `sku` (`pulumi.Input[str]`)
The **integration_kubernetes** object supports the following:
* `clusterIdentifier` (`pulumi.Input[str]`)
The **integration_multai_runtime** object supports the following:
* `deploymentId` (`pulumi.Input[str]`)
The **load_balancers** object supports the following:
* `autoWeight` (`pulumi.Input[bool]`)
* `balancerId` (`pulumi.Input[str]`)
* `targetSetId` (`pulumi.Input[str]`)
* `type` (`pulumi.Input[str]`)
The **login** object supports the following:
* `password` (`pulumi.Input[str]`)
* `sshPublicKey` (`pulumi.Input[str]`)
* `userName` (`pulumi.Input[str]`)
The **managed_service_identities** object supports the following:
* `name` (`pulumi.Input[str]`) - The name of the managed identity.
* `resource_group_name` (`pulumi.Input[str]`) - The Resource Group that the user-assigned managed identity resides in.
The **network** object supports the following:
* `additionalIpConfigs` (`pulumi.Input[list]`)
* `name` (`pulumi.Input[str]`) - The name of the managed identity.
* `privateIpVersion` (`pulumi.Input[str]`)
* `assignPublicIp` (`pulumi.Input[bool]`)
* `resource_group_name` (`pulumi.Input[str]`) - The Resource Group that the user-assigned managed identity resides in.
* `subnetName` (`pulumi.Input[str]`)
* `virtualNetworkName` (`pulumi.Input[str]`)
The **scaling_down_policies** object supports the following:
* `actionType` (`pulumi.Input[str]`)
* `adjustment` (`pulumi.Input[str]`)
* `cooldown` (`pulumi.Input[float]`)
* `dimensions` (`pulumi.Input[list]`)
* `name` (`pulumi.Input[str]`) - The name of the managed identity.
* `value` (`pulumi.Input[str]`)
* `evaluationPeriods` (`pulumi.Input[float]`)
* `maxTargetCapacity` (`pulumi.Input[str]`)
* `maximum` (`pulumi.Input[str]`)
* `metricName` (`pulumi.Input[str]`)
* `minTargetCapacity` (`pulumi.Input[str]`)
* `minimum` (`pulumi.Input[str]`)
* `namespace` (`pulumi.Input[str]`)
* `operator` (`pulumi.Input[str]`)
* `period` (`pulumi.Input[float]`)
* `policyName` (`pulumi.Input[str]`)
* `statistic` (`pulumi.Input[str]`)
* `target` (`pulumi.Input[str]`)
* `threshold` (`pulumi.Input[float]`)
* `unit` (`pulumi.Input[str]`)
The **scaling_up_policies** object supports the following:
* `actionType` (`pulumi.Input[str]`)
* `adjustment` (`pulumi.Input[str]`)
* `cooldown` (`pulumi.Input[float]`)
* `dimensions` (`pulumi.Input[list]`)
* `name` (`pulumi.Input[str]`) - The name of the managed identity.
* `value` (`pulumi.Input[str]`)
* `evaluationPeriods` (`pulumi.Input[float]`)
* `maxTargetCapacity` (`pulumi.Input[str]`)
* `maximum` (`pulumi.Input[str]`)
* `metricName` (`pulumi.Input[str]`)
* `minTargetCapacity` (`pulumi.Input[str]`)
* `minimum` (`pulumi.Input[str]`)
* `namespace` (`pulumi.Input[str]`)
* `operator` (`pulumi.Input[str]`)
* `period` (`pulumi.Input[float]`)
* `policyName` (`pulumi.Input[str]`)
* `statistic` (`pulumi.Input[str]`)
* `target` (`pulumi.Input[str]`)
* `threshold` (`pulumi.Input[float]`)
* `unit` (`pulumi.Input[str]`)
The **scheduled_tasks** object supports the following:
* `adjustment` (`pulumi.Input[str]`)
* `adjustmentPercentage` (`pulumi.Input[str]`)
* `batchSizePercentage` (`pulumi.Input[str]`)
* `cronExpression` (`pulumi.Input[str]`)
* `gracePeriod` (`pulumi.Input[str]`)
* `isEnabled` (`pulumi.Input[bool]`)
* `scaleMaxCapacity` (`pulumi.Input[str]`)
* `scaleMinCapacity` (`pulumi.Input[str]`)
* `scaleTargetCapacity` (`pulumi.Input[str]`)
* `taskType` (`pulumi.Input[str]`)
The **strategy** object supports the following:
* `draining_timeout` (`pulumi.Input[float]`) - Time (seconds) to allow the instance to be drained from incoming TCP connections and detached from MLB before terminating it during a scale-down operation.
* `lowPriorityPercentage` (`pulumi.Input[float]`) - Percentage of Low Priority instances to maintain. Required if `od_count` is not specified.
* `odCount` (`pulumi.Input[float]`) - Number of On-Demand instances to maintain. Required if low_priority_percentage is not specified.
The **update_policy** object supports the following:
* `rollConfig` (`pulumi.Input[dict]`)
* `batchSizePercentage` (`pulumi.Input[float]`)
* `gracePeriod` (`pulumi.Input[float]`)
* `health_check_type` (`pulumi.Input[str]`)
* `shouldRoll` (`pulumi.Input[bool]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-spotinst/blob/master/website/docs/r/elastigroup_azure.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['custom_data'] = custom_data
__props__['desired_capacity'] = desired_capacity
__props__['health_check'] = health_check
__props__['images'] = images
__props__['integration_kubernetes'] = integration_kubernetes
__props__['integration_multai_runtime'] = integration_multai_runtime
__props__['load_balancers'] = load_balancers
__props__['login'] = login
if low_priority_sizes is None:
raise TypeError("Missing required property 'low_priority_sizes'")
__props__['low_priority_sizes'] = low_priority_sizes
__props__['managed_service_identities'] = managed_service_identities
__props__['max_size'] = max_size
__props__['min_size'] = min_size
__props__['name'] = name
if network is None:
raise TypeError("Missing required property 'network'")
__props__['network'] = network
if od_sizes is None:
raise TypeError("Missing required property 'od_sizes'")
__props__['od_sizes'] = od_sizes
if product is None:
raise TypeError("Missing required property 'product'")
__props__['product'] = product
if region is None:
raise TypeError("Missing required property 'region'")
__props__['region'] = region
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['scaling_down_policies'] = scaling_down_policies
__props__['scaling_up_policies'] = scaling_up_policies
__props__['scheduled_tasks'] = scheduled_tasks
__props__['shutdown_script'] = shutdown_script
if strategy is None:
raise TypeError("Missing required property 'strategy'")
__props__['strategy'] = strategy
__props__['update_policy'] = update_policy
__props__['user_data'] = user_data
super(Elastigroup, __self__).__init__(
'spotinst:azure/elastigroup:Elastigroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, custom_data=None, desired_capacity=None, health_check=None, images=None, integration_kubernetes=None, integration_multai_runtime=None, load_balancers=None, login=None, low_priority_sizes=None, managed_service_identities=None, max_size=None, min_size=None, name=None, network=None, od_sizes=None, product=None, region=None, resource_group_name=None, scaling_down_policies=None, scaling_up_policies=None, scheduled_tasks=None, shutdown_script=None, strategy=None, update_policy=None, user_data=None):
"""
Get an existing Elastigroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to
from __future__ import print_function
import sys
sys.path.insert(0, '.')
import torch
from torch.autograd import Variable
import torch.optim as optim
from torch.nn.parallel import DataParallel
import time
import os.path as osp
from tensorboardX import SummaryWriter
import numpy as np
import argparse
import scipy.io
from bpm.dataset import create_dataset
#from bpm.model.PCBModel import PCBModel as Model
#from bpm.model.PCB_TripletModel import PCBModel as Model
from bpm.model.PCB_TripletModel_record import PCBModel as Model
# from bpm.model.PCB_ColModel import PCBModel as Model
from bpm.utils.utils import time_str,save_mat
from bpm.utils.utils import str2bool
from bpm.utils.utils import may_set_mode
from bpm.utils.utils import load_state_dict
from bpm.utils.utils import load_ckpt
from bpm.utils.utils import save_ckpt
from bpm.utils.utils import set_devices
from bpm.utils.utils import AverageMeter
from bpm.utils.utils import to_scalar
from bpm.utils.utils import ReDirectSTD
from bpm.utils.utils import set_seed
from bpm.utils.utils import adjust_lr_staircase
class Config(object):
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--sys_device_ids', type=eval, default=(0,))
parser.add_argument('-r', '--run', type=int, default=1)
parser.add_argument('--set_seed', type=str2bool, default=False)
parser.add_argument('--dataset', type=str, default='market1501',
choices=['mars_oldmask_retain','mars','mars20','mars22','mars23','mars30','mars32','mars33','market',\
'cuhk20','cuhk22','cuhk23','cuhk20_retain','cuhk22_retain','cuhk23_retain','cuhk30','cuhk32','cuhk33',\
'cuhk30_retain','cuhk32_retain','cuhk33_retain','cuhk40','cuhk42','cuhk43','cuhk40_retain','cuhk42_retain',\
'cuhk43_retain','market1501','market_combined','market23','market22', 'market20','market20_retain','market22_retain',\
'market23_retain', 'market30','market32','market33','market30_retain','market32_retain','market33_retain',\
'market40','market42','market43','market40_retain','market42_retain','market43_retain','market_oldmask',\
'market_oldmask_retain','market_trans','market_png','market30_retain_pixel3'])
parser.add_argument('--trainset_part', type=str, default='trainval',
choices=['trainval', 'train'])
parser.add_argument('--resize_h_w', type=eval, default=(384, 128))
# These several only for training set
parser.add_argument('--crop_prob', type=float, default=0)
parser.add_argument('--crop_ratio', type=float, default=1)
parser.add_argument('--mirror', type=str2bool, default=True)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--log_to_file', type=str2bool, default=True)
parser.add_argument('--steps_per_log', type=int, default=20)
parser.add_argument('--epochs_per_val', type=int, default=1)
parser.add_argument('--last_conv_stride', type=int, default=1, choices=[1, 2])
# When the stride is changed to 1, we can compensate for the receptive field
# using dilated convolution. However, experiments show dilated convolution is useless.
parser.add_argument('--last_conv_dilation', type=int, default=1, choices=[1, 2])
parser.add_argument('--num_stripes', type=int, default=6)
parser.add_argument('--local_conv_out_channels', type=int, default=256)
parser.add_argument('--only_test', type=str2bool, default=False)
parser.add_argument('--only_record', type=str2bool, default=False)
parser.add_argument('--resume', type=str2bool, default=False)
parser.add_argument('--exp_dir', type=str, default='')
parser.add_argument('--model_weight_file', type=str, default='')
parser.add_argument('--new_params_lr', type=float, default=0.1)
parser.add_argument('--finetuned_params_lr', type=float, default=0.01)
parser.add_argument('--staircase_decay_at_epochs',
type=eval, default=(41,))
parser.add_argument('--staircase_decay_multiply_factor',
type=float, default=0.1)
parser.add_argument('--total_epochs', type=int, default=60)
args = parser.parse_args()
# gpu ids
self.sys_device_ids = args.sys_device_ids
# If you want to make your results exactly reproducible, you have
# to fix a random seed.
if args.set_seed:
self.seed = 1
else:
self.seed = None
# The experiments can be run for several times and performances be averaged.
# `run` starts from `1`, not `0`.
self.run = args.run
###########
# Dataset #
###########
# If you want to make your results exactly reproducible, you have
# to also set num of threads to 1 during training.
if self.seed is not None:
self.prefetch_threads = 1
else:
self.prefetch_threads = 2
self.dataset = args.dataset
self.trainset_part = args.trainset_part
# Image Processing
# Just for training set
self.crop_prob = args.crop_prob
self.crop_ratio = args.crop_ratio
self.resize_h_w = args.resize_h_w
# Whether to scale by 1/255
self.scale_im = True
self.im_mean = None #four channels
self.im_std = None
self.train_mirror_type = 'random' if args.mirror else None
self.train_batch_size = args.batch_size
self.train_final_batch = True
self.train_shuffle = True
self.test_mirror_type = None
self.test_batch_size = 32
self.test_final_batch = True
self.test_shuffle = False
dataset_kwargs = dict(
name=self.dataset,
resize_h_w=self.resize_h_w,
scale=self.scale_im,
im_mean=self.im_mean,
im_std=self.im_std,
batch_dims='NCHW',
num_prefetch_threads=self.prefetch_threads)
prng = np.random
if self.seed is not None:
prng = np.random.RandomState(self.seed)
self.train_set_kwargs = dict(
part=self.trainset_part,
batch_size=self.train_batch_size,
final_batch=self.train_final_batch,
shuffle=self.train_shuffle,
crop_prob=self.crop_prob,
crop_ratio=self.crop_ratio,
mirror_type=self.train_mirror_type,
prng=prng)
self.train_set_kwargs.update(dataset_kwargs)
prng = np.random
if self.seed is not None:
prng = np.random.RandomState(self.seed)
self.val_set_kwargs = dict(
part='val',
batch_size=self.test_batch_size,
final_batch=self.test_final_batch,
shuffle=self.test_shuffle,
mirror_type=self.test_mirror_type,
prng=prng)
self.val_set_kwargs.update(dataset_kwargs)
prng = np.random
if self.seed is not None:
prng = np.random.RandomState(self.seed)
self.test_set_kwargs = dict(
part='test',
batch_size=self.test_batch_size,
final_batch=self.test_final_batch,
shuffle=self.test_shuffle,
mirror_type=self.test_mirror_type,
prng=prng)
self.test_set_kwargs.update(dataset_kwargs)
###############
# ReID Model #
###############
# The last block of ResNet has stride 2. We can set the stride to 1 so that
# the spatial resolution before global pooling is doubled.
self.last_conv_stride = args.last_conv_stride
# When the stride is changed to 1, we can compensate for the receptive field
# using dilated convolution. However, experiments show dilated convolution is useless.
self.last_conv_dilation = args.last_conv_dilation
# Number of stripes (parts)
self.num_stripes = args.num_stripes
# Output channel of 1x1 conv
self.local_conv_out_channels = args.local_conv_out_channels
#############
# Training #
#############
self.momentum = 0.9
self.weight_decay = 0.0005
# Initial learning rate
self.new_params_lr = args.new_params_lr
self.finetuned_params_lr = args.finetuned_params_lr
self.staircase_decay_at_epochs = args.staircase_decay_at_epochs
self.staircase_decay_multiply_factor = args.staircase_decay_multiply_factor
# Number of epochs to train
self.total_epochs = args.total_epochs
# How often (in epochs) to test on val set.
self.epochs_per_val = args.epochs_per_val
# How often (in batches) to log. If only need to log the average
# information for each epoch, set this to a large value, e.g. 1e10.
self.steps_per_log = args.steps_per_log
# Only test and without training.
self.only_test = args.only_test
#record the features
self.only_record = args.only_record
self.resume = args.resume
#######
# Log #
#######
# If True,
# 1) stdout and stderr will be redirected to file,
# 2) training loss etc will be written to tensorboard,
# 3) checkpoint will be saved
self.log_to_file = args.log_to_file
# The root dir of logs.
if args.exp_dir == '':
self.exp_dir = osp.join(
'exp/train',
'{}'.format(self.dataset),
'run{}'.format(self.run),
)
else:
self.exp_dir = args.exp_dir
self.stdout_file = osp.join(
self.exp_dir, 'stdout_{}.txt'.format(time_str()))
self.stderr_file = osp.join(
self.exp_dir, 'stderr_{}.txt'.format(time_str()))
# Saving model weights and optimizer states, for resuming.
self.ckpt_file = osp.join(self.exp_dir, 'ckpt.pth')
self.record_file = osp.join(self.exp_dir,'feat.mat')
# Just for loading a pretrained model; no optimizer states is needed.
self.model_weight_file = args.model_weight_file
class ExtractFeature(object):
"""A function to be called in the val/test set, to extract features.
Args:
TVT: A callable to transfer images to specific device.
"""
def __init__(self, model, TVT):
self.model = model
self.TVT = TVT
def __call__(self, ims):
old_train_eval_model = self.model.training
# Set eval mode.
# Force all BN layers to use global mean and variance, also disable
# dropout.
self.model.eval()
ims = Variable(self.TVT(torch.from_numpy(ims).float()))
try:
local_feat_list, logits_list = self.model(ims)
except:
local_feat_list = self.model(ims)
feat = [lf.data.cpu().numpy() for lf in local_feat_list]
feat = np.concatenate(feat, axis=1)
# Restore the model to its old train/eval mode.
self.model.train(old_train_eval_model)
return feat
class ExtractTFeature(object):
def __init__(self, model, TVT):
self.model = model
self.TVT = TVT
def __call__(self, ims):
old_train_eval_model = self.model.training
# Set eval mode.
# Force all BN layers to use global mean and variance, also disable
# dropout.
self.model.eval()
ims = Variable(self.TVT(torch.from_numpy(ims).float()))
try:
feat, g_list, h_list = self.model(ims)
except:
local_feat_list = self.model(ims)
# Restore the model to its old train/eval mode.
self.model.train(old_train_eval_model)
return feat,g_list,h_list
def main():
cfg = Config()
# Redirect logs to both console and file.
if cfg.log_to_file:
ReDirectSTD(cfg.stdout_file, 'stdout', False)
ReDirectSTD(cfg.stderr_file, 'stderr', False)
# Lazily create SummaryWriter
writer = None
TVT, TMO = set_devices(cfg.sys_device_ids)
if cfg.seed is not None:
set_seed(cfg.seed)
# Dump the configurations to log.
import pprint
print('-' * 60)
print('cfg.__dict__')
pprint.pprint(cfg.__dict__)
print('-' * 60)
###########
# Dataset #
###########
train_set = create_dataset(**cfg.train_set_kwargs)
print('train_set shape:{}'.format(len(train_set.im_names)))
num_classes = len(train_set.ids2labels)
# The combined dataset does not provide val set currently.
val_set = None if cfg.dataset == 'combined' else create_dataset(**cfg.val_set_kwargs)
test_sets = []
test_set_names = []
if cfg.dataset == 'combined':
for name in ['market1501', 'cuhk03', 'duke']:
cfg.test_set_kwargs['name'] = name
test_sets.append(create_dataset(**cfg.test_set_kwargs))
test_set_names.append(name)
else:
test_sets.append(create_dataset(**cfg.test_set_kwargs))
test_set_names.append(cfg.dataset)
###########
# Models #
###########
model = Model(
last_conv_stride=cfg.last_conv_stride,
num_stripes=cfg.num_stripes,
local_conv_out_channels=cfg.local_conv_out_channels,
num_classes=num_classes
)
# Model wrapper
model_w = DataParallel(model)
#############################
# Criteria and Optimizers #
#############################
criterion = torch.nn.CrossEntropyLoss()
# To finetune from ImageNet weights
finetuned_params = list(model.base.parameters())
# To train from scratch
new_params = [p for n, p in model.named_parameters()
if not n.startswith('base.')]
param_groups = [{'params': finetuned_params, 'lr': cfg.finetuned_params_lr},
{'params': new_params, 'lr': cfg.new_params_lr}]
optimizer = optim.SGD(
param_groups,
momentum=cfg.momentum,
weight_decay=cfg.weight_decay)
# Bind them together just to save some codes in the following usage.
modules_optims = [model, optimizer]
################################
# May Resume Models and Optims #
################################
if cfg.resume:
resume_ep, scores = load_ckpt(modules_optims, cfg.ckpt_file)
# May Transfer Models and Optims to Specified Device. Transferring optimizer
# is to cope with the case when you load the checkpoint to a new device.
TMO(modules_optims)
########
# Test #
########
def test(load_model_weight=False):
if load_model_weight:
if cfg.model_weight_file != '':
map_location = (lambda storage, loc: storage)
sd = torch.load(cfg.model_weight_file, map_location=map_location)
load_state_dict(model, sd['state_dicts'][0])
print('Loaded model weights from {}'.format(cfg.model_weight_file))
else:
load_ckpt(modules_optims, cfg.ckpt_file)
for test_set, name in zip(test_sets, test_set_names):
test_set.set_feat_func(ExtractFeature(model_w, TVT))
print('\n=========> Test on dataset: {} <=========\n'.format(name))
test_set.eval(
normalize_feat=True,
verbose=True)
def test_record(load_model_weight=False):
if load_model_weight:
if cfg.model_weight_file != '':
map_location = (lambda storage, loc: storage)
sd = torch.load(cfg.model_weight_file, map_location=map_location)
load_state_dict(model, sd['state_dicts'][0])
print('Loaded model weights from {}'.format(cfg.model_weight_file))
else:
load_ckpt(modules_optims, cfg.ckpt_file)
#record trainval feat
epoch_done = False
print('\n=========> Record on Trainval: <=========\n')
i = 0
feat_list = []
h_list = []
im_names_list = []
while not epoch_done:
ims, im_names, labels, mirrored, epoch_done = train_set.next_batch()
# print('the size of ims:{}'.format(ims.size()))
print(ims.shape)
ims_var = Variable(TVT(torch.from_numpy(ims).float()))
print(ims_var.shape)
feat,_,h,_ = model(ims_var)
h = [lf.cpu().data.numpy() for lf in h]
h
"""
## This module contains the BotApi class, used to send requests to the Telegram Bot API
"""
import requests
import json
from silbot import types, helper
from silbot.response import BotAPIResponse
from typing import Union
class BotApi:
"""
Class to send requests to botAPI
"""
def __init__(self, token, default_parse_mode: str = None, default_disable_web_preview: bool = None,
default_disable_notifications: bool = None):
"""Creates a botApi by the given token
Using this class you can easily send requests to botApi and use the response
- - - - -
**Args**:
- `token` (`str`): bot's API Token given by [@botfather](https://t.me/botfather)
- `default_parse_mode` (`str`, *optional*): Can be None, Markdown or HTML, it is used in functions if parse_mode is not specified. Defaults to `None`.
- `default_disable_web_preview` (`bool`, *optional*): It is used in functions if disable_web_page_preview is not specified. Defaults to `None`.
- `default_disable_notifications` (`bool`, *optional*): It is used in functions if disable_notifications is not specified. Defaults to `None`.
"""
self.default_parse_mode = default_parse_mode
self.default_disable_web_preview = default_disable_web_preview
self.default_disable_notifications = default_disable_notifications
self.token = token
self.session = requests.Session()
def sendRequest(self, method, arguments=None):
"""Sends a GET request to botAPI
Using this function you can send custom requests to botAPI
- - - - -
**Args**:
- `method` (`str`): request method, like sendMessage
- `arguments` (`dict`, *optional*): A `dict` whose keys are request's parameters and the values are parameters values. Defaults to `{}`.
**Returns**
- `str` botAPI's string response
"""
if arguments is None:
arguments = {}
try:
r = self.session.get("https://api.telegram.org/bot" + self.token + "/" + method, params=arguments, timeout=10)
except Exception:
return json.dumps({"ok": False, "connection_error": True})
else:
return r.text
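# A minimal, hypothetical usage sketch of sendRequest for a method that has no
# dedicated wrapper (the token and chat id below are placeholders):
#
#   api = BotApi("123456:ABC-token")
#   raw = api.sendRequest("sendChatAction", {"chat_id": 123456, "action": "typing"})
#   result, response = api.response(raw, bool)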
@staticmethod
def response(raw_json, func):
"""Creates a botAPIResponse object for the given JSON
- - - - -
**Args**:
- `raw_json` (`str`): Result from botAPI
- `func` (`silbot.types` class or builtin data value): Expected result from botAPI
**Returns**
- `tuple` containing the expected result as object as first argument and the `BotAPIResponse` object as second
"""
response = BotAPIResponse(raw_json, func)
return response.getObject(), response
def getUpdates(self, offset: int = None, limit: int = None, timeout: int = None, allowed_updates: list = None):
"""Use this method to receive incoming updates using long polling (wiki). An Array of Update objects is returned. [See Telegram API](https://core.telegram.org/bots/api#getupdates)
- - - - -
**Args**:
- `offset` :`int` Identifier of the first update to be returned. Must be greater by one than the highest among the identifiers of previously received updates. By default, updates starting with the earliest unconfirmed update are returned. An update is considered confirmed as soon as getUpdates is called with an offset higher than its update_id. The negative offset can be specified to retrieve updates starting from -offset update from the end of the updates queue. All previous updates will forgotten.
- `limit` :`int` Limits the number of updates to be retrieved. Values between 1-100 are accepted. Defaults to 100.
- `timeout` :`int` Timeout in seconds for long polling. Defaults to 0, i.e. usual short polling. Should be positive, short polling should be used for testing purposes only.
- `allowed_updates` :`list` A JSON-serialized list of the update types you want your bot to receive. For example, specify [“message”, “edited_channel_post”, “callback_query”] to only receive updates of these types. See Update for a complete list of available update types. Specify an empty list to receive all update types except chat_member (default). If not specified, the previous setting will be used.Please note that this parameter doesn't affect updates created before the call to the getUpdates, so unwanted updates may be received for a short period of time.
**Returns:**
- A `tuple`, on success a `list` as first member and a botApiResponse object as second member
"""
data = {
"offset": offset,
"limit": limit,
"timeout": timeout,
"allowed_updates": allowed_updates,
}
return self.response(self.sendRequest("getUpdates", data), list)
def setWebhook(self, url: str, certificate: types.InputFile = None, ip_address: str = None, max_connections: int = None, allowed_updates: list = None, drop_pending_updates: bool = None):
"""Use this method to specify a url and receive incoming updates via an outgoing webhook. Whenever there is an update for the bot, we will send an HTTPS POST request to the specified url, containing a JSON-serialized Update. In case of an unsuccessful request, we will give up after a reasonable amount of attempts. Returns True on success.
If you'd like to make sure that the Webhook request comes from Telegram, we recommend using a secret path in the URL, e.g. https://www.example.com/<token>. Since nobody else knows your bot's token, you can be pretty sure it's us. [See Telegram API](https://core.telegram.org/bots/api#setwebhook)
- - - - -
**Args**:
- `url` :`str` HTTPS url to send updates to. Use an empty string to remove webhook integration
- `certificate` :`types.InputFile` Upload your public key certificate so that the root certificate in use can be checked. See our self-signed guide for details.
- `ip_address` :`str` The fixed IP address which will be used to send webhook requests instead of the IP address resolved through DNS
- `max_connections` :`int` Maximum allowed number of simultaneous HTTPS connections to the webhook for update delivery, 1-100. Defaults to 40. Use lower values to limit the load on your bot's server, and higher values to increase your bot's throughput.
- `allowed_updates` :`list` A JSON-serialized list of the update types you want your bot to receive. For example, specify [“message”, “edited_channel_post”, “callback_query”] to only receive updates of these types. See Update for a complete list of available update types. Specify an empty list to receive all update types except chat_member (default). If not specified, the previous setting will be used.Please note that this parameter doesn't affect updates created before the call to the setWebhook, so unwanted updates may be received for a short period of time.
- `drop_pending_updates` :`bool` Pass True to drop all pending updates
**Returns:**
- A `tuple`, on success a `bool` as first member and a botApiResponse object as second member
"""
data = {
"url": url,
"certificate": helper.toDict(certificate, True),
"ip_address": ip_address,
"max_connections": max_connections,
"allowed_updates": allowed_updates,
"drop_pending_updates": drop_pending_updates,
}
return self.response(self.sendRequest("setWebhook", data), bool)
def deleteWebhook(self, drop_pending_updates: bool = None):
"""Use this method to remove webhook integration if you decide to switch back to getUpdates. Returns True on success. [See Telegram API](https://core.telegram.org/bots/api#deletewebhook)
- - - - -
**Args**:
- `drop_pending_updates` :`bool` Pass True to drop all pending updates
**Returns:**
- A `tuple`, on success a `bool` as first member and a botApiResponse object as second member
"""
data = {
"drop_pending_updates": drop_pending_updates,
}
return self.response(self.sendRequest("deleteWebhook", data), bool)
def getWebhookInfo(self):
"""Use this method to get current webhook status. Requires no parameters. On success, returns a WebhookInfo object. If the bot is using getUpdates, will return an object with the url field empty. [See Telegram API](https://core.telegram.org/bots/api#getwebhookinfo)
- - - - -
**Args**:
**Returns:**
- A `tuple`, on success a `types.WebhookInfo` as first member and a botApiResponse object as second member
"""
data = {
}
return self.response(self.sendRequest("getWebhookInfo", data), types.WebhookInfo)
def getMe(self):
"""A simple method for testing your bot's authentication token. Requires no parameters. Returns basic information about the bot in form of a User object. [See Telegram API](https://core.telegram.org/bots/api#getme)
- - - - -
**Args**:
**Returns:**
- A `tuple`, on success a `types.User` as first member and a botApiResponse object as second member
"""
data = {
}
return self.response(self.sendRequest("getMe", data), types.User)
def logOut(self):
"""Use this method to log out from the cloud Bot API server before launching the bot locally. You must log out the bot before running it locally, otherwise there is no guarantee that the bot will receive updates. After a successful call, you can immediately log in on a local server, but will not be able to log in back to the cloud Bot API server for 10 minutes. Returns True on success. Requires no parameters. [See Telegram API](https://core.telegram.org/bots/api#logout)
- - - - -
**Args**:
**Returns:**
- A `tuple`, on success a `bool` as first member and a botApiResponse object as second member
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.common import EngineType
from fate_arch.common import engine_utils
from fate_arch.common.base_utils import json_dumps, current_timestamp
from fate_arch.computing import ComputingEngine
from fate_flow.controller.task_controller import TaskController
from fate_flow.db.job_default_config import JobDefaultConfig
from fate_flow.db.runtime_config import RuntimeConfig
from fate_flow.entity import RunParameters
from fate_flow.entity.run_status import JobStatus, EndStatus
from fate_flow.entity.types import InputSearchType, WorkerName
from fate_flow.manager.provider_manager import ProviderManager
from fate_flow.manager.resource_manager import ResourceManager
from fate_flow.manager.worker_manager import WorkerManager
from fate_flow.operation.job_saver import JobSaver
from fate_flow.operation.job_tracker import Tracker
from fate_flow.protobuf.python import pipeline_pb2
from fate_flow.settings import USE_AUTHENTICATION, USE_DATA_AUTHENTICATION, ENGINES
from fate_flow.utils import job_utils, schedule_utils, data_utils
from fate_flow.utils.authentication_utils import authentication_check
from fate_flow.utils.authentication_utils import data_authentication_check
from fate_flow.utils.log_utils import schedule_logger
class JobController(object):
@classmethod
def create_job(cls, job_id, role, party_id, job_info):
# parse job configuration
dsl = job_info['dsl']
runtime_conf = job_info['runtime_conf']
train_runtime_conf = job_info['train_runtime_conf']
if USE_AUTHENTICATION:
authentication_check(src_role=job_info.get('src_role', None), src_party_id=job_info.get('src_party_id', None),
dsl=dsl, runtime_conf=runtime_conf, role=role, party_id=party_id)
dsl_parser = schedule_utils.get_job_dsl_parser(dsl=dsl,
runtime_conf=runtime_conf,
train_runtime_conf=train_runtime_conf)
job_parameters = dsl_parser.get_job_parameters(runtime_conf)
schedule_logger(job_id).info('job parameters:{}'.format(job_parameters))
dest_user = job_parameters.get(role, {}).get(party_id, {}).get('user', '')
user = {}
src_party_id = int(job_info['src_party_id']) if job_info.get('src_party_id') else 0
src_role = job_info.get('src_role', '')
src_user = job_parameters.get(src_role, {}).get(src_party_id, {}).get('user', '') if src_role else ''
for _role, party_id_item in job_parameters.items():
user[_role] = {}
for _party_id, _parameters in party_id_item.items():
user[_role][_party_id] = _parameters.get("user", "")
schedule_logger(job_id).info('job user:{}'.format(user))
if USE_DATA_AUTHENTICATION:
job_args = dsl_parser.get_args_input()
schedule_logger(job_id).info('job args:{}'.format(job_args))
dataset_dict = cls.get_dataset(False, role, party_id, runtime_conf.get("role"), job_args)
dataset_list = []
if dataset_dict.get(role, {}).get(party_id):
for k, v in dataset_dict[role][party_id].items():
dataset_list.append({"namespace": v.split('.')[0], "table_name": v.split('.')[1]})
data_authentication_check(src_role=job_info.get('src_role'), src_party_id=job_info.get('src_party_id'),
src_user=src_user, dest_user=dest_user, dataset_list=dataset_list)
job_parameters = RunParameters(**job_parameters.get(role, {}).get(party_id, {}))
# save new job into db
if role == job_info["initiator_role"] and party_id == job_info["initiator_party_id"]:
is_initiator = True
else:
is_initiator = False
job_info["status"] = JobStatus.READY
job_info["user_id"] = dest_user
job_info["src_user"] = src_user
job_info["user"] = user
# this party configuration
job_info["role"] = role
job_info["party_id"] = party_id
job_info["is_initiator"] = is_initiator
job_info["progress"] = 0
cls.create_job_parameters_on_party(role=role, party_id=party_id, job_parameters=job_parameters)
# update job parameters on party
job_info["runtime_conf_on_party"]["job_parameters"] = job_parameters.to_dict()
JobSaver.create_job(job_info=job_info)
initialized_result, provider_group = cls.initialize_tasks(job_id=job_id, role=role, party_id=party_id, run_on_this_party=True,
initiator_role=job_info["initiator_role"], initiator_party_id=job_info["initiator_party_id"], job_parameters=job_parameters, dsl_parser=dsl_parser)
for provider_key, group_info in provider_group.items():
for cpn in group_info["components"]:
dsl["components"][cpn]["provider"] = provider_key
roles = job_info['roles']
cls.initialize_job_tracker(job_id=job_id, role=role, party_id=party_id,
job_parameters=job_parameters, roles=roles, is_initiator=is_initiator, dsl_parser=dsl_parser)
job_utils.save_job_conf(job_id=job_id,
role=role,
party_id=party_id,
dsl=dsl,
runtime_conf=runtime_conf,
runtime_conf_on_party=job_info["runtime_conf_on_party"],
train_runtime_conf=train_runtime_conf,
pipeline_dsl=None)
return {"components": initialized_result}
@classmethod
def set_federated_mode(cls, job_parameters: RunParameters):
if not job_parameters.federated_mode:
job_parameters.federated_mode = ENGINES["federated_mode"]
@classmethod
def set_engines(cls, job_parameters: RunParameters, engine_type=None):
engines = engine_utils.get_engines()
if not engine_type:
engine_type = {EngineType.COMPUTING, EngineType.FEDERATION, EngineType.STORAGE}
for k in engine_type:
setattr(job_parameters, f"{k}_engine", engines[k])
@classmethod
def create_common_job_parameters(cls, job_id, initiator_role, common_job_parameters: RunParameters):
JobController.set_federated_mode(job_parameters=common_job_parameters)
JobController.set_engines(job_parameters=common_job_parameters, engine_type={EngineType.COMPUTING})
JobController.fill_default_job_parameters(job_id=job_id, job_parameters=common_job_parameters)
JobController.adapt_job_parameters(role=initiator_role, job_parameters=common_job_parameters, create_initiator_baseline=True)
@classmethod
def create_job_parameters_on_party(cls, role, party_id, job_parameters: RunParameters):
JobController.set_engines(job_parameters=job_parameters)
cls.fill_party_specific_parameters(role=role,
party_id=party_id,
job_parameters=job_parameters)
@classmethod
def fill_party_specific_parameters(cls, role, party_id, job_parameters: RunParameters):
cls.adapt_job_parameters(role=role, job_parameters=job_parameters)
engines_info = cls.get_job_engines_address(job_parameters=job_parameters)
cls.check_parameters(job_parameters=job_parameters,
role=role, party_id=party_id, engines_info=engines_info)
@classmethod
def fill_default_job_parameters(cls, job_id, job_parameters: RunParameters):
keys = {"task_parallelism", "auto_retries", "auto_retry_delay", "federated_status_collect_type"}
for key in keys:
if hasattr(job_parameters, key) and getattr(job_parameters, key) is None:
if hasattr(JobDefaultConfig, key):
setattr(job_parameters, key, getattr(JobDefaultConfig, key))
else:
schedule_logger(job_id).warning(f"can not found {key} job parameter default value from job_default_settings")
@classmethod
def adapt_job_parameters(cls, role, job_parameters: RunParameters, create_initiator_baseline=False):
ResourceManager.adapt_engine_parameters(
role=role, job_parameters=job_parameters, create_initiator_baseline=create_initiator_baseline)
if create_initiator_baseline:
if job_parameters.task_parallelism is None:
job_parameters.task_parallelism = JobDefaultConfig.task_parallelism
if job_parameters.federated_status_collect_type is None:
job_parameters.federated_status_collect_type = JobDefaultConfig.federated_status_collect_type
if create_initiator_baseline and not job_parameters.computing_partitions:
job_parameters.computing_partitions = job_parameters.adaptation_parameters[
"task_cores_per_node"] * job_parameters.adaptation_parameters["task_nodes"]
@classmethod
def get_job_engines_address(cls, job_parameters: RunParameters):
engines_info = {}
engine_list = [
(EngineType.COMPUTING, job_parameters.computing_engine),
(EngineType.FEDERATION, job_parameters.federation_engine),
(EngineType.STORAGE, job_parameters.storage_engine)
]
for engine_type, engine_name in engine_list:
engine_info = ResourceManager.get_engine_registration_info(
engine_type=engine_type, engine_name=engine_name)
job_parameters.engines_address[engine_type] = engine_info.f_engine_config if engine_info else {}
engines_info[engine_type] = engine_info
return engines_info
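# Hedged sketch of the result (exact config content comes from the engine registry and may differ):
# after this call, job_parameters.engines_address typically looks like
#   {"computing": {...engine config...}, "federation": {...}, "storage": {...}}
# keyed by the EngineType constants, while the returned engines_info keeps the raw registration rows.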
@classmethod
def check_parameters(cls, job_parameters: RunParameters, role, party_id, engines_info):
status, cores_submit, max_cores_per_job = ResourceManager.check_resource_apply(
job_parameters=job_parameters, role=role, party_id=party_id, engines_info=engines_info)
if not status:
msg = ""
msg2 = "default value is fate_flow/settings.py#DEFAULT_TASK_CORES_PER_NODE, refer fate_flow/examples/simple/simple_job_conf.json"
if job_parameters.computing_engine in {ComputingEngine.EGGROLL, ComputingEngine.STANDALONE}:
msg = "please use task_cores job parameters to set request task cores or you can customize it with eggroll_run job parameters"
elif job_parameters.computing_engine in {ComputingEngine.SPARK}:
msg = "please use task_cores job parameters to set request task cores or you can customize it with spark_run job parameters"
raise RuntimeError(
f"max cores per job is {max_cores_per_job} base on (fate_flow/settings#MAX_CORES_PERCENT_PER_JOB * conf/service_conf.yaml#nodes * conf/service_conf.yaml#cores_per_node), expect {cores_submit} cores, {msg}, {msg2}")
@classmethod
def gen_updated_parameters(cls, job_id, initiator_role, initiator_party_id, input_job_parameters, input_component_parameters):
# todo: check can not update job parameters
job_configuration = job_utils.get_job_configuration(job_id=job_id,
role=initiator_role,
party_id=initiator_party_id)
updated_job_parameters = job_configuration.runtime_conf["job_parameters"]
updated_component_parameters = job_configuration.runtime_conf["component_parameters"]
if input_job_parameters:
if input_job_parameters.get("common"):
common_job_parameters = RunParameters(**input_job_parameters["common"])
cls.create_common_job_parameters(job_id=job_id, initiator_role=initiator_role, common_job_parameters=common_job_parameters)
for attr in {"model_id", "model_version"}:
setattr(common_job_parameters, attr, updated_job_parameters["common"].get(attr))
updated_job_parameters["common"] = common_job_parameters.to_dict()
# the "role" section of job parameters is not supported here
updated_components = set()
if input_component_parameters:
cls.merge_update(input_component_parameters, updated_component_parameters)
return updated_job_parameters, updated_component_parameters, list(updated_components)
@classmethod
def merge_update(cls, inputs: dict, results: dict):
if not isinstance(inputs, dict) or not isinstance(results, dict):
raise ValueError(f"must both dict, but {type(inputs)} inputs and {type(results)} results")
for k, v in inputs.items():
if k not in results:
results[k] = v
elif isinstance(v, dict):
cls.merge_update(v, results[k])
else:
results[k] = v
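# Minimal usage sketch for merge_update (hypothetical values, not from the source):
# it recursively overlays `inputs` onto `results` in place, so nested dicts are merged
# key by key instead of being replaced wholesale.
# >>> results = {"reader_0": {"table": {"name": "old", "namespace": "ns"}}}
# >>> JobController.merge_update({"reader_0": {"table": {"name": "new"}}}, results)
# >>> results
# {'reader_0': {'table': {'name': 'new', 'namespace': 'ns'}}}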
@classmethod
def update_parameter(cls, job_id, role, party_id, updated_parameters: dict):
job_configuration = job_utils.get_job_configuration(job_id=job_id,
role=role,
party_id=party_id)
job_parameters = updated_parameters.get("job_parameters")
component_parameters = updated_parameters.get("component_parameters")
if job_parameters:
job_configuration.runtime_conf["job_parameters"] = job_parameters
job_parameters = RunParameters(**job_parameters["common"])
cls.create_job_parameters_on_party(role=role,
party_id=party_id,
job_parameters=job_parameters)
job_configuration.runtime_conf_on_party["job_parameters"] = job_parameters.to_dict()
if component_parameters:
job_configuration.runtime_conf["component_parameters"] = component_parameters
job_configuration.runtime_conf_on_party["component_parameters"] = component_parameters
job_info = {}
job_info["job_id"] = job_id
job_info["role"] = role
job_info["party_id"] = party_id
job_info["runtime_conf"] = job_configuration.runtime_conf
job_info["runtime_conf_on_party"] = job_configuration.runtime_conf_on_party
JobSaver.update_job(job_info)
@classmethod
def initialize_task(cls, role, party_id, task_info: dict):
task_info["role"] = role
task_info["party_id"] = party_id
initialized_result, provider_group = cls.initialize_tasks(components=[task_info["component_name"]], **task_info)
return initialized_result
@classmethod
def initialize_tasks(cls, job_id, role, party_id, run_on_this_party, initiator_role, initiator_party_id, job_parameters: RunParameters = None, dsl_parser=None, components: list = None, **kwargs):
common_task_info = {}
common_task_info["job_id"] = job_id
common_task_info["initiator_role"] = initiator_role
common_task_info["initiator_party_id"] = initiator_party_id
common_task_info["role"] = role
common_task_info["party_id"] = party_id
common_task_info["run_on_this_party"] = run_on_this_party
common_task_info["federated_mode"] = kwargs.get("federated_mode", job_parameters.federated_mode if job_parameters else None)
common_task_info["federated_status_collect_type"] = kwargs.get("federated_status_collect_type", job_parameters.federated_status_collect_type if job_parameters else None)
common_task_info["auto_retries"] = kwargs.get("auto_retries", job_parameters.auto_retries if job_parameters else None)
common_task_info["auto_retry_delay"] = kwargs.get("auto_retry_delay", job_parameters.auto_retry_delay if job_parameters else None)
common_task_info["task_version"] = kwargs.get("task_version")
if dsl_parser is None:
dsl_parser = schedule_utils.get_job_dsl_parser_by_job_id(job_id)
provider_group = ProviderManager.get_job_provider_group(dsl_parser=dsl_parser,
components=components)
initialized_result = {}
for group_key, group_info in provider_group.items():
initialized_config = {}
initialized_config.update(group_info)
initialized_config["common_task_info"] = common_task_info
if run_on_this_party:
code, _result = WorkerManager.start_general_worker(worker_name=WorkerName.TASK_INITIALIZER,
job_id=job_id,
role=role,
party_id=party_id,
initialized_config=initialized_config,
run_in_subprocess=False if initialized_config["if_default_provider"] else True)
initialized_result.update(_result)
else:
cls.initialize_task_holder_for_scheduling(role=role,
party_id=party_id,
components=initialized_config["components"],
common_task_info=common_task_info,
provider_info=initialized_config["provider"])
return initialized_result, provider_group
@classmethod
def initialize_task_holder_for_scheduling(cls, role, party_id, components, common_task_info, provider_info):
for component_name in components:
task_info = {}
task_info.update(common_task_info)
task_info["component_name"] = component_name
task_info["component_module"] = ""
task_info["provider_info"] = provider_info
task_info["component_parameters"] = {}
TaskController.create_task(role=role, party_id=party_id,
run_on_this_party=common_task_info["run_on_this_party"],
task_info=task_info)
@classmethod
def initialize_job_tracker(cls, job_id, role, party_id, job_parameters: RunParameters, roles, is_initiator, dsl_parser):
tracker = Tracker(job_id=job_id, role=role, party_id=party_id,
model_id=job_parameters.model_id,
model_version=job_parameters.model_version,
job_parameters=job_parameters)
if job_parameters.job_type != "predict":
tracker.init_pipeline_model()
partner = {}
show_role = {}
for _role, _role_party in roles.items():
if is_initiator or _role == role:
show_role[_role] = show_role.get(_role, [])
for _party_id in _role_party:
if is_initiator or _party_id == party_id:
show_role[_role].append(_party_id)
if _role != role:
partner[_role] = partner.get(_role, [])
partner[_role].extend(_role_party)
else:
for _party_id in _role_party:
if _party_id != party_id:
partner[_role] = partner.get(_role, [])
partner[_role].append(_party_id)
job_args = dsl_parser.get_args_input()
dataset = cls.get_dataset(
is_initiator, role, party_id, roles, job_args)
tracker.log_job_view(
{'partner': partner, 'dataset': dataset, 'roles': show_role})
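# Illustrative example (assumed values, not from the source): for an initiator with
# role="guest", party_id=9999 and roles={"guest": [9999], "host": [10000]}, the loop
# above produces show_role={"guest": [9999], "host": [10000]} and partner={"host": [10000]},
# i.e. every party other than the local one is recorded as a partner.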
@classmethod
def get_dataset(cls, is_initiator, role, party_id, roles, job_args):
dataset = {}
dsl_version = 1
if job_args.get('dsl_version'):
if job_args.get('dsl_version') == 2:
dsl_version = 2
for _role, _role_party_args in job_args.items():
if _role == "dsl_version":
continue
if is_initiator or _role == role:
for _party_index in range(len(_role_party_args)):
_party_id = roles[_role][_party_index]
if is_initiator or _party_id == party_id:
dataset[_role] = dataset.get(_role, {})
dataset[_role][_party_id] = dataset[_role].get(
_party_id, {})
if dsl_version == 1:
for _data_type, _data_location in _role_party_args[_party_index]['args']['data'].items():
dataset[_role][_party_id][_data_type] = '{}.{}'.format(
_data_location['namespace'], _data_location['name'])
else:
for key in _role_party_args[_party_index].keys():
for _data_type, _data_location in _role_party_args[_party_index][key].items():
search_type = data_utils.get_input_search_type(parameters=_data_location)
if search_type is InputSearchType.TABLE_INFO:
dataset[_role][_party_id][key] = '{}.{}'.format(_data_location['namespace'], _data_location['name'])
elif search_type is InputSearchType.JOB_COMPONENT_OUTPUT:
dataset[_role][_party_id][key] = '{}.{}.{}'.format(_data_location['job_id'], _data_location['component_name'], _data_location['data_name'])
else:
dataset[_role][_party_id][key] = "unknown"
return dataset
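# Hedged sketch of the returned structure (names are hypothetical): for a DSL v2 job where
# the guest's reader input resolves to a table, get_dataset returns something like
#   {"guest": {9999: {"reader_0": "experiment.breast_guest"}}}
# (namespace.name for table inputs, job_id.component.data_name for upstream outputs,
# and the literal string "unknown" when the input cannot be resolved).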
@classmethod
def query_job_input_args(cls, input_data, role, party_id):
min_partition = data_utils.get_input_data_min_partitions(
input_data, role, party_id)
return {'min_input_data_partition': min_partition}
@classmethod
def start_job(cls, job_id, role, party_id, extra_info=None):
schedule_logger(job_id).info(
f"try to start job on {role} {party_id}")
job_info = {
execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Component under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.enable_project_ci_with_http_info(owner, name, **kwargs) # noqa: E501
def enable_project_ci_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Enable project CI # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.enable_project_ci_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Component under namespace (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method enable_project_ci" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `enable_project_ci`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `enable_project_ci`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/{owner}/{name}/ci', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_project(self, owner, name, **kwargs): # noqa: E501
"""Get project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Component under namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Project
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_project_with_http_info(owner, name, **kwargs) # noqa: E501
def get_project_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Get project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Component under namespace (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Project, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'name'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_project" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_project`") # noqa: E501
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `get_project`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/{owner}/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Project', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_project_activities(self, owner, name, **kwargs): # noqa: E501
"""Get project activities # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project_activities(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Entity managing the resource (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListActivitiesResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_project_activities_with_http_info(owner, name, **kwargs) # noqa: E501
def get_project_activities_with_http_info(self, owner, name, **kwargs): # noqa: E501
"""Get project activities # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_project_activities_with_http_info(owner, name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str name: Entity managing the resource (required)
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
:param str query: Query filter the search.
:param str mode: Mode of the search.
:param bool no_page: No pagination.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListActivitiesResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'name',
'offset',
'limit',
'sort',
'query',
'mode',
'no_page'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_project_activities" % key
)
local_var_params[key] = val
# evaluation_tools/evaluation.py
import torch
from function_tools import distribution_function, poincare_function
from function_tools import euclidean_function as ef
from clustering_tools.poincare_kmeans import PoincareKMeans
from clustering_tools.poincare_em import PoincareEM
from collections import Counter
import numpy as np
import math
import tqdm
import itertools
# verified
class EvaluationMetrics(object):
# both prediction and ground_truth must be 0/1 matrices (samples x classes)
def __init__(self, prediction, ground_truth, nb_classes):
self.TP = []
self.FP = []
self.TN = []
self.FN = []
for i in range(nb_classes):
positive_gt = (ground_truth[:,i] == 1).float()
positive_pr = (prediction[:,i] == 1).float()
tp = (positive_gt * positive_pr).sum()
# false positives: predicted positive but negative in the ground truth
fp = positive_pr.sum() - tp
self.TP.append(tp)
self.FP.append(fp)
negative_gt = (ground_truth[:,i] == 0).float()
negative_pr = (prediction[:,i] == 0).float()
tn = (negative_gt * negative_pr).sum()
# false negatives: positive in the ground truth but predicted negative
fn = negative_pr.sum() - tn
self.TN.append(tn)
self.FN.append(fn)
def micro_precision(self):
return sum(self.TP, 0)/(sum(self.TP, 0) + sum(self.FP,0))
def micro_recall(self):
return sum(self.TP, 0)/(sum(self.TP, 0) + sum(self.FN,0))
def micro_F(self):
m_p, m_r = self.micro_precision(), self.micro_recall()
return (2 * m_p * m_r) /(m_p + m_r)
def macro_precision(self):
precision_by_label = [tp/(tp+fp) for tp, fp in zip(self.TP, self.FP)]
return sum(precision_by_label, 0)/(len(precision_by_label))
def macro_recall(self):
recall_by_label = [tp/(tp+fn) for tp, fn in zip(self.TP, self.FN)]
return sum(recall_by_label, 0)/(len(recall_by_label))
def macro_F(self):
m_p, m_r = self.macro_precision(), self.macro_recall()
return (2 * m_p * m_r) /(m_p + m_r)
def score(self):
return self.micro_F(), self.macro_F()
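# Minimal usage sketch (hypothetical one-hot label matrices, shape samples x classes):
# >>> gt = torch.tensor([[1., 0.], [0., 1.], [1., 0.]])
# >>> pred = torch.tensor([[1., 0.], [0., 1.], [0., 1.]])
# >>> m = EvaluationMetrics(pred, gt, nb_classes=2)
# >>> m.score()  # (micro_F, macro_F) ~= (tensor(0.6667), tensor(0.7500))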
def precision_at(prediction, ground_truth, at=5):
prediction_value, prediction_index = (-prediction).sort(-1)
trange = torch.arange(len(prediction)).unsqueeze(-1).expand(len(prediction), at).flatten()
indexes = prediction_index[:,:at].flatten()
score = ((ground_truth[trange, indexes]).float().view(len(prediction), at)).sum(-1)/at
return score.mean().item()
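# Sketch (hypothetical scores and labels): precision@2 for a single sample whose two
# highest-scored labels are both relevant is 1.0.
# >>> scores = torch.tensor([[0.9, 0.8, 0.1]])
# >>> labels = torch.tensor([[1, 1, 0]])
# >>> precision_at(scores, labels, at=2)
# 1.0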
def mean_conductance(prediction, adjency_matrix):
N = prediction.size(0)
# the number of clusters
K = prediction.size(-1)
# print(K)
I = {i for i in range(len(prediction))}
score = 0
for c in range(K):
# print(prediction[:, c].nonzero().flatten())
c_nodes = set(prediction[:, c].nonzero().flatten().tolist())
nc_nodes = I - c_nodes
cut_score_a = 0
for i in c_nodes:
cut_score_a += len(set(adjency_matrix[i]) - c_nodes)
# for j in nc_nodes:
# if(j in adjency_matrix[i]):
# cut_score_a += 1
cut_score_b = 0
for i in c_nodes:
cut_score_b += len(adjency_matrix[i])
cut_score_c = 0
for i in nc_nodes:
cut_score_c += len(adjency_matrix[i])
if(cut_score_b==0 or cut_score_c ==0):
score += 0
else:
score += cut_score_a/(min(cut_score_b, cut_score_c))
return score/K
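# Sketch (hypothetical graph): two clusters {0, 1} and {2, 3} joined by the single edge 1-2.
# Each cluster has cut 1 and volume 3, so the mean conductance is (1/3 + 1/3) / 2 = 1/3.
# >>> pred = torch.tensor([[1, 0], [1, 0], [0, 1], [0, 1]])
# >>> adj = {0: [1], 1: [0, 2], 2: [1, 3], 3: [2]}
# >>> mean_conductance(pred, adj)  # ~0.333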
def nmi(prediction, ground_truth):
N = prediction.size(0)
# the number of clusters
K = prediction.size(-1)
I = {i for i in range(len(prediction))}
PN = []
GN = []
den = 0
for i in range(K):
PN.append(set(prediction[:, i].nonzero().flatten().tolist()))
GN.append(set(ground_truth[:, i].nonzero().flatten().tolist()))
if(len(PN[-1]) != 0):
den += len(PN[-1]) * math.log(len(PN[-1])/N)
if(len(GN[-1]) != 0):
den += len(GN[-1]) * math.log(len(GN[-1])/N)
num = 0
for a in PN:
for b in GN:
N_ij = len(a.intersection(b))
if(N_ij != 0):
num += N_ij * math.log((N_ij * N)/(len(a) *len(b) ))
return -2 * (num/den)
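# Sketch (hypothetical one-hot assignments): identical partitions give an NMI of 1.0
# (up to floating point).
# >>> p = torch.tensor([[1, 0], [1, 0], [0, 1], [0, 1]])
# >>> nmi(p, p)
# 1.0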
class PrecisionScore(object):
def __init__(self, at=5):
self.at = at
def __call__(self, x, y):
return precision_at(x, y, at=self.at)
class CrossValEvaluation(object):
def __init__(self, embeddings, ground_truth, nb_set=5, algs_object=PoincareEM):
self.algs_object = algs_object
self.z = embeddings
self.gt = ground_truth
self.nb_set = nb_set
# split set
subset_index = torch.randperm(len(self.z))
nb_value = len(self.z)//nb_set
self.subset_indexer = [subset_index[nb_value *i:min(nb_value * (i+1), len(self.z))] for i in range(nb_set)]
self.all_algs = []
pb = tqdm.trange(len(self.subset_indexer))
for i, test_index in zip(pb, self.subset_indexer):
# create the train set as the concatenation of every subset except the current test subset
train_index = torch.cat([ subset for ci, subset in enumerate(self.subset_indexer) if(i!=ci)], 0)
# get embeddings sets
train_embeddings = self.z[train_index]
test_embeddings = self.z[test_index]
# get ground truth sets
train_labels = self.gt[train_index]
test_labels = self.gt[test_index]
algs = self.algs_object(self.gt.size(-1))
algs.fit(train_embeddings, Y=train_labels)
self.all_algs.append(algs)
def get_score(self, scoring_function):
scores = []
pb = tqdm.trange(len(self.subset_indexer))
for i, test_index in zip(pb, self.subset_indexer):
# create the train set as the concatenation of every subset except the current test subset
train_index = torch.cat([ subset for ci, subset in enumerate(self.subset_indexer) if(i!=ci)], 0)
# get embeddings sets
train_embeddings = self.z[train_index]
test_embeddings = self.z[test_index]
# get ground truth sets
train_labels = self.gt[train_index]
test_labels = self.gt[test_index]
# must give the matrix of scores
# print(algs._w)
prediction = self.all_algs[i].probs(test_embeddings)
# print(prediction.mean(0))
# print("Pred size ", prediction.size())
# print("Test size ", test_labels.size())
set_score = scoring_function(prediction, test_labels)
scores.append(set_score)
return scores
def accuracy(prediction, labels):
return (prediction == labels).float().mean()
########################################### TO CLEAN ##############################################
def predict(Z_train, Z_test, Y_train, Y_test, pi, mu, sigma):
G_train = distribution_function.weighted_gmm_pdf(pi, Z_train, mu, sigma, poincare_function.distance)
G_train = G_train.max(-1)[1]+1
# for each class we count
predict_class = torch.zeros(len(mu), len(pi))
for j, v in enumerate(G_train):
predict_class[v.item()-1][torch.LongTensor(Y_train[j])-1] +=1
sv, si = predict_class.sort(-1)
g = torch.zeros(len(mu))
for k in range(len(pi)):
clas = torch.argmax(predict_class,-1)
gaus = predict_class[torch.arange(0,len(predict_class)),clas].argmax()
clas = clas[gaus]
predict_class[gaus] = -1
#predict_class[:,clas] = -1
g[gaus] = clas
# predict
G_test= distribution_function.weighted_gmm_pdf(pi, Z_test, mu, sigma, poincare_function.distance)
G_test= G_test.max(-1)[1]+1
prediction = g[G_test-1].long()
return prediction
def accuracy_cross_validation_multi_disc(Z, Y, pi, mu, sigma, nb_set, verbose=True):
subset_index = torch.randperm(len(Z[0]))
nb_value = len(Z[0])//nb_set
I_CV = [subset_index[nb_value *i:min(nb_value * (i+1), len(Z[0]))] for i in range(nb_set)]
acc_total = 0.
for i, test_index in enumerate(I_CV):
# create train dataset
train_index = torch.cat([ subset for ci, subset in enumerate(I_CV) if(i!=ci)],0)
Y_train = [Y[ic.item()] for ic in train_index]
# create test dataset
Y_test = [Y[ic.item()] for ic in test_index]
if(verbose):
print("Set "+str(i)+" :")
print("\t train size -> "+str(len(Y_train)))
print("\t test size -> "+str(len(Y_test)))
print("Associate to each gaussian a class")
predictions = []
for j in range(len(Z)):
Z_train = Z[j][train_index]
Z_test = Z[j][test_index]
predictions.append(predict(Z_train, Z_test, Y_train, Y_test, pi[j], mu[j], sigma[j]).unsqueeze(-1))
predictions = torch.cat(predictions, -1)
predictions = predictions.tolist()
prediction = torch.LongTensor([Counter(l).most_common()[0][0] for l in predictions])
acc = accuracy(prediction, torch.LongTensor([i[0]-1 for i in Y_test]))
acc_total += acc.item()
return acc_total/(len(I_CV))
def accuracy_cross_validation(Z, Y, pi, mu, sigma, nb_set, verbose=True):
subset_index = torch.randperm(len(Z))
nb_value = len(Z)//nb_set
I_CV = [subset_index[nb_value *i:min(nb_value * (i+1), len(Z))] for i in range(nb_set)]
acc_total = 0.
for i, test_index in enumerate(I_CV):
# create train dataset
train_index = torch.cat([ subset for ci, subset in enumerate(I_CV) if(i!=ci)],0)
Z_train = Z[train_index]
Y_train = [Y[ic.item()] for ic in train_index]
# create test dataset
Z_test = Z[test_index]
Y_test = [Y[ic.item()] for ic in test_index]
if(verbose):
print("Set "+str(i)+" :")
print("\t train size -> "+str(len(Z_train)))
print("\t test size -> "+str(len(Z_test)))
print("Associate to each gaussian a class")
G_train = distribution_function.weighted_gmm_pdf(pi, Z_train, mu, sigma, poincare_function.distance)
G_train = G_train.max(-1)[1]+1
# for each class we count
predict_class = torch.zeros(len(mu), len(pi))
for j, v in enumerate(G_train):
predict_class[v.item()-1][torch.LongTensor(Y_train[j])-1] +=1
sv, si = predict_class.sort(-1)
g = torch.zeros(len(mu))
for k in range(len(pi)):
clas = torch.argmax(predict_class,-1)
gaus = predict_class[torch.arange(0,len(predict_class)),clas].argmax()
clas = clas[gaus]
predict_class[gaus] = -1
#predict_class[:,clas] = -1
g[gaus] = clas
# predict
G_test= distribution_function.weighted_gmm_pdf(pi, Z_test, mu, sigma, poincare_function.distance)
G_test= G_test.max(-1)[1]+1
prediction = g[G_test-1].long()
acc = accuracy(prediction, torch.LongTensor([i[0]-1 for i in Y_test]))
acc_total += acc.item()
return acc_total/(len(I_CV))
def evaluate_em_supervised(Z, Y, n_gaussian, nb_set=5, verbose=False):
subset_index = torch.randperm(len(Z))
nb_value = len(Z)//nb_set
I_CV = [subset_index[nb_value *i:min(nb_value * (i+1), len(Z))] for i in range(nb_set)]
acc_total = 0.
for i, test_index in enumerate(I_CV):
# create train dataset
train_index = torch.cat([ subset for ci, subset in enumerate(I_CV) if(i!=ci)],0)
Z_train = Z[train_index]
Y_train = [Y[ic.item()] for ic in train_index]
# create test dataset
Z_test = Z[test_index]
Y_test = [Y[ic.item()] for ic in test_index]
if(verbose):
print("Set "+str(i)+" :")
print("\t train size -> "+str(len(Z_train)))
print("\t test size -> "+str(len(Z_test)))
print("Associate to each gaussian a class")
em_alg = RiemannianEM(Z.size(-1), n_gaussian)
g_mat = torch.Tensor([[ 1 if(y+1 in Y_train[i]) else 0 for y in range(n_gaussian)] for i in range(len(Z_train))])
em_alg.fit(Z_train, Y=g_mat)
# predict
prediction = em_alg.predict(Z_test)
acc = accuracy(prediction, torch.LongTensor([i[0]-1 for i in Y_test]))
acc_total += acc.item()
return acc_total/(len(I_CV))
# in the following function we perform prediction using disc product
# Z, Y, pi, mu, sigma are list of tensor with the size number of disc
def accuracy_supervised(z, y, mu, nb_set=5, verbose=True):
n_example = len(z)
n_distrib = len(mu)
subset_index = torch.randperm(n_example)
nb_value = n_example//nb_set
I_CV = [subset_index[nb_value *i:min(nb_value * (i+1), n_example)] for i in range(nb_set)]
# print(I_CV)
acc_total = 0.
for i, test_index in enumerate(I_CV):
# create train dataset
train_index = torch.cat([ subset for ci, subset in enumerate(I_CV) if(i!=ci)],0)
Z_train = z[train_index]
Y_train = torch.LongTensor([y[ic.item()] for ic in train_index])
# create test dataset
Z_test = z[test_index]
Y_test = torch.LongTensor([y[ic.item()] for ic in test_index])
if(verbose):
print("Set "+str(i)+" :")
print("\t train size -> "+str(len(Z_train)))
print("\t test size -> "+str(len(Z_test)))
print("Obtaining centroids for each classes")
from function_tools import poincare_alg as pa
min_label = Y_train.min().item()
max_label = Y_train.max().item()
centroids = []
for i in range(n_distrib):
# print((Z_train[Y_train[:,0]== (min_label + i)]).size())
centroids.append(pa.barycenter(Z_train[Y_train[:,0]== (min_label + i)],
1) {
# cannot approx with 1 value so:
indexSplit[[sc]] = rep(g$index[1], nrow(s));
} else {
g$pos = (g$begin + g$end) / 2;
g = g[order(g$pos),];
# rule 2 means use values from extrema
i = round(approx(g$pos, 1:nrow(g), xout = s$pos, rule=2)$y);
i = pmax(1, pmin(nrow(g), i));
indexSplit[[sc]] = g$index[i];
}
}
unsplit(indexSplit, strains$scaffold);
}
"""
def initialize_gene_fit_d(GeneFitResults, debug=False):
"""
We create the initial version of the central variable
'gene_fit_d'. We essentially transpose the structure: instead of
each set/index name pointing to a dataframe whose columns hold the
per-gene statistics, each statistic name points to a dataframe
holding that statistic for every set/index name (one column per set).
Args:
GeneFitResults: (dict) setnameIndex -> ret_d
ret_d:
gene_fit: DataFrame, contains cols:
fit (float): (unnormalized)
fitNaive (float):
fit1 (float):
fit2 (float):
fitnorm1 (float)
fitnorm2 (float)
fitRaw (float)
locusId (str)
n (int)
nEff (float)
pseudovar (float)
sumsq (float):
sd (float)
sdNaive (float)
se (float) Standard Error
t: (float) t-statistic
tot1 (int or nan)
tot1_0 (int or nan)
tot2 (int or nan)
tot2_0 (int or nan)
tot (int or nan)
tot0 (int or nan)
strain_fit: pandas Series (float)
strain_se: pandas Series (float)
Returns:
gene_fit_d: (python dict)
g (pandas Series (str)): pandas Series of locusIds
lr (float): dataframe with one column per setindexname
lrNaive (float): dataframe with one column per setindexname
lr1 (float): dataframe with one column per setindexname
lr2 (float): dataframe with one column per setindexname
lrn1 (float): dataframe with one column per setindexname
lrn2 (float): dataframe with one column per setindexname
lrRaw (float): dataframe with one column per setindexname
n (int): dataframe with one column per setindexname
nEff (float): dataframe with one column per setindexname
pseudovar (float): dataframe with one column per setindexname
sumsq (float): dataframe with one column per setindexname
sd (float): dataframe with one column per setindexname
sdNaive (float): dataframe with one column per setindexname
se (float) Standard Error dataframe with one column per setindexname
t: (float) t-statistic dataframe with one column per setindexname
tot1 (int or nan) dataframe with one column per setindexname
tot1_0 (int or nan) dataframe with one column per setindexname
tot2 (int or nan) dataframe with one column per setindexname
tot2_0 (int or nan) dataframe with one column per setindexname
tot (int or nan) dataframe with one column per setindexname
tot0 (int or nan) dataframe with one column per setindexname
version (str)
"""
all_ix_names = list(GeneFitResults.keys())
# This dict will just contain dataframes gene_fit
fit_locusIds = GeneFitResults[all_ix_names[0]]['gene_fit']['locusId']
# Why do we replace the name locusId with 'g'?
gene_fit_d = {'g': fit_locusIds}
other_col_names = list(GeneFitResults[all_ix_names[0]]['gene_fit'].head())
# other_col_names should be:
# fit, fitNaive, fit1, fit2, fitnorm1, fitnorm2, fitRaw
# locusId, n, nEff, pseudovar, sumsq, sd, sdNaive, se, t, tot1
# tot1_0, tot2, tot2_0, tot, tot0
other_col_names.remove('locusId')
if "Unnamed: 0" in other_col_names:
other_col_names.remove("Unnamed: 0")
print(other_col_names)
st = time.time()
for col_name in other_col_names:
all_col_values_d = {ix_name: GeneFitResults[ix_name]['gene_fit'][col_name] for ix_name in GeneFitResults.keys()}
gene_fit_d[col_name] = pd.DataFrame.from_dict(all_col_values_d)
print(f"Time to create gene_fit_d: {time.time() - st}")
new_gene_fit_d = {}
for k in gene_fit_d.keys():
new_key = k.replace("fitnorm","lrn")
new_key = new_key.replace("fit", "lr")
new_gene_fit_d[new_key] = gene_fit_d[k].copy(deep=True)
gene_fit_d = new_gene_fit_d
if debug:
print("Extracted fitness values")
gene_fit_d["version"] = "1.1.1"
return gene_fit_d
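# Illustration of the restructuring above (set names are hypothetical): the input maps each
# set/index name to a per-gene dataframe,
#   GeneFitResults = {"set1IT001": {"gene_fit": df[fit, t, ...]}, "set1IT002": {...}, ...}
# while the output maps each statistic to a dataframe with one column per set,
#   gene_fit_d = {"g": locusIds, "lr": df[set1IT001, set1IT002, ...], "t": df[...], ...}
# after renaming the fit/fitnorm keys to lr/lrn.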
def tmp_prep_wrap_up(all_pc_fp, genes_fp):
dict_dtypes = {'locusId' : str,
'scaffoldId' : str,
'scaffold': str}
all_df = pd.read_table(all_pc_fp, dtype=dict_dtypes, index_col=1)
genes_df = pd.read_table(genes_fp,dtype=dict_dtypes)
has_gene2 = [True if (0.1<=x<=0.9) else False for x in all_df['f']]
return all_df, genes_df, has_gene2
def FitReadMetrics(all_df, qnames, has_gene2):
"""
Args:
all_df (pandas DataFrame):
qnames (pandas Series): list<str> (names of set_index_names)
has_gene2 list<bool>: gene insertion between 0.1 and 0.9 fraction of length
Returns:
DataFrame with cols:
nMapped
nPastEnd
nGenic
Description:
Compute read metrics -- nMapped, nPastEnd, nGenic -- for the given data columns.
The final argument (has_gene2) is used to decide which insertions count as genic.
"""
print(all_df.head())
frm_df = pd.DataFrame.from_dict({
"nMapped": all_df[qnames].sum(axis=0),
"nPastEnd": all_df[all_df['scaffold']=="pastEnd"][qnames].sum(axis=0),
"nGenic": all_df[has_gene2][qnames].sum(axis=0)
})
frm_df.index = list(qnames)
return frm_df
def save_gene_fit_d(gene_fit_d, prnt_dbg=False):
for k in gene_fit_d.keys():
if k != "version":
gene_fit_d[k].to_csv("tmp/GENEFITD/pysave_" + k + ".tsv", sep="\t")
def FitQuality(gene_fit_d, genes_df, prnt_dbg=False):
"""
Args:
gene_fit_d: (python dict)
g (pandas Series (str)): pandas Series of locusIds
lr (float): dataframe with one column per setindexname
lrNaive (float): dataframe with one column per setindexname
lr1 (float): dataframe with one column per setindexname
lr2 (float): dataframe with one column per setindexname
lrn1 (float): dataframe with one column per setindexname
lrn2 (float): dataframe with one column per setindexname
fitRaw (float): dataframe with one column per setindexname
n (int): dataframe with one column per setindexname
nEff (float): dataframe with one column per setindexname
pseudovar (float): dataframe with one column per setindexname
sumsq (float): dataframe with one column per setindexname
sd (float): dataframe with one column per setindexname
sdNaive (float): dataframe with one column per setindexname
se (float) Standard Error dataframe with one column per setindexname
t: (float) t-statistic dataframe with one column per setindexname
tot1 (int or nan) dataframe with one column per setindexname
tot1_0 (int or nan) dataframe with one column per setindexname
tot2 (int or nan) dataframe with one column per setindexname
tot2_0 (int or nan) dataframe with one column per setindexname
tot (int or nan) dataframe with one column per setindexname
tot0 (int or nan) dataframe with one column per setindexname
version (str)
genes_df:
Dataframe of genes.GC file
prnt_dbg: boolean
Created:
crudeOpGenes:
DataFrame with cols
'Sep', 'bOp' - list<bool>,
'begin1', 'end1', 'begin2', 'end2'
Returns:
fit_quality_df:
Dataframe with cols:
"nUsed":
"gMed":
"gMedt0":
"gMean":
"cor12":
"mad12":
"mad12c":
"mad12c_t0":
"opcor":
"adjcor":
"gccor":
"maxFit":
CrudeOpGenes:
DataFrame with cols:
Gene2, Gene1, sysName1, type1, scaffoldId1, begin1, end1,
strand1, name1, desc1, GC1, nTA1,
sysName2, type2, scaffoldId2, begin2, end2, strand2, name2,
desc2, GC2, nTA2, Sep, bOp
Description:
Compute the quality metrics from fitness values, fitness values of halves of genes, or
counts per gene (for genes or for halves of genes)
"""
# crudeOpGenes is a dataframe
crudeOpGenes = CrudeOp(genes_df)
if prnt_dbg:
crudeOpGenes.to_csv("tmp/py_crudeOpGenes.tsv", sep="\t")
# adj is a dataframe
adj = AdjacentPairs(genes_df, dbg_prnt=True)
adjDiff = adj[adj['strand1'] != adj['strand2']]
lrn1 = gene_fit_d['lrn1']
lrn2 = gene_fit_d['lrn2']
print("-*-*-*" + "Gene fit D of 'g' then genes_df['locusId'] ")
print(gene_fit_d['g'])
print(genes_df['locusId'])
match_list = py_match(list(gene_fit_d['g']), list(genes_df['locusId']))
print(match_list)
print(len(match_list))
#GC Correlation is the correlation between the fitnorm values and the GC values
GC_Corr = gene_fit_d['lrn'].corrwith(genes_df['GC'].iloc[match_list], method="pearson")
"""
adjDiff = adj[adj$strand1 != adj$strand2,];
data.frame(
nUsed = colSums(fit$tot),
gMed = apply(fit$tot, 2, median),
gMedt0 = apply(fit$tot0, 2, median),
gMean = apply(fit$tot, 2, mean),
cor12 = mapply(function(x,y) cor(x,y,method="s",use="p"), fit$lrn1, fit$lrn2),
mad12 = apply(abs(fit$lrn1-fit$lrn2), 2, median, na.rm=T),
# consistency of log2 counts for 1st and 2nd half, for sample and for time0
mad12c = apply(abs(log2(1+fit$tot1) - log2(1+fit$tot2)), 2, median, na.rm=T),
mad12c_t0 = apply(abs(log2(1+fit$tot1_0) - log2(1+fit$tot2_0)), 2, median, na.rm=T),
opcor = apply(fit$lrn, 2, function(x) paircor(crudeOpGenes[crudeOpGenes$bOp,], fit$g, x, method="s")),
adjcor = sapply(names(fit$lrn), function(x) paircor(adjDiff, fit$g, fit$lrn[[x]], method="s")),
gccor = c( cor(fit$lrn, genes_df$GC[ match(fit$g, genes_df$locusId) ], use="p") ),
maxFit = apply(fit$lrn,2,max,na.rm=T)
);
}
"""
# Note: axis=0 aggregates down the rows, producing one value per set (column)
fitQuality_df = pd.DataFrame.from_dict({
"nUsed": gene_fit_d['tot'].sum(axis=0),
"gMed": gene_fit_d['tot'].median(axis=0),
"gMedt0": gene_fit_d['tot0'].median(axis=0),
"gMean": gene_fit_d['tot'].mean(axis=0),
"cor12": [lrn1[col_name].corr(lrn2[col_name]) for col_name in lrn1.head()],
"mad12": (lrn1-lrn2).abs().median(),
"mad12c": (np.log2(1 + gene_fit_d['tot1']) - np.log2(1 + gene_fit_d['tot2'])).abs().median(),
"mad12c_t0": (np.log2(1 + gene_fit_d['tot1_0']) - np.log2(1 + gene_fit_d['tot2_0'])).abs().median(),
# Remember crudeOpGenes['bOp'] is a list of bools
"opcor": [paircor(crudeOpGenes[crudeOpGenes['bOp']],
gene_fit_d['g'],
gene_fit_d['lrn'][colname],
method="spearman",
dbg_prnt=True) for colname in gene_fit_d['lrn']],
"adjcor": [paircor(adjDiff, gene_fit_d['g'], gene_fit_d['lrn'][colname], method="spearman", dbg_prnt=True)\
for colname in gene_fit_d['lrn']],
"gccor": GC_Corr,
"maxFit": gene_fit_d['lrn'].max()
})
if prnt_dbg:
fitQuality_df.to_csv("tmp/py_fitQuality_df.tsv", sep="\t")
return fitQuality_df, crudeOpGenes
def paircor(pairs, locusIds, values, use="p", method="pearson", names=["Gene1","Gene2"],
dbg_prnt=False):
"""
pairs (pandas DataFrame): dataframe with multiple cols (CrudeOp with TRUE cols from bOp)
locusIds (pandas Series (str)): locusIds
values (pandas Series): normalized fitness scores
use:
method: Correlation method ("pearson", "spearman")
names (list<str>): "Gene1", "Gene2"
dbg_prnt (bool)
"""
if dbg_prnt:
print(f"Length of locusIds: {len(locusIds)}")
if len(locusIds) > 10:
print(f"First ten locusIds: {locusIds[:10]}")
print(f"Length of values: {len(values)}")
if len(values) > 10:
print(f"First ten values: {values[:10]}")
premrg1 = pd.DataFrame.from_dict({
"Gene1": list(locusIds),
"value1": | |
"admin/user_edit.html"
form_extra_fields = dict(is_superuser=BooleanField("Is Superuser"))
form_excluded_columns = (
"roles",
"created_at",
"updated_at",
"created_by",
"updated_by",
)
column_exclude_list = (
"password",
"username",
"first_name",
"last_name",
)
column_searchable_list = (
"name",
"orcid",
"email",
"eppn",
"organisation.name",
)
form_overrides = dict(roles=BitmapMultipleValueField)
form_ajax_refs = {
"organisation": {
"fields": (Organisation.name, "name")
},
}
can_export = True
class OrganisationAdmin(AppModelView):
"""Organisation model view."""
column_formatters = {
"logo":
lambda v, c, m, p: Markup(
'<img style="max-height: 100px; max-width: 100px;" src="'
f"""{url_for('logo_image', token=m.logo.token)}" alt="the logo of {m.name}">""") if m.logo else ''
}
column_exclude_list = (
"orcid_client_id",
"orcid_secret",
"created_at",
"updated_at",
"created_by",
"updated_by",
"email_template",
"email_template_enabled",
)
form_excluded_columns = AppModelView.form_excluded_columns[:]
form_excluded_columns.append("logo")
column_searchable_list = (
"name",
"tuakiri_name",
"city",
)
form_ajax_refs = {
"tech_contact": {
"fields": (User.name, User.email),
"page_size": 5
},
}
edit_template = "admin/organisation_edit.html"
form_widget_args = AppModelView.form_widget_args
form_widget_args["api_credentials_requested_at"] = {"readonly": True}
form_widget_args["api_credentials_entered_at"] = {"readonly": True}
def update_model(self, form, model):
"""Handle change of the technical contact."""
# Technical contact changed:
if form.tech_contact.data and form.tech_contact.data.id != model.tech_contact_id:
# Revoke the TECHNICAL role if there is no org the user is tech. contact for.
if model.tech_contact and model.tech_contact.has_role(
Role.TECHNICAL) and not Organisation.select().where(
Organisation.tech_contact_id == model.tech_contact_id,
Organisation.id != model.id).exists():
app.logger.info(r"Revoked TECHNICAL from {model.tech_contact}")
model.tech_contact.roles &= ~Role.TECHNICAL
model.tech_contact.save()
return super().update_model(form, model)
class OrgInfoAdmin(AppModelView):
"""OrgInfo model view."""
can_export = True
column_searchable_list = (
"name",
"tuakiri_name",
"city",
"first_name",
"last_name",
"email",
)
form_rules = [
rules.FieldSet(["name", "tuakiri_name"], "Naming"),
rules.FieldSet(["title", "first_name", "last_name", "role", "email", "phone", "is_public"],
"Technical Contact"),
rules.FieldSet(["country", "city"], "Address"),
rules.FieldSet(["disambiguated_id", "disambiguation_source"], "Disambiguation Data"),
]
@action("invite", "Register Organisation",
"Are you sure you want to register selected organisations?")
def action_invite(self, ids):
"""Batch registration of organisations."""
count = 0
for oi in OrgInfo.select().where(OrgInfo.id.in_(ids)):
try:
register_org(
org_name=oi.name,
email=oi.email,
tech_contact=True,
via_orcid=(False if oi.tuakiri_name else True),
first_name=oi.first_name,
last_name=oi.last_name,
city=oi.city,
country=oi.country,
course_or_role=oi.role,
disambiguated_id=oi.disambiguated_id,
disambiguation_source=oi.disambiguation_source)
count += 1
except Exception as ex:
flash(f"Failed to send an invitation to {oi.email}: {ex}")
app.logger.exception(f"Failed to send registration invitation to {oi.email}.")
flash("%d invitations were sent successfully." % count)
class OrcidTokenAdmin(AppModelView):
"""ORCID token model view."""
column_searchable_list = (
"access_token",
"user.name",
"user.email",
"org.name",
)
can_export = True
can_create = True
form_ajax_refs = {
"user": {
"fields": (User.name, User.email),
"page_size": 5
},
"org": {
"fields": (Organisation.name, Organisation.tuakiri_name),
"page_size": 5
},
}
column_filters = ["scopes"]
class OrcidApiCallAmin(AppModelView):
"""ORCID API calls."""
column_list = [
"method", "called_at", "url", "query_params", "body", "status", "put_code",
"response", "response_time_ms"
]
column_default_sort = ("id", True)
can_export = True
can_edit = False
can_delete = False
can_create = False
can_view_details = True
column_searchable_list = (
"url",
"body",
"response",
"user.name",
)
column_formatters = AppModelView.column_formatters
column_formatters_detail = dict()
@staticmethod
def truncate_value(v, c, m, p):
"""Truncate very long strings."""
value = getattr(m, p)
return value[:100] + " ..." if value and len(value) > 100 else value
OrcidApiCallAmin.column_formatters.update(dict(
body=OrcidApiCallAmin.truncate_value, response=OrcidApiCallAmin.truncate_value))
class UserInvitationAdmin(AppModelView):
"""User Invitations."""
can_export = True
can_edit = False
can_delete = False
can_create = False
column_searchable_list = (
"email",
"organisation",
"department",
"first_name",
"last_name",
"token",
"inviter.name",
)
class OrgInvitationAdmin(AppModelView):
"""User Invitations."""
can_export = True
can_edit = False
can_delete = False
can_create = False
column_searchable_list = (
"email",
"org.name",
"token",
"inviter.name",
)
class UserOrgAmin(AppModelView):
"""User Organisations."""
column_searchable_list = (
"user.email",
"org.name",
)
class TaskAdmin(AppModelView):
"""Task model view."""
roles_required = Role.SUPERUSER | Role.ADMIN
list_template = "view_tasks.html"
can_edit = False
can_create = False
can_delete = True
column_searchable_list = [
"filename", "created_by.email", "created_by.name", "created_by.first_name",
"created_by.last_name", "org.name"
]
column_list = [
"task_type", "filename", "created_at", "org", "completed_at", "created_by", "expires_at",
"expiry_email_sent_at", "completed_count"
]
# form_excluded_columns = [
# "is_deleted", "completed_at", "expires_at", "expiry_email_sent_at", "organisation"
# ]
column_filters = (
filters.DateBetweenFilter(column=Task.created_at, name="Uploaded Date"),
filters.FilterEqual(column=Task.task_type, options=models.TaskType.options(), name="Task Type"),
)
column_formatters = dict(
task_type=lambda v, c, m, p: m.task_type.name.replace('_', ' ').title() if m.task_type else "N/A",
completed_count=lambda v, c, m, p: (
'' if not m.record_count else f"{m.completed_count} / {m.record_count} ({m.completed_percent:.1f}%)"),
)
@action("activate", "Activate for processing",
"""Are you sure you want to activate the selected tasks for batch processing?
NB! By clicking "OK" you are affirming that the all selected task records to be written are,
to the best of your knowledge, correct.""")
def activate(self, ids):
"""Acitave or reset and enqueue all records of selected tasks."""
self.activate_or_reset(ids)
@action("reset", "Reset for processing",
"""Are you sure you want to reset every record in selected task batch for processing?
NB! By clicking "OK" you are affirming that all the records of the selected tasks to be written are,
to the best of your knowledge, correct!""")
def reset(self, ids):
"""Acitave or reset and enqueue all records of selected tasks."""
self.activate_or_reset(ids)
def activate_or_reset(self, ids):
"""Acitave or reset and enqueue all records of selected tasks."""
count = 0
for t in Task.select().where(Task.id.in_(ids)):
try:
count += utils.activate_all_records(t) if request.form.get(
"action") == "activate" else utils.reset_all_records(t)
except Exception as ex:
flash(f"Failed to activate the selected records: {ex}", "danger")
else:
flash(f"{count} records were activated for batch processing.", "info")
class RecordModelView(AppModelView):
"""Task record model view."""
roles_required = Role.SUPERUSER | Role.ADMIN
list_template = "record_list.html"
column_exclude_list = (
"task",
"organisation",
)
form_excluded_columns = [
"task",
"organisation",
"processed_at",
"status",
]
column_export_exclude_list = (
"task",
)
can_edit = True
can_create = False
can_delete = True
can_view_details = True
can_export = True
form_widget_args = {"external_id": {"readonly": True}, "task": {"readonly": True}}
def render(self, template, **kwargs):
"""Pass the task to the render function as an added argument."""
if template == self.list_template and "task" not in kwargs:
task_id = request.args.get("task_id")
if task_id:
try:
kwargs["task"] = Task.get(id=task_id)
except Task.DoesNotExist:
flash(f"The task with ID: {task_id} doesn't exist.", "danger")
abort(404)
else:
return redirect(request.args.get("url") or url_for("task.index_view"))
return super().render(template, **kwargs)
def is_accessible(self):
"""Verify if the task view is accessible for the current user."""
if not super().is_accessible():
return False
# Superusers can access tasks related to all research organisations
if current_user.is_superuser:
return True
if request.method == "POST" and request.form.get("rowid"):
# get the first ROWID:
rowid = int(request.form.get("rowid"))
task_id = self.model.get(id=rowid).task_id
else:
task_id = self.current_task_id
if not task_id:
_id = request.args.get("id")
if not _id:
flash("Cannot invoke the task view without task ID", "danger")
flash("Missing or incorrect task ID value", "danger")
return False
else:
task_id = self.model.get(id=_id).task_id
try:
task = Task.get(id=task_id)
if task.org.id != current_user.organisation.id:
flash("Access denied! You cannot access this task.", "danger")
return False
except Task.DoesNotExist:
flash("The task doesn't exist.", "danger")
abort(404)
except ValueError as ex:
flash(str(ex), "danger")
return False
return True
def get_export_name(self, export_type='csv'):
"""Get export file name using the original imported file name.
:return: The exported csv file name.
"""
task_id = request.args.get("task_id")
if task_id:
try:
task = Task.get(id=task_id)
filename = os.path.splitext(task.filename)[0]
return "%s_%s.%s" % (filename, datetime.utcnow().strftime("%Y-%m-%d_%H-%M-%S"),
export_type)
except Task.DoesNotExist:
flash("The batch task doesn't exist", "danger")
abort(404)
return super().get_export_name(export_type=export_type)
@models.lazy_property
def record_processing_func(self):
"""Record processing function."""
return getattr(utils, f"process_{self.model.underscore_name()}s")
def enqueue_record(self, record_id):
"""Enqueue the specified record or all active and not yet processed ones."""
self.record_processing_func.queue(record_id=record_id)
@action("activate", "Activate for processing",
"""Are you sure you want to activate the selected records for batch processing?
By clicking "OK" you are affirming that the selected records to be written are,
to the best of your knowledge, correct!""")
def action_activate(self, ids):
"""Batch registraion of users."""
try:
status = "The record was activated at " + datetime.now().isoformat(timespec="seconds")
count = self.model.update(is_active=True, status=status).where(
((self.model.is_active.is_null()) | (self.model.is_active == False)), # noqa: E712
self.model.id.in_(ids)).execute()
if self.model == AffiliationRecord:
records = self.model.select().where(self.model.id.in_(ids)).order_by(
self.model.email, self.model.orcid)
for _, chunk in itertools.groupby(records, lambda r: (r.email, r.orcid, )):
self.enqueue_record([r.id for r in chunk])
else:
for record_id in ids:
self.enqueue_record(record_id)
except Exception as ex:
flash(f"Failed to activate the selected records: {ex}")
app.logger.exception("Failed to activate the selected records")
else:
flash(f"{count} records were activated for batch processing.")
@action("reset", "Reset for processing",
"Are you sure you want to reset the selected records for batch processing?")
def action_reset(self, ids):
"""Reset batch task records."""
status = "The record was reset at " + datetime.utcnow().isoformat(timespec="seconds")
task_id = None
with db.atomic() as transaction:
try:
if request.method == "POST" and request.form.get("rowid"):
# get the first ROWID:
rowid = int(request.form.get("rowid"))
task_id = self.model.get(id=rowid).task_id
else:
task_id = request.form.get('task_id')
task = Task.get(id=task_id)
count = self.model.update(
processed_at=None, status=status).where(self.model.is_active,
self.model.id.in_(ids)).execute()
if task.is_raw:
invitee_ids = [i.id for i in Invitee.select().join(
RecordInvitee).join(MessageRecord).where(MessageRecord.id.in_(ids))]
count = Invitee.update(
processed_at=None, status=status).where(Invitee.id.in_(invitee_ids)).execute()
emails = Invitee.select(Invitee.email).where(Invitee.id.in_(invitee_ids))
elif hasattr(self.model, "invitees"):
im = self.model.invitees.rel_model
count = im.update(
processed_at=None, status=status).where(im.record.in_(ids)).execute()
emails = im.select(im.email).where(im.record_id.in_(ids))
else:
emails = self.model.select(self.model.email).where(self.model.id.in_(ids))
# Delete | |
self.manager.loans.sync()
# This time, the feed contains entries.
feed = feedparser.parse(response.data)
entries = feed['entries']
overdrive_entry = [entry for entry in entries if entry['title'] == overdrive_book.title][0]
bibliotheca_entry = [entry for entry in entries if entry['title'] == bibliotheca_book.title][0]
eq_(overdrive_entry['opds_availability']['status'], 'available')
eq_(bibliotheca_entry['opds_availability']['status'], 'ready')
overdrive_links = overdrive_entry['links']
fulfill_link = [x for x in overdrive_links if x['rel'] == 'http://opds-spec.org/acquisition'][0]['href']
revoke_link = [x for x in overdrive_links if x['rel'] == OPDSFeed.REVOKE_LOAN_REL][0]['href']
bibliotheca_links = bibliotheca_entry['links']
borrow_link = [x for x in bibliotheca_links if x['rel'] == 'http://opds-spec.org/acquisition/borrow'][0]['href']
bibliotheca_revoke_links = [x for x in bibliotheca_links if x['rel'] == OPDSFeed.REVOKE_LOAN_REL]
assert urllib.quote("%s/fulfill" % overdrive_pool.id) in fulfill_link
assert urllib.quote("%s/revoke" % overdrive_pool.id) in revoke_link
assert urllib.quote("%s/%s/borrow" % (bibliotheca_pool.identifier.type, bibliotheca_pool.identifier.identifier)) in borrow_link
eq_(0, len(bibliotheca_revoke_links))
# Since we went out to the vendor APIs,
# patron.last_loan_activity_sync was updated.
assert patron.last_loan_activity_sync > new_sync_time
class TestAnnotationController(CirculationControllerTest):
def setup(self):
super(TestAnnotationController, self).setup()
self.pool = self.english_1.license_pools[0]
self.edition = self.pool.presentation_edition
self.identifier = self.edition.primary_identifier
def test_get_empty_container(self):
with self.request_context_with_library(
"/", headers=dict(Authorization=self.valid_auth)):
self.manager.loans.authenticated_patron_from_request()
response = self.manager.annotations.container()
eq_(200, response.status_code)
# We've been given an annotation container with no items.
container = json.loads(response.data)
eq_([], container['first']['items'])
eq_(0, container['total'])
# The response has the appropriate headers.
allow_header = response.headers['Allow']
for method in ['GET', 'HEAD', 'OPTIONS', 'POST']:
assert method in allow_header
eq_(AnnotationWriter.CONTENT_TYPE, response.headers['Accept-Post'])
eq_(AnnotationWriter.CONTENT_TYPE, response.headers['Content-Type'])
eq_('W/""', response.headers['ETag'])
def test_get_container_with_item(self):
self.pool.loan_to(self.default_patron)
annotation, ignore = create(
self._db, Annotation,
patron=self.default_patron,
identifier=self.identifier,
motivation=Annotation.IDLING,
)
annotation.active = True
annotation.timestamp = datetime.datetime.now()
with self.request_context_with_library(
"/", headers=dict(Authorization=self.valid_auth)):
self.manager.annotations.authenticated_patron_from_request()
response = self.manager.annotations.container()
eq_(200, response.status_code)
# We've been given an annotation container with one item.
container = json.loads(response.data)
eq_(1, container['total'])
item = container['first']['items'][0]
eq_(annotation.motivation, item['motivation'])
# The response has the appropriate headers.
allow_header = response.headers['Allow']
for method in ['GET', 'HEAD', 'OPTIONS', 'POST']:
assert method in allow_header
eq_(AnnotationWriter.CONTENT_TYPE, response.headers['Accept-Post'])
eq_(AnnotationWriter.CONTENT_TYPE, response.headers['Content-Type'])
expected_etag = 'W/"%s"' % annotation.timestamp
eq_(expected_etag, response.headers['ETag'])
expected_time = format_date_time(mktime(annotation.timestamp.timetuple()))
eq_(expected_time, response.headers['Last-Modified'])
def test_get_container_for_work(self):
self.pool.loan_to(self.default_patron)
annotation, ignore = create(
self._db, Annotation,
patron=self.default_patron,
identifier=self.identifier,
motivation=Annotation.IDLING,
)
annotation.active = True
annotation.timestamp = datetime.datetime.now()
other_annotation, ignore = create(
self._db, Annotation,
patron=self.default_patron,
identifier=self._identifier(),
motivation=Annotation.IDLING,
)
with self.request_context_with_library(
"/", headers=dict(Authorization=self.valid_auth)):
self.manager.annotations.authenticated_patron_from_request()
response = self.manager.annotations.container_for_work(self.identifier.type, self.identifier.identifier)
eq_(200, response.status_code)
# We've been given an annotation container with one item.
container = json.loads(response.data)
eq_(1, container['total'])
item = container['first']['items'][0]
eq_(annotation.motivation, item['motivation'])
# The response has the appropriate headers - POST is not allowed.
allow_header = response.headers['Allow']
for method in ['GET', 'HEAD', 'OPTIONS']:
assert method in allow_header
assert 'Accept-Post' not in response.headers.keys()
eq_(AnnotationWriter.CONTENT_TYPE, response.headers['Content-Type'])
expected_etag = 'W/"%s"' % annotation.timestamp
eq_(expected_etag, response.headers['ETag'])
expected_time = format_date_time(mktime(annotation.timestamp.timetuple()))
eq_(expected_time, response.headers['Last-Modified'])
def test_post_to_container(self):
data = dict()
data['@context'] = AnnotationWriter.JSONLD_CONTEXT
data['type'] = "Annotation"
data['motivation'] = Annotation.IDLING
data['target'] = dict(source=self.identifier.urn, selector="epubcfi(/6/4[chap01ref]!/4[body01]/10[para05]/3:10)")
with self.request_context_with_library(
"/", headers=dict(Authorization=self.valid_auth), method='POST', data=json.dumps(data)):
patron = self.manager.annotations.authenticated_patron_from_request()
patron.synchronize_annotations = True
# The patron doesn't have any annotations yet.
annotations = self._db.query(Annotation).filter(Annotation.patron==patron).all()
eq_(0, len(annotations))
response = self.manager.annotations.container()
# The patron doesn't have the pool on loan yet, so the request fails.
eq_(400, response.status_code)
annotations = self._db.query(Annotation).filter(Annotation.patron==patron).all()
eq_(0, len(annotations))
# Give the patron a loan and try again, and the request creates an annotation.
self.pool.loan_to(patron)
response = self.manager.annotations.container()
eq_(200, response.status_code)
annotations = self._db.query(Annotation).filter(Annotation.patron==patron).all()
eq_(1, len(annotations))
annotation = annotations[0]
eq_(Annotation.IDLING, annotation.motivation)
selector = json.loads(annotation.target).get("http://www.w3.org/ns/oa#hasSelector")[0].get('@id')
eq_(data['target']['selector'], selector)
# The response contains the annotation in the db.
item = json.loads(response.data)
assert str(annotation.id) in item['id']
eq_(annotation.motivation, item['motivation'])
def test_detail(self):
self.pool.loan_to(self.default_patron)
annotation, ignore = create(
self._db, Annotation,
patron=self.default_patron,
identifier=self.identifier,
motivation=Annotation.IDLING,
)
annotation.active = True
with self.request_context_with_library(
"/", headers=dict(Authorization=self.valid_auth)):
self.manager.annotations.authenticated_patron_from_request()
response = self.manager.annotations.detail(annotation.id)
eq_(200, response.status_code)
# We've been given a single annotation item.
item = json.loads(response.data)
assert str(annotation.id) in item['id']
eq_(annotation.motivation, item['motivation'])
# The response has the appropriate headers.
allow_header = response.headers['Allow']
for method in ['GET', 'HEAD', 'OPTIONS', 'DELETE']:
assert method in allow_header
eq_(AnnotationWriter.CONTENT_TYPE, response.headers['Content-Type'])
def test_detail_for_other_patrons_annotation_returns_404(self):
patron = self._patron()
self.pool.loan_to(patron)
annotation, ignore = create(
self._db, Annotation,
patron=patron,
identifier=self.identifier,
motivation=Annotation.IDLING,
)
annotation.active = True
with self.request_context_with_library(
"/", headers=dict(Authorization=self.valid_auth)):
self.manager.annotations.authenticated_patron_from_request()
# The patron can't see that this annotation exists.
response = self.manager.annotations.detail(annotation.id)
eq_(404, response.status_code)
def test_detail_for_missing_annotation_returns_404(self):
with self.request_context_with_library(
"/", headers=dict(Authorization=self.valid_auth)):
self.manager.annotations.authenticated_patron_from_request()
# This annotation does not exist.
response = self.manager.annotations.detail(100)
eq_(404, response.status_code)
def test_detail_for_deleted_annotation_returns_404(self):
self.pool.loan_to(self.default_patron)
annotation, ignore = create(
self._db, Annotation,
patron=self.default_patron,
identifier=self.identifier,
motivation=Annotation.IDLING,
)
annotation.active = False
with self.request_context_with_library(
"/", headers=dict(Authorization=self.valid_auth)):
self.manager.annotations.authenticated_patron_from_request()
response = self.manager.annotations.detail(annotation.id)
eq_(404, response.status_code)
def test_delete(self):
self.pool.loan_to(self.default_patron)
annotation, ignore = create(
self._db, Annotation,
patron=self.default_patron,
identifier=self.identifier,
motivation=Annotation.IDLING,
)
annotation.active = True
with self.request_context_with_library(
"/", method='DELETE', headers=dict(Authorization=self.valid_auth)):
self.manager.annotations.authenticated_patron_from_request()
response = self.manager.annotations.detail(annotation.id)
eq_(200, response.status_code)
# The annotation has been marked inactive.
eq_(False, annotation.active)
class TestWorkController(CirculationControllerTest):
def setup(self):
super(TestWorkController, self).setup()
[self.lp] = self.english_1.license_pools
self.edition = self.lp.presentation_edition
self.datasource = self.lp.data_source.name
self.identifier = self.lp.identifier
def test_contributor(self):
m = self.manager.work_controller.contributor
# Find a real Contributor put in the system through the setup
# process.
[contribution] = self.english_1.presentation_edition.contributions
contributor = contribution.contributor
# The contributor is created with both .sort_name and
# .display_name, but we want to test what happens when both
# pieces of data aren't available, so unset .sort_name.
contributor.sort_name = None
# No contributor name -> ProblemDetail
with self.request_context_with_library('/'):
response = m('', None, None)
eq_(404, response.status_code)
eq_(NO_SUCH_LANE.uri, response.uri)
eq_("No contributor provided", response.detail)
# Unable to load ContributorData from contributor name ->
# ProblemDetail
with self.request_context_with_library('/'):
response = m('Unknown Author', None, None)
eq_(404, response.status_code)
eq_(NO_SUCH_LANE.uri, response.uri)
eq_("Unknown contributor: Unknown Author", response.detail)
contributor = contributor.display_name
# Search index misconfiguration -> Problem detail
self.assert_bad_search_index_gives_problem_detail(
lambda: self.manager.work_controller.series(
contributor, None, None
)
)
# Bad facet data -> ProblemDetail
with self.request_context_with_library('/?order=nosuchorder'):
response = m(contributor, None, None)
eq_(400, response.status_code)
eq_(INVALID_INPUT.uri, response.uri)
# Bad pagination data -> ProblemDetail
with self.request_context_with_library('/?size=abc'):
response = m(contributor, None, None)
eq_(400, response.status_code)
eq_(INVALID_INPUT.uri, response.uri)
# Test an end-to-end success (not including a test that the
# search engine can actually find books by a given person --
# that's tested in core/tests/test_external_search.py).
with self.request_context_with_library('/'):
response = m(contributor, 'eng,spa', 'Children,Young Adult')
eq_(200, response.status_code)
eq_(OPDSFeed.ACQUISITION_FEED_TYPE, response.headers['Content-Type'])
feed = feedparser.parse(response.data)
# The feed is named after the person we looked up.
eq_(contributor, feed['feed']['title'])
# It's got one entry -- the book added to the search engine
# during test setup.
[entry] = feed['entries']
eq_(self.english_1.title, entry['title'])
# The feed has facet links.
links = feed['feed']['links']
facet_links = [link for link in links
if link['rel'] == 'http://opds-spec.org/facet']
eq_(8, len(facet_links))
# The feed was cached.
cached = self._db.query(CachedFeed).one()
eq_(CachedFeed.CONTRIBUTOR_TYPE, cached.type)
eq_(
'<NAME>ull-eng,spa-Children,Young+Adult',
cached.unique_key
)
# At this point we don't want to generate real feeds anymore.
# We can't do a real end-to-end test without setting up a real
# search index, which is obnoxiously slow.
#
# Instead, we will mock AcquisitionFeed.page, and examine the objects
# passed into it under different mock requests.
#
# Those objects, such as ContributorLane and
# ContributorFacets, are tested elsewhere, in terms of their
# effects on search objects such as Filter. Those search
# objects are the things that are tested against a real search
# index (in core).
#
# We know from the previous test that any results returned
# from the search engine are converted into an OPDS feed. Now
# we verify that an incoming request results in the objects
# we'd expect to use to generate the feed for that request.
class Mock(object):
@classmethod
def page(cls, **kwargs):
self.called_with = kwargs
return Response("An OPDS feed")
# Test a basic request with custom faceting, pagination, and a
# language and audience restriction. This will exercise nearly
# all the functionality we need to check.
languages = "some languages"
audiences = "some audiences"
sort_key = ["sort", "pagination", "key"]
with self.request_context_with_library(
"/?order=title&size=100&key=%s&entrypoint=Audio" % (
json.dumps(sort_key)
)
):
response = m(contributor, languages, audiences, feed_class=Mock)
# The Response served by Mock.page becomes the response to the
# incoming request.
eq_(200, response.status_code)
eq_("An OPDS feed", response.data)
# Now check all the keyword arguments that were passed into
# page().
kwargs = self.called_with
eq_(self._db, kwargs.pop('_db'))
eq_(self.manager._external_search, kwargs.pop('search_engine'))
# The feed is named after the contributor the request asked
# about.
eq_(contributor, kwargs.pop('title'))
# Query string arguments were taken into account when
# creating the Facets and Pagination objects.
facets = kwargs.pop('facets')
assert isinstance(facets, ContributorFacets)
eq_(AudiobooksEntryPoint, facets.entrypoint)
eq_('title', facets.order)
pagination = kwargs.pop('pagination')
assert isinstance(pagination, SortKeyPagination)
eq_(sort_key, pagination.last_item_on_previous_page)
#!/bin/env python
# -*- coding: utf-8 -*-
""" run a batch of PDF generation
keeps an Amazon SimpleDB domain with an inventory of EAD files and their
last-modified dates; this persists between runs.
check_url is a recursive modifier function (with side effects): a web
crawler that adds every new EAD file to the list `files_to_generate`
and updates the Amazon SimpleDB as needed.
If there are files_to_generate, a spot purchase of an 8-core, 30 GB RAM
machine is initiated, and fabric is used to install `pdfu` and run the
batch in parallel using the -P option on xargs.
Once the batch has run, the spot machine is terminated.
The shadow file is regenerated even if no files to generate were
detected.
July 1, 2013 is hardcoded as the epoch for file changes, because a
complete batch was run at this time.
Epoch reset to Dec 1, 2013 and the SimpleDB domain re-set in order to
regenerate some backfiles. Need to switch SimpleDB to record what
actually gets created, not what gets batched.
Epoch reset to October 5, 2014. --generate-all added to rebuild files.
Add --shadow-only command line parameter.
Epoch reset to October 1, 2016.
Epoch reset to April 1, 2018.
October 7, 2019 -- SimpleDB and epoch concept removed -- `shadow()` now gets run twice.
"""
import argparse
from lxml.html import parse
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import os
import sys
import boto.utils
import boto
import datetime
from time import sleep
from fabric.api import env, run, sudo, put
import paramiko
import fabric
import StringIO
import socket
import urlparse
import tarfile
import time
import tempfile
import shutil
from collections import namedtuple
from pprint import pprint as pp
BATCH = datetime.datetime.now().isoformat()
def main(argv=None):
parser = argparse.ArgumentParser(description="run the PDF batch")
parser.add_argument(
'eads',
nargs=1,
help="URL for crawler to start harvesting EAD XML "
"won't follow redirects"
)
parser.add_argument(
'bucket',
nargs=1,
help="s3://bucket[/optional/path] where the generated PDF files go"
)
parser.add_argument(
'shadow',
nargs=1,
help=".tar.gz filename to store \"shadow\" file archive for XTF"
)
parser.add_argument('--shadow-prefix', default='pdf-shadow',
required=False, help="path the .tar.gz will unpack to")
parser.add_argument('--launch-only', dest='launch_only', action='store_true',
help='launch worker for manual batch',)
parser.add_argument('--shadow-only', dest='shadow_only', action='store_true',
help='just reshadow all',)
parser.add_argument('--generate-all', dest='all', action='store_true',
help='build all files',)
parser.add_argument('--ondemand', dest='ondemand', action='store_true',
help='use EC2 ondemand rather than EC2 spot market',)
if argv is None:
argv = parser.parse_args()
if argv.launch_only:
instance, hostname = launch_ec2(argv.ondemand)
print "workhost launched |{0}| |{1}|".format(instance, hostname)
poll_for_ssh(hostname)
print "can contact host with ssh to {0}".format(hostname)
remote_setup(hostname, instance)
print("remote machine ready for manual control")
exit(0)
print BATCH
print("checking PDF files on S3 to update shadow file and get list of current PDFs")
current_pdfs = shadow(argv.bucket[0], argv.shadow[0], argv.shadow_prefix)
last_modified_domain = current_pdfs
files_to_generate = []
if not argv.shadow_only:
print("checking EAD for PDF files to generate")
with requests.Session() as session:
# set up session and Retry for EAD crawling
retry = Retry(
#https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html
connect=3,
backoff_factor=1,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
check_url(
argv.eads[0],
last_modified_domain,
files_to_generate,
generate_all=argv.all,
session=session,
)
if files_to_generate: ## will be false if in shadow_only mode
print("there are files to generate")
batch = generate_batch(
files_to_generate,
argv.eads[0],
argv.bucket[0],
)
print("batch generated")
print(batch.getvalue())
instance, hostname = launch_ec2(argv.ondemand)
print "workhost launched |{0}| |{1}|".format(instance, hostname)
poll_for_ssh(hostname)
print "can contact host with ssh to {0}".format(hostname)
remote_setup(hostname, instance)
remote_process_pdf(hostname, batch, instance)
print "okay; done, terminate workhost"
terminate_ec2(instance)
print "updating shadow file for a second time"
shadow(argv.bucket[0], argv.shadow[0], argv.shadow_prefix)
def check_url(url, last_modified_domain,
files_to_generate, generate_all=False, session=None):
"""check if a URL is an XML file or directory based on the string value """
dir, ext = os.path.splitext(url)
# prime 2002 directory will have only .xml files or sub-directories
if ext == '.xml':
check_xml(url, last_modified_domain,
files_to_generate, generate_all=generate_all, session=session)
elif not ext:
check_dir(url, last_modified_domain,
files_to_generate, generate_all=generate_all, session=session)
def check_dir(url, last_modified_domain,
files_to_generate, generate_all=False, session=None):
"""scrape links from directory listing"""
sys.stdout.write('•')
doc = parse(url).getroot()
doc.make_links_absolute()
links = doc.xpath("//a[@href]/@href")
for link in links:
# skip links back to myself and don't go up directories
if not link == url and link.startswith(url):
check_url(link, last_modified_domain,
files_to_generate, generate_all=generate_all, session=session)
def check_xml(url, last_modified_domain,
files_to_generate, generate_all=False, session=None):
"""compare last_modifed in head with PDFs on S3
this needs processing"""
if generate_all:
# force regeneration of all files (skip any expensive steps)
add_to_list(url, False, files_to_generate, None)
else:
# expensive steps
# do a HEAD request and check last modified time
r = session.head(url)
last_modified_on_oac_header = r.headers['last-modified']
r.close()
last_modified_on_oac = boto.utils.parse_ts(last_modified_on_oac_header)
# look up this URL in the simple DB domain
new_key = url.replace('http://voro.cdlib.org/oac-ead/prime2002/', 'pdf/')
new_key = new_key.replace('.xml', '.pdf')
last_modified_item = last_modified_domain.get(new_key)
# decide if this should get added to the list
#
if not last_modified_item:
# the URL was not seen before
add_to_list(url, last_modified_domain,
files_to_generate, last_modified_on_oac_header)
elif last_modified_on_oac > boto.utils.parse_ts(
str(last_modified_item)
):
# OR last-modified is later than the database;
add_to_list(url, last_modified_domain,
files_to_generate, last_modified_on_oac_header)
# [pdfu]$ aws sdb delete-domain --domain-name ead_last_modified --region us-east-1
# [pdfu]$ aws sdb create-domain --domain-name ead_last_modified --region us-east-1
def add_to_list(url, last_modified_domain,
files_to_generate, last_modified_on_oac_header):
"""modify the files_to_generate list and last_modified_domain"""
# TODO the logic here could be better... need to keep a list of successful
# batches so failed batches can be re-tried
# but then also need a way to detect and mark pathological cases
print(url)
files_to_generate.append(url)
def shadow(bucketurl, archive, prefix):
"""create shadow artifact for XTF index (so XTF can know what files
are in the bucket and the PDF sizes"""
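# Each matching PDF in the bucket becomes a tiny tar member whose only content
# is the PDF's size in bytes, e.g. (hypothetical key):
#   pdf-shadow/pdf/ark:/13030/abc123  ->  "48213"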
parts = urlparse.urlsplit(bucketurl)
# SplitResult
# (scheme='s3', netloc='test.pdf', path='/dkd', query='', fragment='')
s3 = boto.connect_s3()
bucket = s3.get_bucket(parts.netloc)
tmp = tempfile.NamedTemporaryFile(delete=False)
tar = tarfile.open(fileobj=tmp, mode="w:gz")
current_pdfs = {}
for key in bucket.list():
# look for pdfs that match the user supplied path
if (key.name.endswith(u'.pdf') and not
parts.path or key.name.startswith(parts.path[1:])):
# write directly to a tar file
# http://stackoverflow.com/a/740839/1763984
shadowfile = StringIO.StringIO()
shadowfile.write(str(key.size))
shadowfile.seek(0)
shadowname = os.path.join(prefix, os.path.splitext(key.name)[0])
info = tarfile.TarInfo(shadowname)
info.size = len(shadowfile.buf)
# boto last_modified to Datetime
# http://stackoverflow.com/a/9688496/1763984
# Datetime to unixtime
# http://stackoverflow.com/a/255053/1763984
info.mtime = time.mktime(
boto.utils.parse_ts(key.last_modified).timetuple()
)
tar.addfile(tarinfo=info, fileobj=shadowfile)
current_pdfs[key.name] = key.last_modified
shadowfile.close()
tar.close()
tmp.flush()
os.chmod(tmp.name, 0664)
# local('/bin/tar ztf {0}'.format(tmp.name), capture=False)
if archive.startswith("s3://"):
inner_parts = urlparse.urlsplit(archive)
# SplitResult
# (scheme='s3', netloc='test.pdf', path='/dkd', query='', fragment='')
inner_bucket = s3.get_bucket(inner_parts.netloc)
inner_key = inner_bucket.new_key(inner_parts.path)
inner_key.set_contents_from_filename(tmp.name)
inner_key.set_acl('public-read')
else:
shutil.move(tmp.name, archive)
return current_pdfs
def launch_ec2(ondemand=False):
ami = "ami-0b69ea66ff7391e80"
arn = ("arn:aws:iam::563907706919:"
"instance-profile/s3-read-write")
key_name = "majorTom-worker"
# check http://aws.amazon.com/amazon-linux-ami/ for current AMI
instance_type = 'm3.2xlarge'
# 1.00/hr on demand 8vCPU 26 ECPU 30 G RAM
# see "xargs"
if ondemand:
instance = launch_instance_ondemand(ami, arn, key_name, instance_type)
else:
instance = launch_instance_spot(ami, arn, key_name, instance_type)
print('Waiting for instance to start...')
pp(instance)
status = instance.update()
while status == 'pending':
sleep(10)
sys.stdout.write('·')
status = instance.update()
if status == 'running':
instance.add_tag('Name', 'OAC_pdfu')
instance.add_tag('project', 'OAC_pdfu')
else:
print('invalid instance status')
exit(1)
if not(instance.public_dns_name):
print "needs hostname"
while not(instance.public_dns_name):
# TODO gets stuck in a loop right here sometimes
sleep(20)
sys.stdout.write('·')
return instance.id, instance.public_dns_name
def launch_instance_ondemand(ami, arn, key_name, instance_type):
connection = boto.connect_ec2()
print "connected, about to launch on demand instance"
reservation = connection.run_instances(
ami,
instance_profile_arn=arn,
instance_type=instance_type,
key_name=key_name,
)
return reservation.instances[0]
def launch_instance_spot(ami, arn, key_name, instance_type):
connection = boto.connect_ec2()
print "connected, about to reserve on spot market"
reservation = connection.request_spot_instances(
"1.00", # bid at on-demand rate
ami,
instance_profile_arn=arn,
instance_type=instance_type,
key_name=key_name,
# placement="us-east-1b",
)
spot_id = str(reservation[0].id)
print spot_id
# make a dummy spot_reservation using namedtuple
# to jumpstart the polling because
# connection.get_all_spot_instance_requests(spot_id)[0]
# was not setting spot_reservation.instance_id
Resholder = namedtuple('Resholder', 'instance_id status')
spot_reservation = Resholder(None, 'jumpstarting')
# poll for spot instance to start up
while spot_reservation.instance_id is None:
pp(spot_reservation.status)
sleep(20)
spot_reservation = connection.get_all_spot_instance_requests(
spot_id
)[0]
return connection.get_all_instances(
spot_reservation.instance_id
)[0].instances[0]
def poll_for_ssh(host):
# http://stackoverflow.com/a/2561727/1763984
# Set the timeout
original_timeout = socket.getdefaulttimeout()
new_timeout = 3
socket.setdefaulttimeout(new_timeout)
host_status = False
while not host_status:
try:
paramiko.Transport((host, 22))
host_status = True
except Exception as e:
pp(e)
sleep(20)
sys.stdout.write('⋅')
socket.setdefaulttimeout(original_timeout)
return host_status
def terminate_ec2(instance):
connection = boto.connect_ec2()
return connection.get_all_instances(instance)[0].instances[0].terminate()
def remote_setup(hostname, instance):
"""use fabric to run commands on the remote working node"""
SETUP_SUDO = [
'echo poweroff | at now + 12 hours',
'yum -y update',
'amazon-linux-extras install -y corretto8',
'yum -y groupinstall "Development Tools"',
# 'yum -y install python27-devel python27-virtualenv',
'yum -y install git ncurses-devel openssl-devel libjpeg-devel freetype-devel libtiff-devel lcms-devel mercurial libxslt-devel libxml2-devel libX11-devel',
]
SETUP_RUN = [
'git clone https://github.com/tingletech/pdfu.git',
'./pdfu/init.sh',
]
env.host_string = hostname
env.user = 'ec2-user'
# fabric docs say fabric could hang if a command fails and recommend
# to use try/finally
try:
pp(SETUP_SUDO)
#!/usr/bin/env python
#-*- coding:utf-8 -*-
##
## satr.py
##
## Created on: Jan 9, 2018
## Author: <NAME>
## E-mail: <EMAIL>
##
#
#==============================================================================
from __future__ import print_function
import collections
import itertools
from minds.rule import Rule
import os
from pysat.card import *
from pysat.examples.lbx import LBX
from pysat.examples.rc2 import RC2
from pysat.formula import CNF, WCNF
from pysat.solvers import Solver
import resource
import socket
import six
from six.moves import range
import sys
#
#==============================================================================
class SATRules(object):
"""
Class implementing the new SAT-based approach.
"""
def __init__(self, data, options):
"""
Constructor.
"""
self.init_stime = resource.getrusage(resource.RUSAGE_SELF).ru_utime
self.init_ctime = resource.getrusage(resource.RUSAGE_CHILDREN).ru_utime
self.data = data
self.options = options
# init variable id pool
self.reset_idpool()
# samples divided into classes
self.samps = {self.data.fvmap.dir[(self.data.names[-1], v)]: [] for v in sorted(self.data.feats[-1])}
# covers by class
self.covrs = {self.data.fvmap.dir[(self.data.names[-1], v)]: [] for v in sorted(self.data.feats[-1])}
for i, s in enumerate(self.data.samps):
self.samps[s[-1]].append(i)
# binarize dataset if necessary
self.binarize()
# get missing values for each sample
self.get_missing()
self.cost = 0
def binarize(self):
"""
Do one-hot encoding.
"""
FFMap = collections.namedtuple('FFMap', ['dir', 'opp'])
self.ffmap = FFMap(dir={}, opp={})
curr_id = 0
vfmap = {} # mapping from a feature id to a list of feature ids
for r, (name, feats) in enumerate(zip(self.data.names[:-1], self.data.feats[:-1])):
fgroup = []
if len(feats) != 2:
vars_ = sorted([self.data.fvmap.dir[name, v] for v in feats])
for i, var in enumerate(vars_):
vfmap[var] = [-v for v in vars_]
vfmap[var][i] = var
self.ffmap.opp[i + curr_id] = var
fgroup.append(i + curr_id)
curr_id += len(feats)
else:
var = self.data.fvmap.dir[name, sorted(feats)[0]]
vfmap[var] = [var]
vfmap[-var] = [-var]
self.ffmap.opp[curr_id] = var
fgroup.append(curr_id)
curr_id += 1
self.ffmap.dir[r] = fgroup
# rewriting samples
for i in range(len(self.data.samps)):
samp, out = self.data.samps[i][:-1], self.data.samps[i][-1]
self.data.samps[i] = []
for l in samp:
self.data.samps[i].extend(vfmap[l])
self.data.samps[i].append(out)
self.nof_feats = curr_id
def get_missing(self):
"""
Get a list of missing values for each sample.
"""
self.data.vmiss = []
for s in self.data.samps:
missing = []
if len(s) < self.nof_feats + 1:
r = i = 0
while i < len(s) - 1:
if r in self.ffmap.dir[self.data.nm2id[self.data.fvmap.opp[abs(s[i])][0]]]:
i += 1
else:
missing.append(r)
r += 1
# adding the rest of the features
missing.extend(range(r, self.nof_feats))
# set is needed for testing inclusion
self.data.vmiss.append(set(missing))
def reset_idpool(self):
"""
Reset the pool of variable ids.
"""
self.idpool = IDPool(start_from=1)
def compute(self):
"""
Compute a decision set by minimizing the number of rules.
"""
self.cost = 0
# iterate over the number of terms
nof_terms = 1
self.time = 0.0
# depending on this option, we compute either one class or all of them
if self.options.to_compute == 'best':
computed = len(self.data.feats[-1])
self.labels = sorted(self.samps.keys())
elif self.options.to_compute == 'all':
computed = 0
self.labels = sorted(self.samps.keys())
else:
to_compute = self.options.to_compute.split(',')
computed = len(self.data.feats[-1]) - len(to_compute)
self.labels = [self.data.fvmap.dir[self.data.names[-1], c] for c in to_compute]
while True:
for label in self.labels:
if self.covrs[label]:
continue
# resetting the pool of ids
self.reset_idpool()
# the main part is encoding
enc = self.encode(label, nof_terms=nof_terms)
if self.options.verb:
print('c1 # of terms: {0}; enc: {1}v, {2}c; (class = {3})'.format(nof_terms,
enc.nv, len(enc.clauses), self.data.fvmap.opp[label][1]))
if self.options.pdump:
fname = 'formula.{0}@{1}.cnf'.format(os.getpid(), socket.gethostname())
enc.to_file(fname)
with Solver(name=self.options.solver, bootstrap_with=enc.clauses) as s:
res = s.solve()
if res:
model = s.get_model()
if self.options.opt:
model = self.optimize(enc)
self.extract_cover(label, model)
computed += 1
if computed >= len(self.data.feats[-1]):
self.stime = resource.getrusage(resource.RUSAGE_SELF).ru_utime - self.init_stime
self.ctime = resource.getrusage(resource.RUSAGE_CHILDREN).ru_utime - self.init_ctime
self.time = self.stime + self.ctime
return self.covrs
else:
nof_terms += 1
def encode(self, label, nof_terms=1):
"""
Encode the problem of computing a DS of size nof_terms.
"""
self.nof_terms = nof_terms
enc = CNF()
# constraint 6
for j in range(1, self.nof_terms + 1):
enc.append([-self.svar(j, r) for r in range(1, self.nof_feats + 1)])
# constraint 7
for j in range(1, self.nof_terms + 1):
for r in range(1, self.nof_feats + 1):
d0 = self.dvar0(j, r)
p0 = [-self.svar(j, r), self.lvar(j, r)]
enc.append([d0, -p0[0], -p0[1]])
enc.append([-d0, p0[0]])
enc.append([-d0, p0[1]])
d1 = self.dvar1(j, r)
p1 = [-self.svar(j, r), -self.lvar(j, r)]
enc.append([d1, -p1[0], -p1[1]])
enc.append([-d1, p1[0]])
enc.append([-d1, p1[1]])
# constraint 8
if len(self.labels) == 1: # distinguish one class from all the others
other_labels = set(self.samps.keys())
else: # distinguish the classes under question only
other_labels = set(self.labels)
other_labels.remove(label)
other_labels = sorted(other_labels)
for j in range(1, self.nof_terms + 1):
for lb in other_labels:
for q in self.samps[lb]:
cl = []
shift = 0
for r in range(1, self.nof_feats + 1):
if r - 1 in self.data.vmiss[q]:
# this feature is missing in q'th sample
cl.append(-self.svar(j, r))
shift += 1
elif self.data.samps[q][r - 1 - shift] > 0:
cl.append(self.dvar1(j, r))
else:
cl.append(self.dvar0(j, r))
enc.append(cl)
# constraint 9
for j in range(1, self.nof_terms + 1):
for q in self.samps[label]:
cr = self.crvar(j, q + 1)
cl = []
shift = 0
for r in range(1, self.nof_feats + 1):
if r - 1 in self.data.vmiss[q]:
# this feature is missing in q'th sample
cl.append(-self.svar(j, r))
shift += 1
elif self.data.samps[q][r - 1 - shift] > 0:
cl.append(self.dvar1(j, r))
else:
cl.append(self.dvar0(j, r))
enc.append([cr] + cl)
for l in cl:
enc.append([-cr, -l])
# symmetry breaking constraints
if self.options.bsymm:
self.add_bsymm(enc)
# constraint 10
if self.options.accuracy == 100.0:
for q in self.samps[label]:
enc.append([self.crvar(j, q + 1) for j in range(1, self.nof_terms + 1)])
else:
for q in self.samps[label]:
cv = self.cvvar(q + 1)
enc.append([-cv] + [self.crvar(j, q + 1) for j in range(1, self.nof_terms + 1)])
for j in range(1, self.nof_terms + 1):
enc.append([-self.crvar(j, q + 1), cv])
cnum = int(self.options.accuracy * len(self.samps[label]) / 100.0)
al = CardEnc.atleast([self.cvvar(q + 1) for q in self.samps[label]], bound=cnum, top_id=enc.nv, encoding=self.options.enc)
if al:
enc.extend(al.clauses)
# at most one value can be chosen for a feature
for feats in six.itervalues(self.ffmap.dir):
if len(feats) > 2:
for j in range(1, self.nof_terms + 1):
lits = [self.dvar0(j, r + 1) for r in feats] # atmost1 can be true
onev = CardEnc.atmost(lits, top_id=enc.nv, encoding=self.options.enc)
enc.extend(onev.clauses)
# saving comments
for j in range(1, self.nof_terms + 1):
for r in range(1, self.nof_feats + 1):
enc.comments.append('c s({0}, {1}) => v{2}'.format(j, r, self.svar(j, r)))
enc.comments.append('c l({0}, {1}) => v{2}'.format(j, r, self.lvar(j, r)))
enc.comments.append('c d0({0}, {1}) => v{2}'.format(j, r, self.dvar0(j, r)))
enc.comments.append('c d1({0}, {1}) => v{2}'.format(j, r, self.dvar1(j, r)))
for q in range(len(self.data.samps)):
enc.comments.append('c cr({0}, {1}) => v{2}'.format(j, q + 1, self.crvar(j, q + 1)))
for n, f in zip(self.data.names[:-1], self.data.feats[:-1]):
for v in f:
if self.data.fvmap.dir[(n, v)] > 0:
enc.comments.append('c {0} = {1} => positive'.format(n, v))
else:
enc.comments.append('c {0} = {1} => negative'.format(n, v))
return enc
def add_bsymm(self, enc):
"""
Symmetry breaking constraints.
"""
for j in range(2, self.nof_terms + 1):
enc.append([self.eqvar(j, 0)])
enc.append([-self.gtvar(j, 0)])
enc.append([self.gtvar(j, self.nof_feats)]) # enforcing SBPs
for r in range(1, self.nof_feats + 1):
# constraint 11
#
# left-hand side
lhs = -self.eqvar(j, r)
# term1
enc.append([-self.teqvar(j, r, 1), self.svar(j - 1, r)])
enc.append([-self.teqvar(j, r, 1), -self.svar(j, r)])
enc.append([self.teqvar(j, r, 1), -self.svar(j - 1, r), self.svar(j, r)])
# term2
enc.append([-self.teqvar(j, r, 2), -self.svar(j - 1, r)])
enc.append([-self.teqvar(j, r, 2), self.svar(j, r)])
enc.append([self.teqvar(j, r, 2), self.svar(j - 1, r), -self.svar(j, r)])
# term3
enc.append([-self.teqvar(j, r, 3), self.dvar1(j - 1, r)])
enc.append([-self.teqvar(j, r, 3), self.dvar0(j, r)])
enc.append([self.teqvar(j, r, 3), -self.dvar1(j - 1, r), -self.dvar0(j, r)])
# term4
enc.append([-self.teqvar(j, r, 4), self.dvar0(j - 1, r)])
enc.append([-self.teqvar(j, r, 4), self.dvar1(j, r)])
enc.append([self.teqvar(j, r, 4), -self.dvar0(j - 1, r), -self.dvar1(j, r)])
# right-hand side
cl = [-self.eqvar(j, r - 1),
self.teqvar(j, r, 1),
self.teqvar(j, r, 2),
self.teqvar(j, r, 3),
self.teqvar(j, r, 4)]
# final clauses
enc.append([-lhs] + cl)
for l in cl:
enc.append([-l, lhs])
# constraint 12
#
# left-hand side
lhs = self.gtvar(j, r)
# term1
enc.append([-self.tgtvar(j, r, 1), self.eqvar(j, r - 1)])
enc.append([-self.tgtvar(j, r, 1), -self.svar(j - 1, r)])
enc.append([-self.tgtvar(j, r, 1), self.svar(j, r)])
enc.append([self.tgtvar(j, r, 1), -self.eqvar(j, r - 1), self.svar(j - 1, r), -self.svar(j, r)])
# term2
enc.append([-self.tgtvar(j, r, 2), self.eqvar(j, r - 1)])
enc.append([-self.tgtvar(j, r, 2), self.dvar1(j - 1, r)])
enc.append([-self.tgtvar(j, r, 2), self.dvar0(j, r)])
enc.append([self.tgtvar(j, r, 2), -self.eqvar(j, r - 1), -self.dvar1(j - 1, r), -self.dvar0(j, r)])
# right-hand side
cl = [self.gtvar(j, r - 1),
self.tgtvar(j, r, 1),
self.tgtvar(j, r, 2)]
# final clauses
enc.append([-lhs] + cl)
for l in cl:
enc.append([-l, lhs])
"""
Run random agent to test the 3D environment
"""
import numpy as np
import gym
import gym_pcgrl
from pdb import set_trace as TT
# from utils import make_vec_envs
from gym_pcgrl.envs.helper_3D import calc_num_regions, debug_path, get_string_map,\
get_tile_locations, calc_longest_path, run_dijkstra
import matplotlib.pyplot as plt
################################################################################
# test the helper functions
tile_types = ["AIR", "DIRT"]
######## Test the path finding func and region counting func in stair-climbing logic #########
# test_map_1:
# size: 7 * 7 * 5
# longest path length: 28 + 2 + 29 = 59
test_map_1 = [
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0, 0]
],
[
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1]
],
[
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0]
],
[
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 1, 0]
]
]
# test_map_2:
# size: 7 * 7 * 5
# longest path length: 28 + 2 + 27 = 57
test_map_2 = [
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0, 0]
],
[
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1]
],
[
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]
],
[
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 0, 0, 0]
]
]
# test_map_3:
# size: 7 * 7 * 5
# longest path length: 28 + 2 + 27 = 57
# info: identical to test_map_2, except that some unnecessary tiles are removed (to test region number)
test_map_3 = [
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0, 0]
],
[
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1]
],
[
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0] # diff: [0, 0, 0, 1, 0, 0, 0] in test_map_2
],
[
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0] # diff: [0, 0, 1, 1, 0, 0, 0] in test_map_2
]
]
# test_map_4:
# size: 3 * 6 * 6
# longest path length: 2 + 1 + 1 + 1 = 5
# info: small map for testing climbing stairs
test_map_4 = [
[
[1, 0, 1],
[1, 0, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
],
[
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
],
[
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[1, 1, 1],
[1, 1, 1]
],
[
[1, 1, 1],
[1, 1, 1],
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[1, 1, 1]
],
[
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 0, 1],
[1, 0, 1],
[1, 1, 1]
],
[
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
]
]
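# A rough sketch of how a map such as test_map_4 might be run through the
# helpers imported above (the exact 3D helper signatures may differ, so treat
# this as illustrative only):
#
#   string_map = get_string_map(np.array(test_map_4), tile_types)
#   map_locations = get_tile_locations(string_map, tile_types)
#   num_regions = calc_num_regions(string_map, map_locations, ["AIR"])
#   path_length, path_coords = calc_longest_path(string_map, map_locations,
#                                                ["AIR"], get_path=True)
#   # per the comment above test_map_4, path_length should come out to 5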
########### For testing the 3D plotting ###########
# test_map_5:
# size: 8 * 2 * 4
test_map_5 = [
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0],
],
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
],
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
],
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
]
]
############ Test the path finding func in the jumping logic #############
# Note: In Minecraft jumping, the extra head room of the starting position and the extra
# head room of the position 1 before the foothold need to be guaranteed
#
# |__
# O
# 大_ __
# | |
# | |
# test_map_6:
# size: 5 * 1 * 6
# This is the max jump distance in Minecraft (press double w + space to jump)
# path length: 2
# region number: 1
# jump: 1
# jump distance: 3
test_map_6 = [
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0]
GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
},
# XRayOutput
0x00400312L: {
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
None: ['Image'],
},
# IntervalsAcquired
0x00181083L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'ENHANCED CT IMAGE IOD': ['Image'],
},
# HalfValueLayer
0x00400314L: {
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
None: ['Image'],
},
# PresentationLUTShape
0x20500020L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image', 'Presentation State', 'Dose', 'Segmentation'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'NM IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# OrganDose
0x00400316L: {
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
None: ['Image'],
},
# VisualFieldTestReliabilityGlobalIndexSequence
0x00240317L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# OrganExposed
0x00400318L: {
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
None: ['Image'],
},
# ContentCreatorName
0x00700084L: {
'BASIC STRUCTURED DISPLAY IOD': ['Presentation State'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
None: ['Presentation State', 'Color Palette', 'Spatial Registration', 'Spatial Fiducials', 'Surface', 'Deformable Registration', 'Segmentation', 'Real World Value Mapping'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'SEGMENTATION IOD': ['Segmentation'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'SPATIAL REGISTRATION IOD': ['Spatial Registration'],
'REAL WORLD VALUE MAPPING IOD': ['Real World Value Mapping'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Deformable Registration'],
'SPATIAL FIDUCIALS IOD': ['Spatial Fiducials'],
'COLOR PALETTE IOD': ['Color Palette'],
'SURFACE SEGMENTATION IOD': ['Surface'],
},
# FiducialUID
0x0070031AL: {
'SPECTACLE PRESCIPTION REPORT IOD': ['Document'],
'KEY OBJECT SELECTION DOCUMENT IOD': ['Document'],
'MAMMOGRAPHY CAD SR IOD': ['Document'],
'BASIC TEXT SR IOD': ['Document'],
'X-RAY RADIATION DOSE SR IOD': ['Document'],
'PROCEDURE LOG IOD': ['Document'],
'ENHANCED SR IOD': ['Document'],
'CHEST CAD SR IOD': ['Document'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Document'],
None: ['Document'],
'IMPLANTATION PLAN SR DOCUMENT IOD': ['Document'],
'COMPREHENSIVE SR IOD': ['Document'],
'COLON CAD SR IOD': ['Document'],
},
# ScanVelocity
0x00181300L: {
'NM IMAGE IOD': ['Image'],
None: ['Image'],
},
# FiducialSetSequence
0x0070031CL: {
'SPATIAL FIDUCIALS IOD': ['Spatial Fiducials'],
None: ['Spatial Fiducials'],
},
# CardiacSignalSource
0x00189085L: {
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'ENHANCED CT IMAGE IOD': ['Image'],
},
# BillingProcedureStepSequence
0x00400320L: {
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
None: ['Modality Performed Procedure Step'],
},
# FilmConsumptionSequence
0x00400321L: {
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
None: ['Modality Performed Procedure Step'],
},
# DoseCalibrationFactor
0x00541322L: {
'PET IMAGE IOD': ['Image'],
None: ['Image'],
},
# ExposureModulationType
0x00189323L: {
'CT IMAGE IOD': ['Image'],
None: ['Image'],
},
# BillingSuppliesAndDevicesSequence
0x00400324L: {
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
None: ['Modality Performed Procedure Step'],
},
# ContentCreatorIdentificationCodeSequence
0x00700086L: {
'BASIC STRUCTURED DISPLAY IOD': ['Presentation State'],
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
None: ['Presentation State', 'Color Palette', 'Spatial Registration', 'Spatial Fiducials', 'Surface', 'Deformable Registration', 'Segmentation', 'Real World Value Mapping'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'SEGMENTATION IOD': ['Segmentation'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'SPATIAL REGISTRATION IOD': ['Spatial Registration'],
'REAL WORLD VALUE MAPPING IOD': ['Real World Value Mapping'],
'DEFORMABLE SPATIAL REGISTRATION IOD': ['Deformable Registration'],
'SPATIAL FIDUCIALS IOD': ['Spatial Fiducials'],
'COLOR PALETTE IOD': ['Color Palette'],
'SURFACE SEGMENTATION IOD': ['Surface'],
},
# GridSpacingMaterial
0x00187041L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
},
# SUVType
0x00541006L: {
'PET IMAGE IOD': ['Series'],
None: ['Series'],
},
# ExposureTimeInms
0x00189328L: {
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
None: ['Image'],
},
# EchoTrainLength
0x00180091L: {
'MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# MagneticFieldStrength
0x00180087L: {
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'MR SPECTROSCOPY IOD': ['Equipment'],
'ENHANCED MR IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
None: ['Image', 'Equipment'],
},
# StimulusPresentationTime
0x00240028L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# DateOfSecondaryCapture
0x00181012L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# RefractiveStateSequence
0x0022001BL: {
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
None: ['Image'],
},
# SpacingBetweenSlices
0x00180088L: {
'NM IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# ExposureInmAs
0x00189332L: {
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
None: ['Image'],
},
# ProjectionEponymousNameCodeSequence
0x00185104L: {
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
None: ['Image'],
},
# EventTimerSequence
0x00082133L: {
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
None: ['Image'],
},
# VisualFieldTestPointSequence
0x00240089L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# ImplantTemplateGroupVersion
0x00780024L: {
'IMPLANT TEMPLATE GROUP IOD': ['Implant Template Group'],
None: ['Implant Template Group'],
},
# PerformedSeriesSequence
0x00400340L: {
'MODALITY PERFORMED PROCEDURE STEP IOD': ['Modality Performed Procedure Step'],
None: ['Modality Performed Procedure Step'],
},
# BarcodeValue
0x22000005L: {
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'MEDIA CREATION MANAGEMENT IOD': ['Media Creation Management'],
None: ['Image', 'Media Creation Management'],
},
# GridThickness
0x00187042L: {
None: ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
},
# CTDIvol
0x00189345L: {
'CT IMAGE IOD': ['Image'],
None: ['Image'],
},
# CTDIPhantomTypeCodeSequence
0x00189346L: {
'CT IMAGE IOD': ['Image'],
None: ['Image'],
},
# TimeSliceVector
0x00540100L: {
'NM IMAGE IOD': ['Image'],
None: ['Image'],
},
# ServiceEpisodeID
0x00380060L: {
'BASIC STRUCTURED DISPLAY IOD': ['Study'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Study'],
'RT BRACHY TREATMENT RECORD IOD': ['Study'],
'RT STRUCTURE SET IOD': ['Study'],
'RT PLAN IOD': ['Study'],
'CR IMAGE IOD': ['Study'],
'RAW DATA IOD': ['Study'],
'MACULAR GRID THIICKNESS AND VOLUME REPORT IOD': ['Study'],
'ENHANCED MR IMAGE IOD': ['Study'],
'BASIC CARDIAC EP IOD':
# zulip: zerver/lib/event_schema.py
# This module is a collection of testing helpers for validating the
# schema of "events" sent by Zulip's server-to-client push system.
#
# By policy, every event generated by Zulip's API should be validated
# by a test in test_events.py with a schema checker here (which is
# validated, in turn, against the OpenAPI documentation for GET
# /events in zulip.yaml and the fixtures used by the Zulip webapp
# frontend).
#
# See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html
#
# The general paradigm here is that if you have an event with type foo_bar
# then you declare foo_bar_event to be an instance of event_dict_type. And
# then you make a checker function by saying:
#
# check_foo_bar = make_checker(foo_bar_event)
#
# And then the caller can use the checker as follows:
#
# check_foo_bar(var_name, event)
#
# For more complicated events, you may write custom checkers that check
# aspects of the data that go beyond simply validating that the data
# matches an event_dict_type based schema. This typically happens with
# events where you either have a Union type or optional_keys.
#
# See check_delete_message and check_presence for examples of this
# paradigm.
from typing import Dict, List, Sequence, Set, Tuple, Union
from zerver.lib.data_types import (
DictType,
EnumType,
Equals,
ListType,
NumberType,
OptionalType,
StringDictType,
TupleType,
UnionType,
UrlType,
check_data,
event_dict_type,
make_checker,
)
from zerver.lib.topic import ORIG_TOPIC, TOPIC_LINKS, TOPIC_NAME
from zerver.models import Realm, Stream, Subscription, UserProfile
# These fields are used for "stream" events, and are included in the
# larger "subscription" events that also contain personal settings.
basic_stream_fields = [
("description", str),
("first_message_id", OptionalType(int)),
("history_public_to_subscribers", bool),
("invite_only", bool),
("is_announcement_only", bool),
("is_web_public", bool),
("message_retention_days", OptionalType(int)),
("name", str),
("rendered_description", str),
("stream_id", int),
("stream_post_policy", int),
("date_created", int),
]
subscription_fields: Sequence[Tuple[str, object]] = [
*basic_stream_fields,
("audible_notifications", OptionalType(bool)),
("color", str),
("desktop_notifications", OptionalType(bool)),
("email_address", str),
("email_notifications", OptionalType(bool)),
("in_home_view", bool),
("is_muted", bool),
("pin_to_top", bool),
("push_notifications", OptionalType(bool)),
("role", EnumType(Subscription.ROLE_TYPES)),
("stream_weekly_traffic", OptionalType(int)),
# We may try to remove subscribers from some events in
# the future for clients that don't want subscriber
# info.
("subscribers", ListType(int)),
("wildcard_mentions_notify", OptionalType(bool)),
]
equals_add_or_remove = UnionType(
[
# force vertical
Equals("add"),
Equals("remove"),
]
)
value_type = UnionType(
[
# force vertical formatting
bool,
int,
str,
]
)
optional_value_type = UnionType(
[
# force vertical formatting
bool,
int,
str,
Equals(None),
]
)
alert_words_event = event_dict_type(
required_keys=[
# force vertical formatting
("type", Equals("alert_words")),
("alert_words", ListType(str)),
]
)
check_alert_words = make_checker(alert_words_event)
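# A minimal usage sketch (the sample event below is illustrative):
#     event = {"id": 0, "type": "alert_words", "alert_words": ["fire", "lunch"]}
#     check_alert_words("event", event)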
attachment_message_type = DictType(
required_keys=[
# force vertical
("id", int),
("date_sent", int),
]
)
attachment_type = DictType(
required_keys=[
("id", int),
("name", str),
("size", int),
("path_id", str),
("create_time", int),
("messages", ListType(attachment_message_type)),
]
)
attachment_add_event = event_dict_type(
required_keys=[
("type", Equals("attachment")),
("op", Equals("add")),
("attachment", attachment_type),
("upload_space_used", int),
]
)
check_attachment_add = make_checker(attachment_add_event)
attachment_remove_event = event_dict_type(
required_keys=[
("type", Equals("attachment")),
("op", Equals("remove")),
("attachment", DictType([("id", int)])),
("upload_space_used", int),
]
)
check_attachment_remove = make_checker(attachment_remove_event)
attachment_update_event = event_dict_type(
required_keys=[
("type", Equals("attachment")),
("op", Equals("update")),
("attachment", attachment_type),
("upload_space_used", int),
]
)
check_attachment_update = make_checker(attachment_update_event)
custom_profile_field_type = DictType(
required_keys=[
("id", int),
("type", int),
("name", str),
("hint", str),
("field_data", str),
("order", int),
],
)
custom_profile_fields_event = event_dict_type(
required_keys=[
("type", Equals("custom_profile_fields")),
("fields", ListType(custom_profile_field_type)),
]
)
check_custom_profile_fields = make_checker(custom_profile_fields_event)
_check_stream_group = DictType(
required_keys=[
("name", str),
("id", int),
("description", str),
("streams", ListType(DictType(basic_stream_fields))),
]
)
default_stream_groups_event = event_dict_type(
required_keys=[
# force vertical
("type", Equals("default_stream_groups")),
("default_stream_groups", ListType(_check_stream_group)),
]
)
check_default_stream_groups = make_checker(default_stream_groups_event)
default_streams_event = event_dict_type(
required_keys=[
("type", Equals("default_streams")),
("default_streams", ListType(DictType(basic_stream_fields))),
]
)
check_default_streams = make_checker(default_streams_event)
# The event type has an unusual number of optional fields. The
# message_id/message_ids fields are conditional on the
# bulk_message_deletion client_capability, whereas the other fields
# are conditional on private vs. stream messages.
delete_message_event = event_dict_type(
required_keys=[
# force vertical
("type", Equals("delete_message")),
("message_type", EnumType(["private", "stream"])),
],
optional_keys=[
("message_id", int),
("message_ids", ListType(int)),
("stream_id", int),
("topic", str),
("recipient_id", int),
("sender_id", int),
],
)
_check_delete_message = make_checker(delete_message_event)
def check_delete_message(
var_name: str,
event: Dict[str, object],
message_type: str,
num_message_ids: int,
is_legacy: bool,
) -> None:
_check_delete_message(var_name, event)
keys = {"id", "type", "message_type"}
assert event["message_type"] == message_type
if message_type == "stream":
keys |= {"stream_id", "topic"}
elif message_type == "private":
keys |= {"recipient_id", "sender_id"}
else:
raise AssertionError("unexpected message_type")
if is_legacy:
assert num_message_ids == 1
keys.add("message_id")
else:
assert isinstance(event["message_ids"], list)
assert num_message_ids == len(event["message_ids"])
keys.add("message_ids")
assert set(event.keys()) == keys
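# Illustrative usage sketch (not part of the original module): the payload below is
# hypothetical and simply mirrors the schema above, showing how the custom checker is
# called for a stream-message deletion from a client with bulk_message_deletion enabled.
def _example_check_delete_message() -> None:
    hypothetical_event = {
        "id": 0,
        "type": "delete_message",
        "message_type": "stream",
        "message_ids": [101, 102],
        "stream_id": 7,
        "topic": "lunch",
    }
    check_delete_message(
        "events[0]",
        hypothetical_event,
        message_type="stream",
        num_message_ids=2,
        is_legacy=False,
    )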
has_zoom_token_event = event_dict_type(
required_keys=[
# force vertical
("type", Equals("has_zoom_token")),
("value", bool),
]
)
_check_has_zoom_token = make_checker(has_zoom_token_event)
def check_has_zoom_token(
# force vertical
var_name: str,
event: Dict[str, object],
value: bool,
) -> None:
_check_has_zoom_token(var_name, event)
assert event["value"] == value
_hotspot = DictType(
required_keys=[
# force vertical
("name", str),
("title", str),
("description", str),
("delay", NumberType()),
]
)
hotspots_event = event_dict_type(
required_keys=[
# force vertical
("type", Equals("hotspots")),
(
"hotspots",
ListType(_hotspot),
),
]
)
check_hotspots = make_checker(hotspots_event)
invites_changed_event = event_dict_type(
required_keys=[
# the most boring event...no metadata
("type", Equals("invites_changed")),
]
)
check_invites_changed = make_checker(invites_changed_event)
# This type, like other instances of TupleType, is a legacy feature of
# a very old Zulip API; we plan to replace it with an object as those
# are more extensible.
muted_topic_type = TupleType(
[
str, # stream name
str, # topic name
int, # timestamp
]
)
muted_topics_event = event_dict_type(
required_keys=[
("type", Equals("muted_topics")),
("muted_topics", ListType(muted_topic_type)),
]
)
check_muted_topics = make_checker(muted_topics_event)
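# Illustrative usage sketch (not part of the original module): a hypothetical
# muted_topics event using the legacy (stream name, topic name, timestamp) tuples
# described above; the values are made up for illustration only.
def _example_check_muted_topics() -> None:
    hypothetical_event = {
        "id": 0,
        "type": "muted_topics",
        "muted_topics": [["Verona", "lunch", 1594825362]],
    }
    check_muted_topics("events[0]", hypothetical_event)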
muted_user_type = DictType(
required_keys=[
("id", int),
("timestamp", int),
]
)
muted_users_event = event_dict_type(
required_keys=[
("type", Equals("muted_users")),
("muted_users", ListType(muted_user_type)),
]
)
check_muted_users = make_checker(muted_users_event)
_check_topic_links = DictType(
required_keys=[
("text", str),
("url", str),
]
)
message_fields = [
("avatar_url", OptionalType(str)),
("client", str),
("content", str),
("content_type", Equals("text/html")),
("display_recipient", str),
("id", int),
("is_me_message", bool),
("reactions", ListType(dict)),
("recipient_id", int),
("sender_realm_str", str),
("sender_email", str),
("sender_full_name", str),
("sender_id", int),
("stream_id", int),
(TOPIC_NAME, str),
(TOPIC_LINKS, ListType(_check_topic_links)),
("submessages", ListType(dict)),
("timestamp", int),
("type", str),
]
message_event = event_dict_type(
required_keys=[
("type", Equals("message")),
("flags", ListType(str)),
("message", DictType(message_fields)),
]
)
check_message = make_checker(message_event)
# This legacy presence structure is intended to be replaced by a more
# sensible data structure.
presence_type = DictType(
required_keys=[
("status", EnumType(["active", "idle"])),
("timestamp", int),
("client", str),
("pushable", bool),
]
)
presence_event = event_dict_type(
required_keys=[
("type", Equals("presence")),
("user_id", int),
("server_timestamp", NumberType()),
("presence", StringDictType(presence_type)),
],
optional_keys=[
# force vertical
("email", str),
],
)
_check_presence = make_checker(presence_event)
def check_presence(
var_name: str,
event: Dict[str, object],
has_email: bool,
presence_key: str,
status: str,
) -> None:
_check_presence(var_name, event)
assert ("email" in event) == has_email
assert isinstance(event["presence"], dict)
# Our tests only have one presence value.
assert len(event["presence"]) == 1
assert list(event["presence"].keys())[0] == presence_key
assert list(event["presence"].values())[0]["status"] == status
# Type for the legacy user field; the `user_id` field is intended to
# replace this and we expect to remove this once clients have migrated
# to support the modern API.
reaction_legacy_user_type = DictType(
required_keys=[
# force vertical
("email", str),
("full_name", str),
("user_id", int),
]
# We should probably declare is_mirror_dummy as an optional field here.
)
reaction_add_event = event_dict_type(
required_keys=[
("type", Equals("reaction")),
("op", Equals("add")),
("message_id", int),
("emoji_name", str),
("emoji_code", str),
("reaction_type", str),
("user_id", int),
("user", reaction_legacy_user_type),
]
)
check_reaction_add = make_checker(reaction_add_event)
reaction_remove_event = event_dict_type(
required_keys=[
("type", Equals("reaction")),
("op", Equals("remove")),
("message_id", int),
("emoji_name", str),
("emoji_code", str),
("reaction_type", str),
("user_id", int),
("user", reaction_legacy_user_type),
]
)
check_reaction_remove = make_checker(reaction_remove_event)
realm_deactivated_event = event_dict_type(
required_keys=[
("type", Equals("realm")),
("op", Equals("deactivated")),
("realm_id", int),
]
)
check_realm_deactivated = make_checker(realm_deactivated_event)
bot_services_outgoing_type = DictType(
required_keys=[
# force vertical
("base_url", UrlType()),
("interface", int),
("token", str),
]
)
config_data_schema = StringDictType(str)
bot_services_embedded_type = DictType(
required_keys=[
# force vertical
("service_name", str),
("config_data", config_data_schema),
]
)
# Note that regular bots just get an empty list of services,
# so the sub_validator for ListType won't matter for them.
bot_services_type = ListType(
UnionType(
[
# force vertical
bot_services_outgoing_type,
bot_services_embedded_type,
]
),
)
bot_type = DictType(
required_keys=[
("user_id", int),
("api_key", str),
("avatar_url", str),
("bot_type", int),
("default_all_public_streams", bool),
("default_events_register_stream", OptionalType(str)),
("default_sending_stream", OptionalType(str)),
("email", str),
("full_name", str),
("is_active", bool),
("owner_id", int),
("services", bot_services_type),
]
)
realm_bot_add_event = event_dict_type(
required_keys=[
# force vertical
("type", Equals("realm_bot")),
("op", Equals("add")),
("bot", bot_type),
]
)
_check_realm_bot_add = make_checker(realm_bot_add_event)
def check_realm_bot_add(
var_name: str,
event: Dict[str, object],
) -> None:
_check_realm_bot_add(var_name, event)
assert isinstance(event["bot"], dict)
bot_type = event["bot"]["bot_type"]
services_field = f"{var_name}['bot']['services']"
services = event["bot"]["services"]
if bot_type == UserProfile.DEFAULT_BOT:
check_data(Equals([]), services_field, services)
elif bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
check_data(ListType(bot_services_outgoing_type, length=1), services_field, services)
elif bot_type == UserProfile.EMBEDDED_BOT:
check_data(ListType(bot_services_embedded_type, length=1), services_field, services)
else:
raise AssertionError(f"Unknown bot_type: {bot_type}")
bot_type_for_delete = DictType(
required_keys=[
# for legacy reasons we have a dict here
# with only one key
("user_id", int),
]
)
realm_bot_delete_event = event_dict_type(
required_keys=[
("type", Equals("realm_bot")),
("op", Equals("delete")),
("bot", bot_type_for_delete),
]
)
check_realm_bot_delete = make_checker(realm_bot_delete_event)
bot_type_for_remove = DictType(
required_keys=[
# Why does remove have full_name but delete doesn't?
# Why do we have both a remove and a delete event
# for bots? I don't know the answer as I write this.
("full_name", str),
("user_id", int),
]
)
realm_bot_remove_event = event_dict_type(
required_keys=[
("type", Equals("realm_bot")),
("op", Equals("remove")),
("bot", bot_type_for_remove),
]
)
check_realm_bot_remove = make_checker(realm_bot_remove_event)
bot_type_for_update = DictType(
required_keys=[
# force vertical
("user_id", int),
],
optional_keys=[
("api_key", str),
("avatar_url", str),
("default_all_public_streams", bool),
("default_events_register_stream", OptionalType(str)),
("default_sending_stream", OptionalType(str)),
("full_name", str),
("owner_id", int),
("services", bot_services_type),
],
)
realm_bot_update_event = event_dict_type(
required_keys=[
("type", Equals("realm_bot")),
("op", | |
True)
# self.trans_fc = nn.Sequential(
# nn.Linear(512,68*2),#20
# )
# def forward(self, f_landmark, trans_input, length_mots):
# b, d, n_mel, l = trans_input.shape
# trans_input = trans_input.reshape(b, d*n_mel, l)
# # f_landmark = f_landmark.unsqueeze(-1).repeat(1, 1, l)
# trans_input = torch.cat([trans_input,f_landmark], 1).permute(0,2,1).reshape(b*l, d*n_mel+512)
# trans_input = self.extract_feature(trans_input).reshape(b, l, 512).permute(0, 2, 1).unsqueeze(2)
# mask = torch.ones((b, l), dtype=torch.bool).to(trans_input.device)
# for index, lm in enumerate(length_mots):
# mask[index, :lm] = False
# # print('trans mask: ', mask, mask.shape)
# # assert(0)
# pos_emb = self.position_embedding(trans_input, mask.unsqueeze(1))
# # print(pos_emb[0, 510, 0, :], pos_emb.shape)
# # print(pos_emb[0, 251, 0, :], pos_emb.shape)
# out = self.metrans(trans_input, mask, pos_emb)
# # print('transformer out: ', out, out.shape)
# fc_out = self.trans_fc(out.reshape(b*l, 512)).reshape(b, l, 68*2)
# return fc_out
class Generator(nn.Module):
def __init__(self, dim_in=48, style_dim=48, max_conv_dim=48*8, w_hpf=1, F0_channel=0, audio=False):
super().__init__()
self.audio = audio
self.stem = nn.Conv2d(1, dim_in, 3, 1, 1)
self.encode = nn.ModuleList()
self.decode = nn.ModuleList()
self.to_out = nn.Sequential(
nn.InstanceNorm2d(dim_in, affine=True),
nn.LeakyReLU(0.2),
nn.Conv2d(dim_in, 1, 1, 1, 0))
self.F0_channel = F0_channel
# down/up-sampling blocks
repeat_num = 4 #int(np.log2(img_size)) - 4
if w_hpf > 0:
repeat_num += 1
for lid in range(repeat_num):
if lid in [1, 3]:
_downtype = 'timepreserve'
else:
_downtype = 'half'
dim_out = min(dim_in*2, max_conv_dim)
self.encode.append(
ResBlk(dim_in, dim_out, normalize=True, downsample=_downtype))
self.decode.insert(
0, AdainResBlk(dim_out, dim_in, style_dim,
w_hpf=w_hpf, upsample=_downtype)) # stack-like
dim_in = dim_out
# bottleneck blocks (encoder)
for _ in range(2):
self.encode.append(
ResBlk(dim_out, dim_out, normalize=True))
# F0 blocks
if F0_channel != 0:
self.decode.insert(
0, AdainResBlk(dim_out + int(F0_channel / 2), dim_out, style_dim, w_hpf=w_hpf))
# bottleneck blocks (decoder)
for _ in range(2):
self.decode.insert(
0, AdainResBlk(dim_out + int(F0_channel / 2), dim_out + int(F0_channel / 2), style_dim, w_hpf=w_hpf))
if F0_channel != 0:
self.F0_conv = nn.Sequential(
ResBlk(F0_channel, int(F0_channel / 2), normalize=True, downsample="half"),
)
if w_hpf > 0:
device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
self.hpf = HighPass(w_hpf, device)
def forward(self, x, s, masks=None, F0=None):
x = self.stem(x)
cache = {}
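# When masks are supplied, encoder activations whose size along dim 2 is 32/64/128 are
# cached here so they can be re-injected through the high-pass filter as skip
# connections during decoding; with masks=None the cache simply stays empty.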
for block in self.encode:
if (masks is not None) and (x.size(2) in [32, 64, 128]):
cache[x.size(2)] = x
x = block(x)
if F0 is not None:
F0 = self.F0_conv(F0)
F0 = F.adaptive_avg_pool2d(F0, [x.shape[-2], x.shape[-1]])
x = torch.cat([x, F0], axis=1)
# print('model 230 x+F0 shape:', x.shape) # 5,74?
for block in self.decode:
x = block(x, s)
if (masks is not None) and (x.size(2) in [32, 64, 128]):
mask = masks[0] if x.size(2) in [32] else masks[1]
mask = F.interpolate(mask, size=x.size(2), mode='bilinear')
x = x + self.hpf(mask * cache[x.size(2)])
# print('model 303 generator output:', x.shape, self.to_out(x).shape) # model 237 generator output: torch.Size([b, 64(c), 80(numl), 296(length)]) torch.Size([1, 1, 80, 296])
if self.audio:
return self.to_out(x)
else:
return self.to_out(x), x
class MappingNetwork(nn.Module):
def __init__(self, latent_dim=16, style_dim=48, num_domains=2, hidden_dim=384):
super().__init__()
layers = []
layers += [nn.Linear(latent_dim, hidden_dim)]
layers += [nn.ReLU()]
for _ in range(3):
layers += [nn.Linear(hidden_dim, hidden_dim)]
layers += [nn.ReLU()]
self.shared = nn.Sequential(*layers)
self.unshared = nn.ModuleList()
for _ in range(num_domains):
self.unshared += [nn.Sequential(nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, style_dim))]
def forward(self, z, y):
h = self.shared(z)
out = []
for layer in self.unshared:
out += [layer(h)]
out = torch.stack(out, dim=1) # (batch, num_domains, style_dim)
idx = torch.LongTensor(range(y.size(0))).to(y.device)
s = out[idx, y] # (batch, style_dim)
return s
class StyleEncoder(nn.Module):
def __init__(self, dim_in=48, style_dim=48, num_domains=2, max_conv_dim=384):
super().__init__()
blocks = []
blocks += [nn.Conv2d(1, dim_in, 3, 1, 1)]
repeat_num = 4
for _ in range(repeat_num):
dim_out = min(dim_in*2, max_conv_dim)
blocks += [ResBlk(dim_in, dim_out, downsample='half')]
dim_in = dim_out
blocks += [nn.LeakyReLU(0.2)]
blocks += [nn.Conv2d(dim_out, dim_out, 5, 1, 0)]
blocks += [nn.AdaptiveAvgPool2d(1)]
blocks += [nn.LeakyReLU(0.2)]
self.shared = nn.Sequential(*blocks)
self.unshared = nn.ModuleList()
for _ in range(num_domains):
self.unshared += [nn.Linear(dim_out, style_dim)]
def forward(self, x, y):
h = self.shared(x)
h = h.view(h.size(0), -1)
out = []
for layer in self.unshared:
out += [layer(h)]
out = torch.stack(out, dim=1) # (batch, num_domains, style_dim)
idx = torch.LongTensor(range(y.size(0))).to(y.device)
s = out[idx, y] # (batch, style_dim)
return s
class Discriminator(nn.Module):
def __init__(self, dim_in=48, num_domains=2, max_conv_dim=384, repeat_num=4):
super().__init__()
# real/fake discriminator
self.dis = Discriminator2d(dim_in=dim_in, num_domains=num_domains,
max_conv_dim=max_conv_dim, repeat_num=repeat_num)
# adversarial classifier
self.cls = Discriminator2d(dim_in=dim_in, num_domains=num_domains,
max_conv_dim=max_conv_dim, repeat_num=repeat_num)
self.num_domains = num_domains
def forward(self, x, y):
return self.dis(x, y)
def classifier(self, x):
return self.cls.get_feature(x)
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class Discriminator2d(nn.Module):
def __init__(self, dim_in=48, num_domains=2, max_conv_dim=384, repeat_num=4):
super().__init__()
blocks = []
blocks += [nn.Conv2d(1, dim_in, 3, 1, 1)]
for lid in range(repeat_num):
dim_out = min(dim_in*2, max_conv_dim)
blocks += [ResBlk(dim_in, dim_out, downsample='half')]
dim_in = dim_out
blocks += [nn.LeakyReLU(0.2)]
blocks += [nn.Conv2d(dim_out, dim_out, 5, 1, 0)]
blocks += [nn.LeakyReLU(0.2)]
blocks += [nn.AdaptiveAvgPool2d(1)]
blocks += [nn.Conv2d(dim_out, num_domains, 1, 1, 0)]
self.main = nn.Sequential(*blocks)
def get_feature(self, x):
out = self.main(x)
out = out.view(out.size(0), -1) # (batch, num_domains)
return out
def forward(self, x, y):
out = self.get_feature(x)
idx = torch.LongTensor(range(y.size(0))).to(y.device)
out = out[idx, y] # (batch)
return out
# LSTM version of DiscriminatorLmk
class DiscriminatorLmk(nn.Module):
def __init__(self, num_domains=2):
super().__init__()
self.num_domians = num_domains
self.extract_feature = nn.Sequential(
nn.Linear(68*2,256),
nn.ReLU(True),
nn.Linear(256,512),
nn.ReLU(True),
)
self.lstm = nn.LSTM(512,256,3,batch_first = True)
# self.lstm_fc = nn.Sequential(
# nn.Linear(256, num_domains),
# nn.Tanh())
self.decision = nn.Sequential(
nn.Linear(256,1),
)
self.aggregator = nn.AdaptiveAvgPool1d(1)
# self.activate = nn.Sigmoid()
def get_feature(self, x):
b, l, m, d = x.shape
x = x.reshape(b, l, m*d).reshape(b*l, m*d)
lstm_input = self.extract_feature(x)
lstm_input = lstm_input.view(b, l, -1) # (batch, length, dims)
hidden = (torch.autograd.Variable(torch.zeros(3, lstm_input.size(0), 256).cuda()),# torch.Size([3, 16, 256])
torch.autograd.Variable(torch.zeros(3, lstm_input.size(0), 256).cuda()))# torch.Size([3, 16, 256])
lstm_out, _ = self.lstm(lstm_input, hidden) #torch.Size([16, 16, 256])
decision = self.decision(lstm_out.reshape(b*l, 256)).reshape(b, l)
return decision
def forward(self, x, length):
decision = self.get_feature(x)
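# Average the per-frame scores only over each sample's valid (unpadded) length,
# then concatenate the results into a single (batch,) decision vector.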
ds = []
for index, ll in enumerate(length):
ds.append(self.aggregator(decision[index, :ll].reshape(1,1,ll)))
decision = torch.cat(ds, 2).squeeze()
return decision
class DiscriminatorLmkTR(nn.Module):
def __init__(self, num_domains=2):
super().__init__()
self.num_domians = num_domains
self.extract_feature = nn.Sequential(
nn.Linear(68*2,256),
nn.ReLU(True),
nn.Linear(256,512),
nn.ReLU(True),
)
self.position_embedding = PositionEmbeddingSine(512, normalize=True)
self.metrans = MotEncoderTra(d_model=512, nhead=8, num_encoder_layers=3,
dim_feedforward=1024, dropout=0.1,
activation="relu", normalize_before=False)
# self.trans_fc = nn.Sequential(
# nn.Linear(256, num_domains),
# nn.Tanh())
self.decision = nn.Sequential(
nn.Linear(512,1),
)
self.aggregator = nn.AdaptiveAvgPool1d(1)
# self.activate = nn.Sigmoid()
for m in self.modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.xavier_uniform_(m.weight, gain=1)
if isinstance(m, nn.Linear):
# trunc_normal_(m.weight, std=.03)
nn.init.xavier_uniform_(m.weight, gain=1)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def get_feature(self, x, length_mots):
b, l, m, d = x.shape
x = x.reshape(b, l, m*d).reshape(b*l, m*d)
trans_input = self.extract_feature(x)
trans_input = trans_input.view(b, l, -1).permute(0, 2, 1).unsqueeze(2)
mask = torch.ones((b, l), dtype=torch.bool).to(trans_input.device)
for index, lm in enumerate(length_mots):
mask[index, :lm] = False
pos_emb = self.position_embedding(trans_input, mask.unsqueeze(1))
out = self.metrans(trans_input, mask, pos_emb)
decision = self.decision(out.reshape(b*l, 512)).reshape(b, l)
return decision
def forward(self, x, length_mots):
decision = self.get_feature(x, length_mots)
ds = []
for index, ll in enumerate(length_mots):
ds.append(self.aggregator(decision[index, :ll].reshape(1,1,ll)))
decision = torch.cat(ds, 2).squeeze()
return decision
class DiscriminatorLmkTR468(nn.Module):
def __init__(self, num_domains=2):
super().__init__()
self.num_domians = num_domains
self.extract_feature = nn.Sequential(
nn.Linear(468*2,256),
nn.ReLU(True),
nn.Linear(256,512),
nn.ReLU(True),
)
self.position_embedding = PositionEmbeddingSine(512, normalize=True)
self.metrans = MotEncoderTra(d_model=512, nhead=8, num_encoder_layers=3,
dim_feedforward=1024, dropout=0.1,
activation="relu", normalize_before=False)
self.tran_fc = nn.Sequential(
nn.Linear(512, num_domains),
nn.Tanh())
self.aggregator = nn.AdaptiveAvgPool1d(1)
# self.activate = nn.Sigmoid()
for m in self.modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.xavier_uniform_(m.weight, gain=1)
if isinstance(m, nn.Linear):
# trunc_normal_(m.weight, std=.03)
nn.init.xavier_uniform_(m.weight, gain=1)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def get_feature(self, x, length_mots):
b, l, m, d = x.shape
x = x.reshape(b, l, m*d).reshape(b*l, m*d)
trans_input = self.extract_feature(x)
trans_input = trans_input.view(b, l, -1).permute(0, 2, 1).unsqueeze(2)
mask = torch.ones((b, l), dtype=torch.bool).to(trans_input.device)
for index, lm in enumerate(length_mots):
mask[index, :lm] = False
pos_emb = self.position_embedding(trans_input, mask.unsqueeze(1))
out = self.metrans(trans_input, mask, pos_emb)
decision = self.tran_fc(out.reshape(b*l, 512)).reshape(b, l, -1)
return decision
def forward(self, x, y, length_mots):
dec_out = self.get_feature(x, length_mots)
# ds = []
# for index, ll in enumerate(length_mots):
# ds.append(self.aggregator(decision[index, :ll].reshape(1,1,ll)))
# decision = torch.cat(ds, 2).squeeze()
outs = []
for index, ll in enumerate(length_mots):
tmp = dec_out[index, :ll, :].permute(1,0)
outs.append(self.aggregator(tmp.reshape(1, self.num_domians, ll)))
out = torch.cat(outs, 0).squeeze()
idx = torch.LongTensor(range(y.size(0))).to(y.device)
out = out[idx, y] # (batch)
return out
# LSTM version of EmoClassifier
class EmoClassifier(nn.Module):
def __init__(self, num_domains=2):
super().__init__()
self.num_domians = num_domains
self.extract_feature = nn.Sequential(
nn.Linear(68*2,256),
nn.ReLU(True),
nn.Linear(256,512),
nn.ReLU(True),
)
self.lstm = nn.LSTM(512,256,3,batch_first = True)
self.lstm_fc = nn.Sequential(
nn.Linear(256, num_domains),
nn.Tanh())
self.aggregator = nn.AdaptiveAvgPool1d(1)
def get_feature(self, x):
b, | |
pymc3_random(pm.Normal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)
def test_truncated_normal(self):
def ref_rand(size, mu, sigma, lower, upper):
return st.truncnorm.rvs((lower-mu)/sigma, (upper-mu)/sigma, size=size, loc=mu, scale=sigma)
pymc3_random(pm.TruncatedNormal, {'mu': R, 'sigma': Rplusbig, 'lower':-Rplusbig, 'upper':Rplusbig},
ref_rand=ref_rand)
def test_skew_normal(self):
def ref_rand(size, alpha, mu, sigma):
return st.skewnorm.rvs(size=size, a=alpha, loc=mu, scale=sigma)
pymc3_random(pm.SkewNormal, {'mu': R, 'sigma': Rplus, 'alpha': R}, ref_rand=ref_rand)
def test_half_normal(self):
def ref_rand(size, tau):
return st.halfnorm.rvs(size=size, loc=0, scale=tau ** -0.5)
pymc3_random(pm.HalfNormal, {'tau': Rplus}, ref_rand=ref_rand)
def test_wald(self):
# Cannot do anything too exciting as scipy wald is a
# location-scale model of the *standard* wald with mu=1 and lam=1
def ref_rand(size, mu, lam, alpha):
return st.wald.rvs(size=size, loc=alpha)
pymc3_random(pm.Wald,
{'mu': Domain([1., 1., 1.]), 'lam': Domain(
[1., 1., 1.]), 'alpha': Rplus},
ref_rand=ref_rand)
def test_beta(self):
def ref_rand(size, alpha, beta):
return st.beta.rvs(a=alpha, b=beta, size=size)
pymc3_random(pm.Beta, {'alpha': Rplus, 'beta': Rplus}, ref_rand=ref_rand)
def test_exponential(self):
def ref_rand(size, lam):
return nr.exponential(scale=1. / lam, size=size)
pymc3_random(pm.Exponential, {'lam': Rplus}, ref_rand=ref_rand)
def test_laplace(self):
def ref_rand(size, mu, b):
return st.laplace.rvs(mu, b, size=size)
pymc3_random(pm.Laplace, {'mu': R, 'b': Rplus}, ref_rand=ref_rand)
def test_lognormal(self):
def ref_rand(size, mu, tau):
return np.exp(mu + (tau ** -0.5) * st.norm.rvs(loc=0., scale=1., size=size))
pymc3_random(pm.Lognormal, {'mu': R, 'tau': Rplusbig}, ref_rand=ref_rand)
def test_student_t(self):
def ref_rand(size, nu, mu, lam):
return st.t.rvs(nu, mu, lam**-.5, size=size)
pymc3_random(pm.StudentT, {'nu': Rplus, 'mu': R, 'lam': Rplus}, ref_rand=ref_rand)
def test_cauchy(self):
def ref_rand(size, alpha, beta):
return st.cauchy.rvs(alpha, beta, size=size)
pymc3_random(pm.Cauchy, {'alpha': R, 'beta': Rplusbig}, ref_rand=ref_rand)
def test_half_cauchy(self):
def ref_rand(size, beta):
return st.halfcauchy.rvs(scale=beta, size=size)
pymc3_random(pm.HalfCauchy, {'beta': Rplusbig}, ref_rand=ref_rand)
def test_gamma_alpha_beta(self):
def ref_rand(size, alpha, beta):
return st.gamma.rvs(alpha, scale=1. / beta, size=size)
pymc3_random(pm.Gamma, {'alpha': Rplusbig, 'beta': Rplusbig}, ref_rand=ref_rand)
def test_gamma_mu_sigma(self):
def ref_rand(size, mu, sigma):
return st.gamma.rvs(mu**2 / sigma**2, scale=sigma ** 2 / mu, size=size)
pymc3_random(pm.Gamma, {'mu': Rplusbig, 'sigma': Rplusbig}, ref_rand=ref_rand)
def test_inverse_gamma(self):
def ref_rand(size, alpha, beta):
return st.invgamma.rvs(a=alpha, scale=beta, size=size)
pymc3_random(pm.InverseGamma, {'alpha': Rplus, 'beta': Rplus}, ref_rand=ref_rand)
def test_pareto(self):
def ref_rand(size, alpha, m):
return st.pareto.rvs(alpha, scale=m, size=size)
pymc3_random(pm.Pareto, {'alpha': Rplusbig, 'm': Rplusbig}, ref_rand=ref_rand)
def test_ex_gaussian(self):
def ref_rand(size, mu, sigma, nu):
return nr.normal(mu, sigma, size=size) + nr.exponential(scale=nu, size=size)
pymc3_random(pm.ExGaussian, {'mu': R, 'sigma': Rplus, 'nu': Rplus}, ref_rand=ref_rand)
def test_vonmises(self):
def ref_rand(size, mu, kappa):
return st.vonmises.rvs(size=size, loc=mu, kappa=kappa)
pymc3_random(pm.VonMises, {'mu': R, 'kappa': Rplus}, ref_rand=ref_rand)
def test_triangular(self):
def ref_rand(size, lower, upper, c):
scale = upper - lower
c_ = (c - lower) / scale
return st.triang.rvs(size=size, loc=lower, scale=scale, c=c_)
pymc3_random(pm.Triangular, {'lower': Runif, 'upper': Runif + 3, 'c': Runif + 1}, ref_rand=ref_rand)
def test_flat(self):
with pm.Model():
f = pm.Flat('f')
with pytest.raises(ValueError):
f.random(1)
def test_half_flat(self):
with pm.Model():
f = pm.HalfFlat('f')
with pytest.raises(ValueError):
f.random(1)
def test_binomial(self):
pymc3_random_discrete(pm.Binomial, {'n': Nat, 'p': Unit}, ref_rand=st.binom.rvs)
def test_beta_binomial(self):
pymc3_random_discrete(pm.BetaBinomial, {'n': Nat, 'alpha': Rplus, 'beta': Rplus},
ref_rand=self._beta_bin)
def _beta_bin(self, n, alpha, beta, size=None):
return st.binom.rvs(n, st.beta.rvs(a=alpha, b=beta, size=size))
def test_bernoulli(self):
pymc3_random_discrete(pm.Bernoulli, {'p': Unit},
ref_rand=lambda size, p=None: st.bernoulli.rvs(p, size=size))
def test_poisson(self):
pymc3_random_discrete(pm.Poisson, {'mu': Rplusbig}, size=500, ref_rand=st.poisson.rvs)
def test_negative_binomial(self):
def ref_rand(size, alpha, mu):
return st.nbinom.rvs(alpha, alpha / (mu + alpha), size=size)
pymc3_random_discrete(pm.NegativeBinomial, {'mu': Rplusbig, 'alpha': Rplusbig},
size=100, fails=50, ref_rand=ref_rand)
def test_geometric(self):
pymc3_random_discrete(pm.Geometric, {'p': Unit}, size=500, fails=50, ref_rand=nr.geometric)
def test_discrete_uniform(self):
def ref_rand(size, lower, upper):
return st.randint.rvs(lower, upper + 1, size=size)
pymc3_random_discrete(pm.DiscreteUniform, {'lower': -NatSmall, 'upper': NatSmall},
ref_rand=ref_rand)
def test_discrete_weibull(self):
def ref_rand(size, q, beta):
u = np.random.uniform(size=size)
return np.ceil(np.power(np.log(1 - u) / np.log(q), 1. / beta)) - 1
pymc3_random_discrete(pm.DiscreteWeibull, {'q': Unit, 'beta': Rplusdunif},
ref_rand=ref_rand)
@pytest.mark.parametrize('s', [2, 3, 4])
def test_categorical_random(self, s):
def ref_rand(size, p):
return nr.choice(np.arange(p.shape[0]), p=p, size=size)
pymc3_random_discrete(pm.Categorical, {'p': Simplex(s)}, ref_rand=ref_rand)
def test_constant_dist(self):
def ref_rand(size, c):
return c * np.ones(size, dtype=int)
pymc3_random_discrete(pm.Constant, {'c': I}, ref_rand=ref_rand)
def test_mv_normal(self):
def ref_rand(size, mu, cov):
return st.multivariate_normal.rvs(mean=mu, cov=cov, size=size)
def ref_rand_tau(size, mu, tau):
return ref_rand(size, mu, linalg.inv(tau))
def ref_rand_chol(size, mu, chol):
return ref_rand(size, mu, np.dot(chol, chol.T))
def ref_rand_uchol(size, mu, chol):
return ref_rand(size, mu, np.dot(chol.T, chol))
for n in [2, 3]:
pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'cov': PdMatrix(n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand)
pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'tau': PdMatrix(n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_tau)
pymc3_random(pm.MvNormal, {'mu': Vector(R, n), 'chol': PdMatrixChol(n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_chol)
pymc3_random(
pm.MvNormal,
{'mu': Vector(R, n), 'chol': PdMatrixCholUpper(n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand_uchol,
extra_args={'lower': False}
)
def test_matrix_normal(self):
def ref_rand(size, mu, rowcov, colcov):
return st.matrix_normal.rvs(mean=mu, rowcov=rowcov, colcov=colcov, size=size)
# def ref_rand_tau(size, mu, tau):
# return ref_rand(size, mu, linalg.inv(tau))
def ref_rand_chol(size, mu, rowchol, colchol):
return ref_rand(size, mu, rowcov=np.dot(rowchol, rowchol.T),
colcov=np.dot(colchol, colchol.T))
def ref_rand_uchol(size, mu, rowchol, colchol):
return ref_rand(size, mu, rowcov=np.dot(rowchol.T, rowchol),
colcov=np.dot(colchol.T, colchol))
for n in [2, 3]:
pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'rowcov': PdMatrix(n), 'colcov': PdMatrix(n)},
size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand)
# pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'tau': PdMatrix(n)},
# size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_tau)
pymc3_random(pm.MatrixNormal, {'mu': RealMatrix(n, n), 'rowchol': PdMatrixChol(n), 'colchol': PdMatrixChol(n)},
size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_chol)
# pymc3_random(
# pm.MvNormal,
# {'mu': RealMatrix(n, n), 'rowchol': PdMatrixCholUpper(n), 'colchol': PdMatrixCholUpper(n)},
# size=n, valuedomain=RealMatrix(n, n), ref_rand=ref_rand_uchol,
# extra_args={'lower': False}
# )
def test_kronecker_normal(self):
def ref_rand(size, mu, covs, sigma):
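# Reference sampler: expand the Kronecker-structured covariance explicitly,
# cov = kron(covs[0], covs[1]) + sigma**2 * I, and draw from an ordinary MvNormal.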
cov = pm.math.kronecker(covs[0], covs[1]).eval()
cov += sigma**2 * np.identity(cov.shape[0])
return st.multivariate_normal.rvs(mean=mu, cov=cov, size=size)
def ref_rand_chol(size, mu, chols, sigma):
covs = [np.dot(chol, chol.T) for chol in chols]
return ref_rand(size, mu, covs, sigma)
def ref_rand_evd(size, mu, evds, sigma):
covs = []
for eigs, Q in evds:
covs.append(np.dot(Q, np.dot(np.diag(eigs), Q.T)))
return ref_rand(size, mu, covs, sigma)
sizes = [2, 3]
sigmas = [0, 1]
for n, sigma in zip(sizes, sigmas):
N = n**2
covs = [RandomPdMatrix(n), RandomPdMatrix(n)]
chols = list(map(np.linalg.cholesky, covs))
evds = list(map(np.linalg.eigh, covs))
dom = Domain([np.random.randn(N)*0.1], edges=(None, None), shape=N)
mu = Domain([np.random.randn(N)*0.1], edges=(None, None), shape=N)
std_args = {'mu': mu}
cov_args = {'covs': covs}
chol_args = {'chols': chols}
evd_args = {'evds': evds}
if sigma is not None and sigma != 0:
std_args['sigma'] = Domain([sigma], edges=(None, None))
else:
for args in [cov_args, chol_args, evd_args]:
args['sigma'] = sigma
pymc3_random(
pm.KroneckerNormal, std_args, valuedomain=dom,
ref_rand=ref_rand, extra_args=cov_args, model_args=cov_args)
pymc3_random(
pm.KroneckerNormal, std_args, valuedomain=dom,
ref_rand=ref_rand_chol, extra_args=chol_args,
model_args=chol_args)
pymc3_random(
pm.KroneckerNormal, std_args, valuedomain=dom,
ref_rand=ref_rand_evd, extra_args=evd_args,
model_args=evd_args)
def test_mv_t(self):
def ref_rand(size, nu, Sigma, mu):
normal = st.multivariate_normal.rvs(cov=Sigma, size=size).T
chi2 = st.chi2.rvs(df=nu, size=size)
return mu + np.sqrt(nu) * (normal / np.sqrt(chi2)).T
for n in [2, 3]:
pymc3_random(pm.MvStudentT,
{'nu': Domain([5, 10, 25, 50]), 'Sigma': PdMatrix(
n), 'mu': Vector(R, n)},
size=100, valuedomain=Vector(R, n), ref_rand=ref_rand)
def test_dirichlet(self):
def ref_rand(size, a):
return st.dirichlet.rvs(a, size=size)
for n in [2, 3]:
pymc3_random(pm.Dirichlet, {'a': Vector(Rplus, n)},
valuedomain=Simplex(n), size=100, ref_rand=ref_rand)
def test_multinomial(self):
def ref_rand(size, p, n):
return nr.multinomial(pvals=p, n=n, size=size)
for n in [2, 3]:
pymc3_random_discrete(pm.Multinomial, {'p': Simplex(n), 'n': Nat},
valuedomain=Vector(Nat, n), size=100, ref_rand=ref_rand)
def test_gumbel(self):
def ref_rand(size, mu, beta):
return st.gumbel_r.rvs(loc=mu, scale=beta, size=size)
pymc3_random(pm.Gumbel, {'mu': R, 'beta': Rplus}, ref_rand=ref_rand)
def test_logistic(self):
def ref_rand(size, mu, s):
return st.logistic.rvs(loc=mu, scale=s, size=size)
pymc3_random(pm.Logistic, {'mu': R, 's': Rplus}, ref_rand=ref_rand)
def test_logitnormal(self):
def ref_rand(size, mu, sigma):
return expit(st.norm.rvs(loc=mu, scale=sigma, size=size))
pymc3_random(pm.LogitNormal, {'mu': R, 'sigma': Rplus}, ref_rand=ref_rand)
@pytest.mark.xfail(condition=(theano.config.floatX == "float32"), reason="Fails on float32")
def test_interpolated(self):
for mu in R.vals:
for sigma in Rplus.vals:
#pylint: disable=cell-var-from-loop
def ref_rand(size):
return st.norm.rvs(loc=mu, scale=sigma, size=size)
class TestedInterpolated (pm.Interpolated):
def __init__(self, **kwargs):
x_points = np.linspace(mu - 5 * sigma, mu + 5 * sigma, 100)
pdf_points = st.norm.pdf(x_points, loc=mu, scale=sigma)
super().__init__(
x_points=x_points,
pdf_points=pdf_points,
**kwargs
)
pymc3_random(TestedInterpolated, {}, ref_rand=ref_rand)
@pytest.mark.skip('Wishart random sampling not implemented.\n'
'See https://github.com/pymc-devs/pymc3/issues/538')
def test_wishart(self):
# Wishart not currently recommended for use:
# https://github.com/pymc-devs/pymc3/issues/538
# for n in [2, 3]:
# pymc3_random_discrete(Wishart,
# {'n': Domain([2, 3, 4, 2000]) , 'V': PdMatrix(n) },
# valuedomain=PdMatrix(n),
# ref_rand=lambda n=None, V=None, size=None: \
# st.wishart(V, df=n, size=size))
pass
def test_lkj(self):
for n in [2, 10, 50]:
#pylint: disable=cell-var-from-loop
shape = n*(n-1)//2
def ref_rand(size, eta):
beta = eta - 1 + n/2
return (st.beta.rvs(size=(size, shape), a=beta, b=beta)-.5)*2
class TestedLKJCorr (pm.LKJCorr):
def __init__(self, **kwargs):
kwargs.pop('shape', None)
super().__init__(n=n, **kwargs)
pymc3_random(TestedLKJCorr,
{'eta': Domain([1., 10., 100.])},
size=10000//n,
ref_rand=ref_rand)
def test_normalmixture(self):
def ref_rand(size, w, mu, sigma):
component = np.random.choice(w.size, size=size, p=w)
return np.random.normal(mu[component], sigma[component], size=size)
pymc3_random(pm.NormalMixture, {'w': Simplex(2),
'mu': Domain([[.05, 2.5], [-5., 1.]], edges=(None, None)),
'sigma': Domain([[1, 1], [1.5, 2.]], edges=(None, None))},
extra_args={'comp_shape': 2},
size=1000,
ref_rand=ref_rand)
pymc3_random(pm.NormalMixture, {'w': Simplex(3),
'mu': Domain([[-5., 1., 2.5]], edges=(None, None)),
'sigma': Domain([[1.5, 2., 3.]], edges=(None, None))},
extra_args={'comp_shape': 3},
size=1000,
ref_rand=ref_rand)
def test_mixture_random_shape():
# test the shape broadcasting in mixture random
y = np.concatenate([nr.poisson(5, size=10),
nr.poisson(9, size=10)])
with pm.Model() as m:
comp0 = pm.Poisson.dist(mu=np.ones(2))
w0 = pm.Dirichlet('w0', a=np.ones(2))
like0 = pm.Mixture('like0',
w=w0,
comp_dists=comp0,
observed=y)
comp1 = pm.Poisson.dist(mu=np.ones((20, 2)),
shape=(20, 2))
w1 = pm.Dirichlet('w1', | |
fit_reg, mask_bad_pix, mask_emline, user_mask, mask_metal, cosmology, run_dir, verbose=verbose, plot=True)
# non-SDSS spectrum
elif (not sdss_spec):
lam_gal,galaxy,noise,z,ebv,velscale,fwhm_gal,fit_mask = prepare_user_spec(fits_file, spec, wave, err, fwhm, z, ebv, fit_reg, mask_emline, user_mask, mask_metal, cosmology, run_dir, verbose=verbose, plot=True)
binnum = spaxelx = spaxely = None
# Write to Log
write_log((fit_options,mcmc_options,comp_options,losvd_options,host_options,power_options,poly_options,opt_feii_options,uv_iron_options,balmer_options,
plot_options,output_options),'fit_information',run_dir)
####################################################################################################################################################################################
# Generate host-galaxy template
if (fit_host==True) & (lam_gal[0]>1680.2):
host_template = generate_host_template(lam_gal, host_options, fwhm_gal,fit_mask, velscale, verbose=verbose)
elif (fit_host==True) & (lam_gal[0]<1680.2):
host_template = None
fit_host = False
comp_options["fit_host"]=False
if verbose:
print('\n - Host galaxy SSP template disabled because template is outside of fitting region.')
elif (fit_host==False):
host_template = None
# Load stellar templates if fit_losvd=True
if (fit_losvd==True):
stel_templates = prepare_stellar_templates(galaxy, lam_gal, fit_reg, velscale, fwhm_gal,fit_mask, losvd_options, run_dir)
elif (fit_losvd==False):
stel_templates = None
# For the Optical FeII, UV Iron, and Balmer templates, we disable the templates if the fitting region
# is entirely outside of the range of the templates. This saves resources.
# Check conditions for and generate Optical FeII templates
# Veron-Cetty et al. (2004)
if (fit_opt_feii==True) & (opt_feii_options["opt_template"]["type"]=="VC04") & (lam_gal[-1]>=3400.0) & (lam_gal[0]<=7200.0):
opt_feii_templates = initialize_opt_feii(lam_gal,opt_feii_options,fwhm_gal,fit_mask,velscale)
elif (fit_opt_feii==True) & (opt_feii_options["opt_template"]["type"]=="VC04") & ((lam_gal[-1]<3400.0) | (lam_gal[0]>7200.0)):
if verbose:
print('\n - Optical FeII template disabled because template is outside of fitting region.')
fit_opt_feii = False
comp_options["fit_opt_feii"]=False
opt_feii_templates = None
write_log((),'update_opt_feii',run_dir)
# Kovacevic et al. (2010)
elif (fit_opt_feii==True) & (opt_feii_options["opt_template"]["type"]=="K10") & (lam_gal[-1]>=4400.0) & (lam_gal[0]<=5500.0):
opt_feii_templates = initialize_opt_feii(lam_gal,opt_feii_options,fwhm_gal,fit_mask,velscale)
elif (fit_opt_feii==True) & (opt_feii_options["opt_template"]["type"]=="K10") & ((lam_gal[-1]<4400.0) | (lam_gal[0]>5500.0)):
if verbose:
print('\n - Optical FeII template disabled because template is outside of fitting region.')
opt_feii_templates = None
fit_opt_feii = False
comp_options["fit_opt_feii"]=False
opt_feii_templates = None
write_log((),'update_opt_feii',run_dir)
elif (fit_opt_feii==False):
opt_feii_templates = None
# Generate UV Iron template - Vestergaard & Wilkes (2001)
if (fit_uv_iron==True) & (lam_gal[-1]>=1074.0) & (lam_gal[0]<=3100.0):
uv_iron_template = initialize_uv_iron(lam_gal,uv_iron_options,fwhm_gal,fit_mask,velscale)
elif (fit_uv_iron==True) & ((lam_gal[-1]<1074.0) | (lam_gal[0]>3100.0)):
if verbose:
print('\n - UV Iron template disabled because template is outside of fitting region.')
uv_iron_template = None
fit_uv_iron = False
comp_options["fit_uv_iron"]=False
uv_iron_template = None
write_log((),'update_uv_iron',run_dir)
elif (fit_uv_iron==False):
uv_iron_template = None
# Generate Balmer continuum
if (fit_balmer==True) & (lam_gal[0]<3500.0):
balmer_template = initialize_balmer(lam_gal,balmer_options,fwhm_gal,fit_mask,velscale)
elif (fit_balmer==True) & (lam_gal[0]>=3500.0):
if verbose:
print('\n - Balmer continuum disabled because template is outside of fitting region.')
balmer_template = None
fit_balmer = False
comp_options["fit_balmer"]=False
balmer_template = None
write_log((),'update_balmer',run_dir)
elif (fit_balmer==False):
balmer_template = None
####################################################################################################################################################################################
# Initialize maximum likelihood parameters
if verbose:
print('\n Initializing parameters for Maximum Likelihood Fitting.')
print('----------------------------------------------------------------------------------------------------')
param_dict, line_list, combined_line_list, soft_cons = initialize_pars(lam_gal,galaxy,noise,fit_reg,fwhm_gal,fit_mask,velscale,
comp_options,user_lines,user_constraints,combined_lines,losvd_options,host_options,power_options,poly_options,
opt_feii_options,uv_iron_options,balmer_options,
run_dir,fit_type='init',fit_stat=fit_stat,
fit_opt_feii=fit_opt_feii,fit_uv_iron=fit_uv_iron,fit_balmer=fit_balmer,
fit_losvd=fit_losvd,fit_host=fit_host,fit_power=fit_power,fit_poly=fit_poly,
fit_narrow=fit_narrow,fit_broad=fit_broad,fit_outflow=fit_outflow,fit_absorp=fit_absorp,
tie_line_fwhm=tie_line_fwhm,tie_line_voff=tie_line_voff,verbose=verbose)
# Output all free parameters of fit prior to fitting (useful for diagnostics)
if output_pars and verbose:
output_free_pars(line_list,param_dict,soft_cons)
write_log((line_list,param_dict,soft_cons),'output_line_list',run_dir)
return
elif not output_pars and verbose:
output_free_pars(line_list,param_dict,soft_cons)
write_log((line_list,param_dict,soft_cons),'output_line_list',run_dir)
elif not output_pars and not verbose:
write_log((line_list,param_dict,soft_cons),'output_line_list',run_dir)
#### Line Testing ################################################################################################################################################################################
if (test_line["bool"]==True):
# If line test, check to make sure line is in line list
if (isinstance(test_line["line"],str)) and (test_line["line"] not in line_list):
shutil.rmtree(run_dir)
print("\n Line to test not found in line list! Make sure line is within fitting region for test.\n")
return
elif (isinstance(test_line["line"],list)) and not (np.all([False if line not in line_list else True for line in test_line["line"]])):
shutil.rmtree(run_dir)
print("\n Line to test not found in line list! Make sure line is within fitting region for test.\n")
return
if verbose:
print("\n Testing for %s" % (test_line["line"]))
line_test(param_dict,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
z,
cosmology,
fit_reg,
user_lines,
user_constraints,
combined_lines,
test_line,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type='init',
fit_stat=fit_stat,
output_model=False,
test_outflows=False,
n_basinhop=n_basinhop,
max_like_niter=max_like_niter,
verbose=verbose,
binnum=binnum,
spaxelx=spaxelx,
spaxely=spaxely)
# Exit BADASS
print(' - Line testing complete for %s! \n' % fits_file.parent.name)
return
####################################################################################################################################################################################
#### Outflow Testing ################################################################################################################################################################################
if (test_outflows==True):
# If test_outflow, check to make sure the line list has outflow lines in it
if (len([line for line in line_list if line_list[line]["line_type"]=="out"])==0):
shutil.rmtree(run_dir)
print("\n There are no outflow lines in the line list to test! Make sure fit_outflow = True and are within fitting range.\n")
return
if verbose:
print("\n Testing for outflows...")
line_test(param_dict,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
z,
cosmology,
fit_reg,
user_lines,
user_constraints,
combined_lines,
test_line,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type='init',
fit_stat=fit_stat,
output_model=False,
test_outflows=True,
n_basinhop=n_basinhop,
max_like_niter=max_like_niter,
verbose=verbose,
binnum=binnum,
spaxelx=spaxelx,
spaxely=spaxely)
# Exit BADASS
print(' - Outflow testing complete for %s! \n' % fits_file.parent.name)
return
####################################################################################################################################################################################
# Peform maximum likelihood
result_dict, comp_dict = max_likelihood(param_dict,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
z,
cosmology,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
velscale,
run_dir,
fit_type='init',
fit_stat=fit_stat,
output_model=False,
test_outflows=False,
n_basinhop=n_basinhop,
max_like_niter=max_like_niter,
verbose=verbose)
if (mcmc_fit==False):
# If not performing MCMC fitting, terminate BADASS here and write
# parameters, uncertainties, and components to a fits file
# Write final parameters to file
# Header information
header_dict = {}
header_dict["z_sdss"] = z
header_dict["med_noise"] = np.median(noise)
header_dict["velscale"] = velscale
#
write_max_like_results(result_dict,comp_dict,header_dict,fit_mask,run_dir,binnum,spaxelx,spaxely)
# Make interactive HTML plot
if plot_HTML:
plotly_best_fit(fits_file.parent.name,line_list,fit_mask,run_dir)
print(' - Done fitting %s! \n' % fits_file.parent.name)
sys.stdout.flush()
return
#######################################################################################################
# Initialize parameters for emcee
if verbose:
print('\n Initializing parameters for MCMC.')
print('----------------------------------------------------------------------------------------------------')
param_dict, line_list, combined_line_list, soft_cons = initialize_pars(lam_gal,galaxy,noise,fit_reg,fwhm_gal,fit_mask,velscale,
comp_options,user_lines,user_constraints,combined_lines,losvd_options,host_options,power_options,poly_options,
opt_feii_options,uv_iron_options,balmer_options,
run_dir,fit_type='final',fit_stat=fit_stat,
fit_opt_feii=fit_opt_feii,fit_uv_iron=fit_uv_iron,fit_balmer=fit_balmer,
fit_losvd=fit_losvd,fit_host=fit_host,fit_power=fit_power,fit_poly=fit_poly,
fit_narrow=fit_narrow,fit_broad=fit_broad,fit_outflow=fit_outflow,fit_absorp=fit_absorp,
tie_line_fwhm=tie_line_fwhm,tie_line_voff=tie_line_voff,
remove_lines=False,verbose=verbose)
#
if verbose:
output_free_pars(line_list,param_dict,soft_cons)
#
# Replace initial conditions with best fit max. likelihood parameters (the old switcharoo)
for key in result_dict:
if key in param_dict:
param_dict[key]['init']=result_dict[key]['med']
# We make an exception for the FeII temperature if the Kovacevic et al. (2010) templates are used, because
# the temperature is not very sensitive above 8,000 K. This can cause the temperature parameter to blow up
# during the initial max. likelihood fitting, leaving it initialized for MCMC at an
# unreasonable value. We therefore re-initialize the FeII temp start value to 10,000 K.
if 'feii_temp' in param_dict:
param_dict['feii_temp']['init']=10000.0
#######################################################################################################
# Run emcee
if verbose:
print('\n Performing MCMC iterations...')
print('----------------------------------------------------------------------------------------------------')
# Extract relevant stuff from dicts
param_names = [key for key in param_dict ]
init_params = [param_dict[key]['init'] for key in param_dict ]
bounds = [param_dict[key]['plim'] for key in param_dict ]
# Check number of walkers
# If number of walkers < 2*(# of params) (the minimum required), then set it to that
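# e.g., a fit with 25 free parameters requires at least nwalkers = 50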
if nwalkers<2*len(param_names):
if verbose:
print('\n Number of walkers < 2 x (# of parameters)! Setting nwalkers = %d' % (2.0*len(param_names)))
nwalkers = int(2.0*len(param_names))
ndim, nwalkers = len(init_params), nwalkers # minimum walkers = 2*len(params)
# initialize walker starting positions based on parameter estimation from Maximum Likelihood fitting
pos = initialize_walkers(init_params,param_names,bounds,soft_cons,nwalkers,ndim)
# Run emcee
# args = arguments of lnprob (log-probability function)
lnprob_args=(param_names,
bounds,
line_list,
combined_line_list,
soft_cons,
lam_gal,
galaxy,
noise,
comp_options,
losvd_options,
host_options,
power_options,
poly_options,
opt_feii_options,
uv_iron_options,
balmer_options,
outflow_test_options,
host_template,
opt_feii_templates,
uv_iron_template,
balmer_template,
stel_templates,
fwhm_gal,
fit_mask,
fit_stat,
velscale,
run_dir)
emcee_data = run_emcee(pos,ndim,nwalkers,run_dir,lnprob_args,init_params,param_names,
auto_stop,conv_type,min_samp,ncor_times,autocorr_tol,write_iter,write_thresh,
burn_in,min_iter,max_iter,verbose=verbose)
sampler_chain, burn_in, flux_blob, eqwidth_blob, cont_flux_blob, int_vel_disp_blob, log_like_blob = emcee_data
# Add chains to each parameter in param dictionary
for k,key in enumerate(param_names):
if key in param_dict:
param_dict[key]['chain']=sampler_chain[:,:,k]
if verbose:
print('\n > Fitting MCMC chains...')
# The following functions produce parameter, flux, luminosity, and equivalent-width histograms and chains from the MCMC sampling.
# Free parameter values, uncertainties, and plots
param_dict = param_plots(param_dict,burn_in,run_dir,plot_param_hist=plot_param_hist,verbose=verbose)
# Add tied parameters
param_dict = add_tied_parameters(param_dict, line_list)
# Log Like Function values plots
log_like_dict = log_like_plot(log_like_blob, burn_in, nwalkers, run_dir, plot_param_hist=plot_param_hist,verbose=verbose)
# Flux values, uncertainties, and plots
flux_dict = flux_plots(flux_blob, burn_in, nwalkers, run_dir, plot_flux_hist=plot_flux_hist,verbose=verbose)
# Luminosity values, uncertainties, and plots
lum_dict = lum_plots(flux_dict, burn_in, nwalkers, z, run_dir, H0=cosmology["H0"],Om0=cosmology["Om0"],plot_lum_hist=plot_lum_hist,verbose=verbose)
# Continuum luminosity
cont_lum_dict = cont_lum_plots(cont_flux_blob, burn_in, nwalkers, z, run_dir, H0=cosmology["H0"],Om0=cosmology["Om0"],plot_lum_hist=plot_lum_hist,verbose=verbose)
# Equivalent widths, uncertainties, and plots
eqwidth_dict = eqwidth_plots(eqwidth_blob, burn_in, nwalkers, run_dir, plot_eqwidth_hist=plot_eqwidth_hist, verbose=verbose)
# Auxiliary Line Dict (Combined FWHMs and Fluxes of MgII and CIV)
int_vel_disp_dict = int_vel_disp_plots(int_vel_disp_blob, burn_in, nwalkers, z, run_dir, H0=cosmology["H0"],Om0=cosmology["Om0"],plot_param_hist=plot_param_hist,verbose=verbose)
# If stellar velocity is fit, estimate the systemic velocity of the galaxy;
# SDSS redshifts are based on average emission line redshifts.
extra_dict = {}
extra_dict["LOG_LIKE"] = log_like_dict
if ('stel_vel' in param_dict):
if verbose:
print('\n > Estimating systemic velocity of galaxy...')
z_dict = | |
from __future__ import division, unicode_literals, absolute_import
import numpy as np
import signal
import tracemalloc
import os
import logging
logger = logging.getLogger(__name__)
import dynesty
from dynesty.utils import unitcheck
from ..utils import estimate_nmcmc, list_2_dict
from ...pipe import data_container, display_memory_usage
def void():
pass
def reflect(u):
idxs_even = np.mod(u, 2) < 1
u[idxs_even] = np.mod(u[idxs_even], 1)
u[~idxs_even] = 1 - np.mod(u[~idxs_even], 1)
return u
def initialize_proposals(maxmcmc, minmcmc, nact):
# initialize proposals
return BajesDynestyProposal(maxmcmc, walks=minmcmc, nact=nact)
def resample(samples, weights):
if abs(np.sum(weights) - 1.) > 1e-30:
# Guarantee that the weights will sum to 1.
weights = np.array(weights) / np.sum(weights)
# Make N subdivisions and choose positions with a consistent random offset.
nsamples = len(weights)
positions = (np.random.random() + np.arange(nsamples)) / nsamples
# Resample the data.
idx = []
cumulative_sum = np.cumsum(weights)
i, j = 0, 0
while i < nsamples:
if positions[i] < cumulative_sum[j]:
idx.append(j)
i += 1
else:
j += 1
idx = np.array(idx, dtype=int)
return samples[idx]
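# Illustrative sketch (not part of the original module): resample() above performs
# systematic resampling, using a single random offset so that a sample with weight w_i
# appears roughly w_i * N times in the output. The toy call below is hypothetical and
# only demonstrates the expected shapes.
def _example_resample():
    toy_samples = np.arange(10.0).reshape(10, 1)   # 10 "samples", one parameter each
    raw_weights = np.linspace(1.0, 10.0, 10)       # un-normalized importance weights
    equally_weighted = resample(toy_samples, raw_weights / raw_weights.sum())
    return equally_weighted                        # shape (10, 1); heavier samples repeated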
def get_prior_samples_dynesty(nlive, ndim, like_fn, ptform_fn):
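# Rejection-sample the prior: draw unit-cube points, map them through the prior
# transform, and keep only draws with a finite log-likelihood until nlive valid
# live points have been collected.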
n = 0
u = []
v = []
logl = []
while n < nlive:
_u = np.random.uniform(0,1,ndim)
_v = ptform_fn(_u)
_l = like_fn(_v)
if not np.isinf(_l):
u.append(_u)
v.append(_v)
logl.append(_l)
n +=1
return [np.array(u), np.array(v), np.array(logl)]
class SamplerNest(object):
def __init__(self, posterior,
nlive, tolerance=0.1, ncheckpoint=0,
# bounding
bound_method='multi', vol_check=8., vol_dec=0.5,
# update
bootstrap=0, enlarge=1.5, facc=0.5, update_interval=None,
# proposal
proposals=None, nact = 5., maxmcmc=4096, minmcmc=32,
# first update
first_min_ncall = None, first_min_eff = 10,
# others
nprocs=None, pool=None, use_slice=False, use_gw=False,
outdir='./', resume='/resume.pkl', seed=None, **kwargs):
self.resume = resume
self.outdir = outdir
# restore inference from existing container
if os.path.exists(self.outdir + self.resume):
self.restore_inference(pool)
# initialize a new inference
else:
# initialize signal
try:
signal.signal(signal.SIGTERM, self.store_inference_and_exit)
signal.signal(signal.SIGINT, self.store_inference_and_exit)
signal.signal(signal.SIGALRM, self.store_inference_and_exit)
except AttributeError:
logger.warning("Impossible to set signal attributes.")
# initialize nested parameters
self.nlive = nlive
self.tol = tolerance
# auxiliary arguments
self.names = posterior.prior.names
self.ndim = len(self.names)
self.log_prior_fn = posterior.log_prior
if ncheckpoint == 0:
# disable resume
logger.info("Disabling checkpoint ...")
self.ncheckpoint = 100 # print step
self.store_flag = False
else:
# enable resume
logger.info("Enabling checkpoint ...")
self.ncheckpoint = ncheckpoint
self.store_flag = True
# initialize seed
if seed == None:
import time
self.seed = int(time.time())
else:
self.seed = seed
np.random.seed(self.seed)
if self.nlive < self.ndim*(self.ndim-1)//2:
logger.warning("Given number of live points < Ndim*(Ndim-1)/2. This may generate problems in the exploration of the parameters space.")
# set up periodic and reflective boundaries
periodic_inds = np.concatenate(np.where(np.array(posterior.prior.periodics) == 1))
reflective_inds = np.concatenate(np.where(np.array(posterior.prior.periodics) == 0))
# initialize proposals
if proposals == None:
logger.info("Initializing proposal methods ...")
proposals = initialize_proposals(maxmcmc, minmcmc, nact)
if first_min_ncall == None:
first_min_ncall = 2 * nlive
if nprocs == None:
nprocs = 1
# initialize keyword args for dynesty
sampler_kwargs = { 'ndim': self.ndim,
'nlive': nlive,
'bound': bound_method,
'sample': 'rwalk',
'periodic': periodic_inds,
'reflective': reflective_inds,
'facc': facc,
'vol_check': vol_check,
'vol_dec': vol_dec,
'walks': minmcmc,
'enlarge': enlarge,
'bootstrap': bootstrap,
'pool': pool,
'queue_size': max(nprocs-1,1),
'update_interval': update_interval,
'first_update': {'min_ncall':first_min_ncall, 'min_eff': first_min_eff},
'use_pool': {'prior_transform': True,'loglikelihood': True, 'propose_point': True,'update_bound': True}
}
like_fn = posterior.log_like
ptform_fn = posterior.prior_transform
self.sampler = self.initialize_sampler(like_fn, ptform_fn, sampler_kwargs)
# clean up sampler
del self.sampler.cite
del self.sampler.kwargs['cite']
self.sampler.rstate = np.random
# set proposal
self.sampler.evolve_point = proposals.propose
def __getstate__(self):
self_dict = self.__dict__.copy()
# if 'sampler' in list(self_dict.keys()):
# self_dict['sampler'].pool = None
# self_dict['sampler'].M = None
# self_dict['sampler'].rstate = None
return self_dict
def initialize_sampler(self, like_fn, ptform_fn, kwargs):
# extract prior samples, ensuring finite logL
logger.info("Extracting prior samples ...")
live_points = get_prior_samples_dynesty(kwargs['nlive'], kwargs['ndim'], like_fn, ptform_fn)
kwargs['live_points'] = live_points
# initialize dynesty sampler
logger.info("Initializing nested sampler ...")
sampler = dynesty.NestedSampler(loglikelihood=like_fn, prior_transform=ptform_fn, **kwargs)
del sampler._PROPOSE
del sampler._UPDATE
return sampler
def store_inference_and_exit(self, signum=None, frame=None):
# exit function when signal is revealed
logger.info("Run interrupted by signal {}, checkpoint and exit.".format(signum))
os._exit(signum)
def restore_inference(self, pool):
# extract container
logger.info("Restoring inference from existing container ...")
dc = data_container(self.outdir + self.resume)
container = dc.load()
# sampler check
if container.tag != 'nest':
logger.error("Container carries a {} inference, while NEST was requested.".format(container.tag.upper()))
raise AttributeError("Container carries a {} inference, while NEST was requested.".format(container.tag.upper()))
previous_inference = container.inference
# extract previous variables and methods
for kw in list(previous_inference.__dict__.keys()):
self.__setattr__(kw, previous_inference.__dict__[kw])
# re-initialize pool
self.sampler.pool = pool
self.sampler.M = pool.map
# re-initialize seed
        if self.seed is None:
import time
self.seed = int(time.time())
np.random.seed(self.seed)
self.sampler.rstate = np.random
# re-initialize signal
try:
signal.signal(signal.SIGTERM, self.store_inference_and_exit)
signal.signal(signal.SIGINT, self.store_inference_and_exit)
signal.signal(signal.SIGALRM, self.store_inference_and_exit)
except AttributeError:
logger.warning("Impossible to set signal attributes.")
def store_inference(self):
# save inference in pickle file
dc = data_container(self.outdir+self.resume)
dc.store('tag', 'nest')
dc.store('inference', self)
dc.save()
def run(self):
# run the sampler
logger.info("Running {} live points ...".format(self.nlive))
for results in self.sampler.sample(dlogz=self.tol,save_samples=True,add_live=False):
if self.sampler.it%self.ncheckpoint==0:
(worst, ustar, vstar, loglstar, logvol, logwt, logz, logzvar, h, nc, worst_it, boundidx, bounditer, eff, delta_logz) = results
self.update_sampler([eff/100.,nc,loglstar,logz,h,delta_logz])
if tracemalloc.is_tracing():
display_memory_usage(tracemalloc.take_snapshot())
tracemalloc.clear_traces()
# add live points to nested samples
logger.info("Adding live points in nested samples")
self.sampler.add_final_live(print_progress=False)
# final store inference
self.store_inference()
def update_sampler(self, args):
acc, nc, logl, logz, h, d_logz = args
# store inference
if self.store_flag:
self.store_inference()
logger.info(" - it : {:d} - eff : {:.3f} - ncall : {:.0f} - logL : {:.3g} - logLmax : {:.3g} - logZ : {:.3g} - H : {:.3g} - dlogZ : {:.3g}".format(self.sampler.it,acc,nc,logl,np.max(self.sampler.live_logl),logz,h,d_logz))
def get_posterior(self):
self.results = self.sampler.results
self.nested_samples = self.results.samples
logger.info(" - number of nested samples : {}".format(len(self.nested_samples)))
# extract posteriors
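        # Each nested sample is importance-weighted by w_i = exp(logwt_i - logZ_final),
        # so resampling with these weights below yields equally-weighted posterior samples.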
ns = []
wt = []
scale = np.array(self.sampler.saved_scale)
for i in range(len(self.nested_samples)):
# start appending from the first update
# if scale[i] < 1. :
this_params = list_2_dict(self.nested_samples[i], self.names)
logpr = self.log_prior_fn(this_params)
            logl = float(self.results.logl[i])
            ns.append(np.append(self.nested_samples[i], [logl, logpr]))
            wt.append(float(self.results.logwt[i]-self.results.logz[-1]))
ns = np.array(ns)
wt = np.exp(np.array(wt))
names = np.append(self.names , ['logL', 'logPrior'])
# resample nested samples into posterior samples
self.posterior_samples = resample(ns, wt)
self.real_nout = len(self.posterior_samples)
# extract evidence
self.logZ = np.array(self.results.logz)
self.logZerr = self.results.logzerr
logger.info(" - number of posterior samples : {}".format(self.real_nout))
post_file = open(self.outdir + '/posterior.dat', 'w')
post_file.write('#')
for n in range(self.ndim+2):
post_file.write('{}\t'.format(names[n]))
post_file.write('\n')
for i in range(self.real_nout):
for j in range(self.ndim+2):
post_file.write('{}\t'.format(self.posterior_samples[i][j]))
post_file.write('\n')
post_file.close()
evidence_file = open(self.outdir + '/evidence.dat', 'w')
evidence_file.write('#\tlogX\tlogZ\tlogZerr\n')
for xi,zi,ei in zip(self.results.logvol,self.logZ,self.logZerr):
evidence_file.write('{}\t{}\t{}\n'.format(xi,zi,ei))
evidence_file.close()
def make_plots(self):
try:
import matplotlib.pyplot as plt
except Exception:
logger.warning("Impossible to produce standard plots. Cannot import matplotlib.")
try:
fig = plt.figure()
plt.plot(self.results.logvol, self.results.logl)
plt.xlim(( np.min(self.results.logvol),np.max(self.results.logvol) ))
plt.ylim((0.,1.1*(np.max(self.results.logl))))
plt.ylabel('lnL - lnZnoise')
plt.xlabel('lnX')
plt.savefig(self.outdir+'/lnL_lnX.png', dpi=200)
plt.close()
fig = plt.figure()
plt.fill_between(self.results.logvol, self.logBF-self.logZerr, self.logBF+self.logZerr, alpha=0.6, color='royalblue')
plt.plot(self.results.logvol, self.logBF, color='navy', label='logBF')
plt.plot(self.results.logvol, self.results.logl, color='slateblue', label='logL', ls='--')
plt.xlim(( np.min(self.results.logvol),np.max(self.results.logvol) ))
ylim_max = np.max([ np.max(self.logBF) ,np.max(self.results.logl)])
plt.ylim((0.,1.2*ylim_max))
plt.xlabel('logX')
plt.savefig(self.outdir+'/lnBF_lnX.png', dpi=200)
plt.close()
except Exception:
pass
class SamplerDyNest(SamplerNest):
def __init__(self, posterior, nbatch=512, **kwargs):
# initialize dynamic nested parameters
self.nbatch = nbatch
self.init_flag = False
# initialize dynesty inference
super(SamplerDyNest, self).__init__(posterior, **kwargs)
# extract prior samples, ensuring finite logL
logger.info("Extracting prior samples ...")
self.p0 = get_prior_samples_dynesty(kwargs['nlive'], self.ndim, self.sampler.loglikelihood, self.sampler.prior_transform)
# set customized proposal
dynesty.dynesty._SAMPLING["rwalk"] = self.sampler.evolve_point
dynesty.nestedsamplers._SAMPLING["rwalk"] = self.sampler.evolve_point
def __getstate__(self):
# get __dict__ of parent class
inher_dict = SamplerNest.__getstate__(self)
# get __dict__ of this class
self_dict = self.__dict__.copy()
# merge them
full_dict = {**inher_dict, **self_dict}
# if 'sampler' in list(full_dict.keys()):
# full_dict['sampler'].pool = None
# full_dict['sampler'].M = None
# full_dict['sampler'].rstate = None
return full_dict
def initialize_sampler(self, like_fn, ptform_fn, kwargs):
logger.info("Initializing nested sampler ...")
return dynesty.DynamicNestedSampler(like_fn, ptform_fn, **kwargs)
def restore_inference(self, pool):
# extract container
logger.info("Restoring inference from existing container ...")
dc = data_container(self.outdir + self.resume)
container = dc.load()
# sampler check
if container.tag != 'dynest':
logger.error("Container carries a {} inference, while DYNEST was requested.".format(container.tag.upper()))
raise AttributeError("Container carries a {} inference, while DYNEST was requested.".format(container.tag.upper()))
previous_inference = container.inference
# extract previous variables and methods
for kw in list(previous_inference.__dict__.keys()):
self.__setattr__(kw, previous_inference.__dict__[kw])
# re-initialize pool
self.sampler.pool = pool
self.sampler.M = pool.map
# re-initialize seed
        if self.seed is None:
import time
self.seed = int(time.time())
np.random.seed(self.seed)
self.sampler.rstate = np.random
# re-initialize signal
try:
signal.signal(signal.SIGTERM, self.store_inference_and_exit)
signal.signal(signal.SIGINT, self.store_inference_and_exit)
signal.signal(signal.SIGALRM, self.store_inference_and_exit)
except AttributeError:
logger.warning("Impossible to set signal attributes.")
def store_inference(self):
# save inference in pickle file
dc = data_container(self.outdir+self.resume)
dc.store('tag', 'dynest')
dc.store('inference', self)
dc.save()
def update_sampler(self, args):
acc, | |
fk, **kwargs):
"""
Find a related item by id for tags.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_tags_fk_get_with_http_info(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for tags (required)
:return: Tag
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'fk']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_tags_fk_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_tags_fk_get`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_tags_fk_get`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_designs_nk_tags_fk_get`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/tags/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Tag',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_tags_fk_put(self, id, nk, fk, **kwargs):
"""
Update a related item by id for tags.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_tags_fk_put(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for tags (required)
:param Tag data:
:return: Tag
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_tags_fk_put_with_http_info(id, nk, fk, **kwargs)
else:
(data) = self.portals_id_designs_nk_tags_fk_put_with_http_info(id, nk, fk, **kwargs)
return data
def portals_id_designs_nk_tags_fk_put_with_http_info(self, id, nk, fk, **kwargs):
"""
Update a related item by id for tags.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_tags_fk_put_with_http_info(id, nk, fk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str fk: Foreign key for tags (required)
:param Tag data:
:return: Tag
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'fk', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_tags_fk_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_tags_fk_put`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_tags_fk_put`")
# verify the required parameter 'fk' is set
if ('fk' not in params) or (params['fk'] is None):
raise ValueError("Missing the required parameter `fk` when calling `portals_id_designs_nk_tags_fk_put`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/tags/{fk}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
if 'fk' in params:
path_params['fk'] = params['fk']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Tag',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_tags_get(self, id, nk, **kwargs):
"""
Queries tags of Design.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_tags_get(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str filter:
:return: list[Tag]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_tags_get_with_http_info(id, nk, **kwargs)
else:
(data) = self.portals_id_designs_nk_tags_get_with_http_info(id, nk, **kwargs)
return data
def portals_id_designs_nk_tags_get_with_http_info(self, id, nk, **kwargs):
"""
Queries tags of Design.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_tags_get_with_http_info(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param str filter:
:return: list[Tag]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'nk', 'filter']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_designs_nk_tags_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_designs_nk_tags_get`")
# verify the required parameter 'nk' is set
if ('nk' not in params) or (params['nk'] is None):
raise ValueError("Missing the required parameter `nk` when calling `portals_id_designs_nk_tags_get`")
collection_formats = {}
resource_path = '/Portals/{id}/designs/{nk}/tags'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'nk' in params:
path_params['nk'] = params['nk']
query_params = {}
if 'filter' in params:
query_params['filter'] = params['filter']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Tag]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_designs_nk_tags_post(self, id, nk, **kwargs):
"""
Creates a new instance in tags of this model.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_designs_nk_tags_post(id, nk, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str nk: Foreign key for designs. (required)
:param Tag data:
:return: Tag
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_designs_nk_tags_post_with_http_info(id, nk, **kwargs)
else:
(data) = self.portals_id_designs_nk_tags_post_with_http_info(id, nk, **kwargs)
return data
def portals_id_designs_nk_tags_post_with_http_info(self, id, nk, **kwargs):
"""
Creates a new instance in tags of this model.
This method makes a synchronous HTTP request by | |
#
# Unicode Mapping generated by uni2python.xsl
#
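# Maps Unicode code points to LaTeX escape sequences. A minimal illustrative use
# (not part of the generated file) would be:
#   latex = ''.join(unicode_map.get(ord(ch), ch) for ch in text)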
unicode_map = {
0x00100: r"\={A}", # LATIN CAPITAL LETTER A WITH MACRON
0x00101: r"\={a}", # LATIN SMALL LETTER A WITH MACRON
0x00102: r"\u{A}", # LATIN CAPITAL LETTER A WITH BREVE
0x00103: r"\u{a}", # LATIN SMALL LETTER A WITH BREVE
0x00104: r"\k{A}", # LATIN CAPITAL LETTER A WITH OGONEK
0x00105: r"\k{a}", # LATIN SMALL LETTER A WITH OGONEK
0x00106: r"\'{C}", # LATIN CAPITAL LETTER C WITH ACUTE
0x00107: r"\'{c}", # LATIN SMALL LETTER C WITH ACUTE
0x00108: r"\^{C}", # LATIN CAPITAL LETTER C WITH CIRCUMFLEX
0x00109: r"\^{c}", # LATIN SMALL LETTER C WITH CIRCUMFLEX
0x0010A: r"\.{C}", # LATIN CAPITAL LETTER C WITH DOT ABOVE
0x0010B: r"\.{c}", # LATIN SMALL LETTER C WITH DOT ABOVE
0x0010C: r"\v{C}", # LATIN CAPITAL LETTER C WITH CARON
0x0010D: r"\v{c}", # LATIN SMALL LETTER C WITH CARON
0x0010E: r"\v{D}", # LATIN CAPITAL LETTER D WITH CARON
0x0010F: r"\v{d}", # LATIN SMALL LETTER D WITH CARON
0x00110: r"\DJ{}", # LATIN CAPITAL LETTER D WITH STROKE
0x00111: r"\dj{}", # LATIN SMALL LETTER D WITH STROKE
0x00112: r"\={E}", # LATIN CAPITAL LETTER E WITH MACRON
0x00113: r"\={e}", # LATIN SMALL LETTER E WITH MACRON
0x00114: r"\u{E}", # LATIN CAPITAL LETTER E WITH BREVE
0x00115: r"\u{e}", # LATIN SMALL LETTER E WITH BREVE
0x00116: r"\.{E}", # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x00117: r"\.{e}", # LATIN SMALL LETTER E WITH DOT ABOVE
0x00118: r"\k{E}", # LATIN CAPITAL LETTER E WITH OGONEK
0x00119: r"\k{e}", # LATIN SMALL LETTER E WITH OGONEK
0x0011A: r"\v{E}", # LATIN CAPITAL LETTER E WITH CARON
0x0011B: r"\v{e}", # LATIN SMALL LETTER E WITH CARON
0x0011C: r"\^{G}", # LATIN CAPITAL LETTER G WITH CIRCUMFLEX
0x0011D: r"\^{g}", # LATIN SMALL LETTER G WITH CIRCUMFLEX
0x0011E: r"\u{G}", # LATIN CAPITAL LETTER G WITH BREVE
0x0011F: r"\u{g}", # LATIN SMALL LETTER G WITH BREVE
0x00120: r"\.{G}", # LATIN CAPITAL LETTER G WITH DOT ABOVE
0x00121: r"\.{g}", # LATIN SMALL LETTER G WITH DOT ABOVE
0x00122: r"\c{G}", # LATIN CAPITAL LETTER G WITH CEDILLA
0x00123: r"\c{g}", # LATIN SMALL LETTER G WITH CEDILLA
0x00124: r"\^{H}", # LATIN CAPITAL LETTER H WITH CIRCUMFLEX
0x00125: r"\^{h}", # LATIN SMALL LETTER H WITH CIRCUMFLEX
0x00126: r"{\fontencoding{LELA}\selectfont\char40}", # LATIN CAPITAL LETTER H WITH STROKE
0x00128: r"\~{I}", # LATIN CAPITAL LETTER I WITH TILDE
0x00129: r"\~{\i}", # LATIN SMALL LETTER I WITH TILDE
0x0012A: r"\={I}", # LATIN CAPITAL LETTER I WITH MACRON
0x0012B: r"\={\i}", # LATIN SMALL LETTER I WITH MACRON
0x0012C: r"\u{I}", # LATIN CAPITAL LETTER I WITH BREVE
0x0012D: r"\u{\i}", # LATIN SMALL LETTER I WITH BREVE
0x0012E: r"\k{I}", # LATIN CAPITAL LETTER I WITH OGONEK
0x0012F: r"\k{i}", # LATIN SMALL LETTER I WITH OGONEK
0x00130: r"\.{I}", # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x00131: r"\i{}", # LATIN SMALL LETTER DOTLESS I
0x00132: r"IJ", # LATIN CAPITAL LIGATURE IJ
0x00133: r"ij", # LATIN SMALL LIGATURE IJ
0x00134: r"\^{J}", # LATIN CAPITAL LETTER J WITH CIRCUMFLEX
0x00135: r"\^{\j}", # LATIN SMALL LETTER J WITH CIRCUMFLEX
0x00136: r"\c{K}", # LATIN CAPITAL LETTER K WITH CEDILLA
0x00137: r"\c{k}", # LATIN SMALL LETTER K WITH CEDILLA
0x00138: r"{\fontencoding{LELA}\selectfont\char91}", # LATIN SMALL LETTER KRA
0x00139: r"\'{L}", # LATIN CAPITAL LETTER L WITH ACUTE
0x0013A: r"\'{l}", # LATIN SMALL LETTER L WITH ACUTE
0x0013B: r"\c{L}", # LATIN CAPITAL LETTER L WITH CEDILLA
0x0013C: r"\c{l}", # LATIN SMALL LETTER L WITH CEDILLA
0x0013D: r"\v{L}", # LATIN CAPITAL LETTER L WITH CARON
0x0013E: r"\v{l}", # LATIN SMALL LETTER L WITH CARON
0x0013F: r"{\fontencoding{LELA}\selectfont\char201}", # LATIN CAPITAL LETTER L WITH MIDDLE DOT
0x00140: r"{\fontencoding{LELA}\selectfont\char202}", # LATIN SMALL LETTER L WITH MIDDLE DOT
0x00141: r"\L{}", # LATIN CAPITAL LETTER L WITH STROKE
0x00142: r"\l{}", # LATIN SMALL LETTER L WITH STROKE
0x00143: r"\'{N}", # LATIN CAPITAL LETTER N WITH ACUTE
0x00144: r"\'{n}", # LATIN SMALL LETTER N WITH ACUTE
0x00145: r"\c{N}", # LATIN CAPITAL LETTER N WITH CEDILLA
0x00146: r"\c{n}", # LATIN SMALL LETTER N WITH CEDILLA
0x00147: r"\v{N}", # LATIN CAPITAL LETTER N WITH CARON
0x00148: r"\v{n}", # LATIN SMALL LETTER N WITH CARON
0x00149: r"'n", # LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
0x0014A: r"\NG{}", # LATIN CAPITAL LETTER ENG
0x0014B: r"\ng{}", # LATIN SMALL LETTER ENG
0x0014C: r"\={O}", # LATIN CAPITAL LETTER O WITH MACRON
0x0014D: r"\={o}", # LATIN SMALL LETTER O WITH MACRON
0x0014E: r"\u{O}", # LATIN CAPITAL LETTER O WITH BREVE
0x0014F: r"\u{o}", # LATIN SMALL LETTER O WITH BREVE
0x00150: r"\H{O}", # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x00151: r"\H{o}", # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x00152: r"\OE{}", # LATIN CAPITAL LIGATURE OE
0x00153: r"\oe{}", # LATIN SMALL LIGATURE OE
0x00154: r"\'{R}", # LATIN CAPITAL LETTER R WITH ACUTE
0x00155: r"\'{r}", # LATIN SMALL LETTER R WITH ACUTE
0x00156: r"\c{R}", # LATIN CAPITAL LETTER R WITH CEDILLA
0x00157: r"\c{r}", # LATIN SMALL LETTER R WITH CEDILLA
0x00158: r"\v{R}", # LATIN CAPITAL LETTER R WITH CARON
0x00159: r"\v{r}", # LATIN SMALL LETTER R WITH CARON
0x0015A: r"\'{S}", # LATIN CAPITAL LETTER S WITH ACUTE
0x0015B: r"\'{s}", # LATIN SMALL LETTER S WITH ACUTE
0x0015C: r"\^{S}", # LATIN CAPITAL LETTER S WITH CIRCUMFLEX
0x0015D: r"\^{s}", # LATIN SMALL LETTER S WITH CIRCUMFLEX
0x0015E: r"\c{S}", # LATIN CAPITAL LETTER S WITH CEDILLA
0x0015F: r"\c{s}", # LATIN SMALL LETTER S WITH CEDILLA
0x00160: r"\v{S}", # LATIN CAPITAL LETTER S WITH CARON
0x00161: r"\v{s}", # LATIN SMALL LETTER S WITH CARON
0x00162: r"\c{T}", # LATIN CAPITAL LETTER T WITH CEDILLA
0x00163: r"\c{t}", # LATIN SMALL LETTER T WITH CEDILLA
0x00164: r"\v{T}", # LATIN CAPITAL LETTER T WITH CARON
0x00165: r"\v{t}", # LATIN SMALL LETTER T WITH CARON
0x00166: r"{\fontencoding{LELA}\selectfont\char47}", # LATIN CAPITAL LETTER T WITH STROKE
0x00167: r"{\fontencoding{LELA}\selectfont\char63}", # LATIN SMALL LETTER T WITH STROKE
0x00168: r"\~{U}", # LATIN CAPITAL LETTER U WITH TILDE
0x00169: r"\~{u}", # LATIN SMALL LETTER U WITH TILDE
0x0016A: r"\={U}", # LATIN CAPITAL LETTER U WITH MACRON
0x0016B: r"\={u}", # LATIN SMALL LETTER U WITH MACRON
0x0016C: r"\u{U}", # LATIN CAPITAL LETTER U WITH BREVE
0x0016D: r"\u{u}", # LATIN SMALL LETTER U WITH BREVE
0x0016E: r"\r{U}", # LATIN CAPITAL LETTER U WITH RING ABOVE
0x0016F: r"\r{u}", # LATIN SMALL LETTER U WITH RING ABOVE
0x00170: r"\H{U}", # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00171: r"\H{u}", # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00172: r"\k{U}", # LATIN CAPITAL LETTER U WITH OGONEK
0x00173: r"\k{u}", # LATIN SMALL LETTER U WITH OGONEK
0x00174: r"\^{W}", # LATIN CAPITAL LETTER W WITH CIRCUMFLEX
0x00175: r"\^{w}", # LATIN SMALL LETTER W WITH CIRCUMFLEX
0x00176: r"\^{Y}", # LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
0x00177: r"\^{y}", # LATIN SMALL LETTER Y WITH CIRCUMFLEX
0x00178: r'\"{Y}', # LATIN CAPITAL LETTER Y WITH DIAERESIS
0x00179: r"\'{Z}", # LATIN CAPITAL LETTER Z WITH ACUTE
0x0017A: r"\'{z}", # LATIN SMALL LETTER Z WITH ACUTE
0x0017B: r"\.{Z}", # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x0017C: r"\.{z}", # LATIN SMALL LETTER Z WITH DOT ABOVE
0x0017D: r"\v{Z}", # LATIN CAPITAL LETTER Z WITH CARON
0x0017E: r"\v{z}", # LATIN SMALL LETTER Z WITH CARON
0x00195: r"\texthvlig{}", # LATIN SMALL LETTER HV
0x0019E: r"\textnrleg{}", # LATIN SMALL LETTER N WITH LONG RIGHT LEG
0x001C2: r"\textdoublepipe{}", # LATIN LETTER ALVEOLAR CLICK
0x001F5: r"\'{g}", # LATIN SMALL LETTER G WITH ACUTE
0x00259: r"\ensuremath{\Elzschwa}", # LATIN SMALL LETTER SCHWA
0x00261: r"g", # LATIN SMALL LETTER SCRIPT G
0x00278: r"\textphi{}", # LATIN SMALL LETTER PHI
0x0029E: r"\textturnk{}", # LATIN SMALL LETTER TURNED K
0x002A4: r"\textdyoghlig{}", # LATIN SMALL LETTER DEZH DIGRAPH
0x0025B: r"\textvarepsilon{}", # LATIN SMALL LETTER OPEN E
0x002BC: r"'", # MODIFIER LETTER APOSTROPHE
0x002C7: r"\textasciicaron{}", # CARON
0x002C8: r"\ensuremath{\Elzverts}", # MODIFIER LETTER VERTICAL LINE
0x002D8: r"\textasciibreve{}", # BREVE
0x002D9: r"\textperiodcentered{}", # DOT ABOVE
0x002DA: r"\r{}", # RING ABOVE
0x002DB: r"\k{}", # OGONEK
0x002DC: r"\texttildelow{}", # SMALL TILDE
0x002DD: r"\H{}", # DOUBLE ACUTE ACCENT
0x002E5: r"\tone{55}", # MODIFIER LETTER EXTRA-HIGH TONE BAR
0x002E6: r"\tone{44}", # MODIFIER LETTER HIGH TONE BAR
0x002E7: r"\tone{33}", # MODIFIER LETTER MID TONE BAR
0x002E8: r"\tone{22}", # MODIFIER LETTER LOW TONE BAR
0x002E9: r"\tone{11}", # MODIFIER LETTER EXTRA-LOW TONE BAR
0x00300: r"\`", # COMBINING GRAVE ACCENT
0x00301: r"\'", # COMBINING ACUTE ACCENT
0x00302: r"\^", # COMBINING CIRCUMFLEX ACCENT
0x00303: r"\~", # COMBINING TILDE
0x00304: r"\=", # COMBINING MACRON
0x00306: r"\u", # COMBINING BREVE
0x00307: r"\.", # COMBINING DOT ABOVE
0x00308: r'\"', # COMBINING DIAERESIS
0x0030A: r"\r", # COMBINING RING ABOVE
0x0030B: r"\H", # COMBINING DOUBLE ACUTE ACCENT
0x0030C: r"\v", # COMBINING CARON
0x0030F: r"\cyrchar\C", # COMBINING DOUBLE GRAVE ACCENT
0x00327: r"\c", # COMBINING CEDILLA
0x00386: r"\'{A}", # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00388: r"\'{E}", # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00389: r"\'{H}", # GREEK CAPITAL LETTER ETA WITH TONOS
0x0038A: r"\'{}{I}", # GREEK CAPITAL LETTER IOTA WITH TONOS
0x0038C: r"\'{}O", # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00393: r"\ensuremath{\Gamma}", # GREEK CAPITAL LETTER GAMMA
0x00394: r"\ensuremath{\Delta}", # GREEK CAPITAL LETTER DELTA
0x00395: r"\ensuremath{\Epsilon}", # GREEK CAPITAL LETTER EPSILON
0x0039B: | |
self._changed_layout = self._ui.changed_layout
self._changed_widget.hide()
conn_str = self.BLACS_connection
if self.remote_process_client is not None:
conn_str += " via %s:%d" % (self.remote_process_client.host, self.remote_process_client.port)
self._ui.device_name.setText(
"<b>%s</b> [conn: %s]" % (str(self.device_name), conn_str)
)
elide_label(self._ui.device_name, self._ui.horizontalLayout, Qt.ElideRight)
elide_label(self._ui.state_label, self._ui.state_label_layout, Qt.ElideRight)
# Insert an OutputBox into the splitter, initially hidden:
self._output_box = OutputBox(self._ui.splitter)
self._ui.splitter.setCollapsible(self._ui.splitter.count() - 2, True)
self._output_box.output_textedit.hide()
# connect signals
self._ui.button_clear_smart_programming.clicked.connect(self.on_force_full_buffered_reprogram)
self._ui.button_clear_smart_programming.setEnabled(False)
self.force_full_buffered_reprogram = True
self._ui.button_show_terminal.toggled.connect(self.set_terminal_visible)
self._ui.button_close.clicked.connect(self.hide_error)
self._ui.button_restart.clicked.connect(self.restart)
self._update_error_and_tab_icon()
self.supports_smart_programming(False)
# Restore settings:
self.restore_builtin_save_data(self.settings.get('saved_data', {}))
        # This should be done before the main_loop starts or else there is a race condition as to whether the
# self._mode variable is even defined!
# However it must be done after the UI is created!
self.mode = MODE_MANUAL
self.state = 'idle'
# Setup the not responding timeout
self._timeout = QTimer()
self._timeout.timeout.connect(self.check_time)
self._timeout.start(1000)
# Launch the mainloop
self._mainloop_thread = threading.Thread(target = self.mainloop)
self._mainloop_thread.daemon = True
self._mainloop_thread.start()
# Add the tab to the notebook
self.notebook.addTab(self._ui,self.device_name)
self._ui.show()
def _get_remote_configuration(self):
# Create and return zprocess remote process client, if the device is configured
# as a remote device, else None:
PRIMARY_BLACS = '__PrimaryBLACS'
table = self.settings['connection_table']
properties = table.find_by_name(self.device_name).properties
if properties.get('gui', PRIMARY_BLACS) != PRIMARY_BLACS:
msg = "Remote BLACS GUIs not yet supported by BLACS"
raise NotImplementedError(msg)
remote_server_name = properties.get('worker', PRIMARY_BLACS)
if remote_server_name != PRIMARY_BLACS:
remote_server_device = table.find_by_name(remote_server_name)
if remote_server_device.parent.name != PRIMARY_BLACS:
msg = "Multi-hop remote workers not yet supported by BLACS"
raise NotImplementedError(msg)
remote_host, remote_port = remote_server_device.parent_port.split(':')
remote_port = int(remote_port)
return RemoteProcessClient(remote_host, remote_port)
return None
def get_builtin_save_data(self):
"""Get builtin settings to be restored like whether the terminal is
visible. Not to be overridden."""
return {'_terminal_visible': self._ui.button_show_terminal.isChecked(),
'_splitter_sizes': self._ui.splitter.sizes()}
def get_all_save_data(self):
save_data = self.get_builtin_save_data()
if hasattr(self, 'get_save_data'):
tab_save_data = self.get_save_data()
if isinstance(tab_save_data, dict):
save_data.update(tab_save_data)
else:
self.logger.warning('Incorrect format for tab save data from the get_save_data() method. Data should be a dict. Data was: %s'%tab_save_data)
return save_data
def restore_builtin_save_data(self, data):
"""Restore builtin settings to be restored like whether the terminal is
visible. Not to be overridden."""
self.set_terminal_visible(data.get('_terminal_visible', False))
if '_splitter_sizes' in data:
self._ui.splitter.setSizes(data['_splitter_sizes'])
def update_from_settings(self, settings):
self.restore_builtin_save_data(settings['saved_data'])
def supports_smart_programming(self,support):
self._supports_smart_programming = bool(support)
if self._supports_smart_programming:
self._ui.button_clear_smart_programming.show()
else:
self._ui.button_clear_smart_programming.hide()
def on_force_full_buffered_reprogram(self):
self.force_full_buffered_reprogram = True
@property
def force_full_buffered_reprogram(self):
return self._force_full_buffered_reprogram
@force_full_buffered_reprogram.setter
def force_full_buffered_reprogram(self,value):
self._force_full_buffered_reprogram = bool(value)
self._ui.button_clear_smart_programming.setEnabled(not bool(value))
@property
@inmain_decorator(True)
def error_message(self):
return self._error
@error_message.setter
@inmain_decorator(True)
def error_message(self,message):
#print message
#print self._error
if message != self._error:
self._error = message
self._update_error_and_tab_icon()
@inmain_decorator(True)
def _update_error_and_tab_icon(self):
"""Udate and show the error message for the tab, and update the icon
and text colour on the tab"""
prefix = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">\n<html><head><meta name="qrichtext" content="1" /><style type="text/css">\np, li { white-space: pre-wrap; }\n</style></head><body style=" font-family:"MS Shell Dlg 2"; font-size:7.8pt; font-weight:400; font-style:normal;">'
suffix = '</body></html>'
#print threading.current_thread().name
self._ui.error_message.setHtml(prefix+self._not_responding_error_message+self._error+suffix)
if self._error or self._not_responding_error_message:
self._ui.notresponding.show()
self._tab_text_colour = 'red'
if self.error_message:
if self.state == 'fatal error':
self._tab_icon = self.ICON_FATAL_ERROR
else:
self._tab_icon = self.ICON_ERROR
else:
self._ui.notresponding.hide()
self._tab_text_colour = 'black'
if self.state == 'idle':
self._tab_icon = self.ICON_OK
else:
self._tab_icon = self.ICON_BUSY
self.set_tab_icon_and_colour()
@inmain_decorator(True)
def set_tab_icon_and_colour(self):
"""Set the tab icon and the colour of its text to the values of
self._tab_icon and self._tab_text_colour respectively"""
if self._ui.parentWidget() is None:
return
self.notebook = self._ui.parentWidget().parentWidget()
if self.notebook is not None:
currentpage = self.notebook.indexOf(self._ui)
if currentpage == -1:
# shutting down:
return
icon = QIcon(self._tab_icon)
self.notebook.tabBar().setTabIcon(currentpage, icon)
self.notebook.tabBar().setTabTextColor(currentpage, QColor(self._tab_text_colour))
def get_tab_layout(self):
return self._layout
@property
def device_name(self):
return self._device_name
    # sets the mode, switches between MODE_MANUAL, MODE_BUFFERED, MODE_TRANSITION_TO_BUFFERED and MODE_TRANSITION_TO_MANUAL
@property
def mode(self):
return self._mode
@mode.setter
def mode(self,mode):
self._mode = mode
self._update_state_label()
@property
def state(self):
return self._state
@state.setter
def state(self,state):
self._state = state
self._time_of_last_state_change = time.time()
self._update_state_label()
self._update_error_and_tab_icon()
@inmain_decorator(True)
def _update_state_label(self):
        if self.mode == MODE_MANUAL:
            mode = 'Manual'
        elif self.mode == MODE_TRANSITION_TO_BUFFERED:
            mode = 'Transitioning to buffered'
        elif self.mode == MODE_TRANSITION_TO_MANUAL:
            mode = 'Transitioning to manual'
        elif self.mode == MODE_BUFFERED:
            mode = 'Buffered'
        else:
            raise RuntimeError('self.mode for device %s is invalid. It must be one of MODE_MANUAL, MODE_TRANSITION_TO_BUFFERED, MODE_TRANSITION_TO_MANUAL or MODE_BUFFERED'%(self.device_name))
self._ui.state_label.setText('<b>%s mode</b> - State: %s'%(mode,self.state))
# Todo: Update icon in tab
def create_worker(self,name,WorkerClass,workerargs=None):
"""Set up a worker process. WorkerClass can either be a subclass of Worker, or a
string containing a fully qualified import path to a worker. The latter is
useful if the worker class is in a separate file with global imports or other
import-time behaviour that is undesirable to have run in the main process, for
example if the imports may not be available to the main process (as may be the
case once remote worker processes are implemented and the worker may be on a
separate computer). The worker process will not be started immediately, it will
be started once the state machine mainloop begins running. This way errors in
startup will be handled using the normal state machine machinery."""
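        # Illustrative calls (the names below are hypothetical, not part of BLACS):
        #   self.create_worker('main_worker', MyDeviceWorker, {'port': 1234})
        #   self.create_worker('main_worker', 'user_devices.my_device.MyDeviceWorker', {'port': 1234})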
if workerargs is None:
workerargs = {}
workerargs['is_remote'] = self.remote_process_client is not None
if name in self.workers:
raise Exception('There is already a worker process with name: %s'%name)
if name == 'GUI':
# This is here so that we can display "(GUI)" in the status bar and have the user confident this is actually happening in the GUI,
# not in a worker process named GUI
raise Exception('You cannot call a worker process "GUI". Why would you want to? Your worker process cannot interact with the BLACS GUI directly, so you are just trying to confuse yourself!')
if isinstance(WorkerClass, type):
worker = WorkerClass(
process_tree,
output_redirection_port=self._output_box.port,
remote_process_client=self.remote_process_client,
startup_timeout=30
)
elif isinstance(WorkerClass, str):
# If we were passed a string for the WorkerClass, it is an import path
# for where the Worker class can be found. Pass it to zprocess.Process,
# which will do the import in the subprocess only.
worker = Process(
process_tree,
output_redirection_port=self._output_box.port,
remote_process_client=self.remote_process_client,
startup_timeout=30,
subclass_fullname=WorkerClass
)
else:
raise TypeError(WorkerClass)
self.workers[name] = (worker,None,None)
self.event_queue.put(MODE_MANUAL|MODE_BUFFERED|MODE_TRANSITION_TO_BUFFERED|MODE_TRANSITION_TO_MANUAL,True,False,[Tab._initialise_worker,[(name, workerargs),{}]], priority=-1)
def _initialise_worker(self, worker_name, workerargs):
yield (self.queue_work(worker_name, 'init', worker_name, self.device_name, workerargs))
if self.error_message:
raise Exception('Device failed to initialise')
@define_state(MODE_MANUAL|MODE_BUFFERED|MODE_TRANSITION_TO_BUFFERED|MODE_TRANSITION_TO_MANUAL,True)
def _timeout_add(self,delay,execute_timeout):
QTimer.singleShot(delay,execute_timeout)
def statemachine_timeout_add(self,delay,statefunction,*args,**kwargs):
# Add the timeout to our set of registered timeouts. Timeouts
        # can thus be removed by the user at any time by calling
# self.timeouts.remove(function)
self._timeouts.add(statefunction)
# Here's a function which executes the timeout once, then queues
# itself up again after a delay:
def execute_timeout():
# queue up the state function, but only if it hasn't been
# removed from self.timeouts:
if statefunction in self._timeouts and self._timeout_ids[statefunction] == unique_id:
# Only queue up the state if we are in an allowed mode
if statefunction._allowed_modes&self.mode:
statefunction(*args, **kwargs)
# queue up another call to this function (execute_timeout)
# after the delay time:
self._timeout_add(delay,execute_timeout)
# Store a unique ID for this timeout so that we don't confuse
# other timeouts for this one when checking to see that this
# timeout hasn't been removed:
unique_id = get_unique_id()
self._timeout_ids[statefunction] = unique_id
# queue the first run:
#QTimer.singleShot(delay,execute_timeout)
execute_timeout()
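    # Illustrative use (status_monitor is a hypothetical @define_state-decorated method):
    # register a poll every 500 ms with self.statemachine_timeout_add(500, self.status_monitor)
    # and cancel it later with self.statemachine_timeout_remove(self.status_monitor).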
# Returns True if the timeout was removed
def statemachine_timeout_remove(self,statefunction):
if statefunction in self._timeouts:
self._timeouts.remove(statefunction)
return True
return False
# returns True if at least one timeout was removed, else returns False
def statemachine_timeout_remove_all(self):
# As a consistency check, we overwrite self._timeouts to an empty set always
# This must be done after the check to see if it is empty (if self._timeouts) so do not refactor this code!
if self._timeouts:
self._timeouts = set()
return True
else:
self._timeouts = set()
return False
@define_state(MODE_MANUAL|MODE_BUFFERED|MODE_TRANSITION_TO_BUFFERED|MODE_TRANSITION_TO_MANUAL,True)
def shutdown_workers(self):
"""Ask all workers to shutdown"""
for worker_name in self.workers:
yield(self.queue_work(worker_name, 'shutdown'))
self.shutdown_workers_complete = True
def close_tab(self, finalise=True):
"""Close the tab, terminate subprocesses and join the mainloop thread. If
finalise=False, then do not terminate subprocesses or join the mainloop. In this
case, callers must manually call finalise_close_tab() to perform these
potentially blocking operations"""
self.logger.info('close_tab called')
self._timeout.stop()
for worker, to_worker, from_worker in self.workers.values():
# If the worker is still | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
###AWG
sys.path.append('/home/pulseepr/Sources/AWG/Examples/python')
###sys.path.append('/home/anatoly/AWG/spcm_examples/python')
##sys.path.append('/home/anatoly/awg_files/python')
#sys.path.append('C:/Users/User/Desktop/Examples/python')
import numpy as np
import atomize.device_modules.config.config_utils as cutil
import atomize.general_modules.general_functions as general
from pyspcm import *
from spcm_tools import *
class Spectrum_M4I_4450_X8:
def __init__(self):
#### Inizialization
# setting path to *.ini file
self.path_current_directory = os.path.dirname(__file__)
self.path_config_file = os.path.join(self.path_current_directory, 'config','Spectrum_M4I_4450_X8_config.ini')
# configuration data
#config = cutil.read_conf_util(self.path_config_file)
self.specific_parameters = cutil.read_specific_parameters(self.path_config_file)
# Channel assignments
#ch0 = self.specific_parameters['ch0'] # TRIGGER
self.timebase_dict = {'ms': 1000000, 'us': 1000, 'ns': 1, }
self.channel_dict = {'CH0': 0, 'CH1': 1, }
self.coupling_dict = {'DC': 0, 'AC': 1, }
self.impedance_dict = {'1 M': 0, '50': 1, }
self.sample_rate_list = [1907, 3814, 7629, 15258, 30517, 61035, 122070, 244140, 488281, 976562, \
1953125, 3906250, 7812500, 15625000, 31250000, 62500000, 125000000, \
250000000, 500000000]
self.hf_mode_range_list = [500, 1000, 2500, 5000]
self.buffered_mode_range_list = [200, 500, 1000, 2000, 5000, 10000]
# Limits and Ranges (depends on the exact model):
#clock = float(self.specific_parameters['clock'])
# Delays and restrictions
# MaxDACValue corresponds to the amplitude of the output signal; MaxDACValue - Amplitude and so on
# lMaxDACValue = int32 (0)
# spcm_dwGetParam_i32 (hCard, SPC_MIINST_MAXADCVALUE, byref(lMaxDACValue))
# lMaxDACValue.value = lMaxDACValue.value - 1
#maxCAD = 8191 # MaxCADValue of the AWG card - 1
#minCAD = -8192
self.amplitude_max = 2500 # mV
self.amplitude_min = 80 # mV
self.sample_rate_max = 500 # MHz
self.sample_rate_min = 0.001907 # MHz
self.sample_ref_clock_max = 100 # MHz
self.sample_ref_clock_min = 10 # MHz
self.averages_max = 100000
self.delay_max = 8589934576
self.delay_min = 0
# Test run parameters
# These values are returned by the modules in the test run
if len(sys.argv) > 1:
self.test_flag = sys.argv[1]
else:
self.test_flag = 'None'
if self.test_flag != 'test':
# Collect all parameters for digitizer settings
self.sample_rate = 500 # MHz
self.clock_mode = 1 # 1 is Internal; 32 is External
self.reference_clock = 100 # MHz
self.card_mode = 1 # 1 is Single; 2 is Average (Multi);
self.trigger_ch = 2 # 1 is Software; 2 is External
self.trigger_mode = 1 # 1 is Positive; 2 is Negative; 8 is High; 10 is Low
self.aver = 2 # 0 is infinity
            self.delay = 0 # trigger delay in samples; step is 32; rounded
self.channel = 3 # 1 is CH0; 2 is CH1; 3 is CH0 + CH1
self.points = 128 # number of points
self.posttrig_points = 64 # number of posttrigger points
self.input_mode = 1 # 1 is HF mode; 0 is Buffered
            self.amplitude_0 = 500 # amplitude for CH0 in mV
            self.amplitude_1 = 500 # amplitude for CH1 in mV
self.offset_0 = 0 # offset for CH0 in percentage
self.offset_1 = 0 # offset for CH1 in percentage
self.coupling_0 = 0 # coupling for CH0; AC is 1; DC is 0
self.coupling_1 = 0 # coupling for CH1
            self.impedance_0 = 1 # impedance for CH0; 1 M is 0; 50 is 1
self.impedance_1 = 1 # impedance for CH1;
# change of settings
self.setting_change_count = 0
# state counter
self.state = 0
self.read = 0
# integration window
self.win_left = 0
self.win_right = 1
elif self.test_flag == 'test':
self.test_sample_rate = '500 MHz'
self.test_clock_mode = 'Internal'
self.test_ref_clock = 100
self.test_card_mode = 'Single'
self.test_trigger_ch = 'External'
self.test_trigger_mode = 'Positive'
self.test_averages = 10
self.test_delay = 0
self.test_channel = 'CH0'
self.test_amplitude = 'CH0: 500 mV; CH1: 500 mV'
self.test_num_segments = 1
self.test_points = 128
self.test_posttrig_points = 64
self.test_input_mode = 'HF'
self.test_offset = 'CH0: 10'
self.test_coupling = 'CH0: DC'
self.test_impedance = 'CH0: 50'
self.test_integral = 10**-9 # in V*s
# Collect all parameters for digitizer settings
self.sample_rate = 500
self.clock_mode = 1
self.reference_clock = 100
self.card_mode = 1
self.trigger_ch = 2
self.trigger_mode = 1
self.aver = 2
self.delay = 0
self.channel = 3
self.points = 128
self.posttrig_points = 64
self.input_mode = 1
self.amplitude_0 = 500
self.amplitude_1 = 500
self.offset_0 = 0
self.offset_1 = 0
self.coupling_0 = 0
self.coupling_1 = 0
self.impedance_0 = 1
self.impedance_1 = 1
# change of settings
self.setting_change_count = 0
# state counter
self.state = 0
self.read = 0
# integration window
self.win_left = 0
self.win_right = 1
# Module functions
def digitizer_name(self):
answer = 'Spectrum M4I.4450-X8'
return answer
def digitizer_setup(self):
"""
Write settings to the digitizer. No argument; No output
Everything except the buffer information will be write to the digitizer
This function should be called after all functions that change settings are called
"""
if self.test_flag != 'test':
if self.state == 0:
# open card
self.hCard = spcm_hOpen ( create_string_buffer (b'/dev/spcm1') )
self.state = 1
if self.hCard == None:
general.message("No card found...")
sys.exit()
else:
pass
spcm_dwSetParam_i32 (self.hCard, SPC_TIMEOUT, 10000)
# general parameters of the card; internal/external clock
if self.clock_mode == 1:
spcm_dwSetParam_i64 (self.hCard, SPC_SAMPLERATE, int( 1000000 * self.sample_rate ))
elif self.clock_mode == 32:
spcm_dwSetParam_i32 (self.hCard, SPC_CLOCKMODE, self.clock_mode)
spcm_dwSetParam_i64 (self.hCard, SPC_REFERENCECLOCK, MEGA(self.reference_clock))
spcm_dwSetParam_i64 (self.hCard, SPC_SAMPLERATE, int( 1000000 * self.sample_rate ) )
# change card mode and memory
if self.card_mode == 1:
spcm_dwSetParam_i32(self.hCard, SPC_CARDMODE, self.card_mode)
spcm_dwSetParam_i32(self.hCard, SPC_MEMSIZE, self.points)
spcm_dwSetParam_i32(self.hCard, SPC_POSTTRIGGER, self.posttrig_points)
elif self.card_mode == 2:
spcm_dwSetParam_i32(self.hCard, SPC_CARDMODE, self.card_mode)
spcm_dwSetParam_i32(self.hCard, SPC_MEMSIZE, int( self.points * self.aver ) )
# segment size should be multiple of memory size
spcm_dwSetParam_i32(self.hCard, SPC_SEGMENTSIZE, self.points )
spcm_dwSetParam_i32(self.hCard, SPC_POSTTRIGGER, self.posttrig_points)
# trigger
spcm_dwSetParam_i32(self.hCard, SPC_TRIG_TERM, 1) # 50 Ohm trigger load
spcm_dwSetParam_i32(self.hCard, SPC_TRIG_ORMASK, self.trigger_ch) # software / external
if self.trigger_ch == 2:
spcm_dwSetParam_i32(self.hCard, SPC_TRIG_EXT0_MODE, self.trigger_mode)
# loop
#spcm_dwSetParam_i32(self.hCard, SPC_LOOPS, self.loop)
# trigger delay
spcm_dwSetParam_i32( self.hCard, SPC_TRIG_DELAY, int(self.delay) )
# set the output channels
spcm_dwSetParam_i32 (self.hCard, SPC_PATH0, self.input_mode)
spcm_dwSetParam_i32 (self.hCard, SPC_PATH1, self.input_mode)
spcm_dwSetParam_i32 (self.hCard, SPC_CHENABLE, self.channel)
spcm_dwSetParam_i32 (self.hCard, SPC_AMP0, self.amplitude_0)
spcm_dwSetParam_i32 (self.hCard, SPC_AMP1, self.amplitude_1)
            if ( self.amplitude_0 != 1000 and self.amplitude_0 != 10000 ) and self.input_mode == 0:
spcm_dwSetParam_i32 (self.hCard, SPC_OFFS0, -self.offset_0 )
spcm_dwSetParam_i32 (self.hCard, SPC_OFFS1, -self.offset_1 )
elif self.input_mode == 1:
spcm_dwSetParam_i32 (self.hCard, SPC_OFFS0, -self.offset_0 )
spcm_dwSetParam_i32 (self.hCard, SPC_OFFS1, -self.offset_1 )
spcm_dwSetParam_i32 (self.hCard, SPC_ACDC0, self.coupling_0)
spcm_dwSetParam_i32 (self.hCard, SPC_ACDC1, self.coupling_1)
# in HF mode impedance is fixed
if self.input_mode == 0:
spcm_dwSetParam_i32 (self.hCard, SPC_50OHM0, self.impedance_0 )
spcm_dwSetParam_i32 (self.hCard, SPC_50OHM1, self.impedance_1 )
# define the memory size / max amplitude
#llMemSamples = int64 (self.memsize)
#lBytesPerSample = int32(0)
#spcm_dwGetParam_i32 (hCard, SPC_MIINST_BYTESPERSAMPLE, byref(lBytesPerSample))
#lSetChannels = int32 (0)
#spcm_dwGetParam_i32 (hCard, SPC_CHCOUNT, byref (lSetChannels))
# The Spectrum driver also contains a register that holds the value of the decimal value of the full scale representation of the installed ADC. This
# value should be used when converting ADC values (in LSB) into real-world voltage values, because this register also automatically takes any
# specialities into account, such as slightly reduced ADC resolution with reserved codes for gain/offset compensation.
self.lMaxDACValue = int32 (0)
spcm_dwGetParam_i32 (self.hCard, SPC_MIINST_MAXADCVALUE, byref(self.lMaxDACValue))
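            # Illustrative LSB-to-volt conversion using this register (not used below):
            #   volts = adc_value / self.lMaxDACValue.value * (self.amplitude_0 / 1000)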
#if lMaxDACValue.value == maxCAD:
# pass
#else:
# general.message('maxCAD value does not equal to lMaxDACValue.value')
# sys.exit()
if self.channel == 1 or self.channel == 2:
if self.card_mode == 1:
self.qwBufferSize = uint64 (self.points * 2 * 1) # in bytes. samples with 2 bytes each, one channel active
elif self.card_mode == 2:
self.qwBufferSize = uint64 (int( self.points * self.aver ) * 2 * 1)
elif self.channel == 3:
if self.card_mode == 1:
self.qwBufferSize = uint64 (self.points * 2 * 2) # in bytes. samples with 2 bytes each
elif self.card_mode == 2:
self.qwBufferSize = uint64 (int( self.points * self.aver ) * 2 * 2)
self.lNotifySize = int32 (0) # driver should notify program after all data has been transfered
spcm_dwSetParam_i32 (self.hCard, SPC_M2CMD, M2CMD_CARD_WRITESETUP)
elif self.test_flag == 'test':
# to run several important checks
#if self.setting_change_count == 1:
# if self.card_mode == 32768 and self.sequence_mode == 0:
# self.buf = self.define_buffer_single()[0]
# elif self.card_mode == 32768 and self.sequence_mode == 0:
# self.buf = self.define_buffer_single_joined()[0]
# elif self.card_mode == 512 and self.sequence_mode == 0:
# self.buf = self.define_buffer_multi()[0]
#else:
pass
def digitizer_get_curve(self, integral = False):
"""
        Start the digitizer and acquire a curve. Optional argument: integral (bool, False by default)
Default settings:
Sample clock is 500 MHz; Clock mode is 'Internal'; Reference clock is 100 MHz; Card mode is 'Single';
Trigger channel is 'External'; Trigger mode is 'Positive'; Number of averages is | |
m.x118 >= 0)
m.c220 = Constraint(expr= m.x119 >= 0)
m.c221 = Constraint(expr= m.x120 >= 0)
m.c222 = Constraint(expr= m.x121 >= 0)
m.c223 = Constraint(expr= m.x122 >= 0)
m.c224 = Constraint(expr= - 4*m.b59 + m.x123 >= 0)
m.c225 = Constraint(expr= m.x124 >= 0)
m.c226 = Constraint(expr= m.x125 >= 0)
m.c227 = Constraint(expr= m.x126 >= 0)
m.c228 = Constraint(expr= m.x127 >= 0)
m.c229 = Constraint(expr= m.x128 >= 0)
m.c230 = Constraint(expr= m.x129 >= 0)
m.c231 = Constraint(expr= - 8*m.b2 + m.x194 <= 0)
m.c232 = Constraint(expr= - 8*m.b3 + m.x195 <= 0)
m.c233 = Constraint(expr= - 8*m.b4 + m.x196 <= 0)
m.c234 = Constraint(expr= - 8*m.b5 + m.x197 <= 0)
m.c235 = Constraint(expr= - 8*m.b6 + m.x198 <= 0)
m.c236 = Constraint(expr= - 8*m.b7 + m.x199 <= 0)
m.c237 = Constraint(expr= - 8*m.b8 + m.x200 <= 0)
m.c238 = Constraint(expr= - 8*m.b9 + m.x201 <= 0)
m.c239 = Constraint(expr= - 8*m.b10 + m.x202 <= 0)
m.c240 = Constraint(expr= - 8*m.b11 + m.x203 <= 0)
m.c241 = Constraint(expr= - 8*m.b12 + m.x204 <= 0)
m.c242 = Constraint(expr= - 8*m.b13 + m.x205 <= 0)
m.c243 = Constraint(expr= - 8*m.b14 + m.x206 <= 0)
m.c244 = Constraint(expr= - 8*m.b15 + m.x207 <= 0)
m.c245 = Constraint(expr= - 8*m.b16 + m.x208 <= 0)
m.c246 = Constraint(expr= - 8*m.b17 + m.x209 <= 0)
m.c247 = Constraint(expr= - 8*m.b18 + m.x210 <= 0)
m.c248 = Constraint(expr= - 8*m.b19 + m.x211 <= 0)
m.c249 = Constraint(expr= - 8*m.b20 + m.x212 <= 0)
m.c250 = Constraint(expr= - 8*m.b21 + m.x213 <= 0)
m.c251 = Constraint(expr= - 8*m.b22 + m.x214 <= 0)
m.c252 = Constraint(expr= - 8*m.b23 + m.x215 <= 0)
m.c253 = Constraint(expr= - 8*m.b24 + m.x216 <= 0)
m.c254 = Constraint(expr= - 8*m.b25 + m.x217 <= 0)
m.c255 = Constraint(expr= - 8*m.b26 + m.x218 <= 0)
m.c256 = Constraint(expr= - 8*m.b27 + m.x219 <= 0)
m.c257 = Constraint(expr= - 8*m.b28 + m.x220 <= 0)
m.c258 = Constraint(expr= - 8*m.b29 + m.x221 <= 0)
m.c259 = Constraint(expr= - 8*m.b30 + m.x222 <= 0)
m.c260 = Constraint(expr= - 8*m.b31 + m.x223 <= 0)
m.c261 = Constraint(expr= - 8*m.b32 + m.x224 <= 0)
m.c262 = Constraint(expr= - 8*m.b33 + m.x225 <= 0)
m.c263 = Constraint(expr= - 8*m.b34 + m.x226 <= 0)
m.c264 = Constraint(expr= - 8*m.b35 + m.x227 <= 0)
m.c265 = Constraint(expr= - 8*m.b36 + m.x228 <= 0)
m.c266 = Constraint(expr= - 8*m.b37 + m.x229 <= 0)
m.c267 = Constraint(expr= - 8*m.b38 + m.x230 <= 0)
m.c268 = Constraint(expr= - 8*m.b39 + m.x231 <= 0)
m.c269 = Constraint(expr= - 8*m.b40 + m.x232 <= 0)
m.c270 = Constraint(expr= - 8*m.b41 + m.x233 <= 0)
m.c271 = Constraint(expr= - 8*m.b42 + m.x234 <= 0)
m.c272 = Constraint(expr= - 8*m.b43 + m.x235 <= 0)
m.c273 = Constraint(expr= - 8*m.b44 + m.x236 <= 0)
m.c274 = Constraint(expr= - 8*m.b45 + m.x237 <= 0)
m.c275 = Constraint(expr= - 8*m.b46 + m.x238 <= 0)
m.c276 = Constraint(expr= - 8*m.b47 + m.x239 <= 0)
m.c277 = Constraint(expr= - 8*m.b48 + m.x240 <= 0)
m.c278 = Constraint(expr= - 8*m.b49 + m.x241 <= 0)
m.c279 = Constraint(expr= - 8*m.b50 + m.x242 <= 0)
m.c280 = Constraint(expr= - 8*m.b51 + m.x243 <= 0)
m.c281 = Constraint(expr= - 8*m.b52 + m.x244 <= 0)
m.c282 = Constraint(expr= - 8*m.b53 + m.x245 <= 0)
m.c283 = Constraint(expr= - 8*m.b54 + m.x246 <= 0)
m.c284 = Constraint(expr= - 8*m.b55 + m.x247 <= 0)
m.c285 = Constraint(expr= - 8*m.b56 + m.x248 <= 0)
m.c286 = Constraint(expr= - 8*m.b57 + m.x249 <= 0)
m.c287 = Constraint(expr= - 8*m.b58 + m.x250 <= 0)
m.c288 = Constraint(expr= - 8*m.b59 + m.x251 <= 0)
m.c289 = Constraint(expr= - 8*m.b60 + m.x252 <= 0)
m.c290 = Constraint(expr= - 8*m.b61 + m.x253 <= 0)
m.c291 = Constraint(expr= - 8*m.b62 + m.x254 <= 0)
m.c292 = Constraint(expr= - 8*m.b63 + m.x255 <= 0)
m.c293 = Constraint(expr= - 8*m.b64 + m.x256 <= 0)
m.c294 = Constraint(expr= - 8*m.b65 + m.x257 <= 0)
m.c295 = Constraint(expr= - 100*m.b2 + m.x258 >= 0)
m.c296 = Constraint(expr= - 100*m.b3 + m.x259 >= 0)
m.c297 = Constraint(expr= - 100*m.b10 + m.x266 >= 0)
m.c298 = Constraint(expr= - 100*m.b11 + m.x267 >= 0)
m.c299 = Constraint(expr= - 100*m.b18 + m.x274 >= 0)
m.c300 = Constraint(expr= - 100*m.b19 + m.x275 >= 0)
m.c301 = Constraint(expr= - 100*m.b26 + m.x282 >= 0)
m.c302 = Constraint(expr= - 100*m.b27 + m.x283 >= 0)
m.c303 = Constraint(expr= - 100*m.b34 + m.x290 >= 0)
m.c304 = Constraint(expr= - 100*m.b35 + m.x291 >= 0)
m.c305 = Constraint(expr= - 100*m.b42 + m.x298 >= 0)
m.c306 = Constraint(expr= - 100*m.b43 + m.x299 >= 0)
m.c307 = Constraint(expr= - 100*m.b50 + m.x306 >= 0)
m.c308 = Constraint(expr= - 100*m.b51 + m.x307 >= 0)
m.c309 = Constraint(expr= - 100*m.b58 + m.x314 >= 0)
m.c310 = Constraint(expr= - 100*m.b59 + m.x315 >= 0)
m.c311 = Constraint(expr= - 100*m.b2 + m.x258 <= 0)
m.c312 = Constraint(expr= - 100*m.b3 + m.x259 <= 0)
m.c313 = Constraint(expr= - 100*m.b4 + m.x260 <= 0)
m.c314 = Constraint(expr= - 100*m.b5 + m.x261 <= 0)
m.c315 = Constraint(expr= - 100*m.b6 + m.x262 <= 0)
m.c316 = Constraint(expr= - 100*m.b7 + m.x263 <= 0)
m.c317 = Constraint(expr= - 100*m.b8 + m.x264 <= 0)
m.c318 = Constraint(expr= - 100*m.b9 + m.x265 <= 0)
m.c319 = Constraint(expr= - 100*m.b10 + m.x266 <= 0)
m.c320 = Constraint(expr= - 100*m.b11 + m.x267 <= 0)
m.c321 = Constraint(expr= - 100*m.b12 + m.x268 <= 0)
m.c322 = Constraint(expr= - 100*m.b13 + m.x269 <= 0)
m.c323 = Constraint(expr= - 100*m.b14 + m.x270 <= 0)
m.c324 = Constraint(expr= - 100*m.b15 + m.x271 <= 0)
m.c325 = Constraint(expr= - 100*m.b16 + m.x272 <= 0)
m.c326 = Constraint(expr= - 100*m.b17 + m.x273 <= 0)
m.c327 = Constraint(expr= - 100*m.b18 + m.x274 <= 0)
m.c328 = Constraint(expr= - 100*m.b19 + m.x275 <= 0)
m.c329 = Constraint(expr= - 100*m.b20 + m.x276 <= 0)
m.c330 = Constraint(expr= - 100*m.b21 + m.x277 <= 0)
m.c331 = Constraint(expr= - 100*m.b22 + m.x278 <= 0)
m.c332 = Constraint(expr= - 100*m.b23 + m.x279 <= 0)
m.c333 = Constraint(expr= - 100*m.b24 + m.x280 <= 0)
m.c334 = Constraint(expr= - 100*m.b25 + m.x281 <= 0)
m.c335 = Constraint(expr= - 100*m.b26 + m.x282 <= 0)
m.c336 = Constraint(expr= - 100*m.b27 + m.x283 <= 0)
m.c337 = Constraint(expr= - 100*m.b28 + m.x284 <= 0)
m.c338 = Constraint(expr= - 100*m.b29 + m.x285 <= 0)
m.c339 = Constraint(expr= - 100*m.b30 + m.x286 <= 0)
m.c340 = Constraint(expr= - 100*m.b31 + m.x287 <= 0)
m.c341 = Constraint(expr= - 100*m.b32 + m.x288 <= 0)
m.c342 = Constraint(expr= - 100*m.b33 + m.x289 <= 0)
m.c343 = Constraint(expr= - 100*m.b34 + m.x290 <= 0)
m.c344 = Constraint(expr= - 100*m.b35 + m.x291 <= 0)
m.c345 = Constraint(expr= - 100*m.b36 + m.x292 <= 0)
m.c346 = Constraint(expr= - 100*m.b37 + m.x293 <= 0)
m.c347 = Constraint(expr= - 100*m.b38 + m.x294 <= 0)
m.c348 = Constraint(expr= - 100*m.b39 + m.x295 <= 0)
m.c349 = Constraint(expr= - 100*m.b40 + m.x296 <= 0)
m.c350 = Constraint(expr= - 100*m.b41 + m.x297 <= 0)
m.c351 = Constraint(expr= - 100*m.b42 + m.x298 <= 0)
m.c352 = Constraint(expr= - 100*m.b43 + m.x299 <= 0)
m.c353 = Constraint(expr= - 100*m.b44 + m.x300 <= 0)
m.c354 = Constraint(expr= - 100*m.b45 + m.x301 <= 0)
m.c355 = Constraint(expr= - 100*m.b46 + m.x302 <= 0)
m.c356 = Constraint(expr= - 100*m.b47 + m.x303 <= 0)
m.c357 = Constraint(expr= - 100*m.b48 + m.x304 <= 0)
m.c358 = Constraint(expr= - 100*m.b49 + m.x305 <= 0)
m.c359 = Constraint(expr= - 100*m.b50 + m.x306 <= 0)
m.c360 = Constraint(expr= - 100*m.b51 + m.x307 <= 0)
m.c361 = Constraint(expr= - 100*m.b52 + m.x308 <= 0)
m.c362 = Constraint(expr= - 100*m.b53 + m.x309 <= 0)
m.c363 = Constraint(expr= - 100*m.b54 + m.x310 <= 0)
m.c364 = Constraint(expr= - 100*m.b55 + m.x311 <= 0)
m.c365 = Constraint(expr= - 100*m.b56 + m.x312 <= 0)
m.c366 = Constraint(expr= - 100*m.b57 + m.x313 <= 0)
m.c367 = Constraint(expr= - 100*m.b58 + m.x314 <= 0)
m.c368 = Constraint(expr= - 100*m.b59 + m.x315 <= 0)
m.c369 = Constraint(expr= - 100*m.b60 + m.x316 <= 0)
m.c370 = Constraint(expr= - 100*m.b61 + m.x317 <= 0)
m.c371 = Constraint(expr= - 100*m.b62 + m.x318 <= 0)
m.c372 = Constraint(expr= - 100*m.b63 + m.x319 <= 0)
m.c373 = Constraint(expr= - 100*m.b64 + m.x320 <= 0)
m.c374 = Constraint(expr= - 100*m.b65 + m.x321 <= 0)
m.c375 = Constraint(expr= m.x258 - m.x322 - m.x323 - m.x324 - m.x325 == 0)
m.c376 = Constraint(expr= m.x259 - m.x326 - m.x327 - m.x328 - m.x329 == 0)
m.c377 = Constraint(expr= m.x260 - m.x330 - m.x331 - m.x332 - m.x333 == 0)
m.c378 = Constraint(expr= m.x261 - m.x334 - m.x335 - m.x336 - m.x337 == 0)
m.c379 = Constraint(expr= m.x262 - m.x338 - m.x339 - m.x340 - m.x341 == 0)
m.c380 = Constraint(expr= | |
import sys
from elegantrl.run import *
'''custom env'''
class PendulumEnv(gym.Wrapper): # [ElegantRL.2021.11.11]
def __init__(self, gym_env_id='Pendulum-v1', target_return=-200):
# Pendulum-v0 gym.__version__ == 0.17.0
# Pendulum-v1 gym.__version__ == 0.21.0
gym.logger.set_level(40) # Block warning
super(PendulumEnv, self).__init__(env=gym.make(gym_env_id))
# from elegantrl.envs.Gym import get_gym_env_info
# get_gym_env_info(env, if_print=True) # use this function to print the env information
self.env_num = 1 # the env number of VectorEnv is greater than 1
self.env_name = gym_env_id # the name of this env.
self.max_step = 200 # the max step of each episode
self.state_dim = 3 # feature number of state
self.action_dim = 1 # feature number of action
self.if_discrete = False # discrete action or continuous action
self.target_return = target_return # episode return is between (-1600, 0)
def reset(self):
return self.env.reset().astype(np.float32)
def step(self, action: np.ndarray):
# PendulumEnv sets its action space to (-2, +2), which is inconvenient. # https://github.com/openai/gym/wiki/Pendulum-v0
# I suggest setting the action space to (-1, +1) when you design your own env.
state, reward, done, info_dict = self.env.step(action * 2) # state, reward, done, info_dict
return state.astype(np.float32), reward, done, info_dict
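# Illustrative usage of the wrapper above (a sketch, not part of the original demo).
# It assumes `gym` and `np` are in scope, exactly as the wrapper itself does; the
# wrapper expects actions in [-1, +1] and rescales them internally to Pendulum's
# native [-2, +2] range.
def _pendulum_env_usage_sketch():
    env = PendulumEnv('Pendulum-v1')
    state = env.reset()  # float32 observation of shape (3,)
    action = np.array([0.5], dtype=np.float32)
    state, reward, done, info_dict = env.step(action)
    return state, reward, done, info_dict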
'''demo'''
def demo_continuous_action_off_policy_redq():
env_name = ['Pendulum-v0',
'Pendulum-v1',
'LunarLanderContinuous-v2',
'BipedalWalker-v3', ][ENV_ID]
agent = [AgentREDqSAC, AgentSyncREDqSAC][-1]
gpu_id = GPU_ID # >=0 means GPU ID, -1 means CPU
if env_name in {'Pendulum-v0', 'Pendulum-v1'}:
"""
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
1 1.60e+03 -979.95 | -979.95 165.5 200 0 | -2.01 1.56 0.57 1.00
1 5.44e+04 -827.21 | -900.53 214.7 200 0 | -2.28 0.50 -50.04 0.81
1 6.72e+04 -242.52 | -242.52 155.3 200 0 | -0.79 1.10 -59.92 0.72
1 7.76e+04 -238.89 | -238.89 107.1 200 0 | -0.73 1.38 -66.98 0.66
1 8.72e+04 -238.53 | -238.53 98.6 200 0 | -0.83 1.22 -69.88 0.63
1 9.60e+04 -149.37 | -149.37 136.2 200 0 | -0.31 1.81 -69.55 0.64
| UsedTime: 798 |
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
3 1.60e+03-1199.01 |-1199.01 252.2 200 0 | -2.83 2.03 0.59 1.00
3 4.96e+04-1024.38 |-1024.38 172.7 200 0 | -2.32 0.39 -45.36 0.84
3 6.16e+04 -327.84 | -327.84 127.8 200 0 | -1.61 0.63 -58.63 0.76
3 7.20e+04 -198.66 | -198.66 82.0 200 0 | -0.86 0.92 -65.27 0.70
3 8.88e+04 -156.18 | -169.78 86.5 200 0 | -0.45 1.09 -70.35 0.62
3 1.03e+05 -139.14 | -139.14 74.5 200 0 | -0.46 1.29 -75.14 0.65
| UsedTime: 1070 |
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
3 1.60e+03-1391.83 |-1391.83 241.9 200 0 | -3.19 2.26 0.50 1.00
3 5.04e+04-1045.01 |-1104.32 85.4 200 0 | -2.66 0.90 -42.83 0.83
3 6.16e+04 -218.75 | -218.75 151.5 200 0 | -1.63 0.59 -53.07 0.76
3 7.12e+04 -201.54 | -201.54 134.3 200 0 | -0.88 0.83 -59.02 0.70
3 8.80e+04 -182.13 | -193.92 106.1 200 0 | -0.60 1.84 -63.65 0.65
3 9.52e+04 -167.25 | -167.25 84.2 200 0 | -0.31 0.86 -66.83 0.68
3 1.02e+05 -139.24 | -139.24 73.5 200 0 | -0.83 1.29 -61.21 0.72
| UsedTime: 1068 |
"""
env = PendulumEnv(env_name, target_return=-150)
"TotalStep: 1e5, TargetReward: -200, UsedTime: 600s"
args = Arguments(agent, env)
args.reward_scale = 2 ** -1 # RewardRange: -1800 < -200 < -50 < 0
args.gamma = 0.97
args.target_step = args.max_step * 2
args.eval_times = 2 ** 3
elif env_name == 'LunarLanderContinuous-v2':
"""
| Arguments Remove cwd: ./LunarLanderContinuous-v2_SyncREDqSAC_0
################################################################################
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
0 4.22e+03 -254.23 | -254.23 120.0 134 54 | -0.57 0.78 0.10 0.15
0 6.40e+04 -100.91 | -100.91 54.9 976 56 | 0.00 0.24 0.50 0.16
0 8.87e+04 -55.83 | -55.83 87.8 813 280 | 0.00 0.30 2.94 0.18
0 1.10e+05 -43.72 | -43.72 96.1 478 313 | 0.01 0.36 4.41 0.19
0 1.26e+05 112.65 | 112.65 125.1 598 274 | -0.00 0.37 4.63 0.19
0 1.59e+05 147.64 | 101.66 97.5 826 143 | 0.02 0.37 7.11 0.18
0 2.29e+05 166.62 | 150.04 75.7 707 185 | 0.02 0.54 14.27 0.19
0 2.39e+05 187.70 | 187.70 76.0 591 175 | 0.01 0.59 12.77 0.19
0 2.84e+05 195.30 | 166.09 89.9 599 247 | 0.03 0.68 14.24 0.17
0 2.92e+05 195.30 | 166.12 82.5 586 252 | 0.03 0.55 12.08 0.17
0 3.00e+05 217.07 | 217.07 30.2 448 125 | 0.01 0.59 14.13 0.17
| UsedTime: 2852 |
"""
# env = gym.make('LunarLanderContinuous-v2')
# get_gym_env_args(env=env, if_print=True)
env_func = gym.make
env_args = {'env_num': 1,
'env_name': 'LunarLanderContinuous-v2',
'max_step': 1000,
'state_dim': 8,
'action_dim': 2,
'if_discrete': False,
'target_return': 200,
'id': 'LunarLanderContinuous-v2'}
args = Arguments(agent, env_func=env_func, env_args=env_args)
args.target_step = args.max_step
args.gamma = 0.99
args.reward_scale = 2 ** -2
args.eval_times = 2 ** 5
elif env_name == 'BipedalWalker-v3':
"""
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
0 7.28e+03 -111.85 | -111.85 0.2 109 3 | -0.26 0.69 0.04 0.02
0 1.24e+05 -96.80 | -103.65 0.2 50 0 | -0.42 0.19 -0.20 0.02
0 1.75e+05 -84.13 | -84.13 15.8 236 352 | -1.18 0.20 -2.09 0.02
0 1.96e+05 -53.51 | -53.51 10.9 1600 0 | -0.03 0.16 -4.75 0.03
0 2.30e+05 -34.98 | -37.49 3.0 1600 0 | -0.02 0.24 -5.33 0.03
0 2.89e+05 -26.53 | -26.53 12.0 1600 0 | -0.03 0.17 -4.17 0.05
0 3.03e+05 -24.50 | -24.50 18.1 1600 0 | -0.02 0.16 -1.19 0.05
0 3.18e+05 3.44 | 3.44 53.4 1299 466 | 0.00 0.16 -1.71 0.04
0 3.30e+05 29.50 | 29.50 75.0 1263 515 | 0.03 0.16 -1.08 0.04
0 3.40e+05 107.37 | 107.37 86.7 1365 412 | 0.05 0.16 0.24 0.04
0 3.59e+05 206.41 | 137.65 99.3 1540 233 | 0.08 0.14 0.62 0.03
0 3.89e+05 269.83 | 269.83 42.6 1581 74 | 0.16 0.10 -0.25 0.04
0 4.01e+05 281.13 | 281.13 76.3 1503 246 | 0.12 0.09 -0.27 0.04
0 4.12e+05 301.77 | 301.77 1.0 1467 28 | 0.17 0.09 0.28 0.04
| UsedTime: 3294 |
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
0 6.68e+03 -111.72 | -111.72 0.1 103 3 | -0.10 0.69 0.05 0.02
0 1.06e+05 -93.81 | -93.81 0.1 116 7 | -0.16 0.18 -0.09 0.02
0 1.31e+05 -58.96 | -58.96 25.0 1125 704 | -0.11 0.13 -1.04 0.02
0 1.74e+05 -32.93 | -91.37 0.4 113 2 | -0.15 0.11 -1.13 0.02
0 4.20e+05 -32.93 | -33.38 15.8 1600 0 | -0.01 0.04 -0.37 0.02
0 4.39e+05 16.15 | 16.15 26.3 1600 0 | 0.01 0.04 -1.40 0.02
0 4.52e+05 40.90 | 10.57 42.8 1600 0 | -0.02 0.04 -1.03 0.02
0 4.84e+05 63.49 | 63.49 40.0 1600 0 | 0.02 0.04 -0.72 0.02
0 4.90e+05 63.49 | 53.72 56.5 1490 294 | 0.03 0.04 -0.55 0.02
0 5.04e+05 71.35 | 43.56 50.1 1600 0 | 0.03 0.04 -1.07 0.02
0 5.23e+05 93.06 | 93.06 90.7 1535 253 | 0.03 0.03 0.21 0.02
0 5.36e+05 173.26 | 121.82 123.4 1279 557 | 0.10 0.03 -0.62 0.02
0 5.49e+05 253.63 | 253.63 43.9 1582 68 | 0.08 0.03 0.70 0.02
0 5.63e+05 266.36 | 163.69 154.3 1030 476 | 0.17 0.04 0.68 0.03
0 5.85e+05 302.20 | 302.20 0.9 1318 18 | 0.17 0.04 1.30 0.03
| UsedTime: 8100 |
"""
env_func = gym.make
env_args = {'env_num': 1,
'env_name': 'BipedalWalker-v3',
'max_step': 1600,
'state_dim': 24,
'action_dim': 4,
'if_discrete': False,
'target_return': 300,
'id': 'BipedalWalker-v3', }
args = Arguments(agent, env_func=env_func, env_args=env_args)
args.target_step = args.max_step
args.gamma = 0.98
args.eval_times = 2 ** 4
else:
raise ValueError('env_name:', env_name)
args.learner_gpus = gpu_id
args.random_seed += gpu_id
if_check = 0
if if_check:
train_and_evaluate(args)
else:
train_and_evaluate_mp(args)
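# Note on launching (a sketch, not from the original file): GPU_ID and ENV_ID are
# referenced above but defined elsewhere; ElegantRL demo scripts usually read them
# from the command line, for example:
#   GPU_ID = int(sys.argv[1]) if len(sys.argv) > 1 else 0
#   ENV_ID = int(sys.argv[2]) if len(sys.argv) > 2 else 0
#   demo_continuous_action_off_policy_redq()
# The exact parsing above is an assumption; only `sys` is imported at the top of this file.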
def demo_continuous_action_off_policy():
env_name = ['Pendulum-v0',
'Pendulum-v1',
'LunarLanderContinuous-v2',
'BipedalWalker-v3',
''][ENV_ID]
gpu_id = GPU_ID # >=0 means GPU ID, -1 means CPU
if env_name in {'Pendulum-v0', 'Pendulum-v1'}:
env = PendulumEnv(env_name, target_return=-500)
"""
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
2 1.60e+03-1147.49 |-1147.49 179.2 200 0 | -2.61 0.90 0.55 1.00
2 5.84e+04 -121.61 | | |
#! /usr/bin/env python
# Repository: mtakahiro/mirage
"""
Create a mirage-format seed image from
an input fits file. This fits file must contain a
valid WCS, and be distortion-free.
This code will extract a sub-image from the input
fits file. The subimage is centered at the input
RA and Dec (crop_center_ra, crop_center_dec) and
its size is determined by the aperture input.
The extracted sub-image is then blotted (resampled)
onto the requested NIRCam detector's FOV, including
the distortion model.
The resulting image is then saved in such a format
that it can be used as input in the obs_generator.py,
image_simulator.py, or disperser code in the
mirage package. Essentially this script
is designed to take the place of catalog_seed_image.py
in the case where the user has a distortion-free
mosaic image that they wish to break up into
NIRCam images.
Another way to use this code would be for an input
fits image of some extended object that the user
wishes to distort according to the NIRCam distortion
model. This distorted file can then be placed in
an extended object catalog which will be used by
the catalog_seed_image.py step. In this method,
multiple extended objects could be distorted and
added to the same frame.
Inputs:
mosaic_file: The name of the fits file containing the
distortion-free image to be blotted. The code
currently assumes that the mosaic_file is
oriented north-up.
From here, there are two ways to call fits_seed_image.py
1. parameter file (optional): a yaml input file matching the format
required by other steps of the nircam data simulator
2. Manual inputs:
aperture -
aperture name matching an entry in the
subarray definition file (e.g. NRCB5_FULL)
crop_center_ra, crop_center_dec -
The RA and Dec at the center of the sub-image
to crop from mosaic_file. This, in combination
with the array size (determined from aperture)
define the sub-image to use.
blot_center_ra, blot_center_dec -
The RA and Dec at the center of the blotted
sub-image. If these are equal to crop_center_ra
and dec, then the center of the cropped image
will remain at the center of the blotted image.
blot_pav3 -
Position angle of the blotted image.
flux_cal_file -
Ascii file listing the zeropoints for all
NIRCam filters. By setting this value equal
to 'config', the code will use the flux
calibration file that is packaged with the
NIRCam data simulator. This information is
used only to populate the PHOTFLAM, PHOTFNU,
and PHOTPLAM header keywords.
filter -
The NIRCam filter to use. This is used to
select the appropriate row from the flux
calibration file.
pupil -
The NIRCam pupil value to use. This is used
to select the appropriate row from the flux
calibration file in the case where a filter
that is located in the pupil wheel is used.
If the pupil value does not start with 'F'
(e.g. F405N), then it is ignored.
grism_source_image -
True/False. If you intend to send the seed
image output to the disperser software, then
set this value to True. The output image will
then be made larger than the nominal aperture
size by a factor of sqrt(2), in order for the
disperser to keep track of sources just outside
the nominal FOV, whose dispersed spectra may
fall onto the detector.
outfile -
Name of the file to contain the output seed
image.
outdir -
Directory in which to place outfile
Outputs:
self.seedimage, self.seed_segmap, self.seedinfo
contain the seed (countrate) image, the associated
segmentation map, and header information required
by subsequent steps of the nircam data simulator.
The segmentation map is only used in the case where
the seed image is dispersed by a call to the
disperser software.
Example calls:
See the Simulated_data_from_mosaic_image.ipynb notebook
in the examples directory
"""
import argparse
import copy
import logging
import os
import pkg_resources
import sys
from astropy.io import fits, ascii
import datetime
from math import radians
import numpy as np
from photutils import detect_sources
from photutils import TopHatWindow, TukeyWindow, CosineBellWindow, SplitCosineBellWindow, HanningWindow
from photutils.centroids import centroid_2dg
from photutils.psf import resize_psf
from photutils.psf.matching import create_matching_kernel
import pysiaf
from scipy.ndimage import shift
from scipy.signal import fftconvolve
import yaml
from . import crop_mosaic, blot_image
from mirage.logging import logging_functions
from mirage.psf.psf_selection import get_psf_wings
from mirage.psf import tools
from mirage.seed_image.save_seed import save
from mirage.reference_files import crds_tools
from mirage.utils.constants import EXPTYPES, LOG_CONFIG_FILENAME, STANDARD_LOGFILE_NAME
from mirage.utils.flux_cal import fluxcal_info
from mirage.utils.siaf_interface import get_siaf_information
config_files = {'nircam': {'flux_cal': 'NIRCam_zeropoints.list'},
'niriss': {'flux_cal': 'niriss_zeropoints.list'}
}
KNOWN_PSF_TELESCOPES = {"JWST", "HST", "SPITZER"}
classpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
log_config_file = os.path.join(classpath, 'logging', LOG_CONFIG_FILENAME)
logging_functions.create_logger(log_config_file, STANDARD_LOGFILE_NAME)
class ImgSeed:
def __init__(self, paramfile=None, mosaic_file=None, data_extension_number=0, wcs_extension_number=0,
cropped_file='cropped_image.fits', outdir=None, blotted_file=None, psf_file=None,
mosaic_fwhm=None, mosaic_fwhm_units='arcsec', gaussian_psf=True, save_intermediates=False):
"""Create a seed image from a distortionless input mosaic. The user can supply
a PSF associated with the mosaic in the ``psf_file`` parameter. If ``psf_file``
is None and ``gaussian_psf`` is True, then astropy's Gaussian2D model will be
used to construct a PSF for the mosaic. If ``psf_file`` is None and ``gaussian_psf``
is False, and the mosaic comes from a "known" telescope, then a custom function
will be used to construct the PSF. Currently the "known" telescopes include
JWST, HST, and Spitzer IRAC.
Note that this code cannot be used in cases where the mosaic's PSF FWHM is
larger than the JWST PSF, as translating the mosaic to the JWST PSF would then
be sharpening the image.
Parameters
----------
paramfile : str
Name of Mirage yaml input file used to supply instrument info
mosaic_file : str
Name of the fits file containing the mosaic file to use
data_extension_number : int
Extension number within the mosaic FITS file where the data
array is located
wcs_extension_number : int
Extension number within the mosaic FITS file where the
world coordinate system information is located
cropped_file : str
Name of the file to save the cropped image into. If None,
the cropped image is not saved
outdir : str
Name of the output directory to save the output files into.
If None, the output directory listed in ``paramfile`` will
be used.
blotted_file : str
Name of FITS file to save resampled image into
psf_file : str
Name of FITS file containing a PSF corresponding to the mosaic
data. This will be used to create a matching kernel to allow
convolution of the mosaic data to match the JWST PSF. If None,
a PSF will be created using an astropy Gaussian2D model or,
in the case of a mosaic from JWST, a webbpsf-generated PSF
will be read in.
mosaic_fwhm : float
FWHM of the PSF in the mosaic. Can be in pixels or arcseconds,
as specified by the ``mosaic_fwhm_units`` keyword. If None,
an astropy Gaussian2D model will be fit to the PSF to estimate
the FWHM
mosaic_fwhm_units : str
Units of ``mosaic_fwhm``. Can be 'pixels' or 'arcsec'
gaussian_psf : bool
If ``psf_file`` is None and this is True, the mosaic PSF will
be created using an astropy Gaussian2D model.
save_intermediates : bool
If True, intermediate outputs, including the PSFs and the
matching PSF kernel, as well as the cropped image after
convolution with the matching PSF kernel, will be saved.
"""
allowed_mosaic_fwhm_units = ['pixels', 'arcsec']
self.mosaic_file = mosaic_file
self.data_extension_number = data_extension_number
self.wcs_extension_number = wcs_extension_number
self.crop_center_ra = 0.
self.crop_center_dec = 0.
self.channel = None
self.detector = None
self.blot_center_ra = 0.
self.blot_center_dec = 0.
self.blot_pav3 = 0.
self.aperture = ''
self.flux_cal_file = 'config'
self.distortion_file = 'crds'
self.filter = ''
self.pupil = ''
self.grism_source_image = False
self.cropped_file = cropped_file
self.blotted_file = blotted_file
self.outdir = outdir
self.grism_direct_factor = np.sqrt(2.)
self.psf_file = psf_file
self.gaussian_psf = gaussian_psf
self.mosaic_fwhm = mosaic_fwhm
if mosaic_fwhm_units not in allowed_mosaic_fwhm_units:
raise ValueError(("ERROR: mosaic_fwhm_units must be one of: {}"
.format(allowed_mosaic_fwhm_units)))
self.mosaic_fwhm_units = mosaic_fwhm_units
self.save_intermediates = save_intermediates
# Locate the module files, so that we know where to look
# for config subdirectory
self.modpath = pkg_resources.resource_filename('mirage', '')
# self.coords contains the factor by which the
# nominal output array size needs to be increased
# (used for WFSS mode), as well as the coordinate
# offset between the nominal output array coordinates,
# and those of the expanded array. These are needed
# mostly for WFSS observations, where the nominal output
# array will not sit centered in the expanded output image.
self.coords = {'x': 1., 'xoffset': 0., 'y': 1., 'yoffset': 0.}
# If a paramfile is provided, read in and
# set params
self.paramfile = paramfile
if | |
% 4 == 0:
elements.append(frame[i_][0])
if i % 4 == 1:
coordinates.append(frame[i_])
if i % 4 == 2:
velocities.append(frame[i_])
if i % 4 == 3:
forces.append(frame[i_])
frame_data['atom_ids'] = np.array(elements)
frame_data['coordinates'] = np.array(coordinates, dtype=float)
if velocities:
frame_data['velocities'] = np.array(velocities, dtype=float)
if forces:
frame_data['forces'] = np.array(forces, dtype=float)
return frame_data
def analysis(
self, frames='all', ncpus=1, _ncpus=1, override=False, **kwargs
):
"""
Perform structural analysis on a frame or a set of frames.
Depending on the passed parameters a frame, a list of particular
frames, a range of frames (from, to), or all frames can be analysed
with this function.
The analysis is performed on each frame and each discrete molecule in
that frame separately. The steps are as follows:
1. A frame is extracted and returned as a :class:`MolecularSystem`.
2. If `swap_atoms` is set the atom ids are swapped.
3. If `forcefield` is set the atom ids are deciphered.
4. If `rebuild` is set the molecules in the system are rebuilt.
5. Each discrete molecule is extracted as :class:`Molecule`
6. Each molecule is analysed with :func:`Molecule.full_analysis()`
7. Analysis output populates the :attr:`analysis_output` dictionary.
As the analysis of trajectories often has to be tailored to the system at hand,
many options are conditional.
A side effect of this function is that the analysed frames are also
stored in :attr:`frames`, mimicking the behaviour of
:func:`get_frames()`.
Parameters
----------
frames : :class:`int` or :class:`list` or :class:`tuple` or :class:`str`
Specified frame (:class:`int`), or frames (:class:`list`), or
range (:class:`tuple`), or `all`/`everything` (:class:`str`).
(default='all')
override : :class:`bool`
If True, an output already stored in :attr:`analysis_output` can
be overridden. (default=False)
swap_atoms : :class:`dict`, optional
If this kwarg is passed with an appropriate dictionary a
:func:`pywindow.molecular.MolecularSystem.swap_atom_keys()` will
be applied to the extracted frame.
forcefield : :class:`str`, optional
If this kwarg is passed with appropriate forcefield keyword a
:func:`pywindow.molecular.MolecularSystem.decipher_atom_keys()`
will be applied to the extracted frame.
modular : :class:`bool`, optional
If this kwarg is passed a
:func:`pywindow.molecular.MolecularSystem.make_modular()`
will be applied to the extracted frame. (default=False)
rebuild : :class:`bool`, optional
If this kwarg is passed a `rebuild=True` is passed to
:func:`pywindow.molecular.MolecularSystem.make_modular()` that
will be applied to the extracted frame. (default=False)
ncpus : :class:`int`, optional
If ncpus > 1, then the analysis is performed in parallel for the
specified number of parallel jobs. Otherwise, it runs in serial.
(default=1)
Returns
-------
None : :class:`NoneType`
The function returns `None`, the analysis output is
returned to :attr:`analysis_output` dictionary.
"""
frames_for_analysis = []
# First populate the frames_for_analysis list.
if isinstance(frames, int):
frames_for_analysis.append(frames)
if isinstance(frames, list):
for frame in frames:
if isinstance(frame, int):
frames_for_analysis.append(frame)
else:
raise _FunctionError(
"The list should be populated with integers only."
)
if isinstance(frames, tuple):
if isinstance(frames[0], int) and isinstance(frames[1], int):
for frame in range(frames[0], frames[1]):
frames_for_analysis.append(frame)
else:
raise _FunctionError(
"The tuple should contain only two integers "
"for the begining and the end of the frames range."
)
if isinstance(frames, str):
if frames in ['all', 'everything']:
for frame in range(0, self.no_of_frames):
frames_for_analysis.append(frame)
else:
raise _FunctionError(
"Didn't recognise the keyword. (see manual)"
)
# The override keyword by default is False. So we check if any of the
# frames were already analysed and if so we delete them from the list.
# However, if the override is set to True, then we just proceed.
if override is False:
frames_for_analysis_new = []
for frame in frames_for_analysis:
if frame not in self.analysis_output.keys():
frames_for_analysis_new.append(frame)
frames_for_analysis = frames_for_analysis_new
if ncpus == 1:
for frame in frames_for_analysis:
analysed_frame = self._analysis_serial(frame, _ncpus, **kwargs)
self.analysis_output[frame] = analysed_frame
if ncpus > 1:
self._analysis_parallel(frames_for_analysis, ncpus, **kwargs)
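# Illustrative call pattern for analysis() (a sketch; the kwarg values below are
# hypothetical and only the kwargs documented above are used). This analyses the
# first ten frames in parallel, rebuilding molecules and keeping only those whose
# atom count falls between 50 and 200:
#
#   traj.analysis(frames=(0, 10), ncpus=4, modular=True, rebuild=True,
#                 molsize=('between', 50, 200))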
def _analysis_serial(self, frame, _ncpus, **kwargs):
settings = {
'rebuild': False,
'modular': False,
}
settings.update(kwargs)
molecular_system = self._get_frame(
self.trajectory_map[frame], frame, extract_data=True, **kwargs
)
if settings['modular'] is True:
molecular_system.make_modular(rebuild=settings['rebuild'])
molecules = molecular_system.molecules
else:
molecules = {'0': molecular_system.system_to_molecule()}
results = {}
for molecule in molecules:
mol = molecules[molecule]
if 'molsize' in settings:
molsize = settings['molsize']
if isinstance(molsize, int):
if mol.no_of_atoms == molsize:
results[molecule] = mol.full_analysis(
_ncpus=_ncpus, **kwargs)
if isinstance(molsize, tuple) and isinstance(molsize[0], str):
if molsize[0] in ['bigger', 'greater', 'larger', 'more']:
if mol.no_of_atoms > molsize[1]:
results[molecule] = mol.full_analysis(
_ncpus=_ncpus, **kwargs)
if molsize[0] in ['smaller', 'less']:
if mol.no_of_atoms < molsize[1]:  # 'smaller'/'less' means fewer atoms than the threshold
results[molecule] = mol.full_analysis(
_ncpus=_ncpus, **kwargs)
if molsize[0] in ['not', 'isnot', 'notequal', 'different']:
if mol.no_of_atoms != molsize[1]:
results[molecule] = mol.full_analysis(
_ncpus=_ncpus, **kwargs)
if molsize[0] in ['is', 'equal', 'exactly']:
if mol.no_of_atoms == molsize[1]:
results[molecule] = mol.full_analysis(
_ncpus=_ncpus, **kwargs)
if molsize[0] in ['between', 'inbetween']:
if molsize[1] < mol.no_of_atoms < molsize[2]:
results[molecule] = mol.full_analysis(
_ncpus=_ncpus, **kwargs)
else:
results[molecule] = mol.full_analysis(_ncpus=_ncpus, **kwargs)
return results
def _analysis_parallel_execute(self, frame, **kwargs):
settings = {
'rebuild': False,
'modular': False,
}
settings.update(kwargs)
molecular_system = self._get_frame(
self.trajectory_map[frame], frame, extract_data=True, **kwargs
)
if settings['modular'] is True:
molecular_system.make_modular(rebuild=settings['rebuild'])
molecules = molecular_system.molecules
else:
molecules = {'0': molecular_system.system_to_molecule()}
results = {}
for molecule in molecules:
mol = molecules[molecule]
if 'molsize' in settings:
molsize = settings['molsize']
if isinstance(molsize, int):
if mol.no_of_atoms == molsize:
results[molecule] = mol.full_analysis(**kwargs)
if isinstance(molsize, tuple) and isinstance(molsize[0], str):
if molsize[0] in ['bigger', 'greater', 'larger', 'more']:
if mol.no_of_atoms > molsize[1]:
results[molecule] = mol.full_analysis(**kwargs)
if molsize[0] in ['smaller', 'less']:
if mol.no_of_atoms < molsize[1]:  # 'smaller'/'less' means fewer atoms than the threshold
results[molecule] = mol.full_analysis(**kwargs)
if molsize[0] in ['not', 'isnot', 'notequal', 'different']:
if mol.no_of_atoms != molsize[1]:
results[molecule] = mol.full_analysis(**kwargs)
if molsize[0] in ['is', 'equal', 'exactly']:
if mol.no_of_atoms == molsize[1]:
results[molecule] = mol.full_analysis(**kwargs)
if molsize[0] in ['between', 'inbetween']:
if molsize[1] < mol.no_of_atoms < molsize[2]:
results[molecule] = mol.full_analysis(**kwargs)
else:
results[molecule] = mol.full_analysis(**kwargs)
return frame, results
def _analysis_parallel(self, frames, ncpus, **kwargs):
try:
pool = Pool(processes=ncpus)
parallel = [
pool.apply_async(
self._analysis_parallel_execute,
args=(frame, ),
kwds=kwargs) for frame in frames
]
results = [p.get() for p in parallel if p.get()]
pool.terminate()
for i in results:
self.analysis_output[i[0]] = i[1]
except TypeError:
pool.terminate()
raise _ParallelAnalysisError("Parallel analysis failed.")
def _check_HISTORY(self):
"""
"""
self.check_log = ""
line = 0
binary_step = 0
timestep = 0
timestep_flag = 'timestep'
progress = 0
warning_1 = "No comment line is present as the file header.\n"
warning_2 = " ".join(
(
"Second header line is missing from the file",
"that contains information on the system's periodicity",
"and the type of the trajectory file.\n"
)
)
warning_3 = " ".join(
(
"Comment line encountered in the middle of",
"the trajectory file.\n"
)
)
error_1 = "The trajectory is discontinous.\n"
error_2 = "The file contains an empty line.\n"
with open(self.filepath, 'r') as trajectory_file:
# We open the HISTORY trajectory file
with closing(
mmap(
trajectory_file.fileno(), 0,
access=ACCESS_READ)) as file_binary_map:
# We use this binary mapping feature that instead of loading
# the full file into memory beforehand it only
# maps the content. Especially useful with enormous files
while binary_step < len(file_binary_map):
line += 1
binary_line = file_binary_map.readline()
binary_step = binary_step + len(binary_line)
progress_old = progress
progress = round(binary_step * 100 / len(file_binary_map),
0)
string_line = binary_line.decode("utf-8").strip(
'\n').split()
# Warning 1
if line == 1:
if string_line[0] != 'DLFIELD':
self.check_log = " ".join(
(self.check_log, "Line {0}:".format(line),
warning_1)
)
# Warning 2
if line == 2:
if len(string_line) != 3:
self.check_log = " ".join(
(self.check_log, "Line {0}:".format(line),
warning_2)
)
# Error 1
if string_line:
if string_line[0] == timestep_flag:
old_timestep = timestep
timestep = int(string_line[1])
if old_timestep > timestep:
error = " ".join(
"Line {0}:".format(line), error_1
)
raise _TrajectoryError(error)
# Error 2
if len(string_line) == 0:
error = " ".join(
"Line {0}:".format(line), error_2
)
raise _TrajectoryError(error)
def save_analysis(self, filepath=None, **kwargs):
"""
Dump the content of :attr:`analysis_output` as JSON dictionary.
Parameters
----------
filepath : :class:`str`
The filepath for the JSON file.
Returns
-------
None : :class:`NoneType`
"""
# We pass a copy of the analysis attribute dictionary.
dict_obj = deepcopy(self.analysis_output)
# If no filepath is provided we create one.
if filepath is None:
filepath = "_".join(
(str(self.system_id), "pywindow_analysis")
)
filepath = '/'.join((os.getcwd(), filepath))
# Dump the dictionary to json file.
Output().dump2json(dict_obj, filepath, default=to_list, **kwargs)
return
def save_frames(self, frames, filepath=None, filetype='pdb', **kwargs):
settings = {
"pdb": Output()._save_pdb,
"xyz": Output()._save_xyz,
"decipher": True,
"forcefield": None,
}
settings.update(kwargs)
if filetype.lower() not in settings.keys():
raise _FormatError("The '{0}' file format is not supported".format(
filetype))
frames_to_get = | |
import requests
import json
import sys
import collections
import jinja2
import ipaddress
import time
import re
import urllib3
urllib3.disable_warnings()
# Global options for debugging
PRINT_PAYLOAD = True
# flag variable to avoid pushing anything to APIC
PUSH_TO_APIC = False
PRINT_RESPONSE_TEXT_ALWAYS = False
PRINT_RESPONSE_TEXT_ON_FAIL = True
# Global path to main json directory
json_path = 'C:/path_to_json_template_dir/jsondata/'
# Global list of allowed statuses
valid_status = ['created', 'created,modified', 'deleted']
# Exception Classes
class InsufficientArgs(Exception):
pass
class InvalidArg(Exception):
pass
class LoginFailed(Exception):
pass
# Function to validate input for each method
def process_kwargs(required_args, optional_args, **kwargs):
# Validate all required kwargs passed
if all(item in kwargs for item in required_args.keys()) is not True:
raise InsufficientArgs('Insufficient required arguments.')
# Load all required args values from kwargs
for item in kwargs:
if item in required_args.keys():
required_args[item] = kwargs[item]
for item in kwargs:
if item in optional_args.keys():
optional_args[item] = kwargs[item]
# Combine option and required dicts for Jinja template render
# The following syntax is only supported from Python 3.6 onwards, so we
# replace it with a manual copy/update for backwards compatibility.
# templateVars = { **required_args, **optional_args }
templateVars = required_args.copy()
templateVars.update(optional_args)
return(templateVars)
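# Minimal sketch of how process_kwargs merges its inputs (the values below are
# hypothetical). Required keys must all be present in kwargs, optional keys fall
# back to their defaults, and unrecognised kwargs are silently ignored.
def _process_kwargs_sketch():
    merged = process_kwargs({'name': ''}, {'descr': '', 'pod': '1'},
                            name='leaf101', descr='demo node')
    # merged == {'name': 'leaf101', 'descr': 'demo node', 'pod': '1'}
    return merged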
# Function to execute HTTP Post
def post(apic, payload, cookies, uri, section=''):
if PRINT_PAYLOAD or not PUSH_TO_APIC:
print('Adding to the object: "'+uri+'" the following json string:')
print(payload)
s = requests.Session()
r = ''
if PUSH_TO_APIC:
while r == '':
try:
r = s.post('https://{}/api/node/{}.json'.format(apic, uri),
data=payload, cookies=cookies, verify=False)
status = r.status_code
except requests.exceptions.ConnectionError as e:
print("Connection error, pausing before retrying. Error: {}"
.format(e))
time.sleep(5)
except Exception as e:
print("Method {} failed. Exception: {}".format(section[:-5], e))
status = 666
return(status)
if PRINT_RESPONSE_TEXT_ALWAYS:
print(r.text)
if status != 200 and PRINT_RESPONSE_TEXT_ON_FAIL:
print(r.text)
else:
return 200
return status
# Class must be instantiated with APIC IP address, username, and password
# the login method returns the APIC cookies.
class FabLogin(object):
def __init__(self, apic, user, pword):
self.apic = apic
self.user = user
self.pword = pword
def login(self):
# Load login json payload
payload = '''
{{
"aaaUser": {{
"attributes": {{
"name": "{user}",
"pwd": <PASSWORD>}"
}}
}}
}}
'''.format(user=self.user, pword=self.pword)
payload = json.loads(payload,
object_pairs_hook=collections.OrderedDict)
s = requests.Session()
# Try the request, if exception, exit program w/ error
try:
# Verify is disabled as there are issues if it is enabled
r = s.post('https://{}/api/mo/aaaLogin.json'.format(self.apic),
data=json.dumps(payload), verify=False)
# Capture HTTP status code from the request
status = r.status_code
# Capture the APIC cookie for all other future calls
cookies = r.cookies
# Log login status/time(?) somewhere
if status == 400:
print("Error 400 - Bad Request - ABORT!")
print("Probably have a bad URL")
sys.exit()
if status == 401:
print("Error 401 - Unauthorized - ABORT!")
print("Probably have incorrect credentials")
sys.exit()
if status == 403:
print("Error 403 - Forbidden - ABORT!")
print("Server refuses to handle your request")
sys.exit()
if status == 404:
print("Error 404 - Not Found - ABORT!")
print("Seems like you're trying to POST to a page that doesn't"
" exist.")
sys.exit()
except Exception as e:
print("Something went wrong logging into the APIC - ABORT!")
# Log exit reason somewhere
raise LoginFailed(e)
self.cookies = cookies
return cookies
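# End-to-end sketch (addresses and credentials are placeholders, not real values).
# With PUSH_TO_APIC left False, post() only prints the rendered payloads, so this
# pattern can be dry-run once a login succeeds.
def _fabric_bringup_sketch():
    cookies = FabLogin('192.0.2.10', 'admin', 'password').login()
    pod_pol = FabPodPol('192.0.2.10', cookies)
    pod_pol.ntp(address='192.0.2.123', status='created')
    pod_pol.fabric_bgp(asn='65001', status='created,modified')
    return cookies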
# Class must be instantiated with APIC IP address and cookies
class FabPodPol(object):
def __init__(self, apic, cookies):
self.apic = apic
self.cookies = cookies
self.templateLoader = jinja2.FileSystemLoader(
searchpath=(json_path + 'FabPodPol/'))
self.templateEnv = jinja2.Environment(loader=self.templateLoader)
# Method must be called with the following kwargs.
# name: Name of the node being deployed
# id: ID of the node being deployed as an integer (e.g. 101)
# serial: Serial number of device being deployed
# descr: (Optional) Description of the node
# fabric: (Optional) Default is 1 - will be relevant for xconnect
# pod: (Optional) Default is 1 - will be relevant for multipod
def comission_hw(self, **kwargs):
# Dicts for required and optional args
required_args = {'name': '',
'id': '',
'serial': ''}
optional_args = {'descr': '',
'fabric': '1',
'pod': '1'}
# Validate inputs, return dict of template vars
templateVars = process_kwargs(required_args, optional_args, **kwargs)
# Validate inputs
if not int(templateVars['id']):
raise InvalidArg('ID must be an integer')
else:
templateVars['id'] = int(templateVars['id'])
if not int(templateVars['fabric']):
raise InvalidArg('Fabric ID must be an integer')
else:
templateVars['fabric'] = int(templateVars['fabric'])
if not int(templateVars['pod']):
raise InvalidArg('Pod ID must be an integer')
else:
templateVars['pod'] = int(templateVars['pod'])
# Locate template for method
template_file = "comission_hw.json"
template = self.templateEnv.get_template(template_file)
# Render template w/ values from dicts
payload = template.render(templateVars)
# Handle request
uri = 'mo/uni'
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# address: Name/IP of the NTP server
# status: created | created,modified | deleted
def ntp(self, **kwargs):
required_args = {'address': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not ipaddress.ip_address(templateVars['address']):
raise InvalidArg('Address must be a valid IPv4 address')
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "ntp.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni'
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: Name of the node being deployed
# address: IP of DNS Server
# status: (Of the DNS Server) created | created,modified | deleted
# domain: (Optional) DNS Domain
# domain_status: (Optional) created | created,modified | deleted
# preferred: (Optional) yes | no
# domain_default: (Optional) yes | no
def dns(self, **kwargs):
required_args = {'address': '',
'status': ''}
optional_args = {'domain': '',
'domain_status': 'deleted',
'preferred': 'no',
'domain_default': 'no'}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not ipaddress.ip_address(templateVars['address']):
raise InvalidArg('Address must be a valid IPv4 address')
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "dns.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/fabric/dnsp-default'
status = post(self.apic, payload, self.cookies, uri, template_file)
template_file = "dns_profile.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/fabric/dnsp-default/rsProfileToEpg'
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# asn: Fabric BGP ASN as an integer
# status: created | created,modified | deleted
def fabric_bgp(self, **kwargs):
required_args = {'asn': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not (int(templateVars['asn']) in range(1, 65536)):
raise InvalidArg('Invalid BGP ASN')
else:
templateVars['asn'] = int(templateVars['asn'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "fabric_bgp.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/fabric/bgpInstP-default/as'
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# rr: ID of node to be route reflector
# status: created | created,modified | deleted
def fabric_rr(self, **kwargs):
required_args = {'rr': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if not int(templateVars['rr']):
raise InvalidArg('Route Reflector ID must be an integer')
else:
templateVars['rr'] = int(templateVars['rr'])
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "fabric_rr.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/fabric/bgpInstP-default/rr/node-{}'.format(
templateVars['rr'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Method must be called with the following kwargs.
# name: Name of pod-policy to be created
# status: created | created,modified | deleted
def pod_pol(self, **kwargs):
required_args = {'name': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "pod_pol.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/fabric/funcprof'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
template_file = "pod_pol_assign.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/fabric/podprof-default/pods-default-typ-ALL/rspodPGrp'
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
# Class must be instantiated with APIC IP address and cookies
class FabAccPol(object):
def __init__(self, apic, cookies):
self.apic = apic
self.cookies = cookies
self.templateLoader = jinja2.FileSystemLoader(
searchpath=(json_path + 'FabAccPol/'))
self.templateEnv = jinja2.Environment(loader=self.templateLoader)
# Method must be called with the following kwargs.
# name: The name of the CDP policy
# state: enabled | disabled
# status: created | created,modified | deleted
def cdp(self, **kwargs):
required_args = {'name': '',
'state': '',
'status': ''}
optional_args = {}
templateVars = process_kwargs(required_args, optional_args, **kwargs)
if templateVars['status'] not in valid_status:
raise InvalidArg('Status invalid')
template_file = "cdp.json"
template = self.templateEnv.get_template(template_file)
payload = template.render(templateVars)
uri = 'mo/uni/infra/cdpIfP-{}'.format(templateVars['name'])
status = post(self.apic, payload, self.cookies, uri, template_file)
return status
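# Example call pattern for the method above (a sketch; the policy name is
# hypothetical):
#
#   acc_pol = FabAccPol(apic, cookies)
#   acc_pol.cdp(name='cdp-enabled', state='enabled', status='created')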
# Method must be called with the | |
device configurations of listed OSPFv3 processes with provided configurations
cisco.nxos.nxos_ospfv3:
config:
processes:
- process_id: 102
router_id: 198.51.100.1
address_family:
afi: ipv6
safi: unicast
areas:
- area_id: 0.0.0.100
filter_list:
- route_map: rmap_8
direction: in
ranges:
- not_advertise: true
prefix: 2001:db2::/32
redistribute:
- protocol: eigrp
id: 130
route_map: rmap_1
- protocol: direct
route_map: ospf102-direct-connect
vrfs:
- vrf: zone1
router_id: 198.51.100.129
areas:
- area_id: 0.0.0.102
nssa:
default_information_originate: True
no_summary: True
state: replaced
# Task output
# -------------
# before:
# processes:
# - process_id: "100"
# router_id: 203.0.113.20
# - address_family:
# afi: ipv4
# safi: unicast
# areas:
# - area_id: 0.0.0.100
# filter_list:
# - direction: out
# route_map: rmap_2
# - direction: in
# route_map: rmap_1
# ranges:
# - not_advertise: true
# prefix: 2001:db2::/32
# - cost: 120
# prefix: 2001:db3::/32
# redistribute:
# - protocol: direct
# route_map: ospf102-direct-connect
# - id: "120"
# protocol: eigrp
# route_map: rmap_1
# process_id: "102"
# router_id: 198.51.100.1
# vrfs:
# - areas:
# - area_id: 0.0.0.102
# nssa:
# default_information_originate: true
# no_summary: true
# - area_id: 0.0.0.103
# nssa:
# no_summary: true
# translate:
# type7:
# always: true
# router_id: 198.51.100.129
# vrf: zone1
# - auto_cost:
# reference_bandwidth: 45
# unit: Gbps
# vrf: zone2
#
# commands:
# - router ospf 102
# - address-family ipv6 unicast
# - redistribute eigrp 130 route-map rmap_1
# - no redistribute eigrp 120 route-map rmap_1
# - area 0.0.0.100 filter-list route-map rmap_8 in
# - no area 0.0.0.100 filter-list route-map rmap_2 out
# - no area 0.0.0.100 range 2001:db3::/32
# - vrf zone1
# - no area 0.0.0.103 nssa
# - no area 0.0.0.103 nssa translate type7 always
# - no vrf zone2
#
# after:
# processes:
# - process_id: "100"
# router_id: 203.0.113.20
# - address_family:
# afi: ipv6
# safi: unicast
# areas:
# - area_id: 0.0.0.100
# filter_list:
# - direction: in
# route_map: rmap_8
# ranges:
# - not_advertise: true
# prefix: 2001:db2::/32
# redistribute:
# - protocol: direct
# route_map: ospf102-direct-connect
# - id: "130"
# protocol: eigrp
# route_map: rmap_1
# process_id: "102"
# router_id: 198.51.100.1
# vrfs:
# - areas:
# - area_id: 0.0.0.102
# nssa:
# default_information_originate: true
# no_summary: true
# router_id: 198.51.100.129
# vrf: zone1
# After state:
# ------------
# nxos-9k-rdo# sh running-config | section "^router ospfv3"
# router ospfv3 100
# router-id 203.0.113.20
# router ospfv3 102
# router-id 198.51.100.1
# address-family ipv6 unicast
# redistribute direct route-map ospf102-direct-connect
# redistribute eigrp 130 route-map rmap_1
# area 0.0.0.100 filter-list route-map rmap_8 in
# area 0.0.0.100 range 198.51.100.64/27 not-advertise
# vrf zone1
# router-id 198.51.100.129
# area 0.0.0.102 nssa no-summary default-information-originate
# Using overridden
# Before state:
# ------------
# nxos-9k-rdo# sh running-config | section "^router ospfv3"
# router ospfv3 100
# router-id 203.0.113.20
# router ospfv3 102
# router-id 198.51.100.1
# address-family ipv6 unicast
# redistribute direct route-map ospf102-direct-connect
# redistribute eigrp 120 route-map rmap_1
# area 0.0.0.100 filter-list route-map rmap_2 out
# area 0.0.0.100 filter-list route-map rmap_1 in
# area 0.0.0.100 range 2001:db2::/32 not-advertise
# area 0.0.0.100 range 2001:db3::/32 cost 120
# vrf zone1
# router-id 198.51.100.129
# area 0.0.0.102 nssa no-summary default-information-originate
# area 0.0.0.103 nssa no-summary
# area 0.0.0.103 nssa translate type7 always
# vrf zone2
# auto-cost reference-bandwidth 45 Gbps
- name: Override all OSPFv3 configuration with provided configuration
cisco.nxos.nxos_ospfv3:
config:
processes:
- process_id: 104
router_id: 203.0.113.20
- process_id: 102
router_id: 198.51.100.1
shutdown: true
state: overridden
# Task output
# -------------
# before:
# processes:
# - process_id: "100"
# router_id: 203.0.113.20
# - address_family:
# afi: ipv4
# safi: unicast
# areas:
# - area_id: 0.0.0.100
# filter_list:
# - direction: out
# route_map: rmap_2
# - direction: in
# route_map: rmap_1
# ranges:
# - not_advertise: true
# prefix: 2001:db2::/32
# - cost: 120
# prefix: 2001:db3::/32
# redistribute:
# - protocol: direct
# route_map: ospf102-direct-connect
# - id: "120"
# protocol: eigrp
# route_map: rmap_1
# process_id: "102"
# router_id: 198.51.100.1
# vrfs:
# - areas:
# - area_id: 0.0.0.102
# nssa:
# default_information_originate: true
# no_summary: true
# - area_id: 0.0.0.103
# nssa:
# no_summary: true
# translate:
# type7:
# always: true
# router_id: 198.51.100.129
# vrf: zone1
# - auto_cost:
# reference_bandwidth: 45
# unit: Gbps
# vrf: zone2
#
# commands:
# - no router ospfv3 100
# - router ospfv3 104
# - router-id 203.0.113.20
# - router ospfv3 102
# - shutdown
# - address-family ipv6 unicast
# - no redistribute direct route-map ospf102-direct-connect
# - no redistribute eigrp 120 route-map rmap_1
# - no area 0.0.0.100 filter-list route-map rmap_2 out
# - no area 0.0.0.100 filter-list route-map rmap_1 in
# - no area 0.0.0.100 range 2001:db2::/32
# - no area 0.0.0.100 range 2001:db3::/32
# - no vrf zone1
# - no vrf zone2
#
# after:
# processes:
# - process_id: "102"
# router_id: 198.51.100.1
# shutdown: true
# address_family:
# afi: ipv6
# safi: unicast
# - process_id: "104"
# router_id: 203.0.113.20
# After state:
# ------------
# nxos-9k-rdo# sh running-config | section "^router ospfv3"
# router ospfv3 102
# router-id 198.51.100.1
# address-family ipv6 unicast
# shutdown
# router ospfv3 104
# router-id 203.0.113.20
# Using deleted to delete a single OSPF process
# Before state:
# ------------
# nxos-9k-rdo# sh running-config | section "^router ospf .*"
# router ospfv3 100
# router-id 203.0.113.20
# router ospfv3 102
# router-id 198.51.100.1
# address-family ipv6 unicast
# redistribute direct route-map ospf102-direct-connect
# redistribute eigrp 120 route-map rmap_1
# area 0.0.0.100 filter-list route-map rmap_2 out
# area 0.0.0.100 filter-list route-map rmap_1 in
# area 0.0.0.100 range 2001:db2::/32 not-advertise
# area 0.0.0.100 range 2001:db3::/32 cost 120
# vrf zone1
# router-id 198.51.100.129
# area 0.0.0.102 nssa no-summary default-information-originate
# area 0.0.0.103 nssa no-summary
# area 0.0.0.103 nssa translate type7 always
# vrf zone2
# auto-cost reference-bandwidth 45 Gbps
- name: Delete a single OSPFv3 process
cisco.nxos.nxos_ospfv3:
config:
processes:
- process_id: 102
state: deleted
# Task output
# -------------
# before:
# processes:
# - process_id: "100"
# router_id: 203.0.113.20
# - address_family:
# afi: ipv4
# safi: unicast
# areas:
# - area_id: 0.0.0.100
# filter_list:
# - direction: out
# route_map: rmap_2
# - direction: in
# route_map: rmap_1
# ranges:
# - not_advertise: true
# prefix: 2001:db2::/32
# - cost: 120
# prefix: 2001:db3::/32
# redistribute:
# - protocol: direct
# route_map: ospf102-direct-connect
# - id: "120"
# protocol: eigrp
# route_map: rmap_1
# process_id: "102"
# router_id: 198.51.100.1
# vrfs:
# - areas:
# - area_id: 0.0.0.102
# nssa:
# default_information_originate: true
# no_summary: true
# - area_id: 0.0.0.103
# nssa:
# no_summary: true
# translate:
# type7:
# always: true
# router_id: 198.51.100.129
# vrf: zone1
# - auto_cost:
# reference_bandwidth: 45
# unit: Gbps
# vrf: zone2
#
# commands:
# - no router ospfv3 102
#
# after:
# processes:
# - process_id: "100"
# router_id: 203.0.113.20
# After state:
# ------------
# nxos-9k-rdo# sh running-config | section "^router ospfv3"
# router ospfv3 100
# router-id 203.0.113.20
# Using deleted to remove all OSPFv3 processes from the device
# Before state:
# ------------
# nxos-9k-rdo# sh running-config | section "^router ospfv3"
# router ospfv3 100
# router-id 203.0.113.20
# router ospfv3 102
# router-id 198.51.100.1
# address-family ipv6 unicast
# redistribute direct route-map ospf102-direct-connect
# redistribute eigrp 120 route-map rmap_1
# area 0.0.0.100 filter-list route-map rmap_2 out
# area 0.0.0.100 filter-list route-map rmap_1 in
# area 0.0.0.100 range 2001:db2::/32 not-advertise
# area 0.0.0.100 range 2001:db3::/32 cost 120
# vrf zone1
# router-id 198.51.100.129
# area 0.0.0.102 nssa no-summary default-information-originate
# area 0.0.0.103 nssa no-summary
# area 0.0.0.103 nssa translate type7 always
# vrf zone2
# auto-cost reference-bandwidth 45 Gbps
- name: Delete all OSPFv3 processes from the device
cisco.nxos.nxos_ospfv3:
state: deleted
# Task output
# -------------
# before:
# processes:
# - process_id: "100"
# router_id: 203.0.113.20
# - address_family:
# afi: ipv4
# safi: unicast
# areas:
# - area_id: 0.0.0.100
# filter_list:
# - direction: out
# route_map: rmap_2
# - direction: in
# route_map: rmap_1
# ranges:
# - not_advertise: true
# prefix: 2001:db2::/32
# - cost: 120
# prefix: 2001:db3::/32
# redistribute:
# - protocol: direct
# route_map: ospf102-direct-connect
# - id: "120"
# protocol: eigrp
# route_map: rmap_1
# process_id: "102"
# router_id: 198.51.100.1
# vrfs:
# - areas:
# - area_id: 0.0.0.102
# nssa:
# default_information_originate: true
# no_summary: true
# - area_id: 0.0.0.103
# nssa:
# no_summary: true
# translate:
# type7:
# always: true
# router_id: 198.51.100.129
# vrf: zone1
# - auto_cost:
# reference_bandwidth: 45
# unit: Gbps
# vrf: zone2
#
# commands:
# - no router ospfv3 100
# - no router ospfv3 102
#
# after: {}
# After state:
# ------------
# nxos-9k-rdo# sh running-config | section "^router ospfv3"
# nxos-9k-rdo#
# Using rendered
- name: Render platform specific configuration lines with state rendered (without connecting to the device)
cisco.nxos.nxos_ospfv3:
config:
processes:
- process_id: 100
router_id: 203.0.113.20
- process_id: 102
router_id: 198.51.100.1
address_family:
afi: ipv6
safi: unicast
areas:
- area_id: 0.0.0.100
filter_list:
- route_map: rmap_1
direction: in
- route_map: rmap_2
direction: out
ranges:
- prefix: 2001:db2::/32
not_advertise: true
- prefix: 2001:db3::/32
cost: 120
redistribute:
- protocol: eigrp
id: 120
route_map: rmap_1
- protocol: direct
route_map: ospf102-direct-connect
vrfs:
- vrf: zone1
router_id: 198.51.100.129
areas:
- area_id: 0.0.0.102
nssa:
default_information_originate: true
no_summary: true
- area_id: 0.0.0.103
nssa:
no_summary: true
translate:
type7:
always: true
- vrf: zone2
auto_cost:
reference_bandwidth: 45
unit: Gbps
state: rendered
# Task Output (redacted)
# -----------------------
# rendered:
# - router ospfv3 100
# - router-id 203.0.113.20
# - router ospfv3 102
# - router-id 198.51.100.1
# - address-family ipv6 unicast
# - redistribute eigrp 120 route-map rmap_1
# - redistribute direct route-map ospf102-direct-connect
# - area 0.0.0.100 filter-list route-map rmap_1 in
# - area 0.0.0.100 filter-list route-map rmap_2 out
# - area 0.0.0.100 range 2001:db2::/32 not-advertise
# - area 0.0.0.100 range 2001:db3::/32 cost 120
# - vrf zone1
# - router-id 198.51.100.129
# - area 0.0.0.102 nssa no-summary default-information-originate
# - area 0.0.0.103 nssa no-summary
# - area 0.0.0.103 nssa translate type7 | |
else:
l_discrete[current_bin].append(l)
if len(l_discrete)==bins+1:
l_discrete[bins-1].extend(l_discrete[bins])
del l_discrete[bins]
del bin_scores[-1]
bin_scores[-1]= bin_scores[-1]-cut/2.0 + \
(predictions[-1]-(bin_scores[-1]-cut/2.0))/2.0
except EmptyBinException:
if bins>1:
bins-=1
else:
raise Exception("Could not discretize data!")
else:
return bin_scores, l_discrete
def _empirical_probability(self, l_discrete):
""" Return dictionary of empirical class probabilities for discretized label list."""
plot_emp_prob = {}
len_list = {}
for label in range(len(self.class_labels)):
plot_emp_prob[label]=[]
len_list[label]=[]
for score_list in l_discrete.values():
len_list[label].append(len(score_list))
plot_emp_prob[label].append(score_list.count(label)/ \
float(len(score_list)))
return len_list, plot_emp_prob
def store_state(self, result_dir, index=None):
""" Stores plots of score distribution and sigmoid fit or/and
the calculated probabilities with the corresponding label.
.. todo:: change plot calculations to upper if else syntax
.. todo:: add the corresponding data point to the saved probabilities
"""
if self.store :
# Create the directory for the stored results
from pySPACE.tools.filesystem import create_directory
import os
node_dir = os.path.join(result_dir, self.__class__.__name__)
create_directory(node_dir)
# Safe the probabilities in a pickle file
if( self.store_probabilities ):
import pickle
f_name=node_dir + "/probabilities_%d.pickle" % self.current_split
pickle.dump(self.probabilities, open(f_name,'w'))
if self.store_plots:
# reliability plot of the training data (before sigmoid fit)
sort_index = numpy.argsort(self.scores)
labels = numpy.array(self.labels)[sort_index]
predictions = numpy.array(self.scores)[sort_index]
plot_scores_train,l_discrete_train=self._discretize(predictions, labels)
len_list_train, plot_emp_prob_train = self._empirical_probability(l_discrete_train)
# training data after sigmoid fit
fApB = predictions * self.A + self.B
new_predictions = [(int(fApB[i]<0)+int(fApB[i]>=0)*numpy.exp(-fApB[i]))/ \
(1.0+numpy.exp((-1)**int(fApB[i]>=0)*fApB[i])) \
for i in range(len(fApB))]
plot_scores_train_fit, l_discrete_train_fit = \
self._discretize(new_predictions,labels)
len_list_train_fit, plot_emp_prob_train_fit = \
self._empirical_probability(l_discrete_train_fit)
# test data before sigmoid fit
test_scores = []
test_labels = []
for data, label in self.input_node.request_data_for_testing():
test_scores.append(data.prediction)
test_labels.append(self.class_labels.index(label))
sort_index = numpy.argsort(test_scores)
labels = numpy.array(test_labels)[sort_index]
predictions = numpy.array(test_scores)[sort_index]
plot_scores_test,l_discrete_test = self._discretize(predictions, labels)
len_list_test, plot_emp_prob_test = self._empirical_probability(l_discrete_test)
# test data after sigmoid fit
fApB = predictions * self.A + self.B
new_predictions = [(int(fApB[i]<0)+int(fApB[i]>=0)*numpy.exp(-fApB[i]))/ \
(1.0+numpy.exp((-1)**int(fApB[i]>=0)*fApB[i])) \
for i in range(len(fApB))]
plot_scores_test_fit, l_discrete_test_fit = \
self._discretize(new_predictions,labels)
len_list_test_fit, plot_emp_prob_test_fit = \
self._empirical_probability(l_discrete_test_fit)
import pylab
from matplotlib.transforms import offset_copy
pylab.close()
fig = pylab.figure(figsize=(10,10))
ax = pylab.subplot(2,2,1)
transOffset=offset_copy(ax.transData,fig=fig,x=0.05,y=0.1,units='inches')
for x,y,s in zip(plot_scores_train,plot_emp_prob_train[1],len_list_train[1]):
pylab.plot((x,),(y,),'ro')
pylab.text(x,y,'%d' % s, transform=transOffset)
pylab.plot((plot_scores_train[0],plot_scores_train[-1]),(0,1),'-')
x = numpy.arange(plot_scores_train[0],plot_scores_train[-1],.02)
y = 1/(1+numpy.exp(self.A*x+self.B))
pylab.plot(x,y,'-')
pylab.xlim(plot_scores_train[0],plot_scores_train[-1])
pylab.ylim(0,1)
pylab.xlabel("SVM prediction Score (training data)")
pylab.ylabel("Empirical Probability")
ax = pylab.subplot(2,2,2)
transOffset=offset_copy(ax.transData,fig=fig,x=0.05,y=0.1,units='inches')
for x, y, s in zip(plot_scores_train_fit, plot_emp_prob_train_fit[1],
len_list_train_fit[1]):
pylab.plot((x,),(y,),'ro')
pylab.text(x,y,'%d' % s, transform=transOffset)
pylab.plot((plot_scores_train_fit[0],plot_scores_train_fit[-1]),(0,1),'-')
pylab.xlim(plot_scores_train_fit[0],plot_scores_train_fit[-1])
pylab.ylim(0,1)
pylab.xlabel("SVM Probability (training data)")
pylab.ylabel("Empirical Probability")
ax = pylab.subplot(2,2,3)
transOffset=offset_copy(ax.transData,fig=fig,x=0.05,y=0.1,units='inches')
for x,y,s in zip(plot_scores_test,plot_emp_prob_test[1],len_list_test[1]):
pylab.plot((x,),(y,),'ro')
pylab.text(x,y,'%d' % s, transform=transOffset)
pylab.plot((plot_scores_test[0],plot_scores_test[-1]),(0,1),'-')
x = numpy.arange(plot_scores_test[0],plot_scores_test[-1],.02)
y = 1/(1+numpy.exp(self.A*x+self.B))
pylab.plot(x,y,'-')
pylab.xlim(plot_scores_test[0],plot_scores_test[-1])
pylab.ylim(0,1)
pylab.xlabel("SVM prediction Scores (test data)")
pylab.ylabel("Empirical Probability")
ax = pylab.subplot(2,2,4)
transOffset=offset_copy(ax.transData,fig=fig,x=0.05,y=0.1,units='inches')
for x, y, s in zip(plot_scores_test_fit, plot_emp_prob_test_fit[1],
len_list_test_fit[1]):
pylab.plot((x,),(y,),'ro')
pylab.text(x,y,'%d' % s, transform=transOffset)
pylab.plot((plot_scores_test_fit[0],plot_scores_test_fit[-1]),(0,1),'-')
pylab.xlim(plot_scores_test_fit[0],plot_scores_test_fit[-1])
pylab.ylim(0,1)
pylab.xlabel("SVM Probability (test data)")
pylab.ylabel("Empirical Probability")
pylab.savefig(node_dir + "/reliable_diagrams_%d.png" % self.current_split)
class SigmoidTransformationNode(BaseNode):
""" Transform score to interval [0,1] with a sigmoid function
The new decision border will be at 0.5.
.. warning::
This is NOT a probability mapping and parameters should be set for
the function.
This node is intended to be externally optimized, such that it
generalizes the threshold optimization for soft metrics.
The used sigmoid fit function is :math:`\\frac{1}{1+e^{Ax+B}}`.
It is 0.5 at :math:`x = -\\frac{B}{A}`.
**Parameters**
:A:
Scaling of prediction value. See above.
(*optional, default: -1*)
:B:
Shifting of scaled prediction. See above.
(*optional, default: 0*)
:offset:
Has the meaning of :math:`-\\frac{B}{A}` and replaces the parameter B if used.
(*optional, default: None*)
:class_labels:
Determines the order of classes, i.e. the mapping of class labels
onto integers. The first element of the list should be the negative
class, the second should be the positive class.
In this context, the positive class should be the one mapped to values greater than 0.5
and the other class should be the negative one.
If the original prediction value had the same orientation,
*A* should be chosen negative.
(*optional, default: ['Standard','Target']*)
**Exemplary Call**
.. code-block:: yaml
-
node : SigTrans
parameters :
class_labels : ['Standard','Target']
"""
input_types=["PredictionVector"]
def __init__(self, class_labels = ['Standard','Target'],
A = -1, B = 0, offset = None,
**kwargs):
super(SigmoidTransformationNode, self).__init__(**kwargs)
if not(offset is None):
B = -A *offset
self.set_permanent_attributes(class_labels = class_labels,
A = A,
B = B)
def is_trainable(self):
return False
def is_supervised(self):
return False
def _execute(self, data):
""" Evaluate each prediction with the sigmoid mapping learned. """
# code simply copied from PlattsSigmoidFitNode for eventual future changes
fApB = data.prediction * self.A + self.B
if fApB<0:
new_prediction=1/(1.0+numpy.exp(fApB))
else:
new_prediction=numpy.exp(-fApB)/(numpy.exp(-fApB)+1.0)
# enforce mapping to interval [0,1]
new_prediction = max(0,min(1,new_prediction))
new_label = self.class_labels[0] if new_prediction <= 0.5 \
else self.class_labels[1]
return PredictionVector(label=new_label,
prediction=new_prediction,
predictor=data.predictor)
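# The branch on the sign of fApB above is the standard numerically stable way
# to evaluate the logistic function without overflow. A minimal standalone
# sketch of the same computation (illustrative only, not part of the original
# node API; 'score', 'A' and 'B' are hypothetical inputs):
def _stable_sigmoid_sketch(score, A=-1.0, B=0.0):
    """Return 1/(1+exp(A*score+B)) without overflow for large |A*score+B|."""
    import numpy
    fApB = score * A + B
    if fApB < 0:
        return 1.0 / (1.0 + numpy.exp(fApB))
    return numpy.exp(-fApB) / (numpy.exp(-fApB) + 1.0)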
class LinearTransformationNode(BaseNode):
""" Scaling and offset shift, and relabeling due to new decision boundary
Having a prediction value x it is mapped to (x+*offset*)*scaling*.
If the result is lower than the *decision boundary* it is mapped to the
first class label for the negative class and otherwise to the second
positive class.
**Parameters**
:class labels: This mandatory parameter defines the ordering of class
labels for the mapping after the transformation.
If this parameter is not specified, the label remains
unchanged. This is for example feasible for regression
mappings.
.. note:: This parameter could be also used to change
class label strings, but this would probably
cause problems in the evaluation step.
(*recommended, default: None*)
:offset: Shift of the prediction value.
(*optional, default: 0*)
:scaling: Scaling factor applied after offset shift.
(*optional, default: 1*)
:decision_boundary: Everything lower than this value is classified as
class one and everything else as class two. By default
no labels are changed.
**Exemplary Call**
.. code-block:: yaml
- node : LinearTransformation
parameters :
class_labels : ['Standard', 'Target']
offset : 1
scaling : 42
decision_boundary : 3
"""
def __init__(self, class_labels=None, offset=0, scaling=1,
decision_boundary=None, **kwargs):
super(LinearTransformationNode, self).__init__(**kwargs)
if class_labels is None or decision_boundary is None:
decision_boundary = None
class_labels = None
self.set_permanent_attributes(class_labels=class_labels,
scaling=scaling,
offset=offset,
decision_boundary=decision_boundary,
)
def _execute(self, x):
""" (x+o)*s < d """
p = x.prediction
prediction = (p+self.offset)*self.scaling
if self.decision_boundary is None:
label = x.label
elif self.decision_boundary < prediction:
label = self.class_labels[0]
else:
label = self.class_labels[1]
return PredictionVector(prediction=prediction, label=label,
predictor=x.predictor)
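# Minimal usage sketch (hypothetical numbers): with offset=1 and scaling=42 a
# raw prediction of -0.9 is mapped to (-0.9 + 1) * 42 = 4.2; the new label is
# then chosen by the decision_boundary branch in _execute above.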
class LinearFitNode(BaseNode):
""" Linear mapping between score and [0,1]
This node maps the unbounded SVM score linearly to the interval [0,1].
Whether the result can be interpreted as a probability can be seen in the
reliable diagrams.
**Parameters**
:class_labels:
Determines the order of classes, i.e. the mapping of class labels
onto integers. The first element of the list should be the negative
class, the second should be the positive class.
If this parameter is not specified, the order is determined based on
the order of occurrence in the training data (which is more or less
arbitrary).
(*optional, default: []*)
:store:
If True 'reliable diagrams' of the training and test data are stored.
A discretization of the scores is made to calculate empirical
probabilities. The number of scores per bin is displayed on every
data point in the figure and shows how accurate the estimate
is (the higher the number the better). If the fit is reliable the
empirical probabilities should scatter around the diagonal in the
right plots.
**Exemplary Call**
.. code-block:: yaml
-
node : LinearFit
parameters :
class_labels : ['Standard','Target']
"""
def __init__(self, class_labels = [], **kwargs):
super(LinearFitNode, self).__init__(**kwargs)
self.set_permanent_attributes(class_labels = class_labels,
scores = [],
labels = [])
def is_trainable(self):
return True
def is_supervised(self):
return True
def _train(self, data, class_label):
""" Collect SVM output and true labels. """
self._train_phase_started = True
self.scores.append(data.prediction)
if class_label not in self.class_labels:
self.class_labels.append(class_label)
self.labels.append(self.class_labels.index(class_label))
def _stop_training(self):
""" Compute max range of the score according to the class."""
positive_inst = [score for score,label in \
zip(self.scores,self.labels) if label==1]
negative_inst = [score for score,label in \
zip(self.scores,self.labels) if label==0]
self.max_range = (abs(min(negative_inst)),max(positive_inst))
def _execute(self, x):
""" Evaluate each prediction with the linear | |
self.functions.itervalues() }
self._node_addresses = sorted(self.nodes.keys())
self._function_addresses = sorted(self.functions.keys())
self._stale_lookup = False
#--------------------------------------------------------------------------
# Metadata Collection
#--------------------------------------------------------------------------
@not_mainthread
def _async_refresh(self, result_queue, function_addresses, progress_callback):
"""
The main routine for the asynchronous metadata refresh worker.
TODO/FUTURE: this should be cleaned up / refactored
"""
# pause our rename listening hooks (more performant collection)
if self._rename_hooks:
self._rename_hooks.unhook()
#
# if the caller provided no function addresses to target for refresh,
# we will perform a complete metadata refresh of all database defined
# functions. let's retrieve that list from the disassembler now...
#
if not function_addresses:
function_addresses = disassembler.execute_read(
disassembler.get_function_addresses
)()
# refresh database properties that we wish to cache
self._async_refresh_properties()
# refresh the core database metadata asynchronously
completed = self._async_collect_metadata(
function_addresses,
progress_callback
)
# regenerate the instruction list from collected metadata
self._refresh_instructions()
# refresh the internal function/node fast lookup lists
self._refresh_lookup()
#
# NOTE:
#
# creating the hooks inline like this is less than ideal, but they
# have been moved here (from the metadata constructor) to
# accommodate shortcomings of the Binary Ninja API.
#
# TODO/FUTURE/V35:
#
# it would be nice to move these back to the constructor once the
# Binary Ninja API allows us to detect BV / sessions as they are
# created, and able to load plugins on such events.
#
#----------------------------------------------------------------------
# create the disassembler hooks to listen for rename events
if not self._rename_hooks:
self._rename_hooks = disassembler.create_rename_hooks()
self._rename_hooks.renamed = self._name_changed
self._rename_hooks.metadata = weakref.proxy(self)
#----------------------------------------------------------------------
# reinstall the rename listener hooks now that the refresh is done
self._rename_hooks.hook()
# send the refresh result (good/bad) in case anyone is still listening
if completed:
self.cached = True
result_queue.put(True)
else:
result_queue.put(False)
# clean up our thread's reference as it is basically done/dead
self._refresh_worker = None
# thread exit...
return
@disassembler.execute_read
def _async_refresh_properties(self):
"""
Refresh a selection of interesting database properties.
"""
self.filename = disassembler.get_root_filename()
self.imagebase = disassembler.get_imagebase()
@not_mainthread
def _async_collect_metadata(self, function_addresses, progress_callback):
"""
Collect metadata from the underlying database (interruptable).
"""
CHUNK_SIZE = 150
completed = 0
start = time.time()
#----------------------------------------------------------------------
for addresses_chunk in chunks(function_addresses, CHUNK_SIZE):
#
# collect function metadata from the open database in groups of
# CHUNK_SIZE. collect_function_metadata() takes a list of function
# addresses and collects their metadata in a thread-safe manner
#
fresh_metadata = collect_function_metadata(addresses_chunk)
# update our database metadata cache with the new function metadata
self._update_functions(fresh_metadata)
# report incremental progress to an optional progress_callback
if progress_callback:
completed += len(addresses_chunk)
progress_callback(completed, len(function_addresses))
# if the refresh was canceled, stop collecting metadata and bail
if self._stop_threads:
return False
# sleep some so we don't choke the mainthread
time.sleep(.0015)
#----------------------------------------------------------------------
end = time.time()
logger.debug("Metadata collection took %s seconds" % (end - start))
# refresh completed normally / was not interrupted
return True
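# NOTE: the collection loop above relies on a chunks() helper defined
# elsewhere in this codebase. A minimal sketch of what such a helper could
# look like (assumption, shown here only for context):
#
#     def chunks(sequence, size):
#         for i in xrange(0, len(sequence), size):
#             yield sequence[i:i + size]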
def _update_functions(self, fresh_metadata):
"""
Update stored function metadata with the given fresh metadata.
Returns a map of {address: function metadata} that has been updated.
"""
blank_function = FunctionMetadata(-1)
#
# the first step is to loop through the 'fresh' function metadata that
# has been given to us, and identify what is truly new or different
# from any existing metadata we hold.
#
for function_address, new_metadata in fresh_metadata.iteritems():
# extract the 'old' metadata from the database metadata cache
old_metadata = self.functions.get(function_address, blank_function)
#
# if the fresh metadata for this function is identical to the
# existing metadata we have collected for it, there's nothing
# else for us to do -- just ignore it.
#
if old_metadata == new_metadata:
continue
# delete nodes that explicitly no longer exist
old = old_metadata.nodes.viewkeys() - new_metadata.nodes.viewkeys()
for node_address in old:
del self.nodes[node_address]
#
# if the newly collected metadata for a given function is empty, this
# indicates that the function has been deleted. we go ahead and
# remove its old function metadata from the db metadata entirely
#
if new_metadata.empty:
del self.functions[function_address]
continue
# add or overwrite the new/updated basic blocks
self.nodes.update(new_metadata.nodes)
# save the new/updated function
self.functions[function_address] = new_metadata
#
# since the node / function metadata cache has probably changed, we
# will need to refresh the internal fast lookup lists. this flag is
# only really used for debugging, and will probably be removed
# in the TODO/FUTURE collection refactor (v0.9?)
#
self._stale_lookup = True
#--------------------------------------------------------------------------
# Signal Handlers
#--------------------------------------------------------------------------
@mainthread
def _name_changed(self, address, new_name, local_name=None):
"""
Handler for rename event in IDA.
TODO/FUTURE: refactor this to not be so IDA-specific
"""
# we should never care about local renames (e.g., loc_40804b), ignore
if local_name or new_name.startswith("loc_"):
return 0
# get the function that this address falls within
function = self.get_function(address)
# if the address does not fall within a function (might happen?), ignore
if not function:
return 0
#
# ensure the renamed address matches the function start before
# renaming the function in our metadata cache.
#
# I am not sure when this would not be the case (globals? maybe)
# but I'd rather not find out.
#
if address != function.address:
return
# if the name isn't actually changing (misfire?) nothing to do
if new_name == function.name:
return
logger.debug("Name changing @ 0x%X" % address)
logger.debug(" Old name: %s" % function.name)
logger.debug(" New name: %s" % new_name)
# rename the function, and notify metadata listeners
#function.name = new_name
function.refresh_name()
self._notify_function_renamed()
# necessary for IDP/IDB_Hooks
return 0
#--------------------------------------------------------------------------
# Callbacks
#--------------------------------------------------------------------------
def function_renamed(self, callback):
"""
Subscribe a callback for function rename events.
"""
register_callback(self._function_renamed_callbacks, callback)
def _notify_function_renamed(self):
"""
Notify listeners of a function rename event.
"""
notify_callback(self._function_renamed_callbacks)
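# The register_callback() / notify_callback() helpers used above are defined
# elsewhere in this codebase. A rough sketch of the pattern they implement
# (assumption, for illustration only):
#
#     def register_callback(callback_list, callback):
#         if callback not in callback_list:
#             callback_list.append(callback)
#
#     def notify_callback(callback_list):
#         for callback in callback_list:
#             callback()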
#------------------------------------------------------------------------------
# Function Metadata
#------------------------------------------------------------------------------
class FunctionMetadata(object):
"""
Function level metadata cache.
"""
def __init__(self, address):
# function metadata
self.address = address
self.name = None
# node metadata
self.nodes = {}
self.edges = collections.defaultdict(list)
# fixed/baked/computed metrics
self.size = 0
self.node_count = 0
self.edge_count = 0
self.instruction_count = 0
self.cyclomatic_complexity = 0
# collect metadata from the underlying database
if address != -1:
self._build_metadata()
#--------------------------------------------------------------------------
# Properties
#--------------------------------------------------------------------------
@property
def instructions(self):
"""
Return the instruction addresses in this function.
"""
return set([ea for node in self.nodes.itervalues() for ea in node.instructions])
@property
def empty(self):
"""
Return a bool indicating whether the object is populated.
"""
return len(self.nodes) == 0
#--------------------------------------------------------------------------
# Public
#--------------------------------------------------------------------------
@disassembler.execute_read
def refresh_name(self):
"""
Refresh the function name against the open database.
"""
self.name = disassembler.get_function_name_at(self.address)
#--------------------------------------------------------------------------
# Metadata Population
#--------------------------------------------------------------------------
def _build_metadata(self):
"""
Collect function metadata from the underlying database.
"""
self.name = disassembler.get_function_name_at(self.address)
self._refresh_nodes()
self._finalize()
def _refresh_nodes(self):
"""
This will be replaced with a disassembler-specific function at runtime.
NOTE: Read the 'MONKEY PATCHING' section at the end of this file.
"""
raise RuntimeError("This function should have been monkey patched...")
def _ida_refresh_nodes(self):
"""
Refresh function node metadata against an open IDA database.
"""
function_metadata = self
function_metadata.nodes = {}
# get function & flowchart object from IDA database
function = idaapi.get_func(self.address)
flowchart = idaapi.qflow_chart_t("", function, idaapi.BADADDR, idaapi.BADADDR, 0)
#
# now we will walk the flowchart for this function, collecting
# information on each of its nodes (basic blocks) and populating
# the function & node metadata objects.
#
for node_id in xrange(flowchart.size()):
node = flowchart[node_id]
# NOTE/COMPAT
if disassembler.USING_IDA7API:
node_start = node.start_ea
node_end = node.end_ea
else:
node_start = node.startEA
node_end = node.endEA
#
# the current node appears to have a size of zero. This means
# that another flowchart / function owns this node so we can just
# ignore it...
#
if node_start == node_end:
continue
# create a new metadata object for this node
node_metadata = NodeMetadata(node_start, node_end, node_id)
#
# establish a relationship between this node (basic block) and
# this function metadata (its parent)
#
node_metadata.function = function_metadata
function_metadata.nodes[node_start] = node_metadata
# compute all of the edges between nodes in the current function
for node_metadata in function_metadata.nodes.itervalues():
edge_src = node_metadata.instructions[-1]
for edge_dst in idautils.CodeRefsFrom(edge_src, True):
if edge_dst in function_metadata.nodes:
function_metadata.edges[edge_src].append(edge_dst)
def _binja_refresh_nodes(self):
"""
Refresh function | |
"""
Full CI based on determinants rather than on CSFs.
The approach is the one introduced by
Olsen, J. Chem. Phys. 89, 2185 (1988)
It is also described in the book
Molecular electronic structure theory,
by Helgaker, <NAME>.
There it is called 'Minimal operator count (MOC) method'
written by <NAME>
Notation:
Book of Helgaker.
"""
import itertools
import numpy as np
import scipy.sparse as spspa
import scipy.sparse.linalg as spspalin
import scipy.linalg as splin
from scipy.special import binom
try: from PyQuante.cints import ijkl2intindex
except:
print "cints import failed in CI.py"
from PyQuante.pyints import ijkl2intindex
from PyQuante.CI import TransformInts
def single_excitations(n):
singles = []
for p in xrange(n):
for q in xrange(n):
singles.append((p,q))
return singles
def double_excitations(n):
doubles = []
for p in xrange(n):
for q in xrange(n):
for r in xrange(n):
for s in xrange(n):
doubles.append((p,q,r,s))
return doubles
def transform_one_ints(h,orbs):
""" Transform the one-electron Hamilton matrix from basis function
representation to MO basis,
orbs is the coefficient matrix with rows indexing orbitals and
colums indexing the basis function coefficients.
See
http://vergil.chemistry.gatech.edu/resources/programming/mp2-transform-project.pdf
for details.
For very large basis sizes, this might need to be calculated on
the fly.
"""
return np.dot(orbs.T, np.dot(h,orbs))
def e_pq_on_string(p,q,string):
"""
apply the excitation operator a^+_p a_q on a string
This gives new string and a phase factor.
It must have been checked that q is in string and p is not!
"""
if q not in string:
""" annihilate vacuum """
return 0,0
if p in string and p!=q:
""" try to create already occupied orbital which was
not destroyed """
return 0,0
# action of E_pq on string j gives new string e_pq_string:
e_pq_string = list(string)
# determine phase factor
phase_q = (-1)**e_pq_string.index(q)
# apply annihilator q
e_pq_string.remove(q)
# apply creator p
e_pq_string.append(p)
e_pq_string.sort()
phase_p = (-1)**e_pq_string.index(p)
return phase_p*phase_q, e_pq_string
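# Illustrative example (not used by the solver): applying E_02 = a^+_0 a_2 to
# the string (1,2,3). The annihilator a_2 stands behind one occupied orbital,
# giving a phase of -1; the creator a^+_0 then lands at position 0 with phase
# +1, so the total phase is -1 and the new string is [0,1,3].
def _demo_e_pq_on_string():
    phase, new_string = e_pq_on_string(0, 2, [1, 2, 3])
    assert (phase, new_string) == (-1, [0, 1, 3])
    return phase, new_string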
class FCISolver(object):
""" Interface to the scipy.sparse.linalg.eigs eigenvalue solver"""
def __init__(self, h, ERI, enuke, orbs, n_elec, multiplicity, m_s, k=4, sigma_eigs=None, which='SA', v0=None, maxiter=None, tol=0, return_eigenvectors=True ):
"""
Parameters:
h : one-electron integrals over basis functions
ERI : electron repulsion integrals over basis functions
enuke : The nuclear repulsion energy, Molecule.get_enuke()
orbs : coefficient matrix from HF calculation giving the
orbitals in rows and the bfs coeffs in columns
n_elec : total number of electron
multiplicity: 2*S+1
m_s : M_s component of total spin.
keyword parameters passed to eigs solver (see scipy docs):
k : number of eigenvalues computed
sigma_eigs : number to which eigenvalues are close
(should be set to increase performance)
which : if set to 'SA' calculate the k smallest (algebraic) eigenvalues
v0 : initial vector to start from,
perhaps the HF vector (1,0,0...)
maxiter : maximum number of Arnoldi updates allowed
tol : tolerance in calculation of eigenvalues
0 means machine precision
return_eigenvectors: return eigenvector in addition to eigenvalues
if set to True
"""
self.enuke = enuke
self.k = k
self.sigma_eigs = sigma_eigs
self.which = which
self.v0 = v0
self.maxiter=maxiter
self.tol=tol
self.return_eigenvectors = return_eigenvectors
# number of alpha electrons
self.n_alpha = 0.5*n_elec + m_s
# number of beta electrons
self.n_beta = 0.5*n_elec - m_s
# number of orbitals
self.n_orbs = orbs.shape[0]
# number of alpha strings
self.len_alpha = int(binom(self.n_orbs,self.n_alpha))
# number of beta strings
self.len_beta = int(binom(self.n_orbs,self.n_beta))
assert self.n_alpha +self.n_beta == n_elec
# Instantiate Sigma class
self.SigmaInst = Sigma(np.eye(self.len_alpha, self.len_beta), h, ERI, orbs, n_elec, multiplicity, m_s)
# shape of the H matrix
self.H_mat_shape = (self.len_alpha*self.len_beta , self.len_alpha*self.len_beta)
# shape of the coefficient matrix in Sigma class
self.c_mat_shape = (self.len_alpha , self.len_beta)
# shape of the corresponding vector passed to eigs
self.c_vec_shape = self.len_alpha*self.len_beta
# Linear operator passed to eigensolver
self.LinOp = spspalin.LinearOperator(self.H_mat_shape, self.matvec, dtype=np.float64)
def matvec(self, vec):
""" The reshaped matrix vector step needed for the iterations
in eigs solver.
The steps are:
1. reshape vec to matrix
2. get sigma
3. reshape back and return
"""
vec_mat = vec.reshape( self.c_mat_shape )
self.SigmaInst.c_mat = vec_mat
new_vec_mat = self.SigmaInst.get_sigma()
return new_vec_mat.reshape(self.c_vec_shape)
def iterate(self):
eva, eve = spspalin.eigsh(self.LinOp,k=self.k, sigma = self.sigma_eigs, which = self.which, v0 = self.v0, maxiter= self.maxiter, tol=self.tol, return_eigenvectors = self.return_eigenvectors)
print "diagonalization sucessful"
self.eva, self.eve = self.sort_and_add_enuke(eva,eve)
return self.eva, self.eve
def sort_and_add_enuke(self, eva, eve):
""" sort the eva end eve and add the nuclear attraction energy
to eva. """
# sort
indx = eva.argsort()
eva = eva[indx]
eve = eve[:,indx]
# add enuke
eva += self.enuke
return eva, eve
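# Typical driver code for the iterative solver (sketch; the PyQuante objects
# h, ERI, orbs and the molecule are assumptions and have to be prepared by a
# preceding SCF calculation):
#
#     solver = FCISolver(h, ERI, molecule.get_enuke(), orbs,
#                        n_elec=2, multiplicity=1, m_s=0, k=2)
#     energies, vectors = solver.iterate()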
class FCIExactSolver(object):
"""
In contrast to FCISolver, this class builds the full CI
Hamiltonian matrix explicitly, and then diagonalizes it exactly.
It is only suitable for small CI spaces and is more intended for
debugging purposes.
"""
def __init__(self, h, ERI, enuke, orbs, n_elec, multiplicity, m_s):
"""
Parameters:
h : one-electron integrals over basis functions
ERI : electron repulsion integrals over basis functions
orbs : coefficient matrix from HF calculation giving the
orbitals in rows and the bfs coeffs in columns
n_elec : total number of electron
multiplicity: 2*S+1
m_s : M_s component of total spin.
"""
# Instantiate FCISolver class to access necessarry structures.
self.FCISolverInst = FCISolver(h, ERI, enuke, orbs, n_elec, multiplicity, m_s)
def get_H_mat(self):
""" build the Hamiltonian matrix in the I_c = I_alpha I_beta space.
The principle is as follows:
With the Sigma class we have a (hopefully efficient) method to
calculate products of the Hamiltonian matrix (tensor) with a
coefficient vector (matrix).
The, e.g., 1st column of a matrix A is obtained by the
multiplication of A with the vector (1,0,0...,0).
This principle is applied for each of the len_alpha*len_beta
components of the coefficient vector.
The reshaping of the coefficient vector to a coefficient
matrix is handled by the matvec method of the FCISolver class.
"""
self.H_mat = np.zeros((self.FCISolverInst.len_alpha*self.FCISolverInst.len_beta,self.FCISolverInst.len_alpha*self.FCISolverInst.len_beta))
for col in xrange(self.FCISolverInst.len_alpha*self.FCISolverInst.len_beta):
""" loop over c_mat vector """
vec = np.zeros((self.FCISolverInst.len_alpha*self.FCISolverInst.len_beta))
vec[col] = 1.
self.H_mat[:,col] = self.FCISolverInst.matvec(vec)
print "build of H_mat successful."
def diagonalize(self):
""" diagonalize the Hamiltonian matrix """
try: self.H_mat
except: self.get_H_mat()
eva, eve = splin.eigh(self.H_mat)
self.eva, self.eve = self.FCISolverInst.sort_and_add_enuke(eva,eve)
print "diagonalization successful"""
return self.eva, self.eve
class Graph(object):
""" graph object determining vertex weights and arc weights in
reverse lexical ordering.
see Helgaker section 11.8.2
Differences from Helgaker:
Attention: orbital numbering starts with 0!
Attention: address starts also with 0!
"""
def __init__(self, n_orbs, n_electron):
"""
n_orbs : number of orbitals
n_electron : number of electrons
"""
self.n_orbs = n_orbs
self.n_electron = n_electron
self.get_vertex_weights()
self.get_arc_weights()
self.get_occupations()
assert int(binom(self.n_orbs,self.n_electron)) == len(self.occupations)
def get_vertex_weights(self):
""" get the vertex weights
vertices are indexed as a two-dimensional n_orbs+1 x
n_electron+1 array:
rows: orbitals
columns: number of electrons
"""
self.vert_weights = np.zeros((self.n_orbs+1,self.n_electron+1), dtype=np.int32)
self.vert_weights[0,0] = 1
for row in xrange(1,self.n_orbs+1):
for column in xrange(self.n_electron+1):
if column > row:
""" upper triangle is left out """
continue
if row > column+ self.n_orbs - self.n_electron:
continue
if column==0:
"""check if vertex is allowed"""
self.vert_weights[row,column] = self.vert_weights[row-1,column]
else:
self.vert_weights[row,column] = self.vert_weights[row-1,column] + self.vert_weights[row-1,column-1]
def get_arc_weights(self):
""" get the arc weights
arc weights for vertical arcs. Represented as (n,N) array
"""
self.arc_weights = np.zeros((self.n_orbs, self.n_electron), dtype=np.int32)
for row in xrange(self.n_orbs):
for column in xrange(self.n_electron):
if column > row:
""" upper triangle is left out """
continue
if row > column+ self.n_orbs - self.n_electron:
""" lower part """
continue
self.arc_weights[row,column] = self.vert_weights[row,column+1]
def address(self, occupation):
""" get the address of a string given its occupation as, e.g.,
(0,2,3) means string a^+_0 a^+_2 a^+_3
Attention: orbital numbering starts with 0!
Attention: address starts also with 0!
occupation : SORTED list of creation operators (integers)
"""
address = 0
for index in xrange(self.n_electron):
address += self.arc_weights[occupation[index],index]
return address
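# Worked example (n_orbs=4, n_electron=2): the arc weights computed above give
# the addresses (0,1)->0, (0,2)->1, (1,2)->2, (0,3)->3, (1,3)->4, (2,3)->5,
# i.e. exactly the reverse lexical ordering of all binom(4,2)=6 strings.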
def get_occupations(self):
""" return a list of occupations (list of lists) in reverse
lexical | |
#!/usr/bin/env python
from __future__ import print_function
r"""
Runs the Sentieon Genomics Tools workflows using Google Pipelines API
"""
import yaml
import json
import argparse
import os
import sys
import copy
import time
import ssl
import warnings
import logging
import random
import google.auth
import googleapiclient.errors
from apiclient.discovery import build
from pprint import pformat
from googleapiclient.errors import HttpError
script_dir = os.path.dirname(os.path.realpath(__file__))
germline_yaml = script_dir + "/germline.yaml"
somatic_yaml = script_dir + "/somatic.yaml"
ccdg_yaml = script_dir + "/ccdg.yaml"
default_json = script_dir + "/runner_default.json"
target_url_base = (
"https://www.googleapis.com/compute/v1/projects/{project}/"
"zones/{zone}/instances/{instance}"
)
def cloud_storage_exists(client, gs_path, user_project=None):
try:
bucket, blob = gs_path[5:].split("/", 1)
bucket = client.bucket(bucket, user_project=user_project)
blob = bucket.blob(blob)
res = blob.exists()
except Exception as err: # Catch all exceptions
print(
"Error polling file in Google Cloud Storage: " + str(err),
file=sys.stderr,
)
raise ValueError(
"Error: Could not find {gs_path} in Google Cloud "
"Storage".format(**locals())
)
return res
def _check_inputs_exist(
job_vars, credentials, project=None, user_project=None
):
from google.cloud import storage
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
"Your application has authenticated "
"using end user credentials from Google Cloud "
"SDK",
)
client = storage.Client(project=project, credentials=credentials)
# The DBSNP, BQSR and Realign sites files
sites_files = []
sites_files += (
job_vars["BQSR_SITES"].split(",") if job_vars["BQSR_SITES"] else []
)
sites_files += (
job_vars["REALIGN_SITES"].split(",")
if job_vars["REALIGN_SITES"]
else []
)
sites_files += [job_vars["DBSNP"]] if job_vars["DBSNP"] else []
for sites_file in sites_files:
if not cloud_storage_exists(
client, sites_file, user_project=user_project
):
logging.error("Could not find supplied file {}".format(sites_file))
sys.exit(-1)
if sites_file.endswith("vcf.gz"):
if not cloud_storage_exists(
client, sites_file + ".tbi", user_project=user_project
):
logging.error(
"Could not find index for file {}".format(sites_file)
)
sys.exit(-1)
else:
if not cloud_storage_exists(
client, sites_file + ".idx", user_project=user_project
):
logging.error(
"Could not find index for file {}".format(sites_file)
)
sys.exit(-1)
# The data input files
gs_split_files = (
job_vars["FQ1"],
job_vars["TUMOR_FQ1"],
job_vars["FQ2"],
job_vars["TUMOR_FQ2"],
job_vars["BAM"],
job_vars["TUMOR_BAM"],
)
gs_files = ()
for split_file in gs_split_files:
if not split_file:
continue
for input_file in split_file.split(","):
if not cloud_storage_exists(
client, input_file, user_project=user_project
):
logging.error(
"Could not find the supplied file {}".format(input_file)
)
sys.exit(-1)
for input_file in gs_files:
if not cloud_storage_exists(
client, input_file, user_project=user_project
):
logging.error(
"Could not file the supplied file {}".format(input_file)
)
sys.exit(-1)
# All reference files
ref = job_vars["REF"]
ref_base = ref[:-3] if ref.endswith(".fa") else ref[:-6]
if not cloud_storage_exists(client, ref, user_project=user_project):
logging.error("Reference file not found")
sys.exit(-1)
if not cloud_storage_exists(
client, ref + ".fai", user_project=user_project
):
logging.error("Reference fai index not found")
sys.exit(-1)
if not cloud_storage_exists(
client, ref + ".dict", user_project=user_project
) and not cloud_storage_exists(
client, ref_base + ".dict", user_project=user_project
):
logging.error("Reference dict index not found")
sys.exit(-1)
# FQ specific
if job_vars["FQ1"] or job_vars["TUMOR_FQ1"]:
for suffix in [".amb", ".ann", ".bwt", ".pac", ".sa"]:
if not cloud_storage_exists(
client, ref + suffix, user_project=user_project
) and not cloud_storage_exists(
client, ref + ".64" + suffix, user_project=user_project
):
logging.error(
"Reference BWA index {} not found".format(suffix)
)
sys.exit(-1)
# BAM specific
bam_vars = ("BAM", "TUMOR_BAM")
for bam_type in bam_vars:
if job_vars[bam_type]:
for bam in job_vars[bam_type].split(","):
if not cloud_storage_exists(
client, bam + ".bai", user_project=user_project
) and not cloud_storage_exists(
client, bam + "bai", user_project=user_project
):
logging.error("BAM supplied but BAI not found")
sys.exit(-1)
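# Illustrative pipeline_config for a germline run (bucket names and paths are
# hypothetical; the full set of recognized keys comes from runner_default.json):
#
#     {
#       "PIPELINE": "GERMLINE",
#       "PROJECT_ID": "my-gcp-project",
#       "OUTPUT_BUCKET": "gs://my-bucket/results",
#       "FQ1": "gs://my-bucket/sample_1.fastq.gz",
#       "FQ2": "gs://my-bucket/sample_2.fastq.gz",
#       "REF": "gs://my-bucket/reference/genome.fa",
#       "ZONES": "us-central1-a,us-central1-b",
#       "CALLING_ALGO": "Haplotyper"
#     }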
def parse_args(vargs=None):
parser = argparse.ArgumentParser()
parser.add_argument(
"pipeline_config", nargs="?", help="The json configuration file"
)
parser.add_argument(
"--verbose", "-v", action="count", help="Increase the runner verbosity"
)
parser.add_argument(
"--no_check_inputs_exist",
action="store_true",
help="Do not check that the input files exist before running the "
"pipeline",
)
parser.add_argument(
"--polling_interval",
type=float,
default=30,
help="Seconds between polling the running operation",
)
parser.add_argument(
"--requester_project",
default=None,
help="A project to charge for local 'requester pays' requests",
)
return parser.parse_args(vargs)
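# Example invocation (hypothetical script and config file names):
#
#     python runner.py my_config.json -vv --requester_project my-gcp-project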
def setup_logging(verbosity=0):
logging.getLogger("googleapiclient." "discovery_cache").setLevel(
logging.ERROR
)
log_format = "%(filename)s::%(funcName)s [%(levelname)s] %(message)s"
if verbosity is None or verbosity < 1:
log_level = logging.WARNING
elif verbosity == 1:
log_level = logging.INFO
else:
log_level = logging.DEBUG
logging.basicConfig(level=log_level, format=log_format)
def main(
pipeline_config,
polling_interval=30,
check_inputs_exist=True,
requester_project=None,
):
# Grab input arguments from the json file
try:
job_vars = json.load(open(default_json))
except ValueError as e:
logging.error("Error reading the default json file: " + default_json)
raise e
job_vars.update(pipeline_config)
preemptible_tries = int(job_vars["PREEMPTIBLE_TRIES"])
if job_vars["NONPREEMPTIBLE_TRY"]:
non_preemptible_tries = 1
preemptible = True if preemptible_tries > 0 else False
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
"Your application has authenticated "
"using end user credentials from Google Cloud "
"SDK",
)
credentials, project_id = google.auth.default()
# Warn about deprecated JSON keys
if "MIN_RAM_GB" in job_vars or "MIN_CPU" in job_vars:
logging.warning(
"'MIN_RAM_GB' and 'MIN_CPU' are now ignored. "
"Please use 'MACHINE_TYPE' to specify the instance "
"type"
)
# Grab the yaml for the workflow
pipeline = job_vars["PIPELINE"]
if pipeline == "GERMLINE":
pipeline_yaml = germline_yaml
elif pipeline == "SOMATIC":
pipeline_yaml = somatic_yaml
elif pipeline == "CCDG":
pipeline_yaml = ccdg_yaml
else:
logging.error(
"Pipeline '" + pipeline + "'. Valid "
"values are 'GERMLINE' and 'SOMATIC'"
)
sys.exit(-1)
try:
pipeline_dict = yaml.safe_load(open(pipeline_yaml))
except IOError:
logging.error('No yaml "{}" found.'.format(pipeline_yaml))
sys.exit(-1)
# Try not to create nearly empty directories
while job_vars["OUTPUT_BUCKET"].endswith("/"):
job_vars["OUTPUT_BUCKET"] = job_vars["OUTPUT_BUCKET"][:-1]
# Some basic error checking to fail early
if not job_vars["PROJECT_ID"]:
logging.error("Please supply a PROJECT_ID")
sys.exit(-1)
project = job_vars["PROJECT_ID"]
# Shared errors
if job_vars["FQ1"] and job_vars["BAM"]:
logging.error("Please supply either 'FQ1' or 'BAM' (not both)")
sys.exit(-1)
if job_vars["INTERVAL"] and job_vars["INTERVAL_FILE"]:
logging.error("Please supply either 'INTERVAL' or 'INTERVAL_FILE'")
sys.exit(-1)
if (job_vars["FQ1"] and job_vars["READGROUP"]) and (
len(job_vars["FQ1"].split(","))
!= len(job_vars["READGROUP"].split(","))
):
logging.error(
"The number of fastq files must match the number of "
"supplied readgroups"
)
sys.exit(-1)
# Pipeline specific errors
if pipeline == "GERMLINE" or pipeline == "CCDG":
if not job_vars["FQ1"] and not job_vars["BAM"]:
logging.error("Please supply either 'FQ1' or 'BAM'")
sys.exit(-1)
if (
job_vars["NO_HAPLOTYPER"]
and job_vars["NO_METRICS"]
and job_vars["NO_BAM_OUTPUT"]
):
logging.error("No output files requested")
sys.exit(-1)
if job_vars["RECALIBRATED_OUTPUT"] and job_vars["BQSR_SITES"] is None:
logging.error(
"Cannot output a recalibrated BAM file without "
"running BQSR. Please supply 'BQSR_SITES'"
)
sys.exit(-1)
valid_algos = ("Haplotyper", "DNAscope")
if job_vars["CALLING_ALGO"] not in valid_algos:
logging.error(
job_vars["CALLING_ALGO"] + "' is not a "
"valid germline variant calling algo. Please set "
"'CALLING_ALGO' to one of " + str(valid_algos)
)
sys.exit(-1)
# Additional CCDG checks
if pipeline == "CCDG":
if job_vars["BQSR_SITES"] is None:
logging.error(
"The CCDG pipeline requires known sites for "
"BQSR. Please supply 'BQSR_SITES'"
)
sys.exit(-1)
elif pipeline == "SOMATIC":
if job_vars["TUMOR_FQ1"] and job_vars["TUMOR_BAM"]:
logging.error(
"Please supply either 'TUMOR_FQ1' or 'TUMOR_BAM' " "(not both)"
)
sys.exit(-1)
if not job_vars["TUMOR_FQ1"] and not job_vars["TUMOR_BAM"]:
logging.error("Please supply either 'TUMOR_FQ1' or 'TUMOR_BAM'")
sys.exit(-1)
if job_vars["RUN_TNSNV"] and not job_vars["REALIGN_SITES"]:
logging.error(
"TNsnv requires indel realignment. Please supply "
"'REALIGN_SITES'"
)
sys.exit(-1)
if (
job_vars["NO_BAM_OUTPUT"]
and job_vars["NO_VCF"]
and job_vars["NO_METRICS"]
):
logging.error("No output files requested")
sys.exit(-1)
if (job_vars["TUMOR_FQ1"] and job_vars["TUMOR_READGROUP"]) and (
len(job_vars["TUMOR_FQ1"].split(","))
!= len(job_vars["TUMOR_READGROUP"].split(","))
):
logging.error(
"The number of tumor fastq files must match the "
"number of supplied readgroups"
)
sys.exit(-1)
valid_algos = ("TNhaplotyper", "TNhaplotyper2", "TNscope", "TNsnv")
if job_vars["CALLING_ALGO"] not in valid_algos:
logging.error(
job_vars["CALLING_ALGO"] + "' is not a "
"valid somatic variant calling algo. Please set "
"'CALLING_ALGO' to one of " + str(valid_algos)
)
sys.exit(-1)
if check_inputs_exist:
_check_inputs_exist(
job_vars,
credentials,
project=project,
user_project=requester_project,
)
# Resources dict
zones = job_vars["ZONES"].split(",") if job_vars["ZONES"] else []
if not zones:
logging.error("Please supply at least one zone to run the pipeline")
region = zones[0][:-2]
disk = {
"name": "local-disk",
"type": "local-ssd",
"sizeGb": int(job_vars["DISK_SIZE"]),
}
vm_dict = {
"machineType": job_vars["MACHINE_TYPE"],
"preemptible": preemptible,
"disks": [disk],
"serviceAccount": {
"scopes": ["https://www.googleapis.com/auth/cloud-platform"]
},
"cpuPlatform": job_vars["CPU_PLATFORM"],
}
resources_dict = {"zones": zones, "virtualMachine": vm_dict}
# Environment
env_dict = {}
for input_var in pipeline_dict["inputParameters"]:
env_dict[input_var["name"]] = job_vars[input_var["name"]]
if env_dict[input_var["name"]] is None:
env_dict[input_var["name"]] = "None"
# Action
if pipeline == "GERMLINE":
_cmd = "/opt/sentieon/gc_germline.sh"
elif pipeline == "SOMATIC":
_cmd = "/opt/sentieon/gc_somatic.sh"
elif pipeline == "CCDG":
_cmd = "/opt/sentieon/gc_ccdg_germline.sh"
else:
logging.error("Error: Unknown pipeline " + pipeline)
sys.exit(-1)
run_action = {
"containerName": "run-pipeline",
"imageUri": job_vars["DOCKER_IMAGE"],
"commands": ["/bin/bash", _cmd],
"mounts": [
{"disk": "local-disk", "path": "/mnt/work", "readOnly": False}
],
}
cleanup_action = {
"containerName": "cleanup",
"imageUri": job_vars["DOCKER_IMAGE"],
"commands": [
"/bin/bash",
"-c",
(
"gsutil cp /google/logs/action/1/stderr "
'"{}/worker_logs/stderr.txt" && '
"gsutil cp /google/logs/action/1/stdout "
'"{}/worker_logs/stdout.txt"'
).format(job_vars["OUTPUT_BUCKET"], job_vars["OUTPUT_BUCKET"]),
],
"alwaysRun": True,
}
# Build the API services
service = build("lifesciences", "v2beta", credentials=credentials)
compute_service = build("compute", "v1", credentials=credentials)
# Check the PIPELINE_REGION
name = "projects/" + project
locations = service.projects().locations().list(name=name).execute()
service_parent = []
for _loc in locations.get("locations", []):
if _loc["locationId"] == job_vars["PIPELINE_REGION"]:
service_parent.append(_loc["name"])
if len(service_parent) != 1:
possible_regions = ", ".join(
[x["locationId"] for x in locations["locations"]]
)
logging.error(
"Unknown PIPELINE_REGION '{}'. Please choose from: "
"{}".format(job_vars["PIPELINE_REGION"], | |
SHA3_224 = 0x0000000E
SHA3_256 = 0x0000000F
SHA3_384 = 0x00000010
SHA3_512 = 0x00000011
class InteropFunction(enum.Enum):
# KMIP 2.0
BEGIN = 0x00000001
END = 0x00000002
RESET = 0x00000003
class ItemType(enum.Enum):
# KMIP 2.0
STRUCTURE = 0x00000001
INTEGER = 0x00000002
LONG_INTEGER = 0x00000003
BIG_INTEGER = 0x00000004
ENUMERATION = 0x00000005
BOOLEAN = 0x00000006
TEXT_STRING = 0x00000007
BYTE_STRING = 0x00000008
DATE_TIME = 0x00000009
INTERVAL = 0x0000000A
DATE_TIME_EXTENDED = 0x0000000B
class KeyCompressionType(enum.Enum):
# KMIP 1.0
EC_PUBLIC_KEY_TYPE_UNCOMPRESSED = 0x00000001
EC_PUBLIC_KEY_TYPE_X9_62_COMPRESSED_PRIME = 0x00000002
EC_PUBLIC_KEY_TYPE_X9_62_COMPRESSED_CHAR2 = 0x00000003
EC_PUBLIC_KEY_TYPE_X9_62_HYBRID = 0x00000004
class KeyFormatType(enum.Enum):
# KMIP 1.0
RAW = 0x00000001
OPAQUE = 0x00000002
PKCS_1 = 0x00000003
PKCS_8 = 0x00000004
X_509 = 0x00000005
EC_PRIVATE_KEY = 0x00000006
TRANSPARENT_SYMMETRIC_KEY = 0x00000007
TRANSPARENT_DSA_PRIVATE_KEY = 0x00000008
TRANSPARENT_DSA_PUBLIC_KEY = 0x00000009
TRANSPARENT_RSA_PRIVATE_KEY = 0x0000000A
TRANSPARENT_RSA_PUBLIC_KEY = 0x0000000B
TRANSPARENT_DH_PRIVATE_KEY = 0x0000000C
TRANSPARENT_DH_PUBLIC_KEY = 0x0000000D
TRANSPARENT_ECDSA_PRIVATE_KEY = 0x0000000E # Deprecated in KMIP 1.4, designated '(Reserved)' in KMIP 2.0
TRANSPARENT_ECDSA_PUBLIC_KEY = 0x0000000F # Deprecated in KMIP 1.4, designated '(Reserved)' in KMIP 2.0
TRANSPARENT_ECDH_PRIVATE_KEY = 0x00000010 # Deprecated in KMIP 1.4, designated '(Reserved)' in KMIP 2.0
TRANSPARENT_ECDH_PUBLIC_KEY = 0x00000011 # Deprecated in KMIP 1.4, designated '(Reserved)' in KMIP 2.0
TRANSPARENT_ECMQV_PRIVATE_KEY = 0x00000012 # Deprecated in KMIP 1.4, designated '(Reserved)' in KMIP 2.0
TRANSPARENT_ECMQV_PUBLIC_KEY = 0x00000013 # Deprecated in KMIP 1.4, designated '(Reserved)' in KMIP 2.0
# KMIP 1.3
TRANSPARENT_EC_PRIVATE_KEY = 0x00000014
TRANSPARENT_EC_PUBLIC_KEY = 0x00000015
# KMIP 1.4
PKCS_12 = 0x00000016
class KeyRoleType(enum.Enum):
# KMIP 1.0
BDK = 0x00000001
CVK = 0x00000002
DEK = 0x00000003
MKAC = 0x00000004
MKSMC = 0x00000005
MKSMI = 0x00000006
MKDAC = 0x00000007
MKDN = 0x00000008
MKCP = 0x00000009
MKOTH = 0x0000000A
KEK = 0x0000000B
MAC_16609 = 0x0000000C
MAC_97971 = 0x0000000D
MAC_97972 = 0x0000000E
MAC_97973 = 0x0000000F
MAC_97974 = 0x00000010
MAC_97975 = 0x00000011
ZPK = 0x00000012
PVKIBM = 0x00000013
PVKPVV = 0x00000014
PVKOTH = 0x00000015
# KMIP 1.4
DUKPT = 0x00000016
IV = 0x00000017
TRKBK = 0x00000018
class KeyValueLocationType(enum.Enum):
# KMIP 1.2
UNINTERPRETED_TEXT_STRING = 0x00000001
URI = 0x00000002
class KeyWrapType(enum.Enum):
NOT_WRAPPED = 0x00000001
AS_REGISTERED = 0x00000002
class KMIPVersion(OrderedEnum):
KMIP_1_0 = 1.0
KMIP_1_1 = 1.1
KMIP_1_2 = 1.2
KMIP_1_3 = 1.3
KMIP_1_4 = 1.4
KMIP_2_0 = 2.0
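# Usage sketch: KMIPVersion is an OrderedEnum, so protocol versions can be
# compared directly (assuming OrderedEnum defines the rich comparisons), e.g.:
#
#     if client_version >= KMIPVersion.KMIP_1_2:
#         ...  # enable operations introduced in KMIP 1.2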
class LinkType(enum.Enum):
# KMIP 1.0
CERTIFICATE_LINK = 0x00000101
PUBLIC_KEY_LINK = 0x00000102
PRIVATE_KEY_LINK = 0x00000103
DERIVATION_BASE_OBJECT_LINK = 0x00000104
DERIVED_KEY_LINK = 0x00000105
REPLACEMENT_OBJECT_LINK = 0x00000106
REPLACED_OBJECT_LINK = 0x00000107
# KMIP 1.2
PARENT_LINK = 0x00000108
CHILD_LINK = 0x00000109
PREVIOUS_LINK = 0x0000010A
NEXT_LINK = 0x0000010B
# KMIP 1.4
PKCS12_CERTIFICATE_LINK = 0x0000010C
PKCS12_PASSWORD_LINK = 0x0000010D
# KMIP 2.0
WRAPPING_KEY_LINK = 0x0000010E
class MaskGenerator(enum.Enum):
# KMIP 1.4
MGF1 = 0x00000001
class NameType(enum.Enum):
# KMIP 1.0
UNINTERPRETED_TEXT_STRING = 0x00000001
URI = 0x00000002
class NISTKeyType(enum.Enum):
# KMIP 2.0
PRIVATE_SIGNATURE_KEY = 0x00000001
PUBLIC_SIGNATURE_VERIFICATION_KEY = 0x00000002
SYMMETRIC_AUTHENTICATION_KEY = 0x00000003
PRIVATE_AUTHENTICATION_KEY = 0x00000004
PUBLIC_AUTHENTICATION_KEY = 0x00000005
SYMMETRIC_DATA_ENCRYPTION_KEY = 0x00000006
SYMMETRIC_KEY_WRAPPING_KEY = 0x00000007
SYMMETRIC_RANDOM_NUMBER_GENERATION_KEY = 0x00000008
SYMMETRIC_MASTER_KEY = 0x00000009
PRIVATE_KEY_TRANSPORT_KEY = 0x0000000A
PUBLIC_KEY_TRANSPORT_KEY = 0x0000000B
SYMMETRIC_KEY_AGREEMENT_KEY = 0x0000000C
PRIVATE_STATIC_KEY_AGREEMENT_KEY = 0x0000000D
PUBLIC_STATIC_KEY_AGREEMENT_KEY = 0x0000000E
PRIVATE_EPHEMERAL_KEY_AGREEMENT_KEY = 0x0000000F
PUBLIC_EPHEMERAL_KEY_AGREEMENT_KEY = 0x00000010
SYMMETRIC_AUTHORIZATION_KEY = 0x00000011
PRIVATE_AUTHORIZATION_KEY = 0x00000012
PUBLIC_AUTHORIZATION_KEY = 0x00000013
class ObjectGroupMember(enum.Enum):
# KMIP 1.1
GROUP_MEMBER_FRESH = 0x00000001
GROUP_MEMBER_DEFAULT = 0x00000002
class ObjectType(enum.Enum):
# KMIP 1.0
CERTIFICATE = 0x00000001
SYMMETRIC_KEY = 0x00000002
PUBLIC_KEY = 0x00000003
PRIVATE_KEY = 0x00000004
SPLIT_KEY = 0x00000005
TEMPLATE = 0x00000006 # Deprecated in KMIP 1.3, designated '(Reserved)' in KMIP 2.0
SECRET_DATA = 0x00000007
OPAQUE_DATA = 0x00000008
# KMIP 1.2
PGP_KEY = 0x00000009
# KMIP 2.0
CERTIFICATE_REQUEST = 0x0000000A
class OpaqueDataType(enum.Enum):
NONE = 0x80000000 # Not defined by the standard, but we need something.
# The standard does say that values starting 0x8xxxxxx
# are considered extensions
class Operation(enum.Enum):
# KMIP 1.0
CREATE = 0x00000001
CREATE_KEY_PAIR = 0x00000002
REGISTER = 0x00000003
REKEY = 0x00000004
DERIVE_KEY = 0x00000005
CERTIFY = 0x00000006
RECERTIFY = 0x00000007
LOCATE = 0x00000008
CHECK = 0x00000009
GET = 0x0000000A
GET_ATTRIBUTES = 0x0000000B
GET_ATTRIBUTE_LIST = 0x0000000C
ADD_ATTRIBUTE = 0x0000000D
MODIFY_ATTRIBUTE = 0x0000000E
DELETE_ATTRIBUTE = 0x0000000F
OBTAIN_LEASE = 0x00000010
GET_USAGE_ALLOCATION = 0x00000011
ACTIVATE = 0x00000012
REVOKE = 0x00000013
DESTROY = 0x00000014
ARCHIVE = 0x00000015
RECOVER = 0x00000016
VALIDATE = 0x00000017
QUERY = 0x00000018
CANCEL = 0x00000019
POLL = 0x0000001A
NOTIFY = 0x0000001B
PUT = 0x0000001C
# KMIP 1.1
REKEY_KEY_PAIR = 0x0000001D
DISCOVER_VERSIONS = 0x0000001E
# KMIP 1.2
ENCRYPT = 0x0000001F
DECRYPT = 0x00000020
SIGN = 0x00000021
SIGNATURE_VERIFY = 0x00000022
MAC = 0x00000023
MAC_VERIFY = 0x00000024
RNG_RETRIEVE = 0x00000025
RNG_SEED = 0x00000026
HASH = 0x00000027
CREATE_SPLIT_KEY = 0x00000028
JOIN_SPLIT_KEY = 0x00000029
# KMIP 1.4
IMPORT = 0x0000002A
EXPORT = 0x0000002B
# KMIP 2.0
LOG = 0x0000002C
LOGIN = 0x0000002D
LOGOUT = 0x0000002E
DELEGATED_LOGIN = 0x0000002F
ADJUST_ATTRIBUTE = 0x00000030
SET_ATTRIBUTE = 0x00000031
SET_ENDPOINT_ROLE = 0x00000032
PKCS_11 = 0x00000033
INTEROP = 0x00000034
REPROVISION = 0x00000035
class PaddingMethod(enum.Enum):
# KMIP 1.0
NONE = 0x00000001
OAEP = 0x00000002
PKCS5 = 0x00000003
SSL3 = 0x00000004
ZEROS = 0x00000005
ANSI_X923 = 0x00000006
ISO_10126 = 0x00000007
PKCS1v15 = 0x00000008
X931 = 0x00000009
PSS = 0x0000000A
class PKCS11Function(enum.Enum):
# KMIP 2.0
#
# These values are the 1-based offset count of the function in the
# CK_FUNCTION_LIST_3_0 structure as specified in the OASIS PKCS#11
# Cryptographic Token Interface Base Specification Version 3.0 document.
#
# The above document is not currently available, so this set of
# enumerations is intentionally left empty as a placeholder. It should
# be filled in in a future update.
PLACEHOLDER = 'Do not use this.'
class PKCS11ReturnCode(enum.Enum):
# KMIP 2.0
#
# These values are specified in the CK_RV values in the OASIS PKCS#11
# Cryptographic Token Interface Base Specification Version 3.0 document.
#
# The above document is not currently available, so this set of
# enumerations is intentionally left empty as a placeholder. It should
# be filled in in a future update.
PLACEHOLDER = 'Do not use this.'
class Policy(enum.Enum):
ALLOW_ALL = "Allow All"
ALLOW_OWNER = "Allow Owner"
DISALLOW_ALL = "Disallow All"
class ProfileName(enum.Enum):
# KMIP 1.3
BASELINE_SERVER_BASIC_KMIPv12 = 0x00000001
BASELINE_SERVER_TLSv12_KMIPv12 = 0x00000002
BASELINE_CLIENT_BASIC_KMIPv12 = 0x00000003
BASELINE_CLIENT_TLSv12_KMIPv12 = 0x00000004
COMPLETE_SERVER_BASIC_KMIPv12 = 0x00000005
COMPLETE_SERVER_TLSv12_KMIPv12 = 0x00000006
TAPE_LIBRARY_CLIENT_KMIPv10 = 0x00000007
TAPE_LIBRARY_CLIENT_KMIPv11 = 0x00000008
TAPE_LIBRARY_CLIENT_KMIPv12 = 0x00000009
TAPE_LIBRARY_SERVER_KMIPv10 = 0x0000000A
TAPE_LIBRARY_SERVER_KMIPv11 = 0x0000000B
TAPE_LIBRARY_SERVER_KMIPv12 = 0x0000000C
SYMMETRIC_KEY_LIFECYCLE_CLIENT_KMIPv10 = 0x0000000D
SYMMETRIC_KEY_LIFECYCLE_CLIENT_KMIPv11 = 0x0000000E
SYMMETRIC_KEY_LIFECYCLE_CLIENT_KMIPv12 = 0x0000000F
SYMMETRIC_KEY_LIFECYCLE_SERVER_KMIPv10 = 0x00000010
SYMMETRIC_KEY_LIFECYCLE_SERVER_KMIPv11 = 0x00000011
SYMMETRIC_KEY_LIFECYCLE_SERVER_KMIPv12 = 0x00000012
ASYMMETRIC_KEY_LIFECYCLE_CLIENT_KMIPv10 = 0x00000013
ASYMMETRIC_KEY_LIFECYCLE_CLIENT_KMIPv11 = 0x00000014
ASYMMETRIC_KEY_LIFECYCLE_CLIENT_KMIPv12 = 0x00000015
ASYMMETRIC_KEY_LIFECYCLE_SERVER_KMIPv10 = 0x00000016
ASYMMETRIC_KEY_LIFECYCLE_SERVER_KMIPv11 = 0x00000017
ASYMMETRIC_KEY_LIFECYCLE_SERVER_KMIPv12 = 0x00000018
BASIC_CRYPTOGRAPHIC_CLIENT_KMIPv12 = 0x00000019
BASIC_CRYPTOGRAPHIC_SERVER_KMIPv12 = 0x0000001A
ADVANCED_CRYPTOGRAPHIC_CLIENT_KMIPv12 = 0x0000001B
ADVANCED_CRYPTOGRAPHIC_SERVER_KMIPv12 = 0x0000001C
RNG_CRYPTOGRAPHIC_CLIENT_KMIPv12 = 0x0000001D
RNG_CRYPTOGRAPHIC_SERVER_KMIPv12 = 0x0000001E
BASIC_SYMMETRIC_KEY_FOUNDRY_CLIENT_KMIPv10 = 0x0000001F
INTERMEDIATE_SYMMETRIC_KEY_FOUNDRY_CLIENT_KMIPv10 = 0x00000020
ADVANCED_SYMMETRIC_KEY_FOUNDRY_CLIENT_KMIPv10 = 0x00000021
BASIC_SYMMETRIC_KEY_FOUNDRY_CLIENT_KMIPv11 = 0x00000022
INTERMEDIATE_SYMMETRIC_KEY_FOUNDRY_CLIENT_KMIPv11 = 0x00000023
ADVANCED_SYMMETRIC_KEY_FOUNDRY_CLIENT_KMIPv11 = 0x00000024
BASIC_SYMMETRIC_KEY_FOUNDRY_CLIENT_KMIPv12 = 0x00000025
INTERMEDIATE_SYMMETRIC_KEY_FOUNDRY_CLIENT_KMIPv12 = 0x00000026
ADVANCED_SYMMETRIC_KEY_FOUNDRY_CLIENT_KMIPv12 = 0x00000027
SYMMETRIC_KEY_FOUNDRY_SERVER_KMIPv10 = 0x00000028
SYMMETRIC_KEY_FOUNDRY_SERVER_KMIPv11 = 0x00000029
SYMMETRIC_KEY_FOUNDRY_SERVER_KMIPv12 = 0x0000002A
OPAQUE_MANAGED_OBJECT_STORE_CLIENT_KMIPv10 = 0x0000002B
OPAQUE_MANAGED_OBJECT_STORE_CLIENT_KMIPv11 = 0x0000002C
OPAQUE_MANAGED_OBJECT_STORE_CLIENT_KMIPv12 = 0x0000002D
OPAQUE_MANAGED_OBJECT_STORE_SERVER_KMIPv10 = 0x0000002E
OPAQUE_MANAGED_OBJECT_STORE_SERVER_KMIPv11 = 0x0000002F
OPAQUE_MANAGED_OBJECT_STORE_SERVER_KMIPv12 = 0x00000030
SUITE_B_MINLOS_128_CLIENT_KMIPv10 = 0x00000031
SUITE_B_MINLOS_128_CLIENT_KMIPv11 = 0x00000032
SUITE_B_MINLOS_128_CLIENT_KMIPv12 = 0x00000033
SUITE_B_MINLOS_128_SERVER_KMIPv10 = 0x00000034
SUITE_B_MINLOS_128_SERVER_KMIPv11 = 0x00000035
SUITE_B_MINLOS_128_SERVER_KMIPv12 = 0x00000036
SUITE_B_MINLOS_192_CLIENT_KMIPv10 = 0x00000037
SUITE_B_MINLOS_192_CLIENT_KMIPv11 = 0x00000038
SUITE_B_MINLOS_192_CLIENT_KMIPv12 = 0x00000039
SUITE_B_MINLOS_192_SERVER_KMIPv10 = 0x0000003A
SUITE_B_MINLOS_192_SERVER_KMIPv11 = 0x0000003B
SUITE_B_MINLOS_192_SERVER_KMIPv12 = 0x0000003C
STORAGE_ARRAY_WITH_SELF_ENCRYPTING_DRIVE_CLIENT_KMIPv10 = 0x0000003D
STORAGE_ARRAY_WITH_SELF_ENCRYPTING_DRIVE_CLIENT_KMIPv11 = 0x0000003E
STORAGE_ARRAY_WITH_SELF_ENCRYPTING_DRIVE_CLIENT_KMIPv12 = 0x0000003F
STORAGE_ARRAY_WITH_SELF_ENCRYPTING_DRIVE_SERVER_KMIPv10 = 0x00000040
STORAGE_ARRAY_WITH_SELF_ENCRYPTING_DRIVE_SERVER_KMIPv11 = 0x00000041
STORAGE_ARRAY_WITH_SELF_ENCRYPTING_DRIVE_SERVER_KMIPv12 = 0x00000042
HTTPS_CLIENT_KMIPv10 = 0x00000043
HTTPS_CLIENT_KMIPv11 = 0x00000044
HTTPS_CLIENT_KMIPv12 = 0x00000045
HTTPS_SERVER_KMIPv10 = 0x00000046
HTTPS_SERVER_KMIPv11 = 0x00000047
HTTPS_SERVER_KMIPv12 = 0x00000048
JSON_CLIENT_KMIPv10 = 0x00000049
JSON_CLIENT_KMIPv11 = 0x0000004A
JSON_CLIENT_KMIPv12 = 0x0000004B
JSON_SERVER_KMIPv10 = 0x0000004C
JSON_SERVER_KMIPv11 = 0x0000004D
JSON_SERVER_KMIPv12 = 0x0000004E
XML_CLIENT_KMIPv10 = 0x0000004F
XML_CLIENT_KMIPv11 = 0x00000050
XML_CLIENT_KMIPv12 = 0x00000051
XML_SERVER_KMIPv10 = 0x00000052
XML_SERVER_KMIPv11 = 0x00000053
XML_SERVER_KMIPv12 = 0x00000054
BASELINE_SERVER_BASIC_KMIPv13 = 0x00000055
BASELINE_SERVER_TLSv12_KMIPv13 = 0x00000056
BASELINE_CLIENT_BASIC_KMIPv13 = 0x00000057
BASELINE_CLIENT_TLSv12_KMIPv13 = 0x00000058
COMPLETE_SERVER_BASIC_KMIPv13 = 0x00000059
COMPLETE_SERVER_TLSv12_KMIPv13 = 0x0000005A
TAPE_LIBRARY_CLIENT_KMIPv13 = 0x0000005B
TAPE_LIBRARY_SERVER_KMIPv13 = 0x0000005C
SYMMETRIC_KEY_LIFECYCLE_CLIENT_KMIPv13 = 0x0000005D
SYMMETRIC_KEY_LIFECYCLE_SERVER_KMIPv13 = 0x0000005E
ASYMMETRIC_KEY_LIFECYCLE_CLIENT_KMIPv13 = 0x0000005F
ASYMMETRIC_KEY_LIFECYCLE_SERVER_KMIPv13 = 0x00000060
BASIC_CRYPTOGRAPHIC_CLIENT_KMIPv13 = 0x00000061
BASIC_CRYPTOGRAPHIC_SERVER_KMIPv13 = 0x00000062
ADVANCED_CRYPTOGRAPHIC_CLIENT_KMIPv13 = 0x00000063
ADVANCED_CRYPTOGRAPHIC_SERVER_KMIPv13 = 0x00000064
RNG_CRYPTOGRAPHIC_CLIENT_KMIPv13 = 0x00000065
RNG_CRYPTOGRAPHIC_SERVER_KMIPv13 = 0x00000066
BASIC_SYMMETRIC_KEY_FOUNDRY_CLIENT_KMIPv13 = 0x00000067
INTERMEDIATE_SYMMETRIC_KEY_FOUNDRY_CLIENT_KMIPv13 = 0x00000068
ADVANCED_SYMMETRIC_KEY_FOUNDRY_CLIENT_KMIPv13 = 0x00000069
SYMMETRIC_KEY_FOUNDRY_SERVER_KMIPv13 = 0x0000006A
OPAQUE_MANAGED_OBJECT_STORE_CLIENT_KMIPv13 = 0x0000006B
OPAQUE_MANAGED_OBJECT_STORE_SERVER_KMIPv13 = 0x0000006C
SUITE_B_MINLOS_128_CLIENT_KMIPv13 = 0x0000006D
SUITE_B_MINLOS_128_SERVER_KMIPv13 = 0x0000006E
SUITE_B_MINLOS_192_CLIENT_KMIPv13 = 0x0000006F
SUITE_B_MINLOS_192_SERVER_KMIPv13 = 0x00000070
STORAGE_ARRAY_WITH_SELF_ENCRYPTING_DRIVE_CLIENT_KMIPv13 = 0x00000071
STORAGE_ARRAY_WITH_SELF_ENCRYPTING_DRIVE_SERVER_KMIPv13 = 0x00000072
HTTPS_CLIENT_KMIPv13 = 0x00000073
HTTPS_SERVER_KMIPv13 = 0x00000074
JSON_CLIENT_KMIPv13 = 0x00000075
JSON_SERVER_KMIPv13 = 0x00000076
XML_CLIENT_KMIPv13 = 0x00000077
XML_SERVER_KMIPv13 = 0x00000078
# KMIP 1.4
BASELINE_SERVER_BASIC_KMIPv14 = 0x00000079
| |
# python/src/cca/ebt/sourcecode_metrics_for_survey_cpp.py
#!/usr/bin/env python3
'''
Source code metrics for C programs
Copyright 2013-2018 RIKEN
Copyright 2018-2020 Chiba Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__author__ = '<NAME> <<EMAIL>>'
import logging
from .sourcecode_metrics_for_survey_base import (get_lver, get_proj_list,
ftbl_list_to_orange,
MetricsBase)
from .metrics_queries_cpp import QUERY_TBL
from cca.ccautil.virtuoso import VIRTUOSO_PW, VIRTUOSO_PORT
from cca.factutil.entity import SourceCodeEntity
logger = logging.getLogger()
FOP_TBL = { # number of FP operations (for SPARC64 VIIIfx)
'nint': 2,
'jnint': 2,
'cos': 29,
'dcos': 31,
'exp': 19,
'dexp': 23,
'log': 19,
'alog': 19,
'dlog': 23,
'mod': 8,
'amod': 8,
'dmod': 8,
'sign': 2,
'dsign': 2,
'sin': 29,
'dsin': 31,
'sqrt': 11,
'dsqrt': 21,
'tan': 58,
'dtan': 64,
}
FOP_TBL_DBL_EXTRA = {
'cos': 2,
'exp': 4,
'log': 4,
'sin': 2,
'sqrt': 10,
'tan': 6,
}
FOP_TBL_VA = {
'max': (lambda n: n - 1),
'amax1': (lambda n: n - 1),
'dmax1': (lambda n: n - 1),
'min': (lambda n: n - 1),
'amin1': (lambda n: n - 1),
'dmin1': (lambda n: n - 1),
}
LINES_OF_CODE = 'lines_of_code'
MAX_LOOP_DEPTH = 'max_loop_depth'
MAX_FUSIBLE_LOOPS = 'max_fusible_loops'
MAX_MERGEABLE_ARRAYS = 'max_mergeable_arrays'
MAX_ARRAY_RANK = 'max_array_rank'
MAX_LOOP_LEVEL = 'max_loop_level'
N_BRANCHES = 'branches'
N_STMTS = 'stmts'
N_FP_OPS = 'fp_ops'
N_OPS = 'ops'
N_CALLS = 'calls'
N_A_REFS = ['array_refs0', 'array_refs1', 'array_refs2']
N_IND_A_REFS = ['indirect_array_refs0', 'indirect_array_refs1',
'indirect_array_refs2']
N_DBL_A_REFS = ['dbl_array_refs0', 'dbl_array_refs1', 'dbl_array_refs2']
BF = ['bf0', 'bf1', 'bf2']
META_KEYS = ['proj', 'ver', 'path', 'sub', 'lnum', 'digest']
abbrv_tbl = {
LINES_OF_CODE: 'LOC',
MAX_LOOP_DEPTH: 'LpD',
MAX_FUSIBLE_LOOPS: 'FLp',
MAX_MERGEABLE_ARRAYS: 'MA',
MAX_ARRAY_RANK: 'ARk',
MAX_LOOP_LEVEL: 'LLv',
N_BRANCHES: 'Br',
N_STMTS: 'St',
N_FP_OPS: 'FOp',
N_OPS: 'Op',
N_CALLS: 'Ca',
N_A_REFS[0]: 'AR0',
N_IND_A_REFS[0]: 'IAR0',
N_DBL_A_REFS[0]: 'DAR0',
N_A_REFS[1]: 'AR1',
N_IND_A_REFS[1]: 'IAR1',
N_DBL_A_REFS[1]: 'DAR1',
N_A_REFS[2]: 'AR2',
N_IND_A_REFS[2]: 'IAR2',
N_DBL_A_REFS[2]: 'DAR2',
BF[0]: 'BF0',
BF[1]: 'BF1',
BF[2]: 'BF2',
}
###
def count_aas(aas):
c = 0
for aa in aas:
if aa.startswith(','):
c += 2
else:
c += 1
return c
def get_nfops(name, nargs, double=False):
nfop = 1
try:
nfop = FOP_TBL_VA[name](nargs)
except KeyError:
nfop = FOP_TBL.get(name, 1)
if double:
nfop += FOP_TBL_DBL_EXTRA.get(name, 0)
prec = 's'
if double:
prec = 'd'
logger.debug('%s{%s}(%d) --> %d' % (name, prec, nargs, nfop))
return nfop
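# Examples of the cost lookup above (values taken from FOP_TBL for the
# SPARC64 VIIIfx):
#
#     get_nfops('max', 4)                # variadic: 4 - 1 = 3 FP operations
#     get_nfops('sqrt', 1, double=True)  # 11 + 10 double-precision extra = 21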
def make_feature_tbl():
v = {
'meta': {
'proj': '',
'ver': '',
'path': '',
'sub': '',
'lnum': '',
},
BF[0]: 0.0,
BF[1]: 0.0,
BF[2]: 0.0,
N_FP_OPS: 0,
N_OPS: 0,
N_A_REFS[0]: 0,
N_IND_A_REFS[0]: 0,
N_DBL_A_REFS[0]: 0,
N_A_REFS[1]: 0,
N_IND_A_REFS[1]: 0,
N_DBL_A_REFS[1]: 0,
N_A_REFS[2]: 0,
N_IND_A_REFS[2]: 0,
N_DBL_A_REFS[2]: 0,
N_BRANCHES: 0,
N_STMTS: 0,
N_CALLS: 0,
LINES_OF_CODE: 0,
MAX_LOOP_LEVEL: 0,
MAX_ARRAY_RANK: 0,
MAX_LOOP_DEPTH: 0,
MAX_FUSIBLE_LOOPS: 0,
MAX_MERGEABLE_ARRAYS: 0,
}
return v
def ftbl_to_string(ftbl):
meta_str = '%(proj)s:%(ver)s:%(path)s:%(sub)s:%(lnum)s' % ftbl['meta']
cpy = ftbl.copy()
cpy['meta'] = meta_str
ks = list(ftbl.keys())
ks.remove('meta')
fmt = '%(meta)s ('
fmt += ','.join(['%s:%%(%s)s' % (abbrv_tbl[k], k) for k in ks])
fmt += ')'
s = fmt % cpy
return s
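# Sketch of the string produced by ftbl_to_string() (added comment; the values
# are made up and the key order follows the table built by make_feature_tbl):
#   myproj:v1.0:src/main.f90:sub0:42 (BF0:0.5,BF1:0.25,BF2:0.1,FOp:100,...,MA:2)
# i.e. colon-joined metadata followed by abbreviated metric:value pairs.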
class Metrics(MetricsBase):
def __init__(self, proj_id, method='odbc',
pw=VIRTUOSO_PW, port=VIRTUOSO_PORT):
super().__init__(proj_id, method, pw, port)
def find_ftbl(self, key):
md = self.get_metadata(key)
fn = md['fn']
digest = md['digest']
(ver, path, lnum) = key
ftbl = make_feature_tbl()
ftbl['meta'] = {
'proj': self._proj_id,
'ver': ver,
'path': path,
'fn': fn,
'lnum': str(lnum),
'digest': digest,
}
fop = self.get_value(N_FP_OPS, key)
if fop > 0:
for lv in range(3):
if BF[lv] in ftbl:
aa = self.get_value(N_A_REFS[lv], key)
daa = self.get_value(N_DBL_A_REFS[lv], key)
saa = aa - daa
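# Added note: BF is a bytes-per-flop estimate, assuming 4-byte single-precision
# and 8-byte double-precision array references:
#   bf = (single_refs * 4 + double_refs * 8) / fp_ops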
bf = float(saa * 4 + daa * 8) / float(fop)
# print('!!! {} -> fop={} aa[{}]={} daa[{}]={} bf[{}]={}'.format(key, fop, lv, aa, lv, daa, lv, bf))
ftbl[BF[lv]] = bf
for item in ftbl.keys():
try:
ftbl[item] = self._result_tbl[item][key]
except KeyError:
pass
return ftbl
def key_to_string(self, key):
(ver, loc, fn, loop, vname) = key
e = SourceCodeEntity(uri=loop)
lnum = e.get_range().get_start_line()
s = '%s:%s:%s:%s' % (ver, loc, fn, lnum)
return s
def set_metrics(self, name, _key, value, add=False):
# print('!!! set_metrics: name={} key={} value={} add={}'.format(name, _key, value, add))
(ver, loc, fn, loop, vname) = _key
ent = SourceCodeEntity(uri=loop)
lnum = ent.get_range().get_start_line()
key = (ver, loc, str(lnum))
key_str = '%s:%s:%s' % key
logger.debug('%s(%s): %s -> %s' % (self.key_to_string(_key),
key_str, name, value))
loop_d = self.get_loop_digest(_key)
self._metadata_tbl[key] = {'fn': fn, 'digest': loop_d}
try:
tbl = self._result_tbl[name]
except KeyError:
tbl = {}
self._result_tbl[name] = tbl
if add:
v = tbl.get(key, 0)
tbl[key] = v + value
else:
tbl[key] = value
def finalize_ipp(self):
logger.info('finalizing call graph...')
query = QUERY_TBL['fd_fd'] % {'proj': self._graph_uri}
for qvs, row in self._sparql.query(query):
callee = row['callee']
fd = row['fd']
self.ipp_add(callee, fd)
query = QUERY_TBL['loop_fd'] % {'proj': self._graph_uri}
for qvs, row in self._sparql.query(query):
callee = row['callee']
loop = row['loop']
self.ipp_add(callee, loop, is_loop=True)
def build_tree(self, f=None):
query = QUERY_TBL['loop_loop'] % {'proj': self._graph_uri}
children_tbl = {}
parent_tbl = {}
for qvs, row in self._sparql.query(query):
ver = row['ver']
loc = row['loc']
fn = row.get('fn', '')
loop = row['loop']
loop_d = row['loop_d']
vname = ''
child_loop = row.get('child_loop', None)
child_loop_d = row.get('child_loop_d', '')
child_vname = ''
lver = get_lver(ver)
key = (lver, loc, fn, loop, vname)
self.set_loop_digest(key, loop_d)
if f:
f(key, row)
try:
child_loops = children_tbl[key]
except KeyError:
child_loops = []
children_tbl[key] = child_loops
if child_loop:
child_key = (lver, loc, fn, child_loop, child_vname)
self.set_loop_digest(child_key, child_loop_d)
if child_key not in child_loops:
child_loops.append(child_key)
parent_tbl[child_key] = key
self.ipp_add(child_loop, loop, is_loop=True)
roots = []
for k in children_tbl.keys():
if k not in parent_tbl:
roots.append(k)
r = SourceCodeEntity(uri=self.get_loop_of_key(k)).get_range()
lines = r.get_end_line() - r.get_start_line() + 1
self.set_metrics(LINES_OF_CODE, k, lines)
logger.info('%d top loops found' % len(roots))
tree = {'children': children_tbl, 'parent': parent_tbl, 'roots': roots}
self.set_tree(tree)
return tree
def get_key(self, row):
ver = row['ver']
loc = row['loc']
fn = row.get('fn', '')
loop = row['loop']
vname = ''
lver = get_lver(ver)
key = (lver, loc, fn, loop, vname)
return key
def calc_array_metrics(self):
logger.info('calculating array metrics...')
try:
query = QUERY_TBL['arrays'] % {'proj': self._graph_uri}
tbl = {}
for qvs, row in self._sparql.query(query):
key = self.get_key(row)
array = row['dtor']
tyc = row['tyc']
rank = int(row['rank'])
try:
arrays = tbl[key]
except KeyError:
arrays = []
tbl[key] = arrays
arrays.append((array, (tyc, rank)))
def get(key):
arrays = tbl.get(key, [])
max_rank = 0
t = {}
for (a, spec) in arrays:
(tyc, rank) = spec
if rank > max_rank:
max_rank = rank
try:
t[spec] += 1
except KeyError:
t[spec] = 1
max_mergeable_arrays = 0
for spec in t.keys():
if t[spec] > max_mergeable_arrays:
max_mergeable_arrays = t[spec]
return {'max_rank': max_rank, 'max_mergeable_arrays': max_mergeable_arrays}
tree = self.get_tree()
for key in tree['roots']:
data = {'max_rank': 0, 'max_mergeable_arrays': 0}
def f(k):
d = get(k)
if d['max_rank'] > data['max_rank']:
data['max_rank'] = d['max_rank']
if d['max_mergeable_arrays'] > data['max_mergeable_arrays']:
data['max_mergeable_arrays'] = d['max_mergeable_arrays']
self.iter_tree(tree, key, f)
logger.debug('key=%s' % (self.key_to_string(key)))
logger.debug('max_mergeable_arrays=%(max_mergeable_arrays)d max_rank=%(max_rank)d' % data)
self.set_metrics(MAX_MERGEABLE_ARRAYS, key, data['max_mergeable_arrays'])
self.set_metrics(MAX_ARRAY_RANK, key, data['max_rank'])
except KeyError:
pass
logger.info('done.')
def calc_in_loop_metrics(self):
logger.info('calculating other in_loop metrics...')
try:
query = QUERY_TBL['in_loop'] % {'proj': self._graph_uri}
def make_data():
return {'nbr': 0,
'nes': 0,
'nop': 0,
'nc': 0,
}
tbl = {}
for qvs, row in self._sparql.query(query):
key = self.get_key(row)
data = make_data()
data['nbr'] = int(row['nbr'] or '0')
data['nes'] = int(row['nes'] or '0')
data['nop'] = int(row['nop'] or '0')
data['nc'] = int(row['nc'] or '0')
tbl[key] = data
fd = row['fd']
if fd:
self.ipp_add(row['loop'], fd)
tree = self.get_tree()
for key in tree['roots']:
data = make_data()
def f(k):
d = tbl.get(k, None)
if d:
data['nbr'] += d['nbr']
data['nes'] += d['nes']
data['nop'] += d['nop']
data['nc'] += d['nc']
self.iter_tree(tree, key, f)
self.set_metrics(N_BRANCHES, key, data['nbr'])
self.set_metrics(N_STMTS, key, data['nes'])
self.set_metrics(N_OPS, key, data['nop'])
self.set_metrics(N_CALLS, key, data['nc'])
except KeyError:
raise
logger.info('done.')
# end of calc_in_loop_metrics
def calc_aref_in_loop_metrics(self, lv): # level: 0, 1, 2
logger.info('calculating other aref_in_loop metrics (lv=%d)...' % lv)
try:
if lv == 0:
qtbl = QUERY_TBL['aref0_in_loop']
elif lv == 1 or lv == 2:
qtbl = QUERY_TBL['aref12_in_loop']
else:
logger.warning(f'illegal level: {lv}')
return
tbl = {}
kinds = ['aa', 'iaa', 'daa']
def make_data():
d = {}
for k in kinds:
d[k] = set()
return d
for kind in kinds:
query = qtbl[kind] % {'proj': self._graph_uri, 'level': lv}
for qvs, row in self._sparql.query(query):
key | |
"""The MIT License (MIT)
Copyright (c) 2018 EvieePy
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import discord
import asyncio
import inspect
from typing import Union
import utils
async def pager(entries, chunk: int):
for x in range(0, len(entries), chunk):
yield entries[x:x + chunk]
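# Illustrative example (added comment): pager([1, 2, 3, 4, 5], 2) yields the
# chunks [1, 2], [3, 4] and [5]; it is consumed with
# `[c async for c in pager(entries, length)]` as done in paginate() below.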
class Paginator:
__slots__ = ('entries', 'extras', 'prepend_extras', 'append_extras', 'title', 'colour', 'footer', 'length',
'prepend', 'append', 'fmt', 'timeout', 'controller', 'pages', 'names', 'base', 'current',
'previous', 'eof', 'controls')
def __init__(self, **kwargs):
self.entries = kwargs.get('entries', None)
self.extras = kwargs.get('extras', None)
self.prepend_extras = kwargs.get('prepend_extras', None)
self.append_extras = kwargs.get('append_extras', None)
self.title = kwargs.get('title', None)
self.colour = kwargs.get('colour', None)
self.footer = kwargs.get('footer', None)
self.length = kwargs.get('length', 10)
self.prepend = kwargs.get('prepend', '')
self.append = kwargs.get('append', '')
self.fmt = kwargs.get('fmt', '')
self.timeout = kwargs.get('timeout', 90)
self.controller = None
self.pages = []
self.names = []
self.base = None
self.current = 0
self.previous = 0
self.eof = 0
self.controls = {'⏮': 'start', '◀': -1, '⏹': 'stop',
'▶': +1, '⏭': 'end'}
def chunker(self, entries: Union[list, tuple], chunk: int):
"""Create chunks of our entries for pagination."""
for x in range(0, len(entries), chunk):
yield entries[x:x + chunk]
def indexer(self, ctx, ctrl):
pass
async def reaction_controller(self, ctx):
pass
async def stop_controller(self, message):
try:
await message.delete()
except discord.HTTPException:
pass
try:
self.controller.cancel()
except Exception:
pass
def formmater(self, chunk):
return '\n'.join(f'{self.prepend}{self.fmt}{value}{self.fmt[::-1]}{self.append}' for value in chunk)
async def paginate(self, ctx):
if self.extras:
self.pages = [p for p in self.extras if isinstance(p, discord.Embed)]
if self.entries:
chunks = [c async for c in pager(self.entries, self.length)]
for index, chunk in enumerate(chunks):
page = discord.Embed(title=f'{self.title} - {index + 1}/{len(chunks)}', color=self.colour)
page.description = self.formmater(chunk)
if self.footer:
page.set_footer(text=self.footer)
self.pages.append(page)
if not self.pages:
raise utils.EvieeBaseException('There must be enough data to create at least 1 page for pagination.')
self.eof = float(len(self.pages) - 1)
self.controls['⏭'] = self.eof
self.controller = ctx.bot.loop.create_task(self.reaction_controller(ctx))
class SimplePaginator:
__slots__ = ('entries', 'extras', 'title', 'description', 'colour', 'footer', 'length', 'prepend', 'append',
'fmt', 'timeout', 'ordered', 'controls', 'controller', 'pages', 'current', 'previous', 'eof', 'base',
'names')
def __init__(self, **kwargs):
self.entries = kwargs.get('entries', None)
self.extras = kwargs.get('extras', None)
self.title = kwargs.get('title', None)
self.description = kwargs.get('description', None)
self.colour = kwargs.get('colour', 0xffd4d4)
self.footer = kwargs.get('footer', None)
self.length = kwargs.get('length', 10)
self.prepend = kwargs.get('prepend', '')
self.append = kwargs.get('append', '')
self.fmt = kwargs.get('fmt', '')
self.timeout = kwargs.get('timeout', 90)
self.ordered = kwargs.get('ordered', False)
self.controller = None
self.pages = []
self.names = []
self.base = None
self.current = 0
self.previous = 0
self.eof = 0
self.controls = {'⏮': 0.0, '◀': -1, '⏹': 'stop',
'▶': +1, '⏭': None}
async def indexer(self, ctx, ctrl):
if ctrl == 'stop':
ctx.bot.loop.create_task(self.stop_controller(self.base))
elif isinstance(ctrl, int):
self.current += ctrl
if self.current > self.eof or self.current < 0:
self.current -= ctrl
else:
self.current = int(ctrl)
async def reaction_controller(self, ctx):
bot = ctx.bot
author = ctx.author
self.base = await ctx.send(embed=self.pages[0])
if len(self.pages) == 1:
await self.base.add_reaction('⏹')
else:
for reaction in self.controls:
try:
await self.base.add_reaction(reaction)
except discord.HTTPException:
return
def check(r, u):
if str(r) not in self.controls.keys():
return False
elif u.id == bot.user.id or r.message.id != self.base.id:
return False
elif u.id != author.id:
return False
return True
while True:
try:
react, user = await bot.wait_for('reaction_add', check=check, timeout=self.timeout)
except asyncio.TimeoutError:
return ctx.bot.loop.create_task(self.stop_controller(self.base))
control = self.controls.get(str(react))
try:
await self.base.remove_reaction(react, user)
except discord.HTTPException:
pass
self.previous = self.current
await self.indexer(ctx, control)
if self.previous == self.current:
continue
try:
await self.base.edit(embed=self.pages[self.current])
except KeyError:
pass
async def stop_controller(self, message):
try:
await message.delete()
except discord.HTTPException:
pass
try:
self.controller.cancel()
except Exception:
pass
def formmater(self, chunk):
return '\n'.join(f'{self.prepend}{self.fmt}{value}{self.fmt[::-1]}{self.append}' for value in chunk)
async def paginate(self, ctx):
if self.extras:
self.pages = [p for p in self.extras if isinstance(p, discord.Embed)]
if self.entries:
chunks = [c async for c in pager(self.entries, self.length)]
for index, chunk in enumerate(chunks):
page = discord.Embed(title=f'{self.title} - {index + 1}/{len(chunks)}', color=self.colour)
page.description = self.formmater(chunk)
if self.footer:
page.set_footer(text=self.footer)
self.pages.append(page)
if not self.pages:
raise utils.EvieeBaseException('There must be enough data to create at least 1 page for pagination.')
self.eof = float(len(self.pages) - 1)
self.controls['⏭'] = self.eof
self.controller = ctx.bot.loop.create_task(self.reaction_controller(ctx))
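# Rough usage sketch (added comment; the command, cog and entry names are
# hypothetical and not part of this file):
#
#     @commands.command(name='tags')
#     async def tags_(self, ctx):
#         paginator = SimplePaginator(title='Tags',
#                                     entries=['foo', 'bar', 'baz'],
#                                     length=10)
#         await paginator.paginate(ctx)
#
# paginate() builds the embed pages and starts the reaction controller task.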
class HelpPaginator(SimplePaginator):
__slots__ = ()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.controls['🔢'] = 'selector'
self.timeout = 180
@property
def invalid_cogs(self):
return 'AbstractorCommands'
def get_embed(self, name, cog):
embed = discord.Embed(title=f'{name} - Help',
description=f'```ini\n{inspect.cleandoc(cog.__doc__ or "N/A")}\n```',
colour=cog.colour)
embed.set_thumbnail(url=cog.thumbnail)
return embed
def set_pages(self):
length = len(self.pages)
for index, embed in enumerate(self.pages):
embed.set_footer(text=f'Page {index + 1}/{length} | Base commands are displayed in < >')
for index, name in enumerate(self.names):
self.names[index] = f'{index + 1} - `{name}`'
async def del_msg(self, *args):
for msg in args:
try:
await msg.delete()
except discord.HTTPException:
return
async def wait_for(self, ctx):
def check(m):
return m.author == ctx.author
msg = await ctx.send('What page would you like to go to?')
while True:
try:
resp = await ctx.bot.wait_for('message', check=check, timeout=60)
except asyncio.TimeoutError:
return await self.del_msg(msg)
try:
index = int(resp.content)
except ValueError:
await ctx.send('Invalid number, please enter a valid page number.', delete_after=10)
return await self.del_msg(resp)
if index > len(self.pages) or index < 1:
await ctx.send('Invalid number, please enter a valid page number.', delete_after=10)
return await self.del_msg(resp)
else:
await self.del_msg(msg, resp)
self.previous = self.current
self.current = index - 1
try:
return await self.base.edit(embed=self.pages[self.current])
except KeyError:
pass
async def indexer(self, ctx, ctrl):
if ctrl == 'stop':
ctx.bot.loop.create_task(self.stop_controller(self.base))
elif ctrl == 'selector':
ctx.bot.loop.create_task(self.wait_for(ctx))
elif isinstance(ctrl, int):
self.current += ctrl
if self.current > self.eof or self.current < 0:
self.current -= ctrl
else:
self.current = int(ctrl)
async def command_formatter(self, ctx, _cog):
cog = ctx.bot.get_cog(_cog)
if not cog or cog.private:
return
embed = self.get_embed(_cog, cog)
self.names.append(_cog)
for index, command in enumerate(sorted(ctx.bot.get_cog_commands(_cog), key=lambda c: c.name)):
if command.hidden:
continue
try:
await command.can_run(ctx)
except Exception:
continue
short = inspect.cleandoc(command.short_doc) if command.short_doc else 'No help!'
if (index + 1) % 8 == 0:
self.pages.append(embed)
embed = self.get_embed(_cog, cog)
self.names.append(_cog)
if isinstance(command, utils.AbstractorGroup):
abstractors = ', '.join(sorted(command.abstractors))
else:
abstractors = None
if isinstance(command, utils.EvieeCommandGroup):
subs = '\n'.join([f' `{s}`' for s in sorted(command.commands, key=lambda _:_.name)])
embed.add_field(name=f'{command.name} - [Group]'
f'{"<{}>".format(abstractors) if abstractors else ""}',
value=f'{short}\n{subs}\n', inline=False)
else:
embed.add_field(name=command.name, value=short, inline=False)
self.pages.append(embed)
async def paginate(self, ctx):
valid_cogs = [cog for cog in ctx.bot.cogs if ctx.bot.get_cog_commands(cog) and cog not in self.invalid_cogs]
first = discord.Embed(title='Eviee - Help',
description=
'For more help and resources visit:\n\n'
'[Official Server](http://discord.gg/Hw7RTtr)\n'
'[Vote for Eviee](https://discordbots.org/bot/319047630048985099/vote)\n',
colour=0xffb2b2)
howto = discord.Embed(title='Help - How-to',
description='⏮ - `To beginning`:\n'
'◀ - `Page left`:\n'
'⏹ - `Close`:\n'
'▶ - `Page right`:\n'
'⏭ - `To End`:\n'
'🔢 - `Page Selector`:\n\n',
colour=0xffb2b2)
howto.add_field(name='Additional Info:', value='For additional info on how to use a specific command,'
' use:\n\n'
'`help <command>` (Without the `<>`).\n\n'
'This may be used on all commands or their sub commands.\n\n',)
basecom = discord.Embed(title='Help - Base Commands',
description='Eviee implements a command system which aims to be as human '
'friendly as possible, called: `Base Commands`\n\n'
'Base Commands will show up in the help like this:\n\n'
'**command - [Group]<base commands>**\n'
'For example:\n'
'`prefix - [Group]<add, remove, reset>`\n\n'
'This for example allows prefix add to be used in the'
' following ways:\n\n'
f'`{ctx.clean_prefix}prefix add ...` **or**\n'
f'`{ctx.clean_prefix}add prefix ...`',
colour=0xffb2b2)
first.set_thumbnail(url=ctx.bot.user.avatar_url)
basecom.set_thumbnail(url='https://i.imgur.com/E0ewLAN.png')
howto.set_thumbnail(url='https://i.imgur.com/QwvPYWr.png')
self.pages.extend((first, howto, basecom))
self.names.extend(('Intro', 'Howto', 'Base Commands'))
for cog in sorted(valid_cogs):
await self.command_formatter(ctx, cog)
self.set_pages()
cats = [c async for c in pager(self.names, int(len(self.names) / 2))]
for n in cats:
joined = '\n'.join(n)
self.pages[0].add_field(name='\u200b', value=joined)
self.pages[0].add_field(name='\u200b',
value=f'Only commands which are valid for {ctx.author.mention} will be shown.\n')
self.eof = float(len(self.pages) - 1)
self.controls['⏭'] = self.eof
self.controller = ctx.bot.loop.create_task(self.reaction_controller(ctx))
class ProfilePaginator(SimplePaginator):
__slots__ = ('member', 'grabbed')
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.controls['📷'] = 'avatar'
self.member = kwargs.get('member')
self.grabbed = False
async def | |
''' Character Class
Character class for py-fighter game.
Takes input of character data, screen, x position, and y position.
Has functions to make move.
- character_data takes the form of a Python dictionary with all key
components and data for character. This is usually stored in a JSON. We
have decided to implement it like this as it allows us to add future
characters or make significant changes to the characters without having
to edit the python code.
character_data follows the following structure:
{
"actions": ["running", "idle"], <-- These lines should not be
edited
"directions": ["left", "right"], <-- from the preset as changing
them may cause the game to break
"running": {
"left": [[0, 0], [0, 1], [0, 2], [0, 3],
[0, 0], [0, 4], [0, 5], [0, 6]],
"right": [[2, 0], [2, 1], [2, 2], [2, 3],
[2, 0], [2, 4], [2, 5], [2, 6]]
},
"idle": {
"left": [[0, 0], [1, 0]],
"right": [[2, 0], [3, 0]]
},
^ These nested lists give the coordinates within the grid (in order)
of where the images that make up the given action can be found in
the sprite sheet. There should be one dictionary for each action,
containing a list for each direction
"path": "graphics/spritesheets/basic-character.png", <-- File path
of the
"background": [0, 255, 0], <-- Background colour spritesheet
"gridsize": [4, 9], <-- Grid size of sprite sheet (zero-indexed)
"charsize": [32, 32], <-- Size of character in pixels
"scaledsize": [128, 128], <-- Size to scale character to in pixels
"speed": 1, <-- Speed of character in pixels per frame
"gravity": 1, <-- Gravitationaly speed in pixels per frame
"refresh": 10, <-- Number of frames between character refresh
"initialstate": ["running", "right"] <-- State the character is
initially spawned in (eg
direction facing)
}
We get this from two different sources - the spritesheet JSON which
gives a mapping between grid coordinates and the corresponding images
on the spritesheet, and the character specific config which contains
data about the specific character.
The main source of inspiration for this class and its sub class was the
website on producing a chess game:
https://ehmatthes.github.io/pcc_2e/beyond_pcc/pygame_sprite_sheets/#a-simple-sprite-sheet
This site shows how to blit images to a screen, and ways to split up
a sprite sheet.
I didn't take any inspiration for animating the sprite across frames;
I just assumed I could put the images in a data structure and iterate
over them.
@author: Robert (Unless stated otherwise)
'''
import pygame
import json
from classes.spritesheet import SpriteSheet
from classes.weapon import *
with open('json/spritesheet.JSON') as sprite_sheet_json:
SPRITESHEET_JSON = json.load(sprite_sheet_json)
class Character(pygame.sprite.Sprite):
''' Character Class - Used to display and animate sprites from
sprite sheets on screen. Usually won't be initialised directly,
rather its two child classes (Player and NPC) will be called.
'''
def __init__(self, character_data, background, screen,
x_position, y_position, arm_type = 'arms'):
''' Init Character
Function takes and unpacks relevant information from the
character's JSON dictionary
'''
# Init for sprite
pygame.sprite.Sprite.__init__(self)
# Assigning character data to self.character_data
self.character_data = character_data
self.addSpritesheetJSON()
# Putting object to screen
self.screen = screen
### Unpacking some important JSON dictionary into variables
self.speed = character_data['speed']
self.gravity = character_data['gravity']
self.jump_speed = character_data['jump_speed']
self.state = character_data['initialstate']
self.refresh_rate = character_data['refresh']
### Health and stats data
self.alive = True
self.__max_health = character_data['initial_health_points']
self.__health = self.__max_health
self.strength = character_data['initial_strength']
# Character Position
self.position = [x_position, y_position]
# Load sprite sheet and extract frames to dictionary
self.loadSpriteSheets(character_data)
# Adding screen to object
self.image = self.images[self.state[0]][self.state[1]]
self.image_index = 0
self.plot_rect = self.image[self.image_index].get_rect()
self.plot_rect.center = self.position
self.rect = pygame.Rect((0, 0, self.width, self.height))
self.rect.center = self.plot_rect.center
self.feet_rect = pygame.Rect((0, 0, self.width, self.height // 10))
self.feet_rect.bottomleft = self.rect.bottomleft
# setup score
self.score = 0
# Get Character Arms TODO MAY need updating to reflect some
# enemies having own arms/other arms
self.arms = WEAPON_TYPES[arm_type](self)
self.healthbar = HealthBar(self)
# Important move variables
self.refresh_counter = 0
self.x_y_moving = False
self.recoil_status = (False, 0)
# Storing dimension variables to self
self.screen_dims = (screen.get_width(), screen.get_height())
# Referencing important background variables
self.changeMap(background)
##### TO GO TO JSON
self.is_falling = False
self.is_jumping = False
self.attacking = False
self.init_attacking = False
self.jumps_in_action = 0
self.max_jumps_in_action = 2
self.attack_frame_counter = 0
self.thrown_projectiles = pygame.sprite.Group()
def changeMap(self, background):
''' changeMap(background) - used to update to new map
Function to update player with new background. Call this on
player when new map produced, map refers to class containing
sprite group of tiles, and map_matrix
'''
self.background = background
self.map_matrix = background.map_matrix
self.tiles_group = background.map_group
self.tile_rect = []
for tile in self.tiles_group:
self.tile_rect.append(tile.rect)
def addSpritesheetJSON(self):
''' addSpritesheetJSON
Loads spritesheet interpretation data from SPRITESHEET_JSON
'''
for key in SPRITESHEET_JSON.keys():
self.character_data[key] = SPRITESHEET_JSON[key]
def loadSpriteSheets(self, character_data):
''' loadSpriteSheets(self, character_data)
Procedure which loads spritesheet from path given, and extracts
each frame of the sprite and stores to dictionary self.images
These can then be updated depending on this instances state
'''
self.spritesheet = SpriteSheet(character_data['path'])
char_size = character_data['charsize']
scale_factor = character_data['scale_factor']
scaled_size = [char_size[0] * scale_factor,
char_size[1] * scale_factor]
self.scaled_size = scaled_size
background_colour = character_data['background']
image_types = character_data['actions']
image_directions = character_data['directions']
graphic_dims = character_data['graphic_dims']
self.width = graphic_dims[0] * scale_factor
self.height = graphic_dims[1] * scale_factor
self.images = {}
# Importing images into self.images dictionary
# This interacts with spritesheet code from
# https://ehmatthes.github.io/pcc_2e/beyond_pcc/pygame_sprite_sheets/#a-simple-sprite-sheet
# to load sprites into a dictionary
for image_type in image_types:
self.images[image_type] = {}
for image_direction in image_directions:
self.images[image_type][image_direction] = []
for coords in character_data[image_type][image_direction]:
specific_image = pygame.transform.scale(
self.spritesheet.image_at(coords,char_size),
scaled_size
)
specific_image.set_colorkey(background_colour)
self.images[image_type][image_direction] += \
[specific_image]
def changeArms(self, arm_type):
self.arms = WEAPON_TYPES[arm_type](self)
def addTarget(self, target_group):
''' Adds group of enemies to player
'''
self.target_group = target_group
def spriteCollision(self, other):
if pygame.sprite.collide_rect(self, other):
print('COLLISION')
else:
print('NO COLLISION')
def attack(self, target, type = 1):
''' Attack function - Attacks player assigned to it
Causes player being attacked to recoil in opposite direction,
and lose health.
'''
if self.rect[0] < target.rect[0]:
direction = 1
else:
direction = -1
self.arms.attack(direction, target)
def isFacingTarget(self, target):
'''
Function to check whether self is facing a particular enemy
'''
if self.state[1] == 'left' and \
(self.rect.centerx > target.rect.centerx):
return True
elif self.state[1] == 'right' and \
(self.rect.centerx < target.rect.centerx):
return True
return False
def recoil(self, force, direction):
''' Recoil function - Loses health from attack and sets recoil
counter
Recoil counter processed in display function. Each frame pushes
character back while recoiling.
'''
self.health -= force
self.score -= force // 5
self.recoil_status = (True, direction)
self.recoil_counter = 5
def update(self):
''' Update function
Updates position of characters subject to state.
'''
# Check if attacking, if attacking change state to attacking for
# one frame
if (self.attacking) and (self.init_attacking == False):
self.pre_attack_action, self.pre_action_direction = self.state
self.updateState('attack', self.state[1])
self.init_attacking = True
elif self.init_attacking:
self.attack_frame_counter += 1
if self.attack_frame_counter == self.refresh_rate:
self.attack_frame_counter = 0
self.attacking = False
self.init_attacking = False
self.updateState(self.pre_attack_action, self.state[1])
# Update verticle subject to jumping
#if self.state[0] == 'jumping':
if self.is_jumping:
self.applyJump()
else:
if not self.collisionWithGround() :
self.is_falling = True
# Updating position subject to gravity
if self.is_falling:
self.applyGravity()
# Updating subject to recoil. If character is recoiling, move
# in recoil direction
if self.recoil_status[0]:
if self.recoil_counter == 0:
self.recoil_status = (False, 0)
self.moveX(15 * self.recoil_status[1])
self.recoil_counter = self.recoil_counter - 1
# Update x/y subject to status
if self.x_y_moving:
if self.state[1] == 'right':
self.moveX(self.speed)
if self.state[1] == 'left':
move_speed = -1 * self.speed
self.moveX(move_speed)
def syncRects(self):
self.feet_rect.bottomleft = self.rect.bottomleft
self.plot_rect.center = self.rect.center
def display(self):
''' Display function
Updates image if required, and displays image(s) to screen
'''
# Update state image TODO CHANGE image code
self.image = self.images[self.state[0]][self.state[1]]
# Updating counter, and if necessary incrementing image
self.refresh_counter += 1
if self.refresh_counter == self.refresh_rate:
self.incrementImage()
# Catch frames changed mid refresh
if self.image_index >= len(self.image):
self.incrementImage()
self.syncRects()
# ###################################################
# # TODO DELETE THE FOLLOWING CODE - FOR TESTING ONLY
# surf = pygame.Surface((self.rect.width, self.rect.height))
# surf.fill((100, 100, 0))
# self.screen.blit(surf, self.rect)
# surf | |
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import fnmatch
import numpy as np
import random
import math
import re
import csv
#
# MeshDistanceMeasurement
#
class MeshDistanceMeasurement(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "MeshDistanceMeasurement" # TODO make this more human readable by adding spaces
self.parent.categories = ["SlicerMorph.SlicerMorph Labs"]
self.parent.dependencies = []
self.parent.contributors = ["<NAME> (UW), <NAME> (UW)"] # replace with "Firstname Lastname (Organization)"
self.parent.helpText = """
This module measures the distance between a template mesh and a group of specimen meshes. Each specimen mesh is warped into the template landmark space with a thin plate spline (TPS) transform, and a point-wise distance map and RMSE value are computed for each specimen.
"""
self.parent.helpText += self.getDefaultModuleDocumentationLink()
self.parent.acknowledgementText = """
This module was developed by <NAME> for SlicerMorph. SlicerMorph was originally supported by an NSF/DBI grant, "An Integrated Platform for Retrieval, Visualization and Analysis of 3D Morphology From Digital Biological Collections"
awarded to <NAME> (1759883), <NAME> (1759637), and <NAME> (1759839).
https://nsf.gov/awardsearch/showAward?AWD_ID=1759883&HistoricalAwards=false
""" # replace with organization, grant and thanks.
# Additional initialization step after application startup is complete
slicer.app.connect("startupCompleted()", registerSampleData)
#
# Register sample data sets in Sample Data module
#
def registerSampleData():
"""
Add data sets to Sample Data module.
"""
# It is always recommended to provide sample data for users to make it easy to try the module,
# but if no sample data is available then this method (and associated startupCompleted signal connection) can be removed.
import SampleData
iconsPath = os.path.join(os.path.dirname(__file__), 'Resources/Icons')
# To ensure that the source code repository remains small (can be downloaded and installed quickly)
# it is recommended to store data sets that are larger than a few MB in a Github release.
# TemplateKey1
SampleData.SampleDataLogic.registerCustomSampleDataSource(
# Category and sample name displayed in Sample Data module
category='TemplateKey',
sampleName='TemplateKey1',
# Thumbnail should have size of approximately 260x280 pixels and stored in Resources/Icons folder.
# It can be created by Screen Capture module, "Capture all views" option enabled, "Number of images" set to "Single".
thumbnailFileName=os.path.join(iconsPath, 'TemplateKey1.png'),
# Download URL and target file name
uris="https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95",
fileNames='TemplateKey1.nrrd',
# Checksum to ensure file integrity. Can be computed by this command:
# import hashlib; print(hashlib.sha256(open(filename, "rb").read()).hexdigest())
checksums = 'SHA256:998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95',
# This node name will be used when the data set is loaded
nodeNames='TemplateKey1'
)
# TemplateKey2
SampleData.SampleDataLogic.registerCustomSampleDataSource(
# Category and sample name displayed in Sample Data module
category='TemplateKey',
sampleName='TemplateKey2',
thumbnailFileName=os.path.join(iconsPath, 'TemplateKey2.png'),
# Download URL and target file name
uris="https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97",
fileNames='TemplateKey2.nrrd',
checksums = 'SHA256:1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97',
# This node name will be used when the data set is loaded
nodeNames='TemplateKey2'
)
#
# MeshDistanceMeasurementWidget
#
class MeshDistanceMeasurementWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def onSelect(self):
self.applyButton.enabled = bool (self.meshDirectory.currentPath and self.landmarkDirectory.currentPath and self.modelSelector.currentNode() and self.baseLMSelect.currentNode()) and (bool(self.semilandmarkDirectory.currentPath)==bool(self.baseSLMSelect.currentNode()))
def onSelectBaseSLM(self):
self.semilandmarkDirectory.enabled = bool(self.baseSLMSelect.currentNode())
self.applyButton.enabled = bool (self.meshDirectory.currentPath and self.landmarkDirectory.currentPath and self.modelSelector.currentNode() and self.baseLMSelect.currentNode()) and (bool(self.semilandmarkDirectory.currentPath)==bool(self.baseSLMSelect.currentNode()))
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
# Instantiate and connect widgets ...
#
# Parameters Area
#
parametersCollapsibleButton = ctk.ctkCollapsibleButton()
parametersCollapsibleButton.text = "Parameters"
self.layout.addWidget(parametersCollapsibleButton)
# Layout within the dummy collapsible button
parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)
#
# Select base mesh
#
self.modelSelector = slicer.qMRMLNodeComboBox()
self.modelSelector.nodeTypes = ( ("vtkMRMLModelNode"), "" )
self.modelSelector.selectNodeUponCreation = False
self.modelSelector.addEnabled = False
self.modelSelector.removeEnabled = False
self.modelSelector.noneEnabled = True
self.modelSelector.showHidden = False
self.modelSelector.setMRMLScene( slicer.mrmlScene )
parametersFormLayout.addRow("Base mesh: ", self.modelSelector)
#
# Select base landmark file
#
#self.baseLMFile=ctk.ctkPathLineEdit()
#self.baseLMFile.setToolTip( "Select file specifying base landmarks" )
#parametersFormLayout.addRow("Base landmark file: ", self.baseLMFile)
self.baseLMSelect = slicer.qMRMLNodeComboBox()
self.baseLMSelect.nodeTypes = ( ('vtkMRMLMarkupsFiducialNode'), "" )
self.baseLMSelect.selectNodeUponCreation = False
self.baseLMSelect.addEnabled = False
self.baseLMSelect.removeEnabled = False
self.baseLMSelect.noneEnabled = True
self.baseLMSelect.showHidden = False
self.baseLMSelect.showChildNodeTypes = False
self.baseLMSelect.setMRMLScene( slicer.mrmlScene )
parametersFormLayout.addRow("Base landmarks: ", self.baseLMSelect)
#
# Select base semi-landmark file
#
#self.baseLMFile=ctk.ctkPathLineEdit()
#self.baseLMFile.setToolTip( "Select file specifying base landmarks" )
#parametersFormLayout.addRow("Base landmark file: ", self.baseLMFile)
self.baseSLMSelect = slicer.qMRMLNodeComboBox()
self.baseSLMSelect.nodeTypes = ( ('vtkMRMLMarkupsFiducialNode'), "" )
self.baseSLMSelect.selectNodeUponCreation = False
self.baseSLMSelect.addEnabled = False
self.baseSLMSelect.removeEnabled = False
self.baseSLMSelect.noneEnabled = True
self.baseSLMSelect.showHidden = False
self.baseSLMSelect.showChildNodeTypes = False
self.baseSLMSelect.setMRMLScene( slicer.mrmlScene )
parametersFormLayout.addRow("Base semi-landmarks: ", self.baseSLMSelect)
#
# Select meshes directory
#
self.meshDirectory=ctk.ctkPathLineEdit()
self.meshDirectory.filters = ctk.ctkPathLineEdit.Dirs
self.meshDirectory.setToolTip( "Select directory containing meshes" )
parametersFormLayout.addRow("Mesh directory: ", self.meshDirectory)
#
# Select landmarks directory
#
self.landmarkDirectory=ctk.ctkPathLineEdit()
self.landmarkDirectory.filters = ctk.ctkPathLineEdit.Dirs
self.landmarkDirectory.setToolTip( "Select directory containing landmarks" )
parametersFormLayout.addRow("Landmark directory: ", self.landmarkDirectory)
#
# Select semi-landmarks directory
#
self.semilandmarkDirectory=ctk.ctkPathLineEdit()
self.semilandmarkDirectory.filters = ctk.ctkPathLineEdit.Dirs
self.semilandmarkDirectory.setToolTip( "Select directory containing semi-landmarks" )
self.semilandmarkDirectory.enabled = False
parametersFormLayout.addRow("Semi-landmark directory: ", self.semilandmarkDirectory)
#
# Select output directory
#
self.outputDirectory=ctk.ctkPathLineEdit()
self.outputDirectory.filters = ctk.ctkPathLineEdit.Dirs
self.outputDirectory.currentPath = slicer.app.temporaryPath
self.outputDirectory.setToolTip( "Select directory to save output distance maps" )
parametersFormLayout.addRow("Output directory: ", self.outputDirectory)
#
# Select distance metric
#
self.unSignedDistanceOption=qt.QRadioButton()
self.unSignedDistanceOption.setChecked(True)
parametersFormLayout.addRow("Unsigned Distance: ", self.unSignedDistanceOption)
self.signedDistanceOption=qt.QRadioButton()
self.signedDistanceOption.setChecked(False)
parametersFormLayout.addRow("Signed Distance: ", self.signedDistanceOption)
#
# Apply Button
#
self.applyButton = qt.QPushButton("Apply")
self.applyButton.toolTip = "Generate MeshDistanceMeasurements."
self.applyButton.enabled = False
parametersFormLayout.addRow(self.applyButton)
# connections
self.modelSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.onSelect)
self.baseLMSelect.connect('currentNodeChanged(vtkMRMLNode*)', self.onSelect)
self.meshDirectory.connect('validInputChanged(bool)', self.onSelect)
self.baseSLMSelect.connect('currentNodeChanged(bool)', self.onSelectBaseSLM)
self.semilandmarkDirectory.connect('validInputChanged(bool)', self.onSelect)
self.applyButton.connect('clicked(bool)', self.onApplyButton)
# Add vertical spacer
self.layout.addStretch(1)
def cleanup(self):
pass
def onApplyButton(self):
logic = MeshDistanceMeasurementLogic()
logic.run(self.modelSelector.currentNode(), self.baseLMSelect.currentNode(), self.baseSLMSelect.currentNode(), self.meshDirectory.currentPath,
self.landmarkDirectory.currentPath, self.semilandmarkDirectory.currentPath, self.outputDirectory.currentPath, self.signedDistanceOption.checked)
#
# MeshDistanceMeasurementLogic
#
class MeshDistanceMeasurementLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def mergeLandmarks(self, templateLM, templateSLM):
landmarkPoint = [0,0,0]
for landmarkIndex in range(templateLM.GetNumberOfMarkups()):
templateLM.GetMarkupPoint(0,landmarkIndex,landmarkPoint)
templateSLM.AddFiducialFromArray(landmarkPoint)
return templateSLM
def findCorrespondingFilePath(self, searchDirectory, templateFileName):
fileList = os.listdir(searchDirectory)
regex = re.compile(r'\d+')
subjectID = [int(x) for x in regex.findall(templateFileName)][0]
for filename in fileList:
if str(subjectID) in filename:
return os.path.join(searchDirectory, filename), subjectID
def run(self, templateMesh, templateLM, templateSLM, meshDirectory, lmDirectory, slmDirectory, outDirectory, signedDistanceOption):
if(bool(templateSLM) and bool(slmDirectory)):
templateLMTotal = self.mergeLandmarks(templateLM, templateSLM)
else:
templateLMTotal = templateLM
#get template points as vtk array
templatePoints = vtk.vtkPoints()
p=[0,0,0]
for i in range(templateLMTotal.GetNumberOfMarkups()):
templateLMTotal.GetMarkupPoint(0,i,p)
templatePoints.InsertNextPoint(p)
# write per-subject mesh distance (RMSE) values to a results table
tableNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLTableNode', 'Mean Mesh Distances')
col1=tableNode.AddColumn()
col1.SetName('Subject ID')
col2=tableNode.AddColumn()
col2.SetName('Mesh RMSE')
tableNode.SetColumnType('Subject ID',vtk.VTK_STRING)
tableNode.SetColumnType('Mesh RMSE',vtk.VTK_STRING)
subjectCount = 0
for meshFileName in os.listdir(meshDirectory):
if(not meshFileName.startswith(".")):
#get corresponding landmarks
lmFilePath, subjectID = self.findCorrespondingFilePath(lmDirectory, meshFileName)
if(lmFilePath):
currentLMNode = slicer.util.loadMarkupsFiducialList(lmFilePath)
if bool(slmDirectory): # add semi-landmarks if present
slmFilePath, sID = self.findCorrespondingFilePath(slmDirectory, meshFileName)
if(slmFilePath):
currentSLMNode = slicer.util.loadMarkupsFiducialList(slmFilePath)
currentLMTotal = self.mergeLandmarks(currentLMNode, currentSLMNode)
else:
print("problem with reading semi-landmarks")
return False
else:
currentLMTotal = currentLMNode
else:
print("problem with reading landmarks")
return False
# if mesh and lm file with same subject id exist, load into scene
meshFilePath = os.path.join(meshDirectory, meshFileName)
currentMeshNode = slicer.util.loadModel(meshFilePath)
#get subject points into vtk array
subjectPoints = vtk.vtkPoints()
p=[0,0,0]
for i in range(currentLMTotal.GetNumberOfMarkups()):
currentLMTotal.GetMarkupPoint(0,i,p)
subjectPoints.InsertNextPoint(p)
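# Added note: the subject mesh is warped into the template landmark space with
# a thin plate spline (TPS) transform before the point-wise distance to the
# template mesh is computed.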
transform = vtk.vtkThinPlateSplineTransform()
transform.SetSourceLandmarks( subjectPoints )
transform.SetTargetLandmarks( templatePoints )
transform.SetBasisToR() # for 3D transform
transformNode=slicer.mrmlScene.AddNewNodeByClass("vtkMRMLTransformNode","TPS")
transformNode.SetAndObserveTransformToParent(transform)
currentMeshNode.SetAndObserveTransformNodeID(transformNode.GetID())
slicer.vtkSlicerTransformLogic().hardenTransform(currentMeshNode)
distanceFilter = vtk.vtkDistancePolyDataFilter()
distanceFilter.SetInputData(0,templateMesh.GetPolyData())
distanceFilter.SetInputData(1,currentMeshNode.GetPolyData())
distanceFilter.SetSignedDistance(signedDistanceOption)
distanceFilter.Update()
distanceMap = distanceFilter.GetOutput()
distanceArray = distanceMap.GetPointData().GetArray('Distance')
#meanDistance = np.average(distanceArray)
meanDistance = self.rmse(distanceArray)
#save output distance map
outputNode=slicer.mrmlScene.AddNewNodeByClass("vtkMRMLModelNode","outputDistanceMap")
outputNode.SetAndObservePolyData(distanceMap)
outputFilename = os.path.join(outDirectory, str(subjectID) + '.vtp')
slicer.util.saveNode(outputNode, outputFilename)
#update table and subjectCount
tableNode.AddEmptyRow()
tableNode.SetCellText(subjectCount,0,str(subjectID))
tableNode.SetCellText(subjectCount,1,str(meanDistance))
subjectCount+=1
# clean up
slicer.mrmlScene.RemoveNode(outputNode)
slicer.mrmlScene.RemoveNode(transformNode)
slicer.mrmlScene.RemoveNode(currentMeshNode)
slicer.mrmlScene.RemoveNode(currentLMNode)
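# Added note: rmse() below returns sqrt(mean(d_i**2)) over the per-point
# distance array produced by vtkDistancePolyDataFilter.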
def rmse(self, signedDistanceArray):
return np.sqrt(np.square(signedDistanceArray).mean())
def takeScreenshot(self,name,description,type=-1):
# show the message even if not taking a screen shot
slicer.util.delayDisplay('Take screenshot: '+description+'.\nResult is available in the Annotations module.', 3000)
lm = slicer.app.layoutManager()
# switch on the type to get the requested window
widget = 0
if type == slicer.qMRMLScreenShotDialog.FullLayout:
# full layout
widget = lm.viewport()
elif type == slicer.qMRMLScreenShotDialog.ThreeD:
# just the 3D window
widget = lm.threeDWidget(0).threeDView()
elif type == slicer.qMRMLScreenShotDialog.Red:
# red slice window
widget = lm.sliceWidget("Red")
elif type == slicer.qMRMLScreenShotDialog.Yellow:
# yellow slice window
widget = lm.sliceWidget("Yellow")
elif type == slicer.qMRMLScreenShotDialog.Green:
# green slice window
widget = lm.sliceWidget("Green")
else:
# default to using the full window
widget = slicer.util.mainWindow()
# reset the type so that the node is set correctly
type = slicer.qMRMLScreenShotDialog.FullLayout
# grab and convert to vtk image data
qimage = ctk.ctkWidgetsUtils.grabWidget(widget)
imageData = vtk.vtkImageData()
slicer.qMRMLUtils().qImageToVtkImageData(qimage,imageData)
annotationLogic = slicer.modules.annotations.logic()
annotationLogic.CreateSnapShot(name, description, type, 1, imageData)
def process(self, inputVolume, outputVolume, imageThreshold, invert=False, showResult=True):
"""
Run the processing algorithm.
Can be used without GUI widget.
:param inputVolume: volume to be thresholded
:param outputVolume: thresholding result
:param imageThreshold: values above/below this threshold will be set to 0
:param invert: if True then values above the threshold will | |
for field_name in fields:
field = self._fields.get(field_name)
if field is None:
continue
if field.type in ('many2many', 'one2many'):
continue
elif field.type == 'many2one':
data[field_name] = _get_first_not_null_id(field_name, self) # take the first not null
elif field.type == 'text':
data[field_name] = '\n\n'.join(it for it in self.mapped(field_name) if it)
else:
data[field_name] = _get_first_not_null(field_name, self)
# define the resulting type ('lead' or 'opportunity')
data['type'] = self._merge_get_result_type()
return data
def _mail_body(self, fields):
""" generate the message body with the changed values
:param fields : list of fields to track
:returns a list of message bodies for the corresponding leads
"""
bodies = []
for lead in self:
title = "%s : %s\n" % (_('Merged opportunity') if lead.type == 'opportunity' else _('Merged lead'), lead.name)
body = [title]
_fields = self.env['ir.model.fields'].search([
('name', 'in', fields or []),
('model_id.model', '=', lead._name),
])
for field in _fields:
value = getattr(lead, field.name, False)
if field.ttype == 'selection':
selections = lead.fields_get()[field.name]['selection']
value = next((v[1] for v in selections if v[0] == value), value)
elif field.ttype == 'many2one':
if value:
value = value.sudo().display_name
elif field.ttype == 'many2many':
if value:
value = ','.join(
val.display_name
for val in value.sudo()
)
body.append("%s: %s" % (field.field_description, value or ''))
bodies.append("<br/>".join(body + ['<br/>']))
return bodies
def _merge_notify(self, opportunities):
""" Create a message gathering merged leads/opps informations. Using message_post, send a
message explaining which fields has been merged and their new value. `self` is the
resulting merge crm.lead record.
:param opportunities : recordset of merged crm.lead
:returns mail.message posted on resulting crm.lead
"""
# TODO JEM: mail template should be used instead of fix body, subject text
self.ensure_one()
# mail message's subject
result_type = opportunities._merge_get_result_type()
merge_message = _('Merged leads') if result_type == 'lead' else _('Merged opportunities')
subject = merge_message + ": " + ", ".join(opportunities.mapped('name'))
# message bodies
message_bodies = opportunities._mail_body(list(CRM_LEAD_FIELDS_TO_MERGE))
message_body = "\n\n".join(message_bodies)
return self.message_post(body=message_body, subject=subject)
def _merge_opportunity_history(self, opportunities):
""" Move mail.message from the given opportunities to the current one. `self` is the
crm.lead record destination for message of `opportunities`.
:param opportunities : recordset of crm.lead to move the messages
"""
self.ensure_one()
for opportunity in opportunities:
for message in opportunity.message_ids:
message.write({
'res_id': self.id,
'subject': _("From %s : %s") % (opportunity.name, message.subject)
})
return True
def _merge_opportunity_attachments(self, opportunities):
""" Move attachments of given opportunities to the current one `self`, and rename
the attachments having same name than native ones.
:param opportunities : recordset of merged crm.lead
"""
self.ensure_one()
# return attachments of opportunity
def _get_attachments(opportunity_id):
return self.env['ir.attachment'].search([('res_model', '=', self._name), ('res_id', '=', opportunity_id)])
first_attachments = _get_attachments(self.id)
# counter of all attachments to move. Used to make sure the name is different for all attachments
count = 1
for opportunity in opportunities:
attachments = _get_attachments(opportunity.id)
for attachment in attachments:
values = {'res_id': self.id}
for attachment_in_first in first_attachments:
if attachment.name == attachment_in_first.name:
values['name'] = "%s (%s)" % (attachment.name, count)
count += 1
attachment.write(values)
return True
def merge_dependences(self, opportunities):
""" Merge dependences (messages, attachments, ...). These dependences will be
transfered to `self`, the most important lead.
:param opportunities : recordset of opportunities to transfert. Does
not include `self`.
"""
self.ensure_one()
self._merge_notify(opportunities)
self._merge_opportunity_history(opportunities)
self._merge_opportunity_attachments(opportunities)
def merge_opportunity(self, user_id=False, team_id=False):
""" Merge opportunities in one. Different cases of merge:
- merge leads together = 1 new lead
- merge at least 1 opp with anything else (lead or opp) = 1 new opp
The resulting lead/opportunity will be the most important one (based on its confidence level)
updated with values from other opportunities to merge.
:param user_id : the id of the salesperson. If not given, will be determined by `_merge_data`.
:param team_id : the id of the Sales Team. If not given, will be determined by `_merge_data`.
:return crm.lead record resulting from the merge
"""
if len(self.ids) <= 1:
raise UserError(_('Please select more than one element (lead or opportunity) from the list view.'))
if len(self.ids) > 5 and not self.env.is_superuser():
raise UserError(_("To prevent data loss, Leads and Opportunities can only be merged by groups of 5."))
opportunities = self._sort_by_confidence_level(reverse=True)
# get SORTED recordset of head and tail, and complete list
opportunities_head = opportunities[0]
opportunities_tail = opportunities[1:]
# merge all the sorted opportunity. This means the value of
# the first (head opp) will be a priority.
merged_data = opportunities._merge_data(list(CRM_LEAD_FIELDS_TO_MERGE))
# force value for saleperson and Sales Team
if user_id:
merged_data['user_id'] = user_id
if team_id:
merged_data['team_id'] = team_id
# merge other data (mail.message, attachments, ...) from tail into head
opportunities_head.merge_dependences(opportunities_tail)
# check if the stage is in the stages of the Sales Team. If not, assign the stage with the lowest sequence
if merged_data.get('team_id'):
team_stage_ids = self.env['crm.stage'].search(['|', ('team_id', '=', merged_data['team_id']), ('team_id', '=', False)], order='sequence')
if merged_data.get('stage_id') not in team_stage_ids.ids:
merged_data['stage_id'] = team_stage_ids[0].id if team_stage_ids else False
# write merged data into first opportunity
opportunities_head.write(merged_data)
# delete tail opportunities
# we use the SUPERUSER to avoid access rights issues because as the user had the rights to see the records it should be safe to do so
opportunities_tail.sudo().unlink()
return opportunities_head
def _sort_by_confidence_level(self, reverse=False):
""" Sorting the leads/opps according to the confidence level of its stage, which relates to the probability of winning it
The confidence level increases with the stage sequence
An Opportunity always has higher confidence level than a lead
"""
def opps_key(opportunity):
return opportunity.type == 'opportunity', opportunity.stage_id.sequence, -opportunity._origin.id
return self.sorted(key=opps_key, reverse=reverse)
def get_duplicated_leads(self, partner_id, include_lost=False):
""" Search for opportunities that have the same partner and that arent done or cancelled
:param partner_id : partner to search
"""
self.ensure_one()
email = self.partner_id.email or self.email_from
return self._get_duplicated_leads_by_emails(partner_id, email, include_lost=include_lost)
@api.model
def _get_duplicated_leads_by_emails(self, partner_id, email, include_lost=False):
""" Search for opportunities that have the same partner and that arent done or cancelled """
if not email:
return self.env['crm.lead']
partner_match_domain = []
for email in set(email_split(email) + [email]):
partner_match_domain.append(('email_from', '=ilike', email))
if partner_id:
partner_match_domain.append(('partner_id', '=', partner_id))
if not partner_match_domain:
return self.env['crm.lead']
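# Added note: Odoo domains use prefix (Polish) notation, so one '|' is
# prepended per extra leaf to OR all email/partner conditions together,
# e.g. ['|', '|', leaf1, leaf2, leaf3].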
partner_match_domain = ['|'] * (len(partner_match_domain) - 1) + partner_match_domain
if not include_lost:
partner_match_domain += ['&', ('active', '=', True), ('probability', '<', 100)]
else:
partner_match_domain += ['|', '&', ('type', '=', 'lead'), ('active', '=', True), ('type', '=', 'opportunity')]
return self.search(partner_match_domain)
def _convert_opportunity_data(self, customer, team_id=False):
""" Extract the data from a lead to create the opportunity
:param customer : res.partner record
:param team_id : identifier of the Sales Team to determine the stage
"""
if not team_id:
team_id = self.team_id.id if self.team_id else False
value = {
'planned_revenue': self.planned_revenue,
'probability': self.probability,
'name': self.name,
'partner_id': customer.id if customer else False,
'type': 'opportunity',
'date_open': fields.Datetime.now(),
'email_from': customer and customer.email or self.email_from,
'phone': customer and customer.phone or self.phone,
'date_conversion': fields.Datetime.now(),
}
if not self.stage_id:
stage = self._stage_find(team_id=team_id)
value['stage_id'] = stage.id
return value
def convert_opportunity(self, partner_id, user_ids=False, team_id=False):
customer = False
if partner_id:
customer = self.env['res.partner'].browse(partner_id)
for lead in self:
if not lead.active or lead.probability == 100:
continue
vals = lead._convert_opportunity_data(customer, team_id)
lead.write(vals)
if user_ids or team_id:
self.allocate_salesman(user_ids, team_id)
return True
def _create_lead_partner_data(self, name, is_company, parent_id=False):
""" extract data from lead to create a partner
:param name : future name of the partner
:param is_company : True if the partner is a company
:param parent_id : id of the parent partner (False if no parent)
:returns res.partner record
"""
email_split = tools.email_split(self.email_from)
res = {
'name': name,
'user_id': self.env.context.get('default_user_id') or self.user_id.id,
'comment': self.description,
'team_id': self.team_id.id,
'parent_id': parent_id,
'phone': self.phone,
'mobile': self.mobile,
'email': email_split[0] if email_split else False,
'title': self.title.id,
'function': self.function,
'street': self.street,
'street2': self.street2,
'zip': self.zip,
'city': self.city,
'country_id': self.country_id.id,
'state_id': self.state_id.id,
'website': self.website,
'is_company': is_company,
'type': 'contact'
}
if self.lang_id:
res['lang'] = self.lang_id.code
return res
def _create_lead_partner(self):
""" Create a partner from lead data
:returns res.partner record
"""
Partner = self.env['res.partner']
contact_name = self.contact_name
if not contact_name:
contact_name = Partner._parse_partner_name(self.email_from)[0] if self.email_from else False
if self.partner_name:
partner_company = Partner.create(self._create_lead_partner_data(self.partner_name, True))
elif self.partner_id:
partner_company = self.partner_id
else:
partner_company = None
if contact_name:
return Partner.create(self._create_lead_partner_data(contact_name, False, partner_company.id if partner_company else False))
if partner_company:
return partner_company
return Partner.create(self._create_lead_partner_data(self.name, False))
def handle_partner_assignation(self, action='create', partner_id=False):
""" Handle partner assignation during a | |
i,k in enumerate(vegtype_mainlist))
inter = set(ind_dict).intersection(vegtype_str)
indices = [ ind_dict[x] for x in inter ]
return indices
# Flexibly subset time(s) and/or vegetation type(s) from an xarray Dataset or
# DataArray. Keyword arguments take the form dimension=selection, where
# selections can be individual values or slice()s. Optimize memory usage by
# beginning the keyword argument list with the selections that will result in
# the largest reduction of object size.
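# Illustrative call (added comment; the variable, vegetation-type and
# coordinate names are hypothetical):
#   xr_flexsel(ds, vegtype=['corn'], lat=slice(-30, 30), lon__values=slice(0, 180))
# selects the 'corn' patches first (largest reduction), then trims lat/lon;
# the '__values' suffix forces the selection to be interpreted as coordinate
# values rather than indices.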
def xr_flexsel(xr_object, patches1d_itype_veg=None, warn_about_seltype_interp=True, **kwargs):
# Setup
havewarned = False
delimiter = "__"
for key, selection in kwargs.items():
if key == "vegtype":
# Convert to list, if needed
if not isinstance(selection, list):
selection = [selection]
# Convert to indices, if needed
if isinstance(selection[0], str):
selection = vegtype_str2int(selection)
# Get list of boolean(s)
if isinstance(selection[0], int):
if isinstance(patches1d_itype_veg, type(None)):
patches1d_itype_veg = xr_object.patches1d_itype_veg.values
elif isinstance(patches1d_itype_veg, xr.core.dataarray.DataArray):
patches1d_itype_veg = patches1d_itype_veg.values
is_vegtype = is_each_vegtype(patches1d_itype_veg, selection, "ok_exact")
elif isinstance(selection[0], bool):
if len(selection) != len(xr_object.patch):
raise ValueError(f"If providing boolean 'vegtype' argument to xr_flexsel(), it must be the same length as xr_object.patch ({len(selection)} vs. {len(xr_object.patch)})")
is_vegtype = selection
else:
raise TypeError(f"Not sure how to handle 'vegtype' of type {type(selection)}")
xr_object = xr_object.isel(patch=[i for i, x in enumerate(is_vegtype) if x])
if "ivt" in xr_object:
xr_object = xr_object.isel(ivt=is_each_vegtype(xr_object.ivt.values, selection, "ok_exact"))
else:
# Parse selection type, if provided
if delimiter in key:
key, selection_type = key.split(delimiter)
# Check type of selection
else:
# Suggest suppressing selection type interpretation warnings
if warn_about_seltype_interp and not havewarned:
print("xr_flexsel(): Suppress all 'selection type interpretation' messages by specifying warn_about_seltype_interp=False")
havewarned = True
is_inefficient = False
if isinstance(selection, slice):
slice_members = []
if selection == slice(0):
raise ValueError("slice(0) will be empty")
if selection.start != None:
slice_members = slice_members + [selection.start]
if selection.stop != None:
slice_members = slice_members + [selection.stop]
if selection.step != None:
slice_members = slice_members + [selection.step]
if slice_members==[]:
raise TypeError("slice is all None?")
this_type = int
for x in slice_members:
if not isinstance(x, int) or x < 0:
this_type = "values"
break
elif isinstance(selection, np.ndarray):
if selection.dtype.kind in np.typecodes["AllInteger"]:
this_type = int
else:
is_inefficient = True
this_type = None
for x in selection:
if x < 0 or x%1 > 0:
if isinstance(x, int):
this_type = "values"
else:
this_type = type(x)
break
if this_type==None:
this_type = int
selection = selection.astype(int)
else:
this_type = type(selection)
print(f"this_type: {this_type}")
if this_type == int:
selection_type = "indices"
else:
selection_type = "values"
if warn_about_seltype_interp:
if is_inefficient:
extra = " This will also improve efficiency for large selections."
else:
extra = ""
print(f"xr_flexsel(): Selecting {key} as {selection_type} because selection was interpreted as {this_type}. If not correct, specify selection type ('indices' or 'values') in keyword like '{key}{delimiter}SELECTIONTYPE=...' instead of '{key}=...'.{extra}")
# Trim along relevant 1d axes
if isinstance(xr_object, xr.Dataset) and key in ["lat","lon"]:
if selection_type == "indices":
inclCoords = xr_object[key].values[selection]
elif selection_type == "values":
if isinstance(selection, slice):
inclCoords = xr_object.sel({key: selection}, drop=False)[key].values
else:
inclCoords = selection
else:
raise TypeError(f"selection_type {selection_type} not recognized")
if key == "lat":
thisXY = "jxy"
elif key=="lon":
thisXY = "ixy"
else:
raise KeyError(f"Key '{key}' not recognized: What 1d_ suffix should I use for variable name?")
pattern = re.compile(f"1d_{thisXY}")
matches = [x for x in list(xr_object.keys()) if pattern.search(x) != None]
for thisVar in matches:
if len(xr_object[thisVar].dims) != 1:
raise RuntimeError(f"Expected {thisVar} to have 1 dimension, but it has {len(xr_object[thisVar].dims)}: {xr_object[thisVar].dims}")
thisVar_dim = xr_object[thisVar].dims[0]
# print(f"Variable {thisVar} has dimension {thisVar_dim}")
thisVar_coords = xr_object[key].values[xr_object[thisVar].values.astype(int)-1]
# print(f"{thisVar_dim} size before: {xr_object.sizes[thisVar_dim]}")
ok_ind = []
new_1d_thisXY = []
for i, x in enumerate(thisVar_coords):
if x in inclCoords:
ok_ind = ok_ind + [i]
new_1d_thisXY = new_1d_thisXY + [(inclCoords==x).nonzero()[0] + 1]
xr_object = xr_object.isel({thisVar_dim: ok_ind})
new_1d_thisXY = np.array(new_1d_thisXY).squeeze()
xr_object[thisVar].values = new_1d_thisXY
# print(f"{thisVar_dim} size after: {xr_object.sizes[thisVar_dim]}")
# Perform selection
if selection_type == "indices":
# Have to select like this instead of with index directly because otherwise assign_coords() will throw an error. Not sure why.
if isinstance(selection, int):
# Single integer? Turn it into a slice.
selection = slice(selection,selection+1)
elif isinstance(selection, np.ndarray) and not selection.dtype.kind in np.typecodes["AllInteger"]:
selection = selection.astype(int)
xr_object = xr_object.isel({key: selection})
elif selection_type == "values":
xr_object = xr_object.sel({key: selection})
else:
raise TypeError(f"selection_type {selection_type} not recognized")
return xr_object
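# Hedged usage sketch (added for illustration, not in the original source). It assumes
# `ds` is a CTSM/CLM history Dataset already run through mfdataset_preproc(), so it has
# a "patch" dimension and patches1d_itype_veg; the vegetation type name is an example.
def _example_xr_flexsel_usage(ds):
    corn = xr_flexsel(ds, vegtype="corn")                      # one PFT, selected by name
    first_year = xr_flexsel(ds, time__indices=slice(0, 12))    # first 12 time steps, by index
    tropics = xr_flexsel(ds, lat__values=slice(-23.5, 23.5))   # latitude band, by coordinate value
    return corn, first_year, tropics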
# Get PFT of each patch, in both integer and string forms.
def ivt_int_str(this_ds, this_pftlist):
# First, get all the integer values; should be time*pft or pft*time. We will eventually just take the first timestep.
vegtype_int = this_ds.patches1d_itype_veg
vegtype_int.values = vegtype_int.values.astype(int)
# Convert to strings.
vegtype_str = list(np.array(this_pftlist)[vegtype_int.values])
# Return a dictionary with both results
return {"int": vegtype_int, "str": vegtype_str, "all_str": this_pftlist}
# Convert a list of strings with vegetation type names into a DataArray. Used to add vegetation type info in import_ds().
def get_vegtype_str_da(vegtype_str):
nvt = len(vegtype_str)
thisName = "vegtype_str"
vegtype_str_da = xr.DataArray(\
vegtype_str,
coords={"ivt": np.arange(0,nvt)},
dims=["ivt"],
name = thisName)
return vegtype_str_da
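# Minimal sketch (not in the original source): the DataArray built above maps the integer
# vegetation type index (ivt) to its name, so it can be merged into a Dataset or indexed directly.
def _example_get_vegtype_str_da():
    da = get_vegtype_str_da(["not_vegetated", "corn"])   # hypothetical two-type list
    return da.sel(ivt=1).item()                          # -> "corn"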
# Function to drop unwanted variables in preprocessing of open_mfdataset(), making sure to NOT drop any unspecified variables that will be useful in gridding. Also adds vegetation type info in the form of a DataArray of strings.
# Also renames "pft" dimension (and all like-named variables, e.g., pft1d_itype_veg_str) to be named like "patch". This can later be reversed, for compatibility with other code, using patch2pft().
def mfdataset_preproc(ds, vars_to_import, vegtypes_to_import):
# Rename "pft" dimension and variables to "patch", if needed
if "pft" in ds.dims:
pattern = re.compile("pft.*1d")
matches = [x for x in list(ds.keys()) if pattern.search(x) != None]
pft2patch_dict = {"pft": "patch"}
for m in matches:
pft2patch_dict[m] = m.replace("pft","patch").replace("patchs","patches")
ds = ds.rename(pft2patch_dict)
if vars_to_import != None:
# Get list of dimensions present in variables in vars_to_import.
dimList = []
for thisVar in vars_to_import:
# list(set(x)) returns a list of the unique items in x
dimList = list(set(dimList + list(ds.variables[thisVar].dims)))
# Get any _1d variables that are associated with those dimensions. These will be useful in gridding. Also, if any dimension is "pft", set up to rename it and all like-named variables to "patch"
onedVars = []
for thisDim in dimList:
pattern = re.compile(f"{thisDim}.*1d")
matches = [x for x in list(ds.keys()) if pattern.search(x) != None]
onedVars = list(set(onedVars + matches))
# Add dimensions and _1d variables to vars_to_import
vars_to_import = list(set(vars_to_import \
+ list(ds.dims) + onedVars))
# Get list of variables to drop
varlist = list(ds.variables)
vars_to_drop = list(np.setdiff1d(varlist, vars_to_import))
# Drop them
ds = ds.drop_vars(vars_to_drop)
# Add vegetation type info
if "patches1d_itype_veg" in list(ds):
this_pftlist = define_pftlist()
ivt_int_str(ds, this_pftlist) # Includes check of whether vegtype changes over time anywhere
vegtype_da = get_vegtype_str_da(this_pftlist)
patches1d_itype_veg_str = vegtype_da.values[ds.isel(time=0).patches1d_itype_veg.values.astype(int)]
npatch = len(patches1d_itype_veg_str)
patches1d_itype_veg_str = xr.DataArray( \
patches1d_itype_veg_str,
coords={"patch": np.arange(0,npatch)},
dims=["patch"],
name = "patches1d_itype_veg_str")
ds = xr.merge([ds, vegtype_da, patches1d_itype_veg_str])
# Restrict to veg. types of interest, if any
if vegtypes_to_import != None:
ds = xr_flexsel(ds, vegtype=vegtypes_to_import)
# Finish import
ds = xr.decode_cf(ds, decode_times = True)
return ds
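# Hedged sketch (illustration only): mfdataset_preproc() is meant to be handed to
# xarray's open_mfdataset() as its `preprocess` callback; one way to bind the extra
# arguments is functools.partial. The file pattern and variable name are hypothetical.
def _example_open_with_preproc():
    import glob
    from functools import partial
    import xarray as xr
    preproc = partial(mfdataset_preproc,
                      vars_to_import=["GRAINC_TO_FOOD"],   # example history variable
                      vegtypes_to_import=None)
    files = sorted(glob.glob("*.clm2.h1.*.nc"))
    return xr.open_mfdataset(files, combine="by_coords", preprocess=preproc)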
# Rename "patch" dimension and any associated variables back to "pft". Uses a dictionary with the names of the dimensions and variables we want to rename. This allows us to do it all at once, which may be more efficient than one-by-one.
def patch2pft(xr_object):
# Rename "patch" dimension
patch2pft_dict = {}
for thisDim in xr_object.dims:
if thisDim == "patch":
patch2pft_dict["patch"] = "pft"
break
# Rename variables containing "patch"
if isinstance(xr_object, xr.Dataset):
pattern = re.compile("patch.*1d")
matches = [x for x in list(xr_object.keys()) if pattern.search(x) != None]
if len(matches) > 0:
for m in matches:
patch2pft_dict[m] = m.replace("patches","patchs").replace("patch","pft")
# Do the rename
if len(patch2pft_dict) > 0:
xr_object = xr_object.rename(patch2pft_dict)
return xr_object
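# Minimal sketch (not in the original source): undo the pft -> patch renaming performed
# in mfdataset_preproc() for downstream code that still expects CLM's original names.
def _example_patch2pft(ds):
    return patch2pft(ds)   # "patch" dim -> "pft"; e.g. patches1d_itype_veg -> pfts1d_itype_veg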
# Import a dataset that can be spread over multiple files, only including specified variables and/or vegetation types, concatenating by time. DOES actually read the dataset into memory, but only AFTER dropping unwanted variables and/or vegetation types.
def import_ds(filelist, myVars=None, myVegtypes=None):
# Convert myVegtypes here, if needed, to avoid repeating the process each time you read a file in xr.open_mfdataset().
if myVegtypes != None:
if not isinstance(myVegtypes, list):
myVegtypes = [myVegtypes]
if isinstance(myVegtypes[0], str):
myVegtypes = vegtype_str2int(myVegtypes)
logging during replication.
:type log_storage_account_id: str
"""
_validation = {
'instance_type': {'required': True},
}
_attribute_map = {
'instance_type': {'key': 'instanceType', 'type': 'str'},
'hv_host_vm_id': {'key': 'hvHostVmId', 'type': 'str'},
'vm_name': {'key': 'vmName', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'str'},
'v_hd_id': {'key': 'vHDId', 'type': 'str'},
'storage_account_id': {'key': 'storageAccountId', 'type': 'str'},
'log_storage_account_id': {'key': 'logStorageAccountId', 'type': 'str'},
}
def __init__(
self,
*,
hv_host_vm_id: Optional[str] = None,
vm_name: Optional[str] = None,
os_type: Optional[str] = None,
v_hd_id: Optional[str] = None,
storage_account_id: Optional[str] = None,
log_storage_account_id: Optional[str] = None,
**kwargs
):
super(HyperVReplicaAzureReprotectInput, self).__init__(**kwargs)
self.instance_type = 'HyperVReplicaAzure' # type: str
self.hv_host_vm_id = hv_host_vm_id
self.vm_name = vm_name
self.os_type = os_type
self.v_hd_id = v_hd_id
self.storage_account_id = storage_account_id
self.log_storage_account_id = log_storage_account_id
class HyperVReplicaAzureTestFailoverInput(TestFailoverProviderSpecificInput):
"""HvrA provider specific input for test failover.
All required parameters must be populated in order to send to Azure.
:param instance_type: Required. The class type.Constant filled by server.
:type instance_type: str
:param primary_kek_certificate_pfx: Primary kek certificate pfx.
:type primary_kek_certificate_pfx: str
:param secondary_kek_certificate_pfx: Secondary kek certificate pfx.
:type secondary_kek_certificate_pfx: str
:param recovery_point_id: The recovery point id to be passed to test failover to a particular
recovery point. In case of latest recovery point, null should be passed.
:type recovery_point_id: str
"""
_validation = {
'instance_type': {'required': True},
}
_attribute_map = {
'instance_type': {'key': 'instanceType', 'type': 'str'},
'primary_kek_certificate_pfx': {'key': 'primaryKekCertificatePfx', 'type': 'str'},
'secondary_kek_certificate_pfx': {'key': 'secondaryKekCertificatePfx', 'type': 'str'},
'recovery_point_id': {'key': 'recoveryPointId', 'type': 'str'},
}
def __init__(
self,
*,
primary_kek_certificate_pfx: Optional[str] = None,
secondary_kek_certificate_pfx: Optional[str] = None,
recovery_point_id: Optional[str] = None,
**kwargs
):
super(HyperVReplicaAzureTestFailoverInput, self).__init__(**kwargs)
self.instance_type = 'HyperVReplicaAzure' # type: str
self.primary_kek_certificate_pfx = primary_kek_certificate_pfx
self.secondary_kek_certificate_pfx = secondary_kek_certificate_pfx
self.recovery_point_id = recovery_point_id
class HyperVReplicaAzureUnplannedFailoverInput(UnplannedFailoverProviderSpecificInput):
"""HvrA provider specific input for unplanned failover.
All required parameters must be populated in order to send to Azure.
:param instance_type: Required. The class type.Constant filled by server.
:type instance_type: str
:param primary_kek_certificate_pfx: Primary kek certificate pfx.
:type primary_kek_certificate_pfx: str
:param secondary_kek_certificate_pfx: Secondary kek certificate pfx.
:type secondary_kek_certificate_pfx: str
:param recovery_point_id: The recovery point id to be passed to failover to a particular
recovery point. In case of latest recovery point, null should be passed.
:type recovery_point_id: str
"""
_validation = {
'instance_type': {'required': True},
}
_attribute_map = {
'instance_type': {'key': 'instanceType', 'type': 'str'},
'primary_kek_certificate_pfx': {'key': 'primaryKekCertificatePfx', 'type': 'str'},
'secondary_kek_certificate_pfx': {'key': 'secondaryKekCertificatePfx', 'type': 'str'},
'recovery_point_id': {'key': 'recoveryPointId', 'type': 'str'},
}
def __init__(
self,
*,
primary_kek_certificate_pfx: Optional[str] = None,
secondary_kek_certificate_pfx: Optional[str] = None,
recovery_point_id: Optional[str] = None,
**kwargs
):
super(HyperVReplicaAzureUnplannedFailoverInput, self).__init__(**kwargs)
self.instance_type = 'HyperVReplicaAzure' # type: str
self.primary_kek_certificate_pfx = primary_kek_certificate_pfx
self.secondary_kek_certificate_pfx = secondary_kek_certificate_pfx
self.recovery_point_id = recovery_point_id
class HyperVReplicaAzureUpdateReplicationProtectedItemInput(UpdateReplicationProtectedItemProviderInput):
"""HyperV replica Azure input to update replication protected item.
All required parameters must be populated in order to send to Azure.
:param instance_type: Required. The class type.Constant filled by server.
:type instance_type: str
:param recovery_azure_v1_resource_group_id: The recovery Azure resource group Id for classic
deployment.
:type recovery_azure_v1_resource_group_id: str
:param recovery_azure_v2_resource_group_id: The recovery Azure resource group Id for resource
manager deployment.
:type recovery_azure_v2_resource_group_id: str
:param use_managed_disks: A value indicating whether managed disks should be used during
failover.
:type use_managed_disks: str
:param disk_id_to_disk_encryption_map: The dictionary of disk resource Id to disk encryption
set ARM Id.
:type disk_id_to_disk_encryption_map: dict[str, str]
:param target_proximity_placement_group_id: The target proximity placement group Id.
:type target_proximity_placement_group_id: str
:param target_availability_zone: The target availability zone.
:type target_availability_zone: str
:param target_vm_tags: The target VM tags.
:type target_vm_tags: dict[str, str]
:param target_managed_disk_tags: The tags for the target managed disks.
:type target_managed_disk_tags: dict[str, str]
:param target_nic_tags: The tags for the target NICs.
:type target_nic_tags: dict[str, str]
:param sql_server_license_type: The SQL Server license type. Possible values include:
"NotSpecified", "NoLicenseType", "PAYG", "AHUB".
:type sql_server_license_type: str or
~azure.mgmt.recoveryservicessiterecovery.models.SqlServerLicenseType
:param vm_disks: The list of disk update properties.
:type vm_disks: list[~azure.mgmt.recoveryservicessiterecovery.models.UpdateDiskInput]
"""
_validation = {
'instance_type': {'required': True},
}
_attribute_map = {
'instance_type': {'key': 'instanceType', 'type': 'str'},
'recovery_azure_v1_resource_group_id': {'key': 'recoveryAzureV1ResourceGroupId', 'type': 'str'},
'recovery_azure_v2_resource_group_id': {'key': 'recoveryAzureV2ResourceGroupId', 'type': 'str'},
'use_managed_disks': {'key': 'useManagedDisks', 'type': 'str'},
'disk_id_to_disk_encryption_map': {'key': 'diskIdToDiskEncryptionMap', 'type': '{str}'},
'target_proximity_placement_group_id': {'key': 'targetProximityPlacementGroupId', 'type': 'str'},
'target_availability_zone': {'key': 'targetAvailabilityZone', 'type': 'str'},
'target_vm_tags': {'key': 'targetVmTags', 'type': '{str}'},
'target_managed_disk_tags': {'key': 'targetManagedDiskTags', 'type': '{str}'},
'target_nic_tags': {'key': 'targetNicTags', 'type': '{str}'},
'sql_server_license_type': {'key': 'sqlServerLicenseType', 'type': 'str'},
'vm_disks': {'key': 'vmDisks', 'type': '[UpdateDiskInput]'},
}
def __init__(
self,
*,
recovery_azure_v1_resource_group_id: Optional[str] = None,
recovery_azure_v2_resource_group_id: Optional[str] = None,
use_managed_disks: Optional[str] = None,
disk_id_to_disk_encryption_map: Optional[Dict[str, str]] = None,
target_proximity_placement_group_id: Optional[str] = None,
target_availability_zone: Optional[str] = None,
target_vm_tags: Optional[Dict[str, str]] = None,
target_managed_disk_tags: Optional[Dict[str, str]] = None,
target_nic_tags: Optional[Dict[str, str]] = None,
sql_server_license_type: Optional[Union[str, "SqlServerLicenseType"]] = None,
vm_disks: Optional[List["UpdateDiskInput"]] = None,
**kwargs
):
super(HyperVReplicaAzureUpdateReplicationProtectedItemInput, self).__init__(**kwargs)
self.instance_type = 'HyperVReplicaAzure' # type: str
self.recovery_azure_v1_resource_group_id = recovery_azure_v1_resource_group_id
self.recovery_azure_v2_resource_group_id = recovery_azure_v2_resource_group_id
self.use_managed_disks = use_managed_disks
self.disk_id_to_disk_encryption_map = disk_id_to_disk_encryption_map
self.target_proximity_placement_group_id = target_proximity_placement_group_id
self.target_availability_zone = target_availability_zone
self.target_vm_tags = target_vm_tags
self.target_managed_disk_tags = target_managed_disk_tags
self.target_nic_tags = target_nic_tags
self.sql_server_license_type = sql_server_license_type
self.vm_disks = vm_disks
class HyperVReplicaBaseEventDetails(EventProviderSpecificDetails):
"""Abstract model class for event details of a HyperVReplica E2E event.
All required parameters must be populated in order to send to Azure.
:param instance_type: Required. Gets the class type. Overridden in derived classes.Constant
filled by server.
:type instance_type: str
:param container_name: The container friendly name.
:type container_name: str
:param fabric_name: The fabric friendly name.
:type fabric_name: str
:param remote_container_name: The remote container name.
:type remote_container_name: str
:param remote_fabric_name: The remote fabric name.
:type remote_fabric_name: str
"""
_validation = {
'instance_type': {'required': True},
}
_attribute_map = {
'instance_type': {'key': 'instanceType', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'fabric_name': {'key': 'fabricName', 'type': 'str'},
'remote_container_name': {'key': 'remoteContainerName', 'type': 'str'},
'remote_fabric_name': {'key': 'remoteFabricName', 'type': 'str'},
}
def __init__(
self,
*,
container_name: Optional[str] = None,
fabric_name: Optional[str] = None,
remote_container_name: Optional[str] = None,
remote_fabric_name: Optional[str] = None,
**kwargs
):
super(HyperVReplicaBaseEventDetails, self).__init__(**kwargs)
self.instance_type = 'HyperVReplicaBaseEventDetails' # type: str
self.container_name = container_name
self.fabric_name = fabric_name
self.remote_container_name = remote_container_name
self.remote_fabric_name = remote_fabric_name
class HyperVReplicaBasePolicyDetails(PolicyProviderSpecificDetails):
"""Base class for HyperVReplica policy details.
All required parameters must be populated in order to send to Azure.
:param instance_type: Required. Gets the class type. Overridden in derived classes.Constant
filled by server.
:type instance_type: str
:param recovery_points: A value indicating the number of recovery points.
:type recovery_points: int
:param application_consistent_snapshot_frequency_in_hours: A value indicating the application
consistent frequency.
:type application_consistent_snapshot_frequency_in_hours: int
:param compression: A value indicating whether compression has to be enabled.
:type compression: str
:param initial_replication_method: A value indicating whether IR is online.
:type initial_replication_method: str
:param online_replication_start_time: A value indicating the online IR start time.
:type online_replication_start_time: str
:param offline_replication_import_path: A value indicating the offline IR import path.
:type offline_replication_import_path: str
:param offline_replication_export_path: A value indicating the offline IR export path.
:type offline_replication_export_path: str
:param replication_port: A value indicating the recovery HTTPS port.
:type replication_port: int
:param allowed_authentication_type: A value indicating the authentication type.
:type allowed_authentication_type: int
:param replica_deletion_option: A value indicating whether the VM has to be auto deleted.
Supported Values: String.Empty, None, OnRecoveryCloud.
:type replica_deletion_option: str
"""
_validation = {
'instance_type': {'required': True},
}
_attribute_map = {
'instance_type': {'key': 'instanceType', 'type': 'str'},
'recovery_points': {'key': 'recoveryPoints', 'type': 'int'},
'application_consistent_snapshot_frequency_in_hours': {'key': 'applicationConsistentSnapshotFrequencyInHours', 'type': 'int'},
'compression': {'key': 'compression', 'type': 'str'},
'initial_replication_method': {'key': 'initialReplicationMethod', 'type': 'str'},
'online_replication_start_time': {'key': 'onlineReplicationStartTime', 'type': 'str'},
'offline_replication_import_path': {'key': 'offlineReplicationImportPath', 'type': 'str'},
'offline_replication_export_path': {'key': 'offlineReplicationExportPath', 'type': 'str'},
'replication_port': {'key': 'replicationPort', 'type': 'int'},
'allowed_authentication_type': {'key': 'allowedAuthenticationType', 'type': 'int'},
'replica_deletion_option': {'key': 'replicaDeletionOption', 'type': 'str'},
}
def __init__(
self,
*,
recovery_points: Optional[int] = None,
application_consistent_snapshot_frequency_in_hours: Optional[int] = None,
compression: Optional[str] = None,
initial_replication_method: Optional[str] = None,
online_replication_start_time: Optional[str] = None,
offline_replication_import_path: Optional[str] = None,
offline_replication_export_path: Optional[str] = None,
replication_port: Optional[int] = None,
allowed_authentication_type: Optional[int] = None,
replica_deletion_option: Optional[str] = None,
**kwargs
):
super(HyperVReplicaBasePolicyDetails, self).__init__(**kwargs)
self.instance_type = 'HyperVReplicaBasePolicyDetails' # type: str
self.recovery_points = recovery_points
self.application_consistent_snapshot_frequency_in_hours = application_consistent_snapshot_frequency_in_hours
self.compression = compression
self.initial_replication_method = initial_replication_method
self.online_replication_start_time = online_replication_start_time
self.offline_replication_import_path = offline_replication_import_path
self.offline_replication_export_path = offline_replication_export_path
self.replication_port = replication_port
self.allowed_authentication_type = allowed_authentication_type
self.replica_deletion_option = replica_deletion_option
class HyperVReplicaBaseReplicationDetails(ReplicationProviderSpecificSettings):
"""Hyper V replica provider specific settings base class.
All required parameters must be populated in order to send to Azure.
:param instance_type: Required. Gets the Instance type.Constant filled by server.
:type instance_type: str
:param last_replicated_time: The Last replication time.
:type last_replicated_time: ~datetime.datetime
:param vm_nics: The PE Network details.
:type vm_nics:
'''
Library for regenerative cooling calculations of bipropellant rocket engines
<NAME>
https://github.com/jeffersonmsb/rocket-cooling-calculator
'''
import csv
import numpy as np
import math
import pyCEA
from scipy import optimize
import os
import subprocess
def geometry(data_in, data_out):
with open(data_in['geometry_path']) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
data_out['geometry'] = list(csv_reader)
data_out['size'] = len(data_out['geometry'])
# Find the throat geometry
Rt = float(data_out['geometry'][0][1])
zt = float(data_out['geometry'][0][0])
for row in data_out['geometry']:
if float(row[1]) < Rt:
Rt = float(row[1])
zt = float(row[0])
data_out['Rt'] = Rt
data_out['zt'] = zt
data_out['At'] = np.pi*np.power(Rt,2)
# Compute the area ratios
data_out['r1'] = []
data_out['r2'] = []
data_out['r3'] = []
data_out['Ae'] = []
data_out['Ae/At'] = []
data_out['z'] = []
data_out['N'] = []
data_out['CCH'] = []
data_out['CCW'] = []
data_out['FT'] = []
n = 0
for row in data_out['geometry']:
A = np.pi*np.power(float(row[1]),2)
data_out['r1'].append(float(row[1]))
r2 = float(row[1]) + data_in['IWT']
data_out['r2'].append(r2)
data_out['r3'].append(float(row[1]) + data_in['IWT'] + data_in['CCH'])
data_out['Ae'].append(A)
data_out['Ae/At'].append(A/data_out['At'])
data_out['z'].append(float(row[0]))
if float(row[0]) > data_in['channel_number'][n][0]:
n = n + 1
N = data_in['channel_number'][n][1]
data_out['N'].append(N)
data_out['CCH'].append(data_in['CCH'])
if data_in['dim_constant'] == 'FT':
data_out['FT'].append(data_in['FT'])
aux = (2*np.pi*r2)/N - data_in['FT']
if aux <= 0:
data_out['error_code'] = 1
return
data_out['CCW'].append(aux)
else:
data_out['CCW'].append(data_in['CCW'])
aux = (2*np.pi*r2)/N - data_in['CCW']
data_out['FT'].append(aux)
data_out['L'] = []
for i in range(0, data_out['size']):
if(i==0):
A = 0.5*(data_out['z'][i+1]+data_out['z'][i]) - data_out['z'][i]
B = 0.5*(data_out['r1'][i+1]+data_out['r1'][i]) - data_out['r1'][i]
data_out['L'].append(math.sqrt(A**2 + B**2))
else:
if(i!=(data_out['size']-1)):
A = 0.5*(data_out['z'][i+1]+data_out['z'][i]) - 0.5*(data_out['z'][i]+data_out['z'][i-1])
B = 0.5*(data_out['r1'][i+1]+data_out['r1'][i]) - 0.5*(data_out['r1'][i]+data_out['r1'][i-1])
data_out['L'].append(math.sqrt(A**2 + B**2))
else:
A = data_out['z'][i] - 0.5*(data_out['z'][i]+data_out['z'][i-1])
B = data_out['r1'][i] - 0.5*(data_out['r1'][i]+data_out['r1'][i-1])
data_out['L'].append(math.sqrt(A**2 + B**2))
data_out['error_code'] = 0
def coolant_prop(coolant_name, prop_name, temperature):
if coolant_name == 'RP-1':
if temperature > 800:
temperature = 800
if temperature < 300:
temperature = 300
if prop_name == 'ro':
return 820
if prop_name == 'cp':
return -2.82649e-3*temperature**2.0 + 6.77751e0*temperature - 2.45234e1 #BOYSAN
if prop_name == 'k':
return 9.64e-8*temperature**2-2.95e-4*temperature+0.261 #BOYSAN
if prop_name == 'mi':
return -1.46e-11*temperature**3+3.22e-8*temperature**2-2.39e-5*temperature+6E-3 #BOYSAN
if coolant_name == 'C2H5OH(L)':
if prop_name == 'ro':
return 785.3
if prop_name == 'cp':
return 2570
if prop_name == 'k':
return 0.167
if prop_name == 'mi':
return 1.36e-3
else:
print('Coolant properties not found')
return -1
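# Minimal sketch (not in the original source): query RP-1 transport properties at a
# coolant temperature of 400 K; units follow the fits above (presumably SI).
def _example_coolant_prop():
    T = 400.0
    return {prop: coolant_prop('RP-1', prop, T) for prop in ('ro', 'cp', 'k', 'mi')}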
def create_prop(data_in, data_out):
data_out['Tc'] = data_out['size']*[data_in['Tc_primary']]
data_out['Twg'] = data_out['size']*[data_in['Twg_primary']]
data_out['Twc'] = data_out['size']*[data_in['Twc_primary']]
data_out['Taw'] = data_out['size']*[data_in['Taw_primary']]
data_out['cp_c'] = data_out['size']*[None]
data_out['k_c'] = data_out['size']*[None]
data_out['mi_c'] = data_out['size']*[None]
data_out['Pr_c'] = data_out['size']*[None]
data_out['gama'] = data_out['size']*[None]
data_out['M'] = data_out['size']*[None]
data_out['cp'] = data_out['size']*[None]
data_out['R'] = data_out['size']*[None]
data_out['h_g'] = data_out['size']*[None]
data_out['Re_c'] = data_out['size']*[None]
data_out['D_h'] = data_out['size']*[None]
data_out['mi_s'] = data_out['size']*[None]
data_out['h_c'] = data_out['size']*[None]
data_out['Aa'] = data_out['size']*[None]
data_out['Atotal'] = data_out['size']*[None]
data_out['m'] = data_out['size']*[None]
data_out['eta_f'] = data_out['size']*[None]
data_out['eta_o'] = data_out['size']*[None]
data_out['R_c'] = data_out['size']*[None]
data_out['R_g'] = data_out['size']*[None]
data_out['R_w'] = data_out['size']*[None]
data_out['q'] = data_out['size']*[None]
data_out['Q'] = data_out['size']*[None]
data_out['f'] = data_out['size']*[None]
data_out['ro'] = data_out['size']*[None]
data_out['V_c'] = data_out['size']*[None]
data_out['hl'] = data_out['size']*[None]
data_out['deltap'] = data_out['size']*[None]
data_out['T_static'] = data_out['size']*[None]
data_out['p_static'] = data_out['size']*[6000000]
def calc_prop(data_in, data_out):
data_in['p0_pyCEA'] = data_in['p0']/1e5 # Convert from [Pa] to [bar]
pyCEA.calcPropStagnationCEA(data_in['p0_pyCEA'],data_in['fuel'], data_in['oxidizer'],data_in['of'], data_in['motor_name'])
T0 = pyCEA.readPropStagnationCEA('t', data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
cp0 = pyCEA.readPropStagnationCEA('cp', data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
Pr0 = pyCEA.readPropStagnationCEA('pr', data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
mi0 = pyCEA.readPropStagnationCEA('mi', data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
Tc1 = data_in['Tc_primary']
IWT = data_in['IWT']
k_w = data_in['k_w']
mponto_c = data_in['m._c']
e = data_in['e']
p0 = data_in['p0']
Re_c = data_out['Re_c']
N = data_out['N']
mi_c = data_out['mi_c']
CCW = data_out['CCW']
FT = data_out['FT']
D_h = data_out['D_h']
mi_s = data_out['mi_s']
Tc = data_out['Tc']
Twg = data_out['Twg']
Twc = data_out['Twc']
Taw = data_out['Taw']
h_c = data_out['h_c']
k_c = data_out['k_c']
Pr_c = data_out['Pr_c']
Aa = data_out['Aa']
L = data_out['L']
r1 = data_out['r1']
r2 = data_out['r2']
r3 = data_out['r3']
Atotal = data_out['Atotal']
m = data_out['m']
eta_f = data_out['eta_f']
eta_o = data_out['eta_o']
R_c = data_out['R_c']
R_g = data_out['R_g']
R_w = data_out['R_w']
h_g = data_out['h_g']
q = data_out['q']
Q = data_out['Q']
cp_c = data_out['cp_c']
k_c = data_out['k_c']
f = data_out['f']
ro = data_out['ro']
V_c = data_out['V_c']
hl = data_out['hl']
deltap = data_out['deltap']
T_static = data_out['T_static']
p_static = data_out['p_static']
gama = data_out['gama']
M = data_out['M']
CCH = data_out['CCH']
data_out['p_drop'] = 0
def f_mach(M):
A = 2/(data_out['gama'][i]+1)
B = 1+(((data_out['gama'][i]-1)/2)*(M**2))
C = (data_out['gama'][i]+1)/(data_out['gama'][i]-1)
D = (data_out['Ae/At'][i]*M)**2
return ( (A*B)**C-D )
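# f_mach is the residual of the isentropic area-Mach relation,
#   (Ae/At)^2 * M^2 = [ (2/(gamma+1)) * (1 + (gamma-1)/2 * M^2) ]^((gamma+1)/(gamma-1)),
# whose root (found by bisection below) is the local Mach number for the given area
# ratio; the bracket [a, b] chosen below picks the subsonic branch upstream of the
# throat and the supersonic branch downstream.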
def f_coolebrook(f):
return (1/(-2*math.log(((e/D_h[i])/3.7)+(2.51/(Re_c[i]*f**0.5)), 10))**2-f)
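# f_coolebrook is the residual of the Colebrook-White equation,
#   1/sqrt(f) = -2*log10( (e/D_h)/3.7 + 2.51/(Re*sqrt(f)) ),
# whose root is the Darcy friction factor used in the pressure-drop estimate below.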
for i in reversed(range(0,data_out['size'])):
cp_c[i] = coolant_prop(data_in['coolant'], 'cp', Tc[i])
k_c[i] = coolant_prop(data_in['coolant'], 'k', data_out['Tc'][i])
data_out['mi_c'][i] = coolant_prop(data_in['coolant'], 'mi', data_out['Tc'][i])
data_out['Pr_c'][i] = data_out['cp_c'][i]*data_out['mi_c'][i]/data_out['k_c'][i]
pyCEA.calcPropCEA(data_out['Taw'][i] , data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
data_out['cp'][i] = pyCEA.readPropCEA('cp', data_out['Taw'][i], data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
#data_out['cp'][i] = -5.84399e-05*data_out['Taw'][i]**2.0 + 4.23454e-01*data_out['Taw'][i] + 1.29256e+03
data_out['gama'][i] = 1.23854e-8*data_out['Taw'][i]**2 - 8.09028e-5*data_out['Taw'][i] + 1.34563
# Gamma polynomial fit for the L-75
#data_out['gama'][i] = pyCEA.readPropCEA('gama', data_out['Taw'][i], data_in['p0_pyCEA'], data_in['fuel'], data_in['oxidizer'], data_in['of'], data_in['motor_name'])
data_out['R'][i] = (data_out['cp'][i]*(1 - 1/data_out['gama'][i]))
mponto = data_in['p0']*data_out['At']*((data_out['gama'][i]/(data_out['R'][i]*T0))*(2/(data_out['gama'][i]+1))**((data_out['gama'][i]+1)/(data_out['gama'][i]-1)))**0.5
c = (data_in['p0']*data_out['At'])/mponto
if(data_out['z'][i] > data_out['zt']):
a = 1
b = 25
else:
a = 0
b = 1
data_out['M'][i] = optimize.bisect(f_mach, a, b, rtol=8.881784197001252e-16)
aux1 = 1 + ((data_out['gama'][i]-1)/2)*data_out['M'][i]**2
sigma = ((data_out['Twg'][i]/(2*T0))*aux1+0.5 )**-0.68 * aux1**-0.12
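# The next line matches the form of Bartz's correlation for the gas-side film
# coefficient, h_g ~ 0.026 * (mu0/D_t)^0.2 * (cp0/Pr0^0.6) * (p0/c*)^0.8 * (At/Ae)^0.9 * sigma,
# where D_t = 2*Rt, c (computed above) is the characteristic velocity p0*At/mdot, and
# sigma is the boundary-layer property-correction factor; the throat-curvature factor
# of the full correlation appears to be omitted.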
data_out['h_g'][i] = ( 0.026 * ((mi0/(2*data_out['Rt']))**0.2) * (cp0/(Pr0**0.6)) * (data_in['p0']/c)**0.8 * (data_out['At']/data_out['Ae'][i])**0.9 * sigma )
D_h[i] = (4*CCW[i]*CCH[i])/(2*(CCW[i]+CCH[i]))
Re_c[i] = (4*mponto)/(N[i]*mi_c[i]*2*(CCW[i]+CCH[i]))
mi_s[i] = coolant_prop(data_in['coolant'], 'mi', Twc[i])
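# The next line is the Sieder-Tate correlation for the coolant-side coefficient:
#   Nu = 0.027 * Re^0.8 * Pr^(1/3) * (mu_bulk/mu_wall)^0.14,  with  h_c = Nu * k_c / D_h.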
h_c[i] = ((k_c[i]/D_h[i]) * 0.027 * Re_c[i]**0.8 * Pr_c[i]**(1/3) * (mi_c[i]/mi_s[i])**0.14 )
Aa[i] = (2*CCH[i]*L[i])
Atotal[i] = (N[i]*Aa[i] + L[i]*(2*math.pi*r2[i]-N[i]*FT[i]))
m[i] = math.sqrt((2*h_c[i])/(k_c[i]*FT[i]))
eta_f[i] = (math.tanh(m[i]*CCH[i])/(m[i]*CCH[i]))
eta_o[i] = 1-((N[i]*Aa[i]*(1-eta_f[i])) / Atotal[i])
R_g[i] = (1/(2*math.pi*r1[i]*L[i]*h_g[i]))
R_w[i] = (math.log(r2[i]/r1[i]) / (2*math.pi*L[i]*k_w))
R_c[i] = (1 / (eta_o[i]*h_c[i]*Atotal[i]))
q[i] = ((Taw[i] - Tc[i]) / (R_g[i] + R_w[i] + R_c[i]))
Q[i] = ( q[i]/(2*math.pi*r1[i]*L[i])/1000000 )
aux = 0.5*(data_out['gama'][i] - 1)*data_out['M'][i]**2
Taw[i] = (T0 * ((1 + Pr0**(1/3)*aux) / (1 + aux)))
Twg[i] = -R_g[i]*q[i]+Taw[i]
Twc[i] = -q[i]*(R_g[i]+R_w[i])+Taw[i]
lista = reversed(range( i,data_out['size']))
Tc1 = 303
for j in lista:
Tc2 = (q[j] / (mponto_c*cp_c[j])) + Tc1
Tc[j] = (Tc2+Tc1)/2
Tc1 = Tc2
p_static[i] = p0*(1+((gama[i]-1)/2)*M[i]**2)**-(gama[i]/(gama[i]-1))
# Pressure drop calculation
f[i] = optimize.bisect(f_coolebrook, 0.00001, 2, rtol=8.881784197001252e-16)
ro[i] = coolant_prop(data_in['coolant'], 'ro', Tc[i])
V_c[i] = mponto_c/(ro[i]*CCH[i]*CCW[i]*N[i])
hl[i] = f[i]*((L[i]/D_h[i])/(V_c[i]**2/2))
deltap[i] = ro[i]*hl[i]*N[i]
data_out['p_drop'] += deltap[i]
# Static temperature and static pressure calculation
T_static[i] = T0*(1+((gama[i]-1)/2)*M[i]**2)**-1
def iteration(data_in , data_out):
geometry(data_in, data_out)
if data_out['error_code'] != 0:
print('CCW <= 0')
return
create_prop(data_in, data_out)
for i in range(0,data_in['max_iterations']):
print('Iteration {}'.format(i+1))
calc_prop(data_in, data_out)
if i==0:
Tc_0 = sum(data_out['Q'])
Twg_0 = sum(data_out['Twg'])
Twc_0 = sum(data_out['Twc'])
Taw_0 = sum(data_out['Taw'])
Tc_prev = Tc_0
Twg_prev = Twg_0
Twc_prev = Twc_0
Taw_prev = Taw_0
else:
Tc = sum(data_out['Q'])
Twg = sum(data_out['Twg'])
Twc = sum(data_out['Twc'])
Taw = sum(data_out['Taw'])
Tc_L1 = abs(Tc-Tc_prev)/Tc_0
Twg_L1 = abs(Twg-Twg_prev)/Twg_0
Twc_L1 = abs(Twc-Twc_prev)/Twc_0
Taw_L1 = abs(Taw-Taw_prev)/Taw_0
Tc_prev = Tc
Twg_prev = Twg
Twc_prev = Twc
Taw_prev = Taw
if Tc_L1 <= data_in['tol'] and Twg_L1 <= data_in['tol'] and Twc_L1 <= data_in['tol'] and Taw_L1 <= data_in['tol']:
break
print('Total temperature iterations: ' + str(i+1))
def optimize_channel2(data_in, data_out):
flag1 = False
flag2 = False
if data_in['dim_constant'] == 'FT':
dim_const = 'FT'
dim_var = 'CCW'
else:
dim_const = 'CCW'
dim_var = 'FT'
geometry(data_in, data_out)
m = 0
for i in range(0, data_out['size']):
if data_out['r2'][i] < data_out['r2'][m]:
m = i
dim_max = (2*np.pi*data_out['r2'][m])/data_out['N'][m] - data_in[dim_var + '_min']
if dim_max-data_in[dim_const + '_min'] <= 0:
print('Largest geometric dimension is smaller than the minimum dimension.')
return
dim = (dim_max+data_in[dim_const + '_min'])/2
x = np.array([data_in['CCH'] , dim])
data_in[dim_const] = dim
iteration(data_in, data_out)
Q = max(data_out['Q'])
Q_prev = Q
Q0 = Q
w = data_in['w']
for opt in range(0,data_in['max_iterations_opt']):
grad = np.gradient(x)
xn = x - w*grad
if xn[0] <= data_in['CCH_min'] and flag1 == False:
flag1 = True
print('CCH_min')
if (xn[1] <= data_in[dim_const+'_min'] or xn[1] >= dim_max) and flag2 == False:
flag2 = True
print(dim_const+' min or max')
if flag1 == True:
xn[0] = x[0]
if flag2 == True:
xn[1] = x[1]
data_in['CCH'] = xn[0]
data_in[dim_const] = xn[1]
iteration(data_in, data_out)
Q = max(data_out['Q'])
if Q-Q_prev < 0:
w=w*-1
print(w)
continue
x = xn
print('Opt #{} Q:{} CCH:{} {}:{}'.format(opt, Q, x[0], dim_const, x[1]))
Q_diff = abs(Q-Q_prev)/Q0
Q_prev = Q
if Q_diff <= data_in['tol_opt']:
break
def plot(data_out):
data = []
for i in range(0,data_out['size']):
data_row = [ data_out['z'][i], data_out['Q'][i], data_out['Taw'][i], data_out['Twg'][i], data_out['Twc'][i], data_out['Tc'][i]]
data.append(data_row)
with open('rcc_plot_data.csv', mode='w', encoding='utf-8') as data_file:
csv_writer = csv.writer(data_file, delimiter=',')
csv_writer.writerows(data)
p = subprocess.Popen("gnuplot \'rcc_plot_config.gnu\'", shell = True)
os.waitpid(p.pid, 0)
'''p2 = subprocess.Popen("ristretto \'temps.png\'", shell = True)
os.waitpid(p2.pid, 1)'''
def calc_wall_thickness(p, path, sigmae, n=1):
with open(path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
it!”
“Your father doesn’t admit that——”
“He must have been.”
“<NAME>. These matters are not for you to judge. You know your
father has done all he honestly could to be fully pardoned, or to
discover the real criminal, and as he hasn’t succeeded, you must rest
content with the knowledge that there was no stone left unturned.”
“But, mother, suppose Mr. Appleby has something more up his sleeve.
Suppose he comes down on dad with some unexpected, some unforeseen blow
that——”
“Maida, be quiet. Don’t make me sorry that we have let you into our
confidence as far as we have. These are matters above your head. Should
such a thing as you hint occur, your father can deal with it.”
“But I want to help——”
“And you can best do that by not trying to help! Your part is to divert
your father, to love him and cheer him and entertain him. You know this,
and you know for you to undertake to advise or suggest is not only
ridiculous but disastrous.”
“All right, mother, I’ll be good. I don’t mean to be silly.”
“You are, when you assume ability you don’t possess.” Mrs. Wheeler’s
loving smile robbed the words of any harsh effect. “Run along now, and
see if dad won’t go for a walk with you; and don’t refer to anything
unpleasant.”
Maida went, and found Wheeler quite ready for a stroll.
“Which way?” he asked as they crossed the south veranda.
“Round the park, and bring up under the tree, and have tea there,”
dictated Maida, her heart already lighter as she obeyed her mother’s
dictum to avoid unpleasant subjects.
But as they walked on, and trivial talk seemed to pall, they naturally
reverted to the discussion of their recent guests.
“Mr. Appleby is an old curmudgeon,” Maida declared; “Mr. Keefe is nice
and well-behaved; but the little Lane girl is a scream! I never saw any
one so funny. Now she was quite a grand lady, and then she was a common
little piece! But underneath it all she showed a lot of good sense and
I’m sure in her work she has real ability.”
“Appleby wouldn’t keep her if she didn’t have,” her father rejoined; “but
why do you call him a curmudgeon? He’s very well-mannered.”
“Oh, yes, he is. And to tell the truth, I’m not sure just what a
curmudgeon is. But—he’s it, anyway.”
“I gather you don’t especially admire my old friend.”
“Friend! If he’s a friend—give me enemies!”
“Fie, fie, Maida, what do you mean? Remember, he gave me my pardon.”
“Yes, a high old pardon! Say, dad, tell me again exactly how he worded
that letter about the tree.”
“I’ve told you a dozen times! He didn’t mean anything anyhow. He only
said, that when the big sycamore tree went into Massachusetts I could
go.”
“What a crazy thing to say, wasn’t it?”
“It was because we had been talking about the play of _Macbeth_. You
remember, ’Till <NAME> shall come to Dunsinane.”
“Oh, yes, and then it did come—by a trick.”
“Yes, the men came, carrying branches. We’d been talking about it,
discussing some point, and then—it seemed clever, I suppose—to Appleby,
and he wrote that about the sycamore.”
“Meaning—never?”
“Meaning never.”
“But <NAME> did go.”
“Only by a trick, and that would not work in this case. Why, are you
thinking of carrying a branch of sycamore into Massachusetts?”
Maida returned his smile as she answered: “I’d manage to carry the whole
tree in, if it would do any good! But, I s’pose, old Puritan Father,
you’re too conscientious to take advantage of a trick?”
“Can’t say, till I know the details of the game. But I doubt Appleby’s
being unable to see through your trick, and then—where are you?”
“That wouldn’t matter. Trick or no trick, if the big sycamore went into
Massachusetts, you could go. But I don’t see any good plan for getting it
in. And, too, Sycamore Ridge wouldn’t be Sycamore Ridge without it. Don’t
you love the old tree, dad?”
“Of course, as I love every stick and stone about the place. It has been
a real haven to me in my perturbed life.”
“Suppose you had to leave it, daddy?”
“I think I’d die, dear. Unless, that is, we could go back home.”
“Isn’t this home?”
“It’s the dearest spot on earth—outside my native state.”
“There, there, dad, don’t let’s talk about it. We’re here for keeps——”
“Heaven send we are, dearest! I couldn’t face the loss of this place.
What made you think of such a thing?”
“Oh, I’m thinking of all sorts of things to-day. But, father, while we’re
talking of moving—couldn’t you—oh, couldn’t you, bring yourself, somehow,
to do what Mr. Appleby wants you to do? I don’t know much about it—but
father, darling, if you _only could_!”
“Maida, my little girl, don’t think I haven’t tried. Don’t think I don’t
realize what it means to you and Jeff. I know—oh, I _do_ know how it
would simplify matters if I should go over to the Appleby side—and push
Sam’s campaign—as I could do it. I know that it would mean my full
pardon, my return to my old home, my reunion with old scenes and
associations. And more than that, it would mean the happiness of my only
child—my daughter—and her chosen husband. And yet, Maida, as God is my
judge, I am honest in my assertion that I _can’t_ so betray my honor and
spend my remaining years a living lie. I can’t do it, Maida—I _can’t_.”
And the calm, sorrowful countenance he turned to the girl was more
positive and final than any further protestation could have been.
CHAPTER V
THE BUGLE SOUNDED TAPS
Although the portions of the house and grounds that were used by Wheeler
included the most attractive spots, yet there were many forbidden places
that were a real temptation to him.
An especial one was the flower-covered arbor that had so charmed
Genevieve and another was the broad and beautiful north veranda. To be
sure, the south piazza was equally attractive, but it was galling to be
compelled to avoid any part of his own domain. However, the passing years
had made the conditions a matter of habit and it was only occasionally
that Wheeler’s annoyance was poignant.
In fact, he and his wife bore the cross better than did Maida. She had
never become reconciled to the unjust and arbitrary dictum of the
conditional pardon. She lived in a constant fear lest her father should
some day inadvertently and unintentionally step on the forbidden ground,
and it should be reported. Indeed, knowing her father’s quixotic honesty,
she was by no means sure he wouldn’t report it himself.
It had never occurred—probably never would occur, and yet, she often
imagined some sudden emergency, such as a fire, or burglars, that might
cause his impulsive invasion of the other side of the house.
In her anxiety she had spoken of this to <NAME> when he was
there. But he gave her no satisfaction. He merely replied: “A condition
is a condition.”
<NAME> had tried to help her cause, by saying: “Surely a case of
danger would prove an exception to the rule,” but Appleby had only shaken
his head in denial.
Though care had been taken to have the larger part of the house on the
Massachusetts side of the line, yet the rooms most used by the family
were in Connecticut. Here was Mr. Wheeler’s den, and this had come to be
the most used room in the whole house. Mrs. Wheeler’s sitting-room, which
her husband never had entered, was also attractive, but both mother and
daughter invaded the den, whenever leisure hours were to be enjoyed.
The den contained a large south bay window, which was Maida’s favorite
spot. It had a broad, comfortable window-seat, and here she spent much of
her time, curled up among the cushions, reading. There were long
curtains, which, half-drawn, hid her from view, and often she was there
for hours, without her father’s knowing it.
His own work was engrossing. Cut off from his established law business in
Massachusetts, he had at first felt unable to start it anew in different
surroundings. Then, owing to his wife’s large fortune, it was decided
that he should give up all business for a time. And as the time went on,
and there was no real necessity | |
# Repository: eubr-bigsea/limonero
# -*- coding: utf-8 -*-
import datetime
import json
from copy import deepcopy
from marshmallow import Schema, fields, post_load, post_dump, EXCLUDE, INCLUDE
from marshmallow.validate import OneOf
from flask_babel import gettext
from limonero.models import *
def partial_schema_factory(schema_cls):
schema = schema_cls(partial=True)
for field_name, field in list(schema.fields.items()):
if isinstance(field, fields.Nested):
new_field = deepcopy(field)
new_field.schema.partial = True
schema.fields[field_name] = new_field
return schema
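# Hedged usage sketch (not part of the original module): build a fully-partial schema for
# PATCH-style updates so omitted fields, including nested ones, do not fail validation.
# AttributeCreateRequestSchema is simply one of the schemas defined later in this module.
def _example_partial_load(payload):
    schema = partial_schema_factory(AttributeCreateRequestSchema)
    return schema.load(payload, partial=True)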
def translate_validation(validation_errors):
for field, errors in list(validation_errors.items()):
if isinstance(errors, dict):
validation_errors[field] = translate_validation(errors)
else:
validation_errors[field] = [gettext(error) for error in errors]
return validation_errors
def load_json(str_value):
try:
return json.loads(str_value)
except BaseException:
return None
# region Protected
def generate_download_token(identifier, expires=None):
from flask import current_app
from cryptography.fernet import Fernet
import time
f = current_app.fernet
if expires is None:
f_expires = 0
else:
f_expires = time.time() + expires
return f.encrypt('{{"id": {}, "expires": {} }}'.format(
identifier, f_expires).encode('utf8')).decode('utf8')
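# Hedged sketch (illustration only): the token produced above is a Fernet-encrypted JSON
# blob, so a receiving endpoint would typically decrypt and check expiry roughly as below.
# It assumes current_app.fernet is configured exactly as in generate_download_token;
# the error handling is hypothetical.
def validate_download_token_example(token):
    import json
    import time
    from flask import current_app
    payload = json.loads(current_app.fernet.decrypt(token.encode('utf8')).decode('utf8'))
    if payload['expires'] and payload['expires'] < time.time():
        raise ValueError('download token expired')
    return payload['id']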
# endregion
class BaseSchema(Schema):
@post_dump
def remove_skip_values(self, data, **kwargs):
return {
key: value for key, value in data.items()
if value is not None # Empty lists must be kept!
}
class AttributeListResponseSchema(BaseSchema):
""" JSON serialization schema """
id = fields.Integer(required=True)
name = fields.String(required=True)
description = fields.String(required=False, allow_none=True)
type = fields.String(required=True,
validate=[OneOf(list(DataType.__dict__.keys()))])
size = fields.Integer(required=False, allow_none=True)
precision = fields.Integer(required=False, allow_none=True)
scale = fields.Integer(required=False, allow_none=True)
nullable = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
enumeration = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
missing_representation = fields.String(required=False, allow_none=True)
feature = fields.Boolean(
required=False,
allow_none=True,
missing=True,
default=True)
label = fields.Boolean(
required=False,
allow_none=True,
missing=True,
default=True)
distinct_values = fields.Integer(required=False, allow_none=True)
mean_value = fields.Float(required=False, allow_none=True)
median_value = fields.String(required=False, allow_none=True)
max_value = fields.String(required=False, allow_none=True)
min_value = fields.String(required=False, allow_none=True)
std_deviation = fields.Float(required=False, allow_none=True)
missing_total = fields.String(required=False, allow_none=True)
deciles = fields.String(required=False, allow_none=True)
format = fields.String(required=False, allow_none=True)
key = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
attribute_privacy = fields.Nested(
'limonero.schema.AttributePrivacyListResponseSchema',
allow_none=True)
# noinspection PyUnresolvedReferences
@post_load
def make_object(self, data, **kwargs):
""" Deserialize data into an instance of Attribute"""
return Attribute(**data)
class Meta:
ordered = True
unknown = EXCLUDE
class AttributeItemResponseSchema(BaseSchema):
""" JSON serialization schema """
id = fields.Integer(required=True)
name = fields.String(required=True)
description = fields.String(required=False, allow_none=True)
type = fields.String(required=True,
validate=[OneOf(list(DataType.__dict__.keys()))])
size = fields.Integer(required=False, allow_none=True)
precision = fields.Integer(required=False, allow_none=True)
scale = fields.Integer(required=False, allow_none=True)
nullable = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
enumeration = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
missing_representation = fields.String(required=False, allow_none=True)
feature = fields.Boolean(
required=False,
allow_none=True,
missing=True,
default=True)
label = fields.Boolean(
required=False,
allow_none=True,
missing=True,
default=True)
distinct_values = fields.Integer(required=False, allow_none=True)
mean_value = fields.Float(required=False, allow_none=True)
median_value = fields.String(required=False, allow_none=True)
max_value = fields.String(required=False, allow_none=True)
min_value = fields.String(required=False, allow_none=True)
std_deviation = fields.Float(required=False, allow_none=True)
missing_total = fields.String(required=False, allow_none=True)
deciles = fields.String(required=False, allow_none=True)
format = fields.String(required=False, allow_none=True)
key = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
attribute_privacy = fields.Nested(
'limonero.schema.AttributePrivacyItemResponseSchema',
allow_none=True)
# noinspection PyUnresolvedReferences
@post_load
def make_object(self, data, **kwargs):
""" Deserialize data into an instance of Attribute"""
return Attribute(**data)
class Meta:
ordered = True
unknown = EXCLUDE
class AttributeCreateRequestSchema(BaseSchema):
""" JSON serialization schema """
id = fields.Integer(allow_none=True)
name = fields.String(required=True)
description = fields.String(required=False, allow_none=True)
type = fields.String(required=True,
validate=[OneOf(list(DataType.__dict__.keys()))])
size = fields.Integer(required=False, allow_none=True)
precision = fields.Integer(required=False, allow_none=True)
scale = fields.Integer(required=False, allow_none=True)
nullable = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
enumeration = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
missing_representation = fields.String(required=False, allow_none=True)
feature = fields.Boolean(
required=False,
allow_none=True,
missing=True,
default=True)
label = fields.Boolean(
required=False,
allow_none=True,
missing=True,
default=True)
distinct_values = fields.Integer(required=False, allow_none=True)
mean_value = fields.Float(required=False, allow_none=True)
median_value = fields.String(required=False, allow_none=True)
max_value = fields.String(required=False, allow_none=True)
min_value = fields.String(required=False, allow_none=True)
std_deviation = fields.Float(required=False, allow_none=True)
missing_total = fields.String(required=False, allow_none=True)
deciles = fields.String(required=False, allow_none=True)
format = fields.String(required=False, allow_none=True)
key = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
attribute_privacy = fields.Nested(
'limonero.schema.AttributePrivacyCreateRequestSchema',
allow_none=True)
# noinspection PyUnresolvedReferences
@post_load
def make_object(self, data, **kwargs):
""" Deserialize data into an instance of Attribute"""
return Attribute(**data)
class Meta:
ordered = True
unknown = EXCLUDE
class AttributePrivacyResponseSchema(BaseSchema):
""" JSON serialization schema """
id = fields.Integer(required=True)
name = fields.String(required=True)
attribute_privacy = fields.Nested(
'limonero.schema.AttributePrivacyItemResponseSchema',
allow_none=True)
# noinspection PyUnresolvedReferences
@post_load
def make_object(self, data, **kwargs):
""" Deserialize data into an instance of Attribute"""
return Attribute(**data)
class Meta:
ordered = True
unknown = EXCLUDE
class AttributePrivacyListResponseSchema(BaseSchema):
""" JSON serialization schema """
id = fields.Integer(required=True)
attribute_name = fields.String(required=True)
data_type = fields.String(required=False, allow_none=True,
validate=[OneOf(list(DataType.__dict__.keys()))])
privacy_type = fields.String(required=True,
validate=[OneOf(list(PrivacyType.__dict__.keys()))])
category_technique = fields.String(required=False, allow_none=True)
anonymization_technique = fields.String(required=True,
validate=[OneOf(list(AnonymizationTechnique.__dict__.keys()))])
hierarchical_structure_type = fields.String(
required=False, allow_none=True)
privacy_model_technique = fields.String(required=False, allow_none=True)
hierarchy = fields.String(required=False, allow_none=True)
category_model = fields.String(required=False, allow_none=True)
privacy_model = fields.String(required=False, allow_none=True)
privacy_model_parameters = fields.String(required=False, allow_none=True)
unlock_privacy_key = fields.String(required=False, allow_none=True)
is_global_law = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
attribute_privacy_group = fields.Nested(
'limonero.schema.AttributePrivacyGroupListResponseSchema',
allow_none=True)
# noinspection PyUnresolvedReferences
@post_load
def make_object(self, data, **kwargs):
""" Deserialize data into an instance of AttributePrivacy"""
return AttributePrivacy(**data)
class Meta:
ordered = True
unknown = EXCLUDE
class AttributePrivacyItemResponseSchema(BaseSchema):
""" JSON serialization schema """
id = fields.Integer(required=True)
attribute_name = fields.String(required=True)
data_type = fields.String(required=False, allow_none=True,
validate=[OneOf(list(DataType.__dict__.keys()))])
privacy_type = fields.String(required=True,
validate=[OneOf(list(PrivacyType.__dict__.keys()))])
category_technique = fields.String(required=False, allow_none=True)
anonymization_technique = fields.String(required=True,
validate=[OneOf(list(AnonymizationTechnique.__dict__.keys()))])
hierarchical_structure_type = fields.String(
required=False, allow_none=True)
privacy_model_technique = fields.String(required=False, allow_none=True)
hierarchy = fields.String(required=False, allow_none=True)
category_model = fields.String(required=False, allow_none=True)
privacy_model = fields.String(required=False, allow_none=True)
privacy_model_parameters = fields.String(required=False, allow_none=True)
unlock_privacy_key = fields.String(required=False, allow_none=True)
is_global_law = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
attribute_privacy_group_id = fields.Integer(
required=False, allow_none=True)
# noinspection PyUnresolvedReferences
@post_load
def make_object(self, data, **kwargs):
""" Deserialize data into an instance of AttributePrivacy"""
return AttributePrivacy(**data)
class Meta:
ordered = True
unknown = EXCLUDE
class AttributePrivacyCreateRequestSchema(BaseSchema):
""" JSON serialization schema """
id = fields.Integer(allow_none=True)
attribute_name = fields.String(required=True)
data_type = fields.String(required=False, allow_none=True,
validate=[OneOf(list(DataType.__dict__.keys()))])
privacy_type = fields.String(required=True,
validate=[OneOf(list(PrivacyType.__dict__.keys()))])
category_technique = fields.String(required=False, allow_none=True)
anonymization_technique = fields.String(required=True,
validate=[OneOf(list(AnonymizationTechnique.__dict__.keys()))])
hierarchical_structure_type = fields.String(
required=False, allow_none=True)
privacy_model_technique = fields.String(required=False, allow_none=True)
hierarchy = fields.String(required=False, allow_none=True)
category_model = fields.String(required=False, allow_none=True)
privacy_model = fields.String(required=False, allow_none=True)
privacy_model_parameters = fields.String(required=False, allow_none=True)
unlock_privacy_key = fields.String(required=False, allow_none=True)
attribute_id = fields.Integer(required=False, allow_none=True)
attribute_privacy_group_id = fields.Integer(
required=False, allow_none=True)
# noinspection PyUnresolvedReferences
@post_load
def make_object(self, data, **kwargs):
""" Deserialize data into an instance of AttributePrivacy"""
return AttributePrivacy(**data)
class Meta:
ordered = True
unknown = EXCLUDE
class AttributePrivacyPrivacyResponseSchema(BaseSchema):
""" JSON serialization schema """
id = fields.Integer(required=True)
data_type = fields.String(required=False, allow_none=True,
validate=[OneOf(list(DataType.__dict__.keys()))])
privacy_type = fields.String(required=True,
validate=[OneOf(list(PrivacyType.__dict__.keys()))])
category_technique = fields.String(required=False, allow_none=True)
anonymization_technique = fields.String(required=True,
validate=[OneOf(list(AnonymizationTechnique.__dict__.keys()))])
hierarchical_structure_type = fields.String(
required=False, allow_none=True)
privacy_model_technique = fields.String(required=False, allow_none=True)
hierarchy = fields.String(required=False, allow_none=True)
category_model = fields.String(required=False, allow_none=True)
privacy_model = fields.String(required=False, allow_none=True)
privacy_model_parameters = fields.String(required=False, allow_none=True)
unlock_privacy_key = fields.String(required=False, allow_none=True)
is_global_law = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
# noinspection PyUnresolvedReferences
@post_load
def make_object(self, data, **kwargs):
""" Deserialize data into an instance of AttributePrivacy"""
return AttributePrivacy(**data)
class Meta:
ordered = True
unknown = EXCLUDE
class AttributePrivacyGroupListResponseSchema(BaseSchema):
""" JSON serialization schema """
id = fields.Integer(required=True)
name = fields.String(required=True)
# noinspection PyUnresolvedReferences
@post_load
def make_object(self, data, **kwargs):
""" Deserialize data into an instance of AttributePrivacyGroup"""
return AttributePrivacyGroup(**data)
class Meta:
ordered = True
unknown = EXCLUDE
class AttributePrivacyGroupItemResponseSchema(BaseSchema):
""" JSON serialization schema """
id = fields.Integer(required=True)
name = fields.String(required=True)
# noinspection PyUnresolvedReferences
@post_load
def make_object(self, data, **kwargs):
""" Deserialize data into an instance of AttributePrivacyGroup"""
return AttributePrivacyGroup(**data)
class Meta:
ordered = True
unknown = EXCLUDE
class AttributePrivacyGroupCreateRequestSchema(BaseSchema):
""" JSON serialization schema """
id = fields.Integer(allow_none=True)
name = fields.String(required=True)
user_id = fields.Integer(required=True)
# noinspection PyUnresolvedReferences
@post_load
def make_object(self, data, **kwargs):
""" Deserialize data into an instance of AttributePrivacyGroup"""
return AttributePrivacyGroup(**data)
class Meta:
ordered = True
unknown = EXCLUDE
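# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how one of the request schemas above is typically
# exercised: load() validates the payload and the @post_load hook returns a
# model instance. The payload values below are hypothetical.
def _example_load_attribute_privacy_group():
    payload = {"name": "example group", "user_id": 1}
    schema = AttributePrivacyGroupCreateRequestSchema()
    # Returns AttributePrivacyGroup(**data) via make_object(); unknown keys are
    # dropped because Meta.unknown = EXCLUDE.
    return schema.load(payload)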
class DataSourceExecuteRequestSchema(BaseSchema):
""" JSON schema for executing tasks """
id = fields.Integer(required=True)
name = fields.String(required=True)
# noinspection PyUnresolvedReferences
@post_load
def make_object(self, data, **kwargs):
""" Deserialize data into an instance of DataSource"""
return DataSource(**data)
class Meta:
ordered = True
unknown = EXCLUDE
class DataSourceListResponseSchema(BaseSchema):
""" JSON serialization schema """
id = fields.Integer(required=True)
name = fields.String(required=True)
description = fields.String(required=False, allow_none=True)
enabled = fields.Boolean(
required=False,
allow_none=True,
missing=True,
default=True)
statistics_process_counter = fields.Integer(
required=False, allow_none=True, missing=0, default=0)
read_only = fields.Boolean(
required=False,
allow_none=True,
missing=True,
default=True)
privacy_aware = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
url = fields.String(required=True)
created = fields.DateTime(required=False, allow_none=True)
updated = fields.DateTime(
required=False,
allow_none=True,
missing=datetime.datetime.utcnow,
default=datetime.datetime.utcnow)
format = fields.String(required=True,
validate=[OneOf(list(DataSourceFormat.__dict__.keys()))])
initialization = fields.String(required=False, allow_none=True, missing=DataSourceInitialization.INITIALIZED, default=DataSourceInitialization.INITIALIZED,
validate=[OneOf(list(DataSourceInitialization.__dict__.keys()))])
initialization_job_id = fields.String(required=False, allow_none=True)
provenience = fields.String(required=False, allow_none=True)
estimated_rows = fields.Integer(
required=False,
allow_none=True,
missing=0,
default=0)
estimated_size_in_mega_bytes = fields.Decimal(
required=False, allow_none=True)
expiration = fields.String(required=False, allow_none=True)
user_id = fields.Integer(required=False, allow_none=True)
user_login = fields.String(required=False, allow_none=True)
user_name = fields.String(required=False, allow_none=True)
tags = fields.String(required=False, allow_none=True)
temporary = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
workflow_id = fields.Integer(required=False, allow_none=True)
task_id = fields.String(required=False, allow_none=True)
attribute_delimiter = fields.String(required=False, allow_none=True)
record_delimiter = fields.String(required=False, allow_none=True)
text_delimiter = fields.String(required=False, allow_none=True)
is_public = fields.Boolean(
required=False,
allow_none=True,
missing=False,
default=False)
treat_as_missing = fields.String(required=False, allow_none=True)
detected in check special 'scene' subgroup."
# " Stray metadata files are ignored for the purpose of flagging"
# " higher-level check special groups as incomplete due to missing suffixes.")
# warnings.showwarning = script_utils.showwarning_stderr
# continue
missing_suffixes = [s for s in src_suffixes_subgroup if not ends_one_of_coll(s, src_rasters_subgroup)]
if missing_suffixes and args.get(ARGSTR_CHECK_SPECIAL) is not None:
missing_suffixes_set = set(missing_suffixes)
if args.get(ARGSTR_CHECK_SPECIAL) in ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM_SCENELEVEL:
if CHECK_SPECIAL_DEM_SUFFIX_SCENELEVEL_MATCHTAG_SET.issubset(missing_suffixes_set):
missing_suffixes_set.difference_update(CHECK_SPECIAL_DEM_SUFFIX_SCENELEVEL_MATCHTAG_SET)
missing_suffixes_set.difference_update(CHECK_SPECIAL_DEM_SUFFIX_OPTIONAL_SCENELEVEL_SET)
if missing_suffixes_set and args.get(ARGSTR_CHECK_SPECIAL) in ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM:
if ( ( CHECK_SPECIAL_DEM_SUFFIX_ORTHO2 in missing_suffixes_set
or CHECK_SPECIAL_DEM_SUFFIX_ORTHO2_10M in missing_suffixes_set)
and ((not check_group_is_xtrack) or args.get(ARGSTR_ALLOW_MISSING_ORTHO2))):
if CHECK_SPECIAL_DEM_SUFFIX_ORTHO2 in missing_suffixes_set:
missing_suffixes_set.remove(CHECK_SPECIAL_DEM_SUFFIX_ORTHO2)
if CHECK_SPECIAL_DEM_SUFFIX_ORTHO2_10M in missing_suffixes_set:
missing_suffixes_set.remove(CHECK_SPECIAL_DEM_SUFFIX_ORTHO2_10M)
if missing_suffixes_set and args.get(ARGSTR_CHECK_SPECIAL) in ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM_STRIPLEVEL:
missing_suffixes_set.difference_update(CHECK_SPECIAL_DEM_SUFFIX_OPTIONAL_STRIPLEVEL_SET)
missing_suffixes = [s for s in missing_suffixes if s in missing_suffixes_set]
if missing_suffixes:
warnings.warn("Source file suffixes for a check group were not found")
missing_suffix_errmsg = (
"Check {}group {}; missing the following source file suffixes: {}".format(
"special '{}' sub".format(check_special_option)*(check_special_option is not None),
checkfile_root_subgroup, missing_suffixes
)
)
if args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR) and checkgroup_errfile is not None:
if not args.get(ARGSTR_DRYRUN):
with open(checkgroup_errfile, 'a') as checkgroup_errfile_fp:
checkgroup_errfile_fp.write(missing_suffix_errmsg+'\n')
if warn_missing_suffix:
eprint(missing_suffix_errmsg)
if type(missing_suffix_flag) is list and len(missing_suffix_flag) == 1:
missing_suffix_flag[0] = True
delete_files = (delete_files or args.get(ARGSTR_RMWHERE_MISSING_SUFFIX))
if missing_suffix_flag[0] and check_special_option is None:
break
src_raster_errfnames = [f+errfile_ext for f in src_rasters if os.path.isfile(os.path.join(checkfile_dir, f+errfile_ext))]
if checkgroup_errfile is not None and os.path.isfile(checkgroup_errfile):
src_raster_errfnames.append(checkgroup_errfile)
if src_raster_errfnames:
warnings.warn("Error files were found among source files for a check group")
if warn_errfile_exists:
eprint("Check group {}; {} error files were found among source selection:".format(
checkfile_root, len(src_raster_errfnames)))
for f in sorted(list(src_raster_errfnames)):
eprint(f)
if type(errfile_count) is list and len(errfile_count) == 1:
errfile_count[0] = len(src_raster_errfnames)
delete_files = (delete_files or args.get(ARGSTR_RMWHERE_ERRFILE_EXISTS))
delete_dryrun = (args.get(ARGSTR_DRYRUN) or not args.get(ARGSTR_DO_DELETE))
if ( (delete_files and checkfile_exists)
and args.get(ARGSTR_REMOVE_TYPE) in [ARGCHO_REMOVE_TYPE_CHECKFILES, ARGCHO_REMOVE_TYPE_BOTH]):
eprint("Removing checkfile"+" (dryrun)"*delete_dryrun)
cmd = "rm {}".format(checkfile)
if args.get(ARGSTR_DO_DELETE):
eprint(cmd)
if not delete_dryrun:
os.remove(checkfile)
if type(checkfile_removed_flag) is list and len(checkfile_removed_flag) == 1:
checkfile_removed_flag[0] = True
src_rasters_to_check = src_rasters
if ( delete_files
and args.get(ARGSTR_REMOVE_TYPE) in [ARGCHO_REMOVE_TYPE_SOURCEFILES, ARGCHO_REMOVE_TYPE_BOTH]):
eprint("Removing source files"+" (dryrun)"*delete_dryrun)
srcfnames_to_remove = list(src_rasters) + src_raster_errfnames
for fn in srcfnames_to_remove:
srcfile_to_remove = os.path.join(checkfile_dir, fn)
cmd = "rm {}".format(srcfile_to_remove)
if args.get(ARGSTR_DO_DELETE):
eprint(cmd)
if not delete_dryrun:
os.remove(srcfile_to_remove)
return -1
return list(src_rasters_to_check) if return_incomplete_src_rasters else bool(src_rasters_to_check)
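# --- Illustrative sketch (not part of the original script) ---
# Isolates the suffix-completeness pattern used above: every expected source
# suffix must be matched by at least one file in the check group. The file
# names and suffixes below are made up.
def _example_missing_suffix_check():
    src_rasters = {"scene1_dem.tif", "scene1_matchtag.tif"}
    expected_suffixes = ["_dem.tif", "_matchtag.tif", "_ortho.tif"]
    missing = [s for s in expected_suffixes
               if not any(f.endswith(s) for f in src_rasters)]
    return missing  # -> ["_ortho.tif"]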
def main():
global LOGGER
# Invoke argparse argument parsing.
arg_parser = argparser_init()
try:
args = script_utils.ArgumentPasser(PYTHON_EXE, SCRIPT_FILE, arg_parser, sys.argv)
except ScriptArgumentError as e:
arg_parser.error(e)
## Further parse/adjust argument values.
src = args.get(ARGSTR_SRC)
search_depth = args.get(ARGSTR_DEPTH)
verify_by_pairname_dir_depth = args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH)
checkfile_ext = args.get(ARGSTR_CHECKFILE_EXT)
errfile_ext = args.get(ARGSTR_ERRFILE_EXT)
allow_missing_suffix = args.get(ARGSTR_ALLOW_MISSING_SUFFIX)
retry_errors = args.get(ARGSTR_RETRY_ERRORS)
warn_errfile_exists = (not args.get(ARGSTR_SUPPRESS_ERRFILE_EXISTS) or args.get(ARGSTR_RMWHERE_ERRFILE_EXISTS))
warn_missing_suffix = (not args.get(ARGSTR_SUPPRESS_MISSING_SUFFIX) or args.get(ARGSTR_RMWHERE_MISSING_SUFFIX))
warn_missing_checked = (not args.get(ARGSTR_SUPPRESS_MISSING_CHECKED) or args.get(ARGSTR_RMWHERE_MISSING_CHECKED))
warn_new_source = (not args.get(ARGSTR_SUPPRESS_NEW_SOURCE) or args.get(ARGSTR_RMWHERE_NEW_SOURCE))
try_removal = (True in args.get(ARGGRP_RMWHERE))
allow_remove_checkfiles = args.get(ARGSTR_REMOVE_TYPE) in [ARGCHO_REMOVE_TYPE_CHECKFILES, ARGCHO_REMOVE_TYPE_BOTH]
allow_remove_sourcefiles = args.get(ARGSTR_REMOVE_TYPE) in [ARGCHO_REMOVE_TYPE_SOURCEFILES, ARGCHO_REMOVE_TYPE_BOTH]
delete_dryrun = (args.get(ARGSTR_DRYRUN) or not args.get(ARGSTR_DO_DELETE))
if args.get(ARGSTR_DEBUG):
LOGGER.setLevel(logging.DEBUG)
verifying_strips = (args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR) and args.get(ARGSTR_CHECK_SPECIAL) == ARGCHO_CHECK_SPECIAL_STRIPS)
if args.get(ARGSTR_SCHEDULER) is not None:
if args.get(ARGSTR_JOBSCRIPT) is None:
jobscript_default = os.path.join(JOBSCRIPT_DIR, 'head_{}.sh'.format(args.get(ARGSTR_SCHEDULER)))
if not os.path.isfile(jobscript_default):
arg_parser.error(
"Default jobscript ({}) does not exist, ".format(jobscript_default)
+ "please specify one with {} argument".format(ARGSTR_JOBSCRIPT))
else:
args.set(ARGSTR_JOBSCRIPT, jobscript_default)
print("argument {} set automatically to: {}".format(ARGSTR_JOBSCRIPT, args.get(ARGSTR_JOBSCRIPT)))
## Validate argument values.
argstr_mutexl_checkfile = [
ARGSTR_CHECKFILE,
ARGSTR_CHECKFILE_ROOT,
ARGSTR_CHECKFILE_ROOT_REGEX,
ARGSTR_CHECK_SPECIAL
]
argstr_incompat_sched = [ARGSTR_CHECKFILE, ARGSTR_CHECKFILE_ROOT]
if args.get(argstr_mutexl_checkfile).count(None) < (len(argstr_mutexl_checkfile)-1):
arg_parser.error("Only one of the following checkfile arguments may be provided: {}".format(argstr_mutexl_checkfile))
if args.get(ARGSTR_CHECK_SPECIAL) is not None:
check_special_option = args.get(ARGSTR_CHECK_SPECIAL)
for check_special_set_argstr, check_special_set_value in ARGCHOSET_CHECK_SPECIAL_SETTING_DICT[check_special_option]:
if args.provided(check_special_set_argstr):
continue
if check_special_option in ARGCHOGRP_CHECK_SPECIAL_SETSM_DEM_SCENELEVEL and check_special_set_argstr == ARGSTR_SRC_SUFFIX:
check_special_set_value = '/'.join([
ARGCHOSET_CHECK_SPECIAL_DEMTYPE_SUFFIX_DICT[args.get(ARGSTR_CHECK_SPECIAL_DEMTYPE)],
check_special_set_value
])
args.set(check_special_set_argstr, check_special_set_value)
print("via provided argument {}={}, argument {} set automatically to: '{}'".format(
ARGSTR_CHECK_SPECIAL, args.get(ARGSTR_CHECK_SPECIAL),
check_special_set_argstr, args.get(check_special_set_argstr)))
if args.get(ARGSTR_INDEX_PAIRNAMES_TO_JSON):
if not args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR):
arg_parser.error("{} option can only be used in conjuction with {} option".format(
ARGSTR_INDEX_PAIRNAMES_TO_JSON, ARGSTR_VERIFY_BY_PAIRNAME_DIR
))
if args.get(ARGSTR_CHECK_SPECIAL) not in ARGCHOSET_CHECK_SPECIAL_INDEX_MODE_DICT:
arg_parser.error("{} option requires {} must be set to one of {}".format(
ARGSTR_INDEX_PAIRNAMES_TO_JSON, ARGSTR_CHECK_SPECIAL, sorted(ARGCHOSET_CHECK_SPECIAL_INDEX_MODE_DICT.keys())
))
if not os.path.isfile(INDEX_SETSM_SCRIPT):
arg_parser.error(
"{} option requires the 'pgcdemtools' repo to exist alongside this repo, "
"but SETSM indexing script does not exist: {}".format(
ARGSTR_INDEX_PAIRNAMES_TO_JSON, SCRIPT_DIR, INDEX_SETSM_SCRIPT)
)
for removal_argstr in ARGGRP_REQUIRES_RMWHERE:
if args.get(removal_argstr) and not try_removal:
arg_parser.error("{} option can only be used in conjunction with one of the following "
"removal arguments: {}".format(removal_argstr, ARGGRP_RMWHERE))
if args.get(ARGSTR_SCHEDULER) is not None and args.get(argstr_incompat_sched).count(None) < len(argstr_incompat_sched):
arg_parser.error("{} option is incompatible with the following arguments: {}".format(
ARGSTR_SCHEDULER, argstr_incompat_sched
))
if args.get(ARGSTR_TASKS_PER_JOB) is not None and not args.get(ARGSTR_SCHEDULER):
arg_parser.error("{} option requires {} option".format(ARGSTR_TASKS_PER_JOB, ARGSTR_SCHEDULER))
src_suffixes = [s.strip() for s in args.get(ARGSTR_SRC_SUFFIX).split('/')]
if ( endswith_one_of_coll(SETSM_META_SUFFIX, src_suffixes, case_sensitive=False)
and args.get(ARGSTR_CHECK_SPECIAL) not in ARGCHOGRP_CHECK_SPECIAL_SETSM):
arg_parser.error("argument {} suffix '{}' that could match SETSM meta suffix '{}' "
"may only be provided when argument {} is set to one of the following SETSM options: {}".format(
ARGSTR_SRC_SUFFIX, endswith_one_of_coll(SETSM_META_SUFFIX, src_suffixes, case_sensitive=False, return_match=True),
SETSM_META_SUFFIX, ARGSTR_CHECK_SPECIAL, ARGCHOGRP_CHECK_SPECIAL_SETSM
))
checkfile_root_regex = (re.compile(args.get(ARGSTR_CHECKFILE_ROOT_REGEX))
if args.get(ARGSTR_CHECKFILE_ROOT_REGEX) is not None else None)
if args.get(ARGSTR_VERIFY_QUICK_CHECK):
## Do quick verification check and exit
print("\nDoing verification quick check...")
if not os.path.isdir(args.get(ARGSTR_SRC)):
arg_parser.error("{} must be a directory when {} option is provided".format(
ARGSTR_SRC, ARGSTR_VERIFY_QUICK_CHECK
))
srcdir = args.get(ARGSTR_SRC)
pairname_dir_list = []
for root, dnames, fnames in walk.walk(srcdir, maxdepth=verify_by_pairname_dir_depth):
for dn in dnames:
if re.match(ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL, dn) is not None:
pairname_dir = os.path.join(root, dn)
pairname_dir_list.append(pairname_dir)
pairname_dir_num_total = len(pairname_dir_list)
if len(pairname_dir_list) == 0:
eprint("ERROR: No pairname directories were found with {} and {}={}".format(
ARGSTR_VERIFY_BY_PAIRNAME_DIR, ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH, verify_by_pairname_dir_depth
))
sys.exit(1)
else:
print("Found {} pairname directories within {}".format(pairname_dir_num_total, srcdir))
print('')
pairname_dir_not_done_list = []
pairname_dir_empty_list = []
for pairname_dir in pairname_dir_list:
pairname_errfile = pairname_dir+errfile_ext
pnamedir_checkfile = pairname_dir+checkfile_ext
pnamedir_jsonfile = pairname_dir+'.json'
pnamedir_errfile_exists = os.path.isfile(pairname_errfile)
pnamedir_checkfile_exists = os.path.isfile(pnamedir_checkfile)
pnamedir_jsonfile_exists = os.path.isfile(pnamedir_jsonfile)
if pnamedir_errfile_exists or not (pnamedir_checkfile_exists and pnamedir_jsonfile_exists):
for _, _, srcfname_list in walk.walk(pairname_dir, maxdepth=1):
break
if len(srcfname_list) == 0:
print("WARNING: Pairname directory is empty: {}".format(pairname_dir))
pairname_dir_empty_list.append(pairname_dir)
if pnamedir_checkfile_exists or pnamedir_jsonfile_exists:
print("ERROR: Empty pairname directory has a checkfile or JSON file: {}".format(pairname_dir))
else:
continue
elif len(srcfname_list) == 1 and verifying_strips:
single_strip_fname = srcfname_list[0]
if single_strip_fname.endswith('.fin'):
if pnamedir_jsonfile_exists:
print("ERROR: Pairname directory with lone strip finfile has JSON file: {}".format(pnamedir_jsonfile))
elif not pnamedir_checkfile_exists:
continue
else:
with open(pnamedir_checkfile, 'r') as check_strips_fin_fp:
strip_finfname = check_strips_fin_fp.read().strip()
if strip_finfname == single_strip_fname:
continue
else:
print("ERROR: Solo strip finfile in pairname directory checkfile ({}) "
"does not match existing lone strip finfile ({}): {}".format(
strip_finfname, single_strip_fname, pnamedir_checkfile
))
print("Pairname directory containing {} files, where {}, has not passed verification: {}".format(
len(srcfname_list),
"(errfile {}, checkfile {}, JSON {})".format(
*['exists' if file_exists else 'DNE' for file_exists in [
pnamedir_errfile_exists,
pnamedir_checkfile_exists,
pnamedir_jsonfile_exists
]]
),
pairname_errfile if pnamedir_errfile_exists else pairname_dir
))
pairname_dir_not_done_list.append(pairname_dir)
print('')
if len(pairname_dir_not_done_list) == 0:
print("All pairname directories have passed verification!")
else:
print("{} pairname directories have not yet passed verification:\n {}".format(
len(pairname_dir_not_done_list), '\n '.join(pairname_dir_not_done_list)
))
if len(pairname_dir_empty_list) != 0:
print("{} pairname directories are empty:\n {}".format(
len(pairname_dir_empty_list), '\n '.join(pairname_dir_empty_list)
))
sys.exit(0)
## Scan source dir/file input to determine which source files should be checked.
checkffileroot_srcfnamechecklist_dict = None
srcffile_checklist = None
num_srcfiles = 0
num_checkgroups = None
srcfile_count = [None]
errfile_count = [None]
missing_suffix_flag = [False]
checkfile_removed_flag = [False]
print("-----")
if not args.get(ARGSTR_CHECKFILE_OFF):
print("Checkfile extension: {}".format(checkfile_ext))
print("Error file extension: {}".format(errfile_ext))
print("Accepted source file suffixes: {}".format(src_suffixes))
print("-----")
print("Any check group warnings would appear here:")
srcdir = None
if os.path.isdir(src):
srcdir = src
if ( args.get(ARGSTR_CHECKFILE_ROOT_REGEX) is not None
and args.get(ARGSTR_CHECK_SPECIAL) != ARGCHO_CHECK_SPECIAL_ALL_SEPARATE):
checkffileroot_srcfnamechecklist_dict = dict()
if args.get(ARGSTR_VERIFY_BY_PAIRNAME_DIR):
pairname_dir_list = []
if re.match(ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL, os.path.basename(srcdir)) is not None:
pairname_dir_list.append(srcdir)
else:
for root, dnames, fnames in walk.walk(srcdir, maxdepth=verify_by_pairname_dir_depth):
for dn in dnames:
if re.match(ARGCHOSET_CHECK_SPECIAL_DEM_REGEX_STRIPLEVEL, dn) is not None:
pairname_dir = os.path.join(root, dn)
pairname_dir_list.append(pairname_dir)
if len(pairname_dir_list) == 0:
eprint("No pairname directories were found with {} and {}={}".format(
ARGSTR_VERIFY_BY_PAIRNAME_DIR, ARGSTR_VERIFY_BY_PAIRNAME_DIR_DEPTH, verify_by_pairname_dir_depth
))
for pairname_dir in pairname_dir_list:
srcfname_list = []
for _, _, srcfname_list in walk.walk(pairname_dir, maxdepth=1):
break
if len(srcfname_list) == 1 and verifying_strips:
single_strip_fname = srcfname_list[0]
if single_strip_fname.endswith('.fin'):
strip_finfname = single_strip_fname
with open(pairname_dir+'.check', 'w') as check_strips_fin_fp:
check_strips_fin_fp.write(strip_finfname)
continue
for srcfname in srcfname_list:
if endswith_one_of_coll(srcfname, src_suffixes):
match = re.match(checkfile_root_regex, srcfname)
if match is None:
eprint("No regex match for filename matching suffix criteria in source directory: {}".format(srcfname))
else:
if pairname_dir not in checkffileroot_srcfnamechecklist_dict:
checkffileroot_srcfnamechecklist_dict[pairname_dir] = []
checkffileroot_srcfnamechecklist_dict[pairname_dir].append(srcfname)
else:
for root, dnames,
- :class:`~adorn.exception.type_check_error.MissingDependencyError`:
``typing.Literal`` requested state that wasn't specified in the
local state
Args:
target_cls (Parameter): container parameter's state
literal_dict (Dict[str, str]): dependency request
args (Dict[str, Any]): local state
Returns:
Optional[TypeCheckError]: ``TypeCheckError`` if the dependency
request can't be fulfilled by the local state, otherwise ``None``
"""
missing_keys = literal_dict.keys() - args.keys()
if missing_keys:
return MissingDependencyError(
target_cls=target_cls,
missing_dependency={k: literal_dict[k] for k in missing_keys},
)
@classmethod
def perfunctory_type_check(
cls,
target_cls: Parameter,
orchestrator: "Orchestrator",
obj: Dict[Any, Any],
dependent_from_obj: bool = False,
) -> Optional[TypeCheckError]:
"""Ensure the Dependent type specified is well formed
Exceptions:
- see :class:`~adorn.unit.parameter_value.Dependent.check_args`
- see :class:`~adorn.unit.complex.Complex.general_check`
- see :class:`~adorn.unit.parameter_value.Dependent.check_literal_dict`
Args:
target_cls (Parameter): specification of the parameter
and its associated state
orchestrator (Orchestrator): container of types, used
to check nested types
obj (Dict[Any, Any]): the arguments to construct the parameter
dependent_from_obj (bool): if ``True``, the request is grabbing
instantiated values, otherwise configured values
Returns:
Optional[TypeCheckError]: ``TypeCheckError`` if there was an issue
with the ``DependentType``, otherwise ``None``
"""
check_arg_output = cls.check_args(
target_cls=target_cls, orchestrator=orchestrator
)
if check_arg_output is not None:
return check_arg_output
# perform a Complex.general_check on the given wrapped type
wrapped_cls = target_cls.args[0]
cls_unit = orchestrator.get(wrapped_cls)
wrapped_cls_general_check = cls_unit.general_check(
wrapped_cls, orchestrator, obj
)
if wrapped_cls_general_check is not None:
return wrapped_cls_general_check
# check that the literal dict is well specified
check_literal_dict_output = cls.check_literal_dict(
target_cls=target_cls, obj=obj, dependent_from_obj=dependent_from_obj
)
if check_literal_dict_output is not None:
return check_literal_dict_output
@classmethod
def _from_obj(
cls, target_cls: Parameter, orchestrator: "Orchestrator", obj: Dict[Any, Any]
) -> Dict[Any, Any]:
"""Instantiate a parameter, using arguments and state from the :class:`~adorn.data.constructor.Constructor`
Exceptions:
- :class:`~adorn.exception.type_check_error.MalformedDependencyError`:
the ``Dependent`` type didn't specify the appropriate number of
arguments
- :class:`~adorn.exception.type_check_error.MissingLiteralError`:
the first argument, zero based counting, wasn't a ``typing.Literal``
- :class:`~adorn.exception.type_check_error.UnaryLiteralError`:
``typing.Literal`` specifies more than one argument
- :class:`~adorn.exception.type_check_error.MalformedLiteralError`:
``typing.Literal`` wrapped a value that wasn't of type
``Dict[str, str]``
Args:
target_cls (Parameter): the requested type and relevant state
orchestrator (Orchestrator): container of all types, typically used
to recurse down nested types
obj (Any): an instance, containing the arguments for
:attr:`~adorn.data.parameter.Parameter.cls`
Returns:
Any: An instance of :attr:`~adorn.data.parameter.Parameter.cls`
""" # noqa: B950, DAR401, RST304
check_arg_output = cls.check_args(
target_cls=target_cls, orchestrator=orchestrator
)
if check_arg_output is not None:
raise check_arg_output
literal_dict = target_cls.args[1].__args__[0]
args = cls.get_args(
target_cls=target_cls, literal_dict=literal_dict, dependent_from_obj=True
)
# check all required state exists
literal_dict_key_check = cls.check_literal_dict_keys(
target_cls=target_cls, literal_dict=literal_dict, args=args
)
if literal_dict_key_check is not None:
raise literal_dict_key_check
# enrich obj with dependent state
for k, v in args.items():
if k not in obj:
obj[k] = v
subcls = target_cls.args[0].resolve_class_name(obj["type"])
return orchestrator.from_obj(subcls, obj)
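# --- Illustrative sketch (not from the adorn source) ---
# Mimics, in plain Python, how a ``Literal[Dict[str, str]]`` dependency request
# is resolved against local state by ``check_literal_dict_keys``/``get_args``
# above: keys are parameter names, values are the state being requested.
# All names below are hypothetical.
def _example_dependency_resolution():
    literal_dict = {"input_dim": "embedder.dim"}  # parameter name -> requested state
    local_state = {"input_dim": 128}              # state gathered from the constructor
    missing = literal_dict.keys() - local_state.keys()
    if missing:
        raise KeyError(f"missing dependencies: {sorted(missing)}")
    return {k: local_state[k] for k in literal_dict}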
@ParameterValue.register("dependent_type_check")
class DependentTypeCheck(Generic[_T, DictStrStr], Dependent):
"""Parameter that requires local state that is known before instantiation
The first argument must be a :class:`~adorn.unit.complex.Complex` that requires
state, known before construction, from the constructor. The second argument is
a ``typing.Literal[Dict[str, str]]`` whose keys specify the names of the
dependent parameters and whose values name the state needed from the constructor.
"""
@classmethod
def _contains(cls, obj: Parameter, orchestrator: "Orchestrator") -> bool:
"""Check if the ``obj`` is an instance of ``DependentTypeCheck``
Args:
obj (Parameter): potentially an instance of
``DependentTypeCheck``
orchestrator (Orchestrator): container of all types, typically
used to recurse nested types
Returns:
bool: if ``True``, ``obj`` is an instance of
``DependentTypeCheck``
""" # noqa: B950
if obj.origin != DependentTypeCheck:
return False
return (orchestrator.contains(obj.args[0])) and issubclass(obj.args[0], Complex)
@classmethod
def _type_check(
cls, target_cls: Parameter, orchestrator: "Orchestrator", obj: Dict[Any, Any]
) -> Optional[TypeCheckError]:
"""Type check a parameter, using arguments and state from the :class:`~adorn.data.constructor.Constructor`
Exceptions:
- see :class:`~adorn.unit.parameter_value.Dependent.perfunctory_type_check`
- :class:`~adorn.exception.type_check_error.KeyValueError`:
errors for the argument(s) from ``obj`` and/or local state
Args:
target_cls (Parameter): the requested type and relevant state
orchestrator (Orchestrator): container of all types, typically used
to recurse down nested types
obj (Any): an instance, containing the arguments for
:attr:`~adorn.data.parameter.Parameter.cls`
Returns:
Optional[TypeCheckError]: ``TypeCheckError``, if the state and arguments
can't generate an instance of :attr:`~adorn.data.parameter.Parameter.cls`,
otherwise ``None``
""" # noqa: B950, RST304
perfunctory_type_check_output = cls.perfunctory_type_check(
target_cls=target_cls, orchestrator=orchestrator, obj=obj
)
if perfunctory_type_check_output is not None:
return perfunctory_type_check_output
literal_dict = target_cls.args[1].__args__[0]
constructor = cls.get_constructor(target_cls=target_cls, obj=obj)
# perform a Complex.general_check on the requested values
# need to parse the correct Complex instance for the given
# parameter's type
args = Params(cls.get_args(target_cls=target_cls, literal_dict=literal_dict))
bad_args = dict()
for k, v in args.items():
sub = constructor.parameters[k].annotation
unit = orchestrator.get(sub)
arg_type_check = None
if getattr(sub, "__origin__", None) is None and isinstance(v, sub):
# object is already instantiated, so we assume it is ok
arg_type_check = None
elif isinstance(unit, Complex):
# we do a shallow check on complex types because they may contain
# their own dependencies.
arg_type_check = unit.general_check(
sub, orchestrator=orchestrator, obj=v
)
else:
arg_type_check = orchestrator.type_check(sub, v)
if arg_type_check is not None:
bad_args[k] = arg_type_check
if bad_args:
return KeyValueError(target_cls, bad_args, obj)
# generate an instance of Constructor where the key's in the literal_dict
# are removed from the parameters attribute
for k in literal_dict:
if k not in obj:
constructor.parameters.pop(k)
return orchestrator.type_check(constructor, obj)
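# --- Illustrative sketch (not from the adorn source; names are hypothetical) ---
# Rough shape of a constructor parameter annotated with DependentTypeCheck: the
# wrapped Complex subclass is type checked with extra state pulled from the
# enclosing constructor before anything is instantiated, e.g.
#
#     encoder: DependentTypeCheck[Encoder, Literal[{"input_dim": "embedder.dim"}]]
#
# would inject the constructor's ``embedder.dim`` value as the ``input_dim``
# argument of the nested ``encoder`` config. DependentFromObj (below) has the
# same shape but resolves the requested state from already-instantiated values.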
@ParameterValue.register("dependent_from_obj")
class DependentFromObj(Generic[_T, DictStrStr], Dependent):
"""Parameter that requires local state that is known after instantiation
The first argument must be a :class:`~adorn.unit.complex.Complex` that requires
state, known after construction, from the constructor. The second argument is
a ``typing.Literal[Dict[str, str]]`` whose keys specify the names of the
dependent parameters and whose values name the state needed from the constructor.
"""
@classmethod
def _contains(cls, obj: Parameter, orchestrator: "Orchestrator") -> bool:
"""Check if the ``obj`` is an instance of ``DependentFromObj``
Args:
obj (Parameter): potentially an instance of
``DependentFromObj``
orchestrator (Orchestrator): container of all types, typically
used to recurse nested types
Returns:
bool: if ``True``, ``obj`` is an instance of
``DependentFromObj``
"""
if obj.origin != DependentFromObj:
return False
return (orchestrator.contains(obj.args[0])) and issubclass(obj.args[0], Complex)
@classmethod
def _type_check(
cls, target_cls: Parameter, orchestrator: "Orchestrator", obj: Dict[Any, Any]
) -> Optional[TypeCheckError]:
"""Type check a parameter, using arguments from ``obj``
Any local state that has been specified in the second argument to
``DependentFromObj`` will not be checked, because nothing has been
instantiated, yet.
Exceptions:
- see :class:`~adorn.unit.parameter_value.Dependent.perfunctory_type_check`
Args:
target_cls (Parameter): the requested type and relevant state
orchestrator (Orchestrator): container of all types, typically used
to recurse down nested types
obj (Any): an instance, containing the arguments for
:attr:`~adorn.data.parameter.Parameter.cls`
Returns:
Optional[TypeCheckError]: ``TypeCheckError``, if the state and arguments
can't generate an instance of :attr:`~adorn.data.parameter.Parameter.cls`,
otherwise ``None``
""" # noqa: B950, RST30
perfunctory_type_check_output = cls.perfunctory_type_check(
target_cls=target_cls,
orchestrator=orchestrator,
obj=obj,
dependent_from_obj=True,
)
if perfunctory_type_check_output is not None:
return perfunctory_type_check_output
literal_dict = target_cls.args[1].__args__[0]
constructor = cls.get_constructor(target_cls=target_cls, obj=obj)
# generate an instance of Constructor where the key's in the literal_dict
# are removed from the parameters attribute
for k in literal_dict:
if k not in obj:
constructor.parameters.pop(k)
return orchestrator.type_check(constructor, obj)
_DependentT = TypeVar("_DependentT", bound=Dependent)
@ParameterValue.register("dependent_union")
class DependentUnion(Generic[_DependentT, _T], Dependent):
"""Similar to ``typing.Union``, but only accepts two arguments, where the zeroth argument must be :class:`~adorn.unit.parameter_value.Dependent`""" # noqa: B950
@staticmethod
def get_parameter_subtype(target_cls: Parameter, arg: Type) -> Parameter:
"""Generate an Parameter where we update the ``cls`` arg with a type option.
Args:
target_cls (Parameter): state and ``DependentUnion`` request
arg (Type): a type option specified within ``DependentUnion``
Returns:
Parameter: state with :attr:`~adorn.data.parameter.Parameter.cls`
argument specifying a type option specified in ``DependentUnion``
""" # noqa: RST304
return Parameter(
cls=arg,
parent=target_cls.parent,
local_state=target_cls.local_state,
parameter_name=target_cls.parameter_name,
)
@classmethod
def _contains(cls, obj: Parameter, orchestrator: "Orchestrator") -> bool:
"""Check if the ``obj`` is an instance of ``DependentUnion``
Args:
obj (Parameter): potentially an instance of
``DependentUnion``
orchestrator (Orchestrator): container of all types, typically
used to recurse nested types
Returns:
bool: if ``True``, ``obj`` is an instance of
``DependentUnion``
"""
if obj.origin != DependentUnion:
return False
origin = getattr(obj.args[0], "__origin__", None)
if origin is None or not issubclass(origin, Dependent):
return False
return all(
orchestrator.contains(
cls.get_parameter_subtype(target_cls=obj, arg=obj.args[i])
)
for i in range(2)
)
@classmethod
def get_parameter(
cls, target_cls: Parameter, orchestrator: "Orchestrator", obj: Dict[Any, Any]
) -> Union[Parameter, KeyValueError]:
"""Type Check the given ``obj`` against all potential types
Args:
target_cls (Parameter): state and list of potential types ``obj``
could be
orchestrator (Orchestrator): container of all types, typically
used to recurse nested types
obj (Dict[Any, Any]): arguments for one of the listed types
Returns:
Union[Parameter, KeyValueError]: either a ``Parameter`` which wraps the
Hello, i'm <NAME>, studying MCA 2years at M.S.Ramaiah Institute Of Technology, Bangalore <eom>
[14:30] <kushal> singh123, you spoke early
[14:30] <kushal> next
[14:30] <sayan> singh123: wait for your turn
[14:30] <kushal> next
[14:30] <fihae> Aligarh Muslim University ,Aligarh ,India
[14:30] <kushal> fihae, Thanks, see, now we all know about your college.
[14:30] <kushal> and University.
[14:31] <kushal> next
[14:31] <abhinavshirur[m]> Hello everyone, I am <NAME>, pursuing Information Technology in Walchand College of Engineering Sangli :) <eof>
[14:31] <kushal> next.
[14:31] <kushal> next
[14:31] <kushal> next
[14:31] <deepika> hello everyone , deepika upadhyay 2nd year student in jabalpur engineering college, (m.p.),a nebie in open source world ! <eof>
[14:31] <avik> Hi! Iam Avik (as the nick says).
[14:31] <avik> I'm from Kolkata, India!
[14:31] <avik> I'm a student of CSE BTech course of Narula Institute Of Technology.
[14:31] <avik> Its my 2nd year here! Last year I messed up mid-way!
[14:31] <avik> This year, I am determined to complete all the way to commiting in my fist project!
[14:31] <sitlanigaurav[m]> Hello #dgplug I'm <NAME> , Final year comp science student from SCOE, Pune and a FOSS enthusiast. Worked on projects using python with startups.
[14:31] <avik> <eof>
[14:32] <kushal> sitlanigaurav[m], Does your college syllabus says that you are studying comp science? It is a new branch it seems.
[14:32] <bhawana> hii everyone!! I am bhawana, final year B.tech student at IP University, delhi.
[14:32] <sayan> Don't wait for your turn to write the intro. Please write your intro now and just wait for your turn to press `Enter`
[14:32] <kushal> Maybe about compressing technologies.
[14:32] <kushal> like food items.
[14:32] <bhargav> !
[14:32] <sitlanigaurav[m]> kushal: Computer Engineering
[14:32] <wrik003> kushal: lol
[14:32] <kushal> sitlanigaurav[m], yes, so type full.
[14:32] <kushal> next
[14:33] <dhairyya> Hello, I am <NAME>. I am 3rd year CSE undergraduate in Institute Of Engineering & Management, Kolkata<eof>
[14:33] <kushal> next
[14:33] <anuGupta> Myself anu <NAME>, 2nd year Computer science and engineering student at academy of technology, west bengal. I am learning Python nowadays<eof>
[14:33] <kushal> dhairyya, you spoke early.
[14:33] <dhairyya> I am sorry <eof>
[14:33] <neer> !
[14:33] <sayan> next
[14:33] <kushal> Hello everyone, I am <NAME>, part of dgplug, I will also learn along the way with all of you.
[14:33] <RatanShreshtha> Hello, I am <NAME>, I am currently working in Reliance Jio <eof>
[14:34] <kushal> I am also an admin in this channel. <eof>
[14:34] <kushal> next
[14:34] <kushal> RatanShreshtha, you also spoke early.
[14:34] <kushal> next
[14:34] <pr97> Hello everyone, I am Priyanka, second year CSE student, here to learn from you all.
[14:34] <kushal> next
[14:34] <iKshitij> Hello Everyone, I am Kshitij, Computer Science Undergrad 2nd year, Dr BC Roy Engineering College Durgapur. <eom>
[14:35] <kushal> next
[14:35] <kushal> next
[14:35] <cran-cg> Hello everyone I am <NAME>, a recent graduate from Zakir Husain College Of Engineeering And Technology, Aligarh Muslim University. I am here to learn and thus contribute better to the open source society
[14:35] <RatanShreshtha> I am sorry <eof>
[14:35] <cran-cg> <eof>
[14:35] <geekodour08> btech student, CSE 4th semester student from Girijananda Chowdhury College, Assam. I like learning new things.
[14:35] <sayan> nextnext
[14:35] <sayan> next
[14:35] <kushal> RatanShreshtha, no problem for nowl.
[14:35] <skarpy> Hello, I am <NAME>, 3rd year, ECE, D.r. BC Roy engineering college, I am from durgapur. :) <eof>
[14:35] <kushal> * now
[14:35] <bhavin192> geekodour08, name
[14:35] <bhavin192> ?
[14:35] <kushal> next
[14:36] <shivamA1> I am <NAME>, last year BE (IT) student from NBN sinhgad institutes, vadgaon,pune, i am an active attendie of python pune meetup group .
[14:36] <kushal> next
[14:36] <geekodour08> bhavin192: <NAME>
[14:36] <kushal> next
[14:36] <santoshShaw[m]> <NAME> . a CSE 3rd year @Academy of technology. Have a lot of passion towards programming . C is my favorite and I early look forward to contribute to open source.<eof>
[14:36] <kushal> next
[14:36] <shankarj67> Hello Everyone, I have completed my BTech CSE from Asansol Engineering College and got placed in bangalore based startup name Skillspeed and I will be joining from 7th of july.
[14:36] <kushal> shankarj67, Congratulations :)
[14:36] <kushal> next
[14:36] <RJ722> Hello, I am <NAME>! I am a freshman year Electronics Engineering Student at Zakir Hussain College of Engineering and Technology.
[14:36] <RJ722> I have been selected with coala for Google Summer of Code this year <eof>
[14:36] <saikat195> I am <NAME>, student, pusuing B.Tech CSE Freshman Year. Willing to learn, and contribute to FOSS.
[14:37] <kushal> next
[14:37] <kushal> next
[14:37] <apoorv> Hi, I am <NAME>, I am a 2nd year btech student of computer science from Jaypee Institute of Information Technology noida sector 62<eof>
[14:37] <kushal> saikat195, you also spoke early
[14:37] <kushal> next
[14:37] <ravindra> Hi,My name is Ravindra I have contributed to an FOSS accounting software GNUKhata as an intern also have some experience of customizing python based ERP system ERPNext.Want to contribute to more FOSS projects<EOF>
[14:37] <anupamas> hello! i am <NAME>. I am 12th passed out from science stream and looking forward to pursue engineering.<eof>
[14:37] <kushal> next
[14:37] <shankarj67> kushal, thanks a lot.
[14:37] <kushal> anupamas, you are early :)
[14:37] <kushal> next
[14:38] <meamitk> Hello Everyone, my self <NAME>, system administrator from Mumbai. :)
[14:38] <kushal> next
[14:38] <bhavin192> ravindra, GNUKhata is great :)
[14:38] <vikram> Hey! I am <NAME> , B.tech 1st year Computer Engineering undergrad from Zakir Hussain College of Engineering and Technology, Aligarh Muslim University.
[14:38] <sayan> next
[14:38] <kushal> vikram, means we have at least 3 people from your colleg3e
[14:38] <sayan> next
[14:38] <kushal> * college
[14:38] <kushal> next
[14:38] <zebak_> Hey! I am <NAME>, a 1st year student of Electronics Engineering at Zakir Hussain College of Engineering and Technology, Aligarh Muslim University. <eof>
[14:38] <mdbk_> Hello, I'm <NAME>, I started learning programming few months ago, learnt about dgplug from a friend
[14:38] <sapahia> Hello Everyone,I am Rishav ,Computer Science-Senior Year, from Jaypee University Of Information Technology,Shimla.Greetings to Everyone from Shimla.<eom>
[14:38] <kushal> zebak_, and you are the fourth.
[14:38] <kushal> next
[14:38] <niranjana> Hello, I am Niranjana, recently finished B.E, Computer Engineering student from Cummins College of Engineering, Pune. <eom>
[14:38] <vikram> yeas!
[14:39] <sayan> next
[14:39] <messifc> hello everyone :) I am from Noida and a final year computer science student. I like to stay anonymous while doing something online, but always open to make new friends :) <eof>
[14:39] <kushal> next
[14:39] <sayan> next
[14:39] <kushal> next
[14:39] <ishank_g> Hi.. I am <NAME>,just completed my Btech(ECE) from Aligarh Muslim University, Aligarh,UP.<eof>
[14:39] <jiteshpabla> Hi, I am <NAME> (i know my username is very creative /s), and I study computer science engineering at JIIT noida. Looking forward to working with this community, as I get very lazy when it comes to working alone without accountability.
[14:39] <kushal> next
[14:39] <Padfoot7> Hi I am <NAME>. I am a 1st year B.Tech student from Zakir Husain college of Engineering & Technology(ZHCET) ,Aligarh Muslim Unversity (AMU), Aligarh.
[14:40] <kvy> !
[14:40] <kushal> Padfoot7, you guys are wining in the highest number of people from one college category this year :)
[14:40] <kushal> next
[14:40] <jasonbraganza> Howdy folks, I’m Jason, 38, devoted husband, newly balding, love reading, IT consultant to small businesses, from Mumbai, and looking to switch careers to programming <eom>
[14:40] <kushal> next
[14:40] <Padfoot7> True That:)
[14:41] <kushal> next
[14:41] <ndakota> I'm Abhiram, Senior Software Engineer at Blue Jeans Network and one of the organizers of BangPypers (Bangalore Python Meetup Chapter). I'm here to learn things and to help out in any topic I can :) . <eom>
[14:41] <rimshakhan> Hello people! This is <NAME>. B.tech 1st year Computer Engineering undergrad from Zakir Hussain College of Engineering and Technology, Aligarh Muslim University. Aligarh.
[14:41] <bhawana> hello everyone!! I m <NAME>,CSE final year student at IP university.
[14:41] <kushal> next
[14:41] <kushal> rimshakhan, you also spoke early :)
[14:41] <kushal> next
[14:41] <nisha_> hello everyone ,I am <NAME> a economics student.And completed my graduation this year.
[14:41] <kushal> nisha_, welcome to #dgplug
[14:42] <kushal> nisha_, we may get a Economics professor from Japan to teach us a few things here, so it will be fun.
[14:42] <kushal> :)
[14:42] <nisha_> Kushal,thank you
[14:42] <kushal> * an
[14:42] <kushal> next
[14:42] <sayan> Hi, I am <NAME>. I work as a Fedora Infrastructure Engineer in the Fedora Engineering Team. I work on quite some open source projects. I did my dgplug training in the summers of 2010 and I've been stuck here since then learning stuffs from you people.
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import time
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from common import constants
from common.waterfall import buildbucket_client
from common.waterfall import failure_type
from common.waterfall import try_job_error
from common.waterfall.buildbucket_client import BuildbucketBuild
from gae_libs import appengine_util
from gae_libs.http.http_client_appengine import HttpClientAppengine
from gae_libs.pipeline_wrapper import BasePipeline
from gae_libs.pipeline_wrapper import pipeline
from libs import analysis_status
from libs import time_util
from model.flake.flake_try_job_data import FlakeTryJobData
from model.wf_try_job_data import WfTryJobData
from waterfall import buildbot
from waterfall import monitoring
from waterfall import swarming_util
from waterfall import waterfall_config
def _GetError(buildbucket_response, buildbucket_error, timed_out, no_report):
"""Determines whether or not a try job error occurred.
Args:
buildbucket_response: A dict of the json response from buildbucket.
buildbucket_error: A BuildBucketError object returned from the call to
buildbucket_client.GetTryJobs()
timed_out: A bool whether or not Findit abandoned monitoring the try job.
no_report: A bool whether we get result report.
Returns:
A tuple containing an error dict and number representing an error code, or
(None, None) if no error was determined to have occurred.
"""
if buildbucket_error:
return (
{
'message': buildbucket_error.message,
'reason': buildbucket_error.reason
},
try_job_error.BUILDBUCKET_REQUEST_ERROR)
if timed_out:
return (
{
'message': 'Try job monitoring was abandoned.',
'reason': 'Timeout after %s hours' % (
waterfall_config.GetTryJobSettings().get('job_timeout_hours'))
},
try_job_error.TIMEOUT)
if buildbucket_response:
# Check buildbucket_response.
buildbucket_failure_reason = buildbucket_response.get('failure_reason')
if buildbucket_failure_reason == 'BUILD_FAILURE':
# Generic buildbucket-reported error which can occur if an exception is
# thrown, disk is full, compile fails during a test try job, etc.
return (
{
'message': 'Buildbucket reported a general error.',
'reason': MonitorTryJobPipeline.UNKNOWN
},
try_job_error.INFRA_FAILURE
)
elif buildbucket_failure_reason == 'INFRA_FAILURE':
return (
{
'message': ('Try job encountered an infra issue during '
'execution.'),
'reason': MonitorTryJobPipeline.UNKNOWN
},
try_job_error.INFRA_FAILURE
)
elif buildbucket_failure_reason:
return (
{
'message': buildbucket_failure_reason,
'reason': MonitorTryJobPipeline.UNKNOWN
},
try_job_error.UNKNOWN
)
# Check result_details_json for errors.
result_details_json = json.loads(
buildbucket_response.get('result_details_json', '{}')) or {}
error = result_details_json.get('error', {})
if error:
return (
{
'message': 'Buildbucket reported an error.',
'reason': error.get('message', MonitorTryJobPipeline.UNKNOWN)
},
try_job_error.CI_REPORTED_ERROR)
if no_report:
return (
{
'message': 'No result report was found.',
'reason': MonitorTryJobPipeline.UNKNOWN
},
try_job_error.UNKNOWN
)
return None, None
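# --- Illustrative sketch (not part of the original module) ---
# Shows how _GetError classifies a buildbucket response; the response dict
# below is made up.
def _ExampleGetErrorUsage():  # pragma: no cover
  fake_response = {
      'failure_reason': 'INFRA_FAILURE',
      'result_details_json': '{}',
  }
  error_dict, error_code = _GetError(
      fake_response, buildbucket_error=None, timed_out=False, no_report=False)
  # error_dict['message'] describes the infra issue;
  # error_code == try_job_error.INFRA_FAILURE.
  return error_dict, error_code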
def _OnTryJobError(try_job_type, error_dict,
master_name, builder_name): # pragma: no cover
monitoring.try_job_errors.increment(
{
'type': try_job_type,
'error': error_dict.get('message', 'unknown'),
'master_name': master_name,
'builder_name': builder_name
})
def _UpdateTryJobMetadata(try_job_data, try_job_type, buildbucket_build,
buildbucket_error, timed_out, report=None):
buildbucket_response = {}
if buildbucket_build:
try_job_data.request_time = (
try_job_data.request_time or
time_util.MicrosecondsToDatetime(buildbucket_build.request_time))
# If start_time hasn't been set, use request_time.
try_job_data.start_time = try_job_data.start_time or (
try_job_data.request_time)
try_job_data.end_time = time_util.MicrosecondsToDatetime(
buildbucket_build.end_time)
if try_job_type != failure_type.FLAKY_TEST: # pragma: no branch
if report:
try_job_data.number_of_commits_analyzed = len(
report.get('result', {}))
try_job_data.regression_range_size = report.get(
'metadata', {}).get('regression_range_size')
else:
try_job_data.number_of_commits_analyzed = 0
try_job_data.regression_range_size = None
try_job_data.try_job_url = buildbucket_build.url
buildbucket_response = buildbucket_build.response
try_job_data.last_buildbucket_response = buildbucket_response
# report should only be {} when an error happens while getting the report after
# the try job completed. If the try job is still running, report will be set to None.
error_dict, error_code = _GetError(
buildbucket_response, buildbucket_error, timed_out, report == {})
if error_dict:
try_job_data.error = error_dict
try_job_data.error_code = error_code
_OnTryJobError(try_job_type, error_dict, try_job_data.master_name,
try_job_data.builder_name)
try_job_data.put()
def _DictsAreEqual(dict_1, dict_2, exclude_keys=None):
if dict_1 == dict_2:
return True
if dict_1 is None or dict_2 is None:
return False
if exclude_keys is None:
exclude_keys = []
for key, value in dict_1.iteritems():
if key not in exclude_keys and (key not in dict_2 or dict_2[key] != value):
return False
for key, value in dict_2.iteritems():
if key not in exclude_keys and (key not in dict_1 or dict_1[key] != value):
return False
return True
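# --- Illustrative sketch (not part of the original module) ---
# Compares two buildbucket responses while ignoring the volatile 'utcnow_ts'
# field, mirroring how _UpdateLastBuildbucketResponse uses _DictsAreEqual.
# The dicts below are made up.
def _ExampleDictsAreEqualUsage():  # pragma: no cover
  old = {'status': 'STARTED', 'utcnow_ts': 100}
  new = {'status': 'STARTED', 'utcnow_ts': 200}
  return _DictsAreEqual(old, new, exclude_keys=['utcnow_ts'])  # True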
def _UpdateLastBuildbucketResponse(try_job_data, build):
if not build or not build.response: # pragma: no cover
return
if not _DictsAreEqual(try_job_data.last_buildbucket_response,
build.response, exclude_keys=['utcnow_ts']):
try_job_data.last_buildbucket_response = build.response
try_job_data.put()
class MonitorTryJobPipeline(BasePipeline):
"""A pipeline for monitoring a try job and recording results when it's done.
The result will be stored to compile_results or test_results according to
which type of build failure we are running try job for.
"""
async = True
UNKNOWN = 'UNKNOWN'
@ndb.transactional
def _UpdateTryJobResult(self, urlsafe_try_job_key, try_job_type, try_job_id,
try_job_url, status, result_content=None):
"""Updates try job result based on response try job status and result."""
result = {
'report': result_content,
'url': try_job_url,
'try_job_id': try_job_id,
}
try_job = ndb.Key(urlsafe=urlsafe_try_job_key).get()
if try_job_type == failure_type.FLAKY_TEST:
result_to_update = try_job.flake_results
elif try_job_type == failure_type.COMPILE:
result_to_update = try_job.compile_results
else:
result_to_update = try_job.test_results
if result_to_update and result_to_update[-1]['try_job_id'] == try_job_id:
result_to_update[-1].update(result)
else: # pragma: no cover
# Normally result for current try job should have been saved in
# schedule_try_job_pipeline, so this branch shouldn't be reached.
result_to_update.append(result)
if status == BuildbucketBuild.STARTED:
try_job.status = analysis_status.RUNNING
try_job.put()
return result_to_update
def __init__(self, *args, **kwargs):
super(MonitorTryJobPipeline, self).__init__(*args, **kwargs)
# This dictionary needs to be serializable so that the tests can simulate
# callbacks to this pipeline.
self.last_params = {}
# Arguments number differs from overridden method - pylint: disable=W0221
def run(self, urlsafe_try_job_key, try_job_type, try_job_id):
"""Monitors try job until it's complete.
This method stores parameters in self so that the callback method can
perform appropriate checks.
callback(), defined below is expected to run when a pubsub notification from
the buildbucket service is sent to this application indicating that the job
has changed status.
callback() is also run on two occasions separate from pubsub:
- at the end of this run method (i.e. when creating this pipeline)
- after timeout_hours have passed without the job completing.
Args:
urlsafe_try_job_key (str): The urlsafe key for the corresponding try job
entity.
try_job_type (str): The type of the try job.
try_job_id (str): The try job id to query buildbucket with.
"""
if not try_job_id:
self.complete()
return
if try_job_type == failure_type.FLAKY_TEST:
try_job_kind = FlakeTryJobData
else:
try_job_kind = WfTryJobData
try_job_data = try_job_kind.Get(try_job_id)
if not try_job_data:
logging.error('%(kind)s entity does not exist for id %(id)s: creating it',
{'kind': try_job_kind, 'id': try_job_id})
try_job_data = try_job_kind.Create(try_job_id)
try_job_data.try_job_key = ndb.Key(urlsafe=urlsafe_try_job_key)
# Check if callback url is already registered with the TryJobData entity to
# guarantee this run method is idempotent when called again with the same
# params.
if try_job_data.callback_url and (
self.pipeline_id in try_job_data.callback_url):
return
timeout_hours = waterfall_config.GetTryJobSettings().get(
'job_timeout_hours')
default_pipeline_wait_seconds = waterfall_config.GetTryJobSettings(
).get('server_query_interval_seconds')
max_error_times = waterfall_config.GetTryJobSettings().get(
'allowed_response_error_times')
# TODO(chanli): Make sure total wait time equals to timeout_hours
# regardless of retries.
deadline = time.time() + timeout_hours * 60 * 60
already_set_started = False
backoff_time = default_pipeline_wait_seconds
error_count = 0
self.last_params = {
'try_job_id': try_job_id,
'try_job_type': try_job_type,
'urlsafe_try_job_key': urlsafe_try_job_key,
'deadline': deadline,
'already_set_started': already_set_started,
'error_count': error_count,
'max_error_times': max_error_times,
'default_pipeline_wait_seconds': default_pipeline_wait_seconds,
'timeout_hours': timeout_hours,
'backoff_time': backoff_time,
}
callback_url = self.get_callback_url(callback_params=json.dumps(
self.last_params))
try_job_data.callback_url = callback_url
try_job_data.callback_target = appengine_util.GetTargetNameForModule(
constants.WATERFALL_BACKEND)
try_job_data.put()
# Guarantee one callback 10 minutes after the deadline to clean up even if
# buildbucket fails to call us back.
self.delay_callback(
(timeout_hours * 60 + 10) * 60,
self.last_params,
name=try_job_id + '_cleanup_task')
# Run immediately in case the job already went from scheduled to started.
self.callback(callback_params=self.last_params)
def finalized(self):
try:
try_job_id = self.kwargs.get('try_job_id')
if not try_job_id and len(self.args) > 2:
try_job_id = self.args[2]
if try_job_id:
taskqueue.Queue(
constants.WATERFALL_ANALYSIS_QUEUE).delete_tasks_by_name([
try_job_id + '_cleanup_task'])
else:
logging.error('Did not receive a try_job_id at construction.')
except taskqueue.BadTaskStateError, e: # pragma: no cover
logging.debug('Could not delete cleanup task: %s', e.message)
return super(MonitorTryJobPipeline, self).finalized()
def delay_callback(self, countdown, callback_params, name=None):
target = appengine_util.GetTargetNameForModule(constants.WATERFALL_BACKEND)
task = self.get_callback_task(
countdown=countdown, target=target,
params={'callback_params': json.dumps(callback_params)},
name=name)
task.add(queue_name=constants.WATERFALL_ANALYSIS_QUEUE)
# Arguments number differs from overridden method - pylint: disable=W0221
def callback(self, *args, **kwargs):
"""Transitional callback.
This temporary hack should accept callbacks in the old format
as well as the new one.
"""
assert not args
if 'callback_params' in kwargs:
return self._callback(**kwargs)
return self._callback(callback_params=kwargs)
def _callback(self, callback_params, pipeline_id=None):
"""Updates the TryJobData entities with status from buildbucket."""
# callback_params may have been serialized if the callback was converted to
# a URL.
if isinstance(callback_params, basestring):
callback_params = json.loads(callback_params)
self.last_params = callback_params
_ = pipeline_id # We do nothing with this id.
try_job_id = callback_params['try_job_id']
assert try_job_id
urlsafe_try_job_key = callback_params['urlsafe_try_job_key']
try_job_type = callback_params['try_job_type']
deadline = callback_params['deadline']
already_set_started = callback_params['already_set_started']
error_count = callback_params['error_count']
max_error_times = callback_params['max_error_times']
default_pipeline_wait_seconds = callback_params[
'default_pipeline_wait_seconds']
timeout_hours = callback_params['timeout_hours']
backoff_time = callback_params['backoff_time']
if try_job_type == failure_type.FLAKY_TEST:
try_job_data = FlakeTryJobData.Get(try_job_id)
else:
try_job_data = WfTryJobData.Get(try_job_id)
error, build = buildbucket_client.GetTryJobs([try_job_id])[0]
if error:
if error_count < max_error_times:
error_count += 1
self.delay_callback(
backoff_time,
callback_params={
seealso::
- `ISO 18431-4 Mechanical vibration and shock — Signal processing — Part 4: Shock-response spectrum analysis`
Explicit implementations of digital filter coefficients for shock spectra.
"""
A = omega*T/(2.*Q)
B = omega*T*np.sqrt(1. - 1/(4.*(Q**2.)))
C = (T*omega)
q = (1./(2.*(Q**2.)) - 1.)/(np.sqrt(1. - 1./(4.*(Q**2.))))
b = (
((1 - np.exp(-A)*np.cos(B))/Q - q*np.exp(-A)*np.sin(B) - omega*T)/C,
(2*np.exp(-A)*np.cos(B)*omega*T - (1 - np.exp(-2.*A))/Q + 2.*q*np.exp(-A)*np.sin(B))/C,
(-np.exp(-2.*A)*(omega*T + 1./Q) + np.exp(-A)*np.cos(B)/Q - q*np.exp(-A)*np.sin(B))/C,
)
a = (
1.,
-2.*np.exp(-A)*np.cos(B),
np.exp(-2.*A),
)
return b, a
def relative_displacement_static(accel: pd.DataFrame, omega: float, damp: float = 0.0) -> pd.DataFrame:
"""
Calculate the relative displacement expressed as equivalent static
acceleration for a SDOF system.
The relative displacement as static acceleration follows the transfer
function:
`H(s) = L{ω²z(t)}(s) / L{y"(t)}(s) = (ω²/s²)(Z(s)/Y(s))`
for the PDE:
`z" + (2ζω)z' + (ω²)z = -y"`
:param accel: the absolute acceleration y"
:param omega: the natural frequency ω of the SDOF system
:param damp: the damping coefficient ζ of the SDOF system
:return: the relative displacement of the SDOF system expressed as
equivalent static acceleration
.. seealso::
- `Pseudo Velocity Shock Spectrum Rules For Analysis Of Mechanical Shock, <NAME> <https://info.endaq.com/hubfs/pvsrs_rules.pdf>`_
- `SciPy transfer functions <https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.TransferFunction.html>`_
Documentation for the transfer function class used to characterize the
relative displacement calculation.
- `SciPy biquad filter <https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html>`__
Documentation for the biquad function used to implement the transfer
function.
- `ISO 18431-4 Mechanical vibration and shock — Signal processing — Part 4: Shock-response spectrum analysis`
Explicit implementations of digital filter coefficients for shock spectra.
"""
T = utils.sample_spacing(accel)
Q = 1./(2.*damp)
return accel.apply(
functools.partial(
scipy.signal.lfilter,
*_relative_displacement_static_coefficients(omega, Q, T),
axis=0,
),
raw=True,
)
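# --- Illustrative usage sketch (not part of the original module) ---
# Applies the filter above to a made-up single-axis acceleration signal; the
# sampling rate, natural frequency, and damping are hypothetical.
def _example_relative_displacement_static() -> pd.DataFrame:
    fs = 1000.0  # Hz
    t = np.arange(0, 1, 1 / fs)
    accel = pd.DataFrame(
        {"x": np.sin(2 * np.pi * 50 * t)},
        index=pd.Series(t, name="time (s)"),
    )
    # SDOF system with a 100 Hz natural frequency and 5% damping
    return relative_displacement_static(accel, omega=2 * np.pi * 100, damp=0.05)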
def shock_spectrum(
accel: pd.DataFrame,
freqs: np.ndarray,
damp: float = 0.0,
mode: typing.Literal["srs", "pvss"] = "srs",
two_sided: bool = False,
aggregate_axes: bool = False,
) -> pd.DataFrame:
"""
Calculate the shock spectrum of an acceleration signal.
:param accel: the absolute acceleration `y"`
:param freqs: the natural frequencies across which to calculate the spectrum
:param damp: the damping coefficient `ζ`, related to the Q-factor by
`ζ = 1/(2Q)`; defaults to 0
:param mode: the type of spectrum to calculate:
- `'srs'` (default) specifies the Shock Response Spectrum (SRS)
- `'pvss'` specifies the Pseudo-Velocity Shock Spectrum (PVSS)
:param two_sided: whether to return for each frequency:
both the maximum negative and positive shocks (`True`),
or simply the maximum absolute shock (`False`; default)
:param aggregate_axes: whether to calculate the column-wise resultant (`True`)
or calculate spectra along each column independently (`False`; default)
:return: the shock spectrum
.. seealso::
- `Pseudo Velocity Shock Spectrum Rules For Analysis Of Mechanical Shock, <NAME> <https://info.endaq.com/hubfs/pvsrs_rules.pdf>`_
- `An Introduction To The Shock Response Spectrum, <NAME>, 9 July 2012 <http://www.vibrationdata.com/tutorials2/srs_intr.pdf>`_
- `SciPy transfer functions <https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.TransferFunction.html>`_
Documentation for the transfer function class used to characterize the
relative displacement calculation.
- `SciPy biquad filter <https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html>`__
Documentation for the biquad function used to implement the transfer
function.
"""
if two_sided and aggregate_axes:
raise ValueError("cannot enable both options `two_sided` and `aggregate_axes`")
freqs = np.asarray(freqs)
if freqs.ndim != 1:
raise ValueError("target frequencies must be in a 1D-array")
omega = 2 * np.pi * freqs
if mode == "srs":
make_coeffs = _absolute_acceleration_coefficients
elif mode == "pvss":
make_coeffs = _pseudo_velocity_coefficients
else:
raise ValueError(f"invalid spectrum mode {mode:r}")
results = np.empty(
(2,) + freqs.shape + ((1,) if aggregate_axes else accel.shape[1:]),
dtype=np.float64,
)
dt = utils.sample_spacing(accel)
T_padding = 1 / (
freqs.min() * np.sqrt(1 - damp ** 2)
) # uses lowest damped frequency
if not two_sided:
T_padding /= 2
zi = np.zeros((2,) + accel.shape[1:])
zero_padding = np.zeros((int(T_padding // dt) + 1,) + accel.shape[1:])
Q = 1./(2.*damp)
for i_nd in np.ndindex(freqs.shape[0]):
rd, zf = scipy.signal.lfilter(
*make_coeffs(omega[i_nd], Q, dt),
accel.to_numpy(),
zi=zi,
axis=0,
)
rd_padding, _ = scipy.signal.lfilter(
*make_coeffs(omega[i_nd], Q, dt), zero_padding, zi=zf, axis=0
)
if aggregate_axes:
rd = L2_norm(rd, axis=-1, keepdims=True)
rd_padding = L2_norm(rd_padding, axis=-1, keepdims=True)
results[(0,) + i_nd] = -np.minimum(rd.min(axis=0), rd_padding.min(axis=0))
results[(1,) + i_nd] = np.maximum(rd.max(axis=0), rd_padding.max(axis=0))
if aggregate_axes or not two_sided:
return pd.DataFrame(
np.maximum(results[0], results[1]),
index=pd.Series(freqs, name="frequency (Hz)"),
columns=(["resultant"] if aggregate_axes else accel.columns),
)
return namedtuple("PseudoVelocityResults", "neg pos")(
*(
pd.DataFrame(
r, index=pd.Series(freqs, name="frequency (Hz)"), columns=accel.columns
)
for r in results
)
)
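# Example usage (comments only; a sketch assuming the companion coefficient
# functions `_absolute_acceleration_coefficients` / `_pseudo_velocity_coefficients`
# referenced above are defined elsewhere in this module; the pulse below is
# illustrative):
#
#   import numpy as np
#   import pandas as pd
#
#   fs = 10_000.0
#   t = np.arange(0.0, 0.1, 1.0 / fs)
#   pulse = np.where(t < 0.01, 100.0 * np.sin(np.pi * t / 0.01), 0.0)  # 10 ms half-sine
#   df_accel = pd.DataFrame({"x": pulse}, index=pd.Series(t, name="time"))
#
#   freqs = np.logspace(1, 3, 30)                # 10 Hz .. 1 kHz
#   srs = shock_spectrum(df_accel, freqs, damp=0.05, mode="srs")
#   pvss_neg, pvss_pos = shock_spectrum(df_accel, freqs, damp=0.05, mode="pvss", two_sided=True)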
@dataclass
class HalfSineWavePulse:
"""
The output data type for :py:func:`enveloping_half_sine`.
The significant data members are `amplitude` and `duration`, which can
simply be unpacked as if from a plain tuple:
.. testsetup::
import pandas as pd
df_pvss = pd.DataFrame([1, 1], index=[200, 400])
from endaq.calc.shock import enveloping_half_sine
.. testcode::
ampl, T = enveloping_half_sine(df_pvss)
However, users can also elect to use the other methods of this class to
generate other kinds of outputs.
.. note:: This class is not intended to be instantiated manually.
"""
amplitude: pd.Series
duration: pd.Series
def __iter__(self):
return iter((self.amplitude, self.duration))
def to_time_series(
self,
tstart: Optional[float] = None,
tstop: Optional[float] = None,
dt: Optional[float] = None,
tpulse: Optional[float] = None,
) -> pd.DataFrame:
"""
Generate a time-series of the half-sine pulse.
:param tstart: the starting time of the resulting waveform; if `None`
(default), the range starts at `tpulse`
:param tstop: the ending time of the resulting waveform; if `None`
(default), the range ends at `tpulse + duration`
:param dt: the sampling period of the resulting waveform; defaults to
1/20th of the pulse duration
:param tpulse: the starting time of the pulse within the resulting
waveform; if `None` (default), the pulse starts at either:
- ``tstart``, if provided
- ``tstop - self.duration.max())``, if `tstop` is provided
- ``0.0`` otherwise
:return: a time-series of the half-sine pulse
"""
if dt is None:
dt = self.duration.min() / 20
if dt > self.duration.min() / 8:
warnings.warn(
f"the sampling period {dt} is large relative to the pulse duration"
f" {self.duration.min()}; the waveform may not accurately represent"
f" the half-sine pulse's shock intensity"
)
default_start = 0.0
if tstop is not None:
default_start = tstop - self.duration.max()
if tpulse is None and tstart is None:
tpulse = tstart = default_start
elif tpulse is None:
tpulse = tstart
elif tstart is None:
tstart = tpulse
if tstop is None:
tstop = tpulse + self.duration.max()
if not (tstart <= tpulse <= tstop - self.duration.max()):
warnings.warn(
"half-sine pulse extends beyond the bounds of the time series"
)
t = np.arange(tstart, tstop, dt)
data = np.zeros((len(t), len(self.amplitude)), dtype=float)
t_data, ampl_data, T_data = np.broadcast_arrays(
t[..., None], self.amplitude.to_numpy(), self.duration.to_numpy()
)
t_mask = np.nonzero((t_data >= tpulse) & (t_data < tpulse + T_data))
        data[t_mask] = ampl_data[t_mask] * np.sin(
            # offset by tpulse so the half-sine starts at zero amplitude at the pulse onset
            np.pi * (t_data[t_mask] - tpulse) / T_data[t_mask]
        )
return pd.DataFrame(
data,
index=pd.Series(t, name="timestamp"),
columns=self.amplitude.index,
)
# def widened_duration(self, new_duration: float):
# pass
# def pseudo_velocity(self):
# pass
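# Example usage (comments only; a sketch assuming `df_pvss` is a PVSS DataFrame
# such as the one produced by shock_spectrum(..., mode="pvss") above, and that
# enveloping_half_sine below is fully implemented):
#
#   pulse = enveloping_half_sine(df_pvss, damp=0.05)
#   ampl, T = pulse                              # unpacks like a plain tuple
#   df_wave = pulse.to_time_series(tstart=0.0, dt=T.min() / 50)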
def enveloping_half_sine(
pvss: pd.DataFrame,
damp: float = 0.0,
) -> HalfSineWavePulse:
"""
Characterize a half-sine pulse whose PVSS envelopes the input.
:param pvss: the PVSS to envelope
:param damp: the damping factor used to generate the input PVSS
:return: a tuple of amplitudes and periods, each pair of which describes a
half-sine pulse
.. seealso::
`Pseudo Velocity Shock Spectrum Rules For Analysis Of Mechanical Shock, <NAME> <https://info.endaq.com/hubfs/pvsrs_rules.pdf>`_
"""
def amp_factor(damp):
"""
Calculate the PVSS amplitude attenuation on a half-sine pulse from the
damping coefficient.
The PVSS of a half-sine pulse differs based on the damping coefficient
used. While the high-frequency rolloff is relatively consistent, the
flat low-frequency amplitude is attenuated at higher damping values.
This function calculates this attenuation for a given damping
coefficient.
"""
# This calculates the PVSS value as ω->0. However, since it necessarily
# computes the maximum of a function *over time*, and ω is only found
# therein in the multiplicative factor (ωt), it is mathematically
# equivalent to compute this maximum for any arbitrary ω>0. Thus we
# choose ω=1 for convenience, w/o loss of generality.
a = np.exp(1j * np.arccos(-damp)) # = -damp + 1j * np.sqrt(1 - damp**2)
# From WolframAlpha: https://www.wolframalpha.com/input/?i=D%5BPower%5Be%2C%5C%2840%29-d+*t%5C%2841%29%5D+sin%5C%2840%29Sqrt%5B1-Power%5Bd%2C2%5D%5D*t%5C%2841%29%2Ct%5D+%3D+0&assumption=%22ListOrTimes%22+-%3E+%22Times%22&assumption=%7B%22C%22%2C+%22e%22%7D+-%3E+%7B%22NamedConstant%22%7D&assumption=%7B%22C%22%2C+%22d%22%7D+-%3E+%7B%22Variable%22%7D&assumption=%22UnitClash%22+-%3E+%7B%22d%22%2C+%7B%22Days%22%7D%7D
t_max = (2 / np.imag(a)) * np.arctan2(np.imag(a), 1 - np.real(a))
PVSS_max = (1 / np.imag(a)) * np.imag(np.exp(a * t_max))
return PVSS_max
max_pvss = pvss.max()
max_f_pvss = pvss.mul(pvss.index, axis=0).max()
return HalfSineWavePulse(
        amplitude=2 * np.pi *
from redbot.core.bot import Red
from redbot.core import checks
from discord.ext import commands
import discord
import os, sys
import base64
import time
import asyncio
from io import BytesIO
import datetime
from PIL import Image
import json
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import math
import pprint
import re
import random
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import ThreadPoolExecutor
import sqlite3
def text_to_code(user_input):
    """Split a "[name;CODE]"-style input into (code, template_name); a bare code is returned as-is with template_name=None."""
code = user_input
if user_input.find(";") != -1:
pos_start = user_input.find(";") + 1
end_bracket_pos = user_input.find("]")
if end_bracket_pos != -1:
code = user_input[pos_start:end_bracket_pos]
else:
code = user_input[pos_start:]
template_name = user_input[1:pos_start-1]
else:
template_name = None
return code, template_name
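# Example (comments only; the template code below is a made-up placeholder):
#
#   text_to_code("[Shock Axe;OQcRabcdefghijklmn]")   # -> ("OQcRabcdefghijklmn", "Shock Axe")
#   text_to_code("OQcRabcdefghijklmn")               # -> ("OQcRabcdefghijklmn", None)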
def decode_base64(code):
    """Base64-decode a template code, first padding it with 'A' characters to a multiple of 4."""
missing_padding = (4 - (len(code) % 4)) % 4
code += 'A'* (missing_padding)
return base64.b64decode(code)
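# Example (comments only): the 'A' padding lets codes whose length is not a
# multiple of 4 decode without raising a padding error:
#
#   decode_base64("QUJD")   # -> b'ABC'
#   decode_base64("QUJ")    # -> b'AB@' (padded to "QUJA" before decoding)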
def code_to_strflipped(code):
    """Decode a template code into its raw bit string, reversing the bit order within each 6-bit group."""
bytes_obj = decode_base64(code)
int_large = int.from_bytes(bytes_obj, byteorder='big')
binary_str = "00" + bin(int_large)[2:]
array = re.findall("......",binary_str)
i = 0
for group in array:
group = group[::-1]
array[i] = group
i += 1
strflipped = ''.join(array)
return strflipped
def strflipped_to_code(strflipped):
    """Inverse of code_to_strflipped: pad the bit string to 168 bits, re-reverse each 6-bit group and base64-encode it."""
strflipped = strflipped + "0"*(168 - len(strflipped))
array = re.findall("......",strflipped)
i = 0
for group in array:
group = group[::-1]
array[i] = group
i += 1
binary_str = "".join(array)
binary_str = binary_str[2:]
int_large = int(binary_str, 2)
bytes_obj = int_large.to_bytes((int_large.bit_length() // 8) + 1, byteorder='big')
base64_code = base64.b64encode(bytes_obj)
code = str(base64_code)[2:-1]
if code.endswith("AAAA"):
code = code[:-4]
return code
def code_to_build(code, original_code=None):
    """Parse a template code into a build dict holding professions, attributes and skills."""
build = {}
if original_code is None:
original_code = code
build["original_code"] = original_code
strflipped = code_to_strflipped(code)
build["code"] = code
build["template type"] = int(strflipped[0:4][::-1],2)
build["version number"] = int(strflipped[4:8][::-1],2)
build["professions"] = {}
build["professions"]["control"] = int(strflipped[8:10][::-1],2) # Number of encoded bits per profession code
professions_code_size = build["professions"]["control"] * 2 + 4
xpos = 10
ypos = xpos + professions_code_size
build["professions"]["primary"] = {}
build["professions"]["primary"]["id"] = int(strflipped[xpos:ypos][::-1],2)
build["professions"]["primary"]["name"] = prof_reference[str(build["professions"]["primary"]["id"])]
xpos = ypos
ypos = xpos + professions_code_size
build["professions"]["secondary"] = {}
build["professions"]["secondary"]["id"] = int(strflipped[xpos:ypos][::-1],2)
build["professions"]["secondary"]["name"] = prof_reference[str(build["professions"]["secondary"]["id"])]
build["attrbs"] = {}
xpos = ypos
ypos = xpos + 4
build["attrbs"]["count"] = int(strflipped[xpos:ypos][::-1],2)
xpos = ypos
ypos = xpos + 4
build["attrbs"]["control"] = int(strflipped[xpos:ypos][::-1],2) # Number of encoded bits per attribute id
attributes_code_size = build["attrbs"]["control"] + 4
for i in range(1, build["attrbs"]["count"]+1):
xpos = ypos
ypos = xpos + attributes_code_size
build["attrbs"][i] = {}
build["attrbs"][i]["id"] = int(strflipped[xpos:ypos][::-1],2)
build["attrbs"][i]["name"] = attrb_reference[str(build["attrbs"][i]["id"])]
xpos = ypos
ypos = xpos + 4
build["attrbs"][i]["points"] = int(strflipped[xpos:ypos][::-1],2)
xpos = ypos
ypos = xpos + 4
build["skills"] = {}
build["skills"]["control"] = int(strflipped[xpos:ypos][::-1],2) # Number of encoded bits per skill id
skills_code_size = build["skills"]["control"] + 8
for i in range(1, 8+1):
xpos = ypos
ypos = xpos + skills_code_size
build["skills"][i] = {}
build["skills"][i]["id"] = int(strflipped[xpos:ypos][::-1],2)
build["skills"][i]["name"] = skill_reference[str(build["skills"][i]["id"])]["Name"]
return build
def pad_binary(input, req_length):
length = len(input)
output = "0" * (req_length - length) + input
return output
def int_to_binary_str(int_value, req_length):
str_int = "{0:b}".format(int_value)
str_int = str_int.replace("0b","")
output = pad_binary((str_int), req_length)
output = output[::-1]
return output
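# Example (comments only):
#
#   pad_binary("101", 8)        # -> "00000101"
#   int_to_binary_str(5, 8)     # -> "10100000" (zero-padded, then bit-reversed to LSB-first order)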
def build_to_code(build):
    """Re-encode a build dict into a template code, recomputing the attribute and skill control fields first."""
# Update controls
# Attributes count
build["attrbs"]["count"] = len(build["attrbs"]) - 2
# Attributes control
attrb_ids = []
for i in range(1, len(build["attrbs"]) - 1):
attrb_ids.append(build["attrbs"][i]["id"])
if len(attrb_ids) > 0:
max_attrb_id = int(max(attrb_ids))
else:
max_attrb_id = 0
if max_attrb_id > 31:
build["attrbs"]["control"] = 2
elif max_attrb_id > 15:
build["attrbs"]["control"] = 1
else:
build["attrbs"]["control"] = 0
# Skills control
max_points = [255, 511, 1023, 2047]
skill_ids = []
for i in range(1, 8+1):
skill_ids.append(build["skills"][i]["id"])
i = 0
build["skills"]["control"] = 0
for max_point in max_points:
if max(skill_ids) > max_point:
build["skills"]["control"] = i + 1
i += 1
new_binary_str = ""
# Template headers: type and version
selection = build["template type"]
new_binary_str = new_binary_str + int_to_binary_str(selection, 4)
selection = build["version number"]
new_binary_str = new_binary_str + int_to_binary_str(selection, 4)
# Professions control
selection = build["professions"]["control"]
new_binary_str = new_binary_str + int_to_binary_str(selection, 2)
# Professions
professions_code_size = build["professions"]["control"] * 2 + 4
selection = build["professions"]["primary"]["id"]
new_binary_str = new_binary_str + int_to_binary_str(selection, professions_code_size)
selection = build["professions"]["secondary"]["id"]
new_binary_str = new_binary_str + int_to_binary_str(selection, professions_code_size)
# Attributes count and control
selection = build["attrbs"]["count"]
new_binary_str = new_binary_str + int_to_binary_str(selection, 4)
selection = build["attrbs"]["control"]
new_binary_str = new_binary_str + int_to_binary_str(selection, 4)
attributes_code_size = build["attrbs"]["control"] + 4
# Attributes
for i in range(1, build["attrbs"]["count"]+1):
selection = build["attrbs"][i]["id"]
new_binary_str = new_binary_str + int_to_binary_str(selection, attributes_code_size)
selection = build["attrbs"][i]["points"]
new_binary_str = new_binary_str + int_to_binary_str(selection, 4)
# Skills control
selection = build["skills"]["control"]
new_binary_str = new_binary_str + int_to_binary_str(selection, 4)
skills_code_size = build["skills"]["control"] + 8
for i in range(1, 8+1):
selection = build["skills"][i]["id"]
new_binary_str = new_binary_str + int_to_binary_str(selection, skills_code_size)
code = strflipped_to_code(new_binary_str)
return code
def flip_dict(original):
flipped_dict = {}
for key in original.keys():
flipped_dict[original[key]] = int(key)
return flipped_dict
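# Example (comments only; the id/name pairs are illustrative):
#
#   flip_dict({"1": "Warrior", "2": "Ranger"})   # -> {"Warrior": 1, "Ranger": 2}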
start_phrase = " -- `"
end_of_code_identifier = "` -- <:Template:384823463644233731>"
end_of_attrb_identifier = " "
skill_reactions_msg = "\nUse reactions for skill descriptions. :point_down:"
adrenaline_emoji = "<:Adrenaline:388335958002630658>"
energy_emoji = "<:Energy:384819524244865035>"
activation_emoji = "<:Activation:384819522596765697>"
recharge_emoji = "<:Recharge:384819522693103627>"
overcast_emoji = "<:Overcast:384828799424004127>"
path = os.path.dirname(os.path.abspath(__file__))
data_dir = path + "/data"
with open("{}/skill_reference.json".format(data_dir)) as f:
skill_reference = json.loads(f.read())
with open("{}/emoji_reference.json".format(data_dir)) as f:
emoji_reference = json.loads(f.read())
with open("{}/attrb_reference.json".format(data_dir)) as f:
attrb_reference = json.loads(f.read())
with open("{}/prof_reference.json".format(data_dir)) as f:
prof_reference = json.loads(f.read())
with open("{}/skillslist_lower.json".format(data_dir)) as f:
skillslist_lower = json.loads(f.read())
with open("{}/skillslist.json".format(data_dir)) as f:
skillslist = json.loads(f.read())
with open("{}/skillsnumber.json".format(data_dir)) as f:
skillsnumber = json.loads(f.read())
attrb_name_list = list(attrb_reference.values())
attrb_reverse_reference = flip_dict(attrb_reference)
prof_name_list = list(prof_reference.values())
prof_reverse_reference = flip_dict(prof_reference)
skillslist = tuple(skillslist)
skillsnumber = tuple(skillsnumber)
attrb_name_list = tuple(attrb_name_list)
prof_name_list = tuple(prof_name_list)
def build_to_message(build, skill_number=None):
code = build["code"]
pri_prof = build["professions"]["primary"]["name"]
sec_prof = build["professions"]["secondary"]["name"]
pri_emoji = "{}".format(emoji_reference[pri_prof])
sec_emoji = "{}".format(emoji_reference[sec_prof])
prof_msg = "{}**{}**/**{}**{}".format(pri_emoji, pri_prof, sec_prof, sec_emoji)
attrb_msg = ""
number_of_attributes = len(build["attrbs"])- 2 # Ignore "count" and "control"
for i in range(1, number_of_attributes+1):
attrb_name = build["attrbs"][i]["name"]
attrb_points = build["attrbs"][i]["points"]
attrb_msg += "{}: **{}**, ".format(attrb_name, attrb_points)
attrb_msg = attrb_msg[:len(attrb_msg)-2]
if skill_number is not None:
skill_id = build["skills"][skill_number]["id"]
skill_name = skill_reference[str(skill_id)]["Name"]
skill_url = "https://wiki.guildwars.com/wiki/" + skill_name.replace(" ", "_")
summary = skill_reference[str(skill_id)].get("Summary")
fullstop_pos = summary.find(".")
skill_summary = "*" + summary[:fullstop_pos+1] + "*" + summary[fullstop_pos+1:]
skill_reactions_msg = "Skill {}: **{}** -- <{}>\n{}\n".format(skill_number, skill_name, skill_url, skill_summary)
summary, attribute, activation, recharge, energy, profession, skill_type, overcast = "", "", "", "", "", "", "", ""
attribute = skill_reference[str(skill_id)].get("Attribute")
activation = skill_reference[str(skill_id)].get("Activation")
recharge = skill_reference[str(skill_id)].get("Recharge")
energy = skill_reference[str(skill_id)].get("Energy")
adrenaline = skill_reference[str(skill_id)].get("Adrenaline")
profession = skill_reference[str(skill_id)].get("Profession")
skill_type = skill_reference[str(skill_id)].get("Skill Type")
overcast = skill_reference[str(skill_id)].get("Overcast")
if adrenaline is not None:
skill_reactions_msg = "{} {}{}".format(skill_reactions_msg, adrenaline, adrenaline_emoji)
if energy is not None:
skill_reactions_msg = "{} {}{}".format(skill_reactions_msg, energy, energy_emoji)
if activation is not None:
skill_reactions_msg = "{} {}{}".format(skill_reactions_msg, activation, activation_emoji)
if recharge is not None:
skill_reactions_msg = "{} {}{}".format(skill_reactions_msg, recharge, recharge_emoji)
if overcast is not None:
skill_reactions_msg = "{} {}{}".format(skill_reactions_msg, overcast, overcast_emoji)
if profession is not None:
skill_reactions_msg = "{} Prof: **{}**.".format(skill_reactions_msg, profession)
if attribute is not None:
skill_reactions_msg = "{} Attrb: **{}**.".format(skill_reactions_msg, attribute)
if skill_type is not None:
skill_reactions_msg = "{} Type: **{}**.".format(skill_reactions_msg, skill_type)
else:
skill_reactions_msg = "Use reactions for skill descriptions. :point_down:"
build_msg = "{} -- `{}{}\n{}{}\n\n{}".format(prof_msg, code, end_of_code_identifier, attrb_msg, end_of_attrb_identifier, skill_reactions_msg)
return build_msg
def build_to_img(build):
result = Image.new("RGB", (512, 64))
for i in range(1, 9):
skill_id = build["skills"][i]["id"]
file_name = skillsnumber.index(skill_id)
path = "{}/template/skills/".format(data_dir) + str(file_name) + ".jpg"
img = Image.open(path)
img.thumbnail((512, 64), Image.ANTIALIAS)
x = (i-1) * 64
y = 0
w, h = img.size
result.paste(img, (x, y, x + w, y + h))
output = BytesIO()
result.save(output, format="JPEG")
output.seek(0)
result = output.read()
bytesimage = BytesIO(result)
code = build["code"]
image = discord.File(bytesimage, str(code) + ".jpg")
return image
def prev_build_msg_to_code(original_content):
end_phrase = end_of_code_identifier
start_pos = original_content.find(start_phrase)
end_pos = original_content.find(end_phrase)
code = original_content[start_pos + len(start_phrase):end_pos]
return code
def rebuild_build(original_content):
code = prev_build_msg_to_code(original_content)
build = code_to_build(code)
return build
def convert_image_team(build, team_image, build_number):
result = Image.new("RGB", (512, 64))
for i in range(1, 9):
skill_id = build["skills"][i]["id"]
file_name = skillsnumber.index(skill_id)
path = "{}/template/skills/".format(data_dir) + str(file_name) + ".jpg"
img = Image.open(path)
img.thumbnail((512, 64), Image.ANTIALIAS)
x = (i-1) * 64
y = 0
w, h = img.size
result.paste(img, (x, y, x + w, y + h))
x = 0
y = build_number * 64
w = 512
h = 64
team_image.paste(result, (x, y, x + w, y + h))
return team_image
def convert_to_bytes(team_image):
output = BytesIO()
team_image.save(output, format="JPEG")
output.seek(0)
team_image = output.read()
bytesimage = BytesIO(team_image)
return bytesimage
def rebuild_build_attrbs(build, i):
    """Remove attribute entry i from the build and re-number the remaining numeric attribute keys so they stay contiguous."""
total_attrbs = len(build["attrbs"].keys()) - 2
del build["attrbs"][i]
for x in range(i, total_attrbs):
build["attrbs"][x] = build["attrbs"][x+1].copy()
del build["attrbs"][x+1]
return build
async def submit_to_executor(executor, build):
future_build_msg = executor.submit(build_to_message, build)
future_image = executor.submit(build_to_img, build)
await asyncio.sleep(0)
return future_build_msg.result(), future_image.result()
columns = ("name", "pri_prof", "sec_prof", "timestamp")
def archive_read():
conn = sqlite3.connect("{}/archive.db".format(data_dir))
c = conn.cursor()
| |
Temp. forcing tendency
103 |gS_Forc | 50 | |SMR MR|psu/s |Salinity forcing tendency
104 |AB_gT | 50 | |SMR MR|degC/s |Potential Temp. tendency from Adams-Bashforth
105 |AB_gS | 50 | |SMR MR|psu/s |Salinity tendency from Adams-Bashforth
106 |gTinAB | 50 | |SMR MR|degC/s |Potential Temp. tendency going in Adams-Bashforth
107 |gSinAB | 50 | |SMR MR|psu/s |Salinity tendency going in Adams-Bashforth
108 |AB_gU | 50 | 109 |UUR MR|m/s^2 |U momentum tendency from Adams-Bashforth
109 |AB_gV | 50 | 108 |VVR MR|m/s^2 |V momentum tendency from Adams-Bashforth
110 |ADVr_TH | 50 | |WM LR|degC.m^3/s |Vertical Advective Flux of Pot.Temperature
111 |ADVx_TH | 50 | 112 |UU MR|degC.m^3/s |Zonal Advective Flux of Pot.Temperature
112 |ADVy_TH | 50 | 111 |VV MR|degC.m^3/s |Meridional Advective Flux of Pot.Temperature
113 |DFrE_TH | 50 | |WM LR|degC.m^3/s |Vertical Diffusive Flux of Pot.Temperature (Explicit part)
114 |DFxE_TH | 50 | 115 |UU MR|degC.m^3/s |Zonal Diffusive Flux of Pot.Temperature
115 |DFyE_TH | 50 | 114 |VV MR|degC.m^3/s |Meridional Diffusive Flux of Pot.Temperature
116 |DFrI_TH | 50 | |WM LR|degC.m^3/s |Vertical Diffusive Flux of Pot.Temperature (Implicit part)
117 |ADVr_SLT| 50 | |WM LR|psu.m^3/s |Vertical Advective Flux of Salinity
118 |ADVx_SLT| 50 | 119 |UU MR|psu.m^3/s |Zonal Advective Flux of Salinity
119 |ADVy_SLT| 50 | 118 |VV MR|psu.m^3/s |Meridional Advective Flux of Salinity
120 |DFrE_SLT| 50 | |WM LR|psu.m^3/s |Vertical Diffusive Flux of Salinity (Explicit part)
121 |DFxE_SLT| 50 | 122 |UU MR|psu.m^3/s |Zonal Diffusive Flux of Salinity
122 |DFyE_SLT| 50 | 121 |VV MR|psu.m^3/s |Meridional Diffusive Flux of Salinity
123 |DFrI_SLT| 50 | |WM LR|psu.m^3/s |Vertical Diffusive Flux of Salinity (Implicit part)
124 |SALTFILL| 50 | |SM MR|psu.m^3/s |Filling of Negative Values of Salinity
125 |VISCAHZ | 50 | |SZ MR|m^2/s |Harmonic Visc Coefficient (m2/s) (Zeta Pt)
126 |VISCA4Z | 50 | |SZ MR|m^4/s |Biharmonic Visc Coefficient (m4/s) (Zeta Pt)
127 |VISCAHD | 50 | |SM MR|m^2/s |Harmonic Viscosity Coefficient (m2/s) (Div Pt)
128 |VISCA4D | 50 | |SM MR|m^4/s |Biharmonic Viscosity Coefficient (m4/s) (Div Pt)
129 |VISCAHW | 50 | |WM LR|m^2/s |Harmonic Viscosity Coefficient (m2/s) (W Pt)
130 |VISCA4W | 50 | |WM LR|m^4/s |Biharmonic Viscosity Coefficient (m4/s) (W Pt)
131 |VAHZMAX | 50 | |SZ MR|m^2/s |CFL-MAX Harm Visc Coefficient (m2/s) (Zeta Pt)
132 |VA4ZMAX | 50 | |SZ MR|m^4/s |CFL-MAX Biharm Visc Coefficient (m4/s) (Zeta Pt)
133 |VAHDMAX | 50 | |SM MR|m^2/s |CFL-MAX Harm Visc Coefficient (m2/s) (Div Pt)
134 |VA4DMAX | 50 | |SM MR|m^4/s |CFL-MAX Biharm Visc Coefficient (m4/s) (Div Pt)
135 |VAHZMIN | 50 | |SZ MR|m^2/s |RE-MIN Harm Visc Coefficient (m2/s) (Zeta Pt)
136 |VA4ZMIN | 50 | |SZ MR|m^4/s |RE-MIN Biharm Visc Coefficient (m4/s) (Zeta Pt)
137 |VAHDMIN | 50 | |SM MR|m^2/s |RE-MIN Harm Visc Coefficient (m2/s) (Div Pt)
138 |VA4DMIN | 50 | |SM MR|m^4/s |RE-MIN Biharm Visc Coefficient (m4/s) (Div Pt)
139 |VAHZLTH | 50 | |SZ MR|m^2/s |Leith Harm Visc Coefficient (m2/s) (Zeta Pt)
140 |VA4ZLTH | 50 | |SZ MR|m^4/s |Leith Biharm Visc Coefficient (m4/s) (Zeta Pt)
141 |VAHDLTH | 50 | |SM MR|m^2/s |Leith Harm Visc Coefficient (m2/s) (Div Pt)
142 |VA4DLTH | 50 | |SM MR|m^4/s |Leith Biharm Visc Coefficient (m4/s) (Div Pt)
143 |VAHZLTHD| 50 | |SZ MR|m^2/s |LeithD Harm Visc Coefficient (m2/s) (Zeta Pt)
144 |VA4ZLTHD| 50 | |SZ MR|m^4/s |LeithD Biharm Visc Coefficient (m4/s) (Zeta Pt)
145 |VAHDLTHD| 50 | |SM MR|m^2/s |LeithD Harm Visc Coefficient (m2/s) (Div Pt)
146 |VA4DLTHD| 50 | |SM MR|m^4/s |LeithD Biharm Visc Coefficient (m4/s) (Div Pt)
147 |VAHZSMAG| 50 | |SZ MR|m^2/s |Smagorinsky Harm Visc Coefficient (m2/s) (Zeta Pt)
148 |VA4ZSMAG| 50 | |SZ MR|m^4/s |Smagorinsky Biharm Visc Coeff. (m4/s) (Zeta Pt)
149 |VAHDSMAG| 50 | |SM MR|m^2/s |Smagorinsky Harm Visc Coefficient (m2/s) (Div Pt)
150 |VA4DSMAG| 50 | |SM MR|m^4/s |Smagorinsky Biharm Visc Coeff. (m4/s) (Div Pt)
151 |momKE | 50 | |SMR MR|m^2/s^2 |Kinetic Energy (in momentum Eq.)
152 |momHDiv | 50 | |SMR MR|s^-1 |Horizontal Divergence (in momentum Eq.)
153 |momVort3| 50 | |SZR MR|s^-1 |3rd component (vertical) of Vorticity
154 |Strain | 50 | |SZR MR|s^-1 |Horizontal Strain of Horizontal Velocities
155 |Tension | 50 | |SMR MR|s^-1 |Horizontal Tension of Horizontal Velocities
156 |UBotDrag| 50 | 157 |UUR MR|m/s^2 |U momentum tendency from Bottom Drag
157 |VBotDrag| 50 | 156 |VVR MR|m/s^2 |V momentum tendency from Bottom Drag
158 |USidDrag| 50 | 159 |UUR MR|m/s^2 |U momentum tendency from Side Drag
159 |VSidDrag| 50 | 158 |VVR MR|m/s^2 |V momentum tendency from Side Drag
160 |Um_Diss | 50 | 161 |UUR MR|m/s^2 |U momentum tendency from Dissipation
161 |Vm_Diss | 50 | 160 |VVR MR|m/s^2 |V momentum tendency from Dissipation
162 |Um_Advec| 50 | 163 |UUR MR|m/s^2 |U momentum tendency from Advection terms
163 |Vm_Advec| 50 | 162 |VVR MR|m/s^2 |V momentum tendency from Advection terms
164 |Um_Cori | 50 | 165 |UUR MR|m/s^2 |U momentum tendency from Coriolis term
165 |Vm_Cori | 50 | 164 |VVR MR|m/s^2 |V momentum tendency from Coriolis term
166 |Um_dPHdx| 50 | 167 |UUR MR|m/s^2 |U momentum tendency from Hydrostatic Pressure grad
167 |Vm_dPHdy| 50 | 166 |VVR MR|m/s^2 |V momentum tendency from Hydrostatic Pressure grad
168 |Um_Ext | 50 | 169 |UUR MR|m/s^2 |U momentum tendency from external forcing
169 |Vm_Ext | 50 | 168 |VVR MR|m/s^2 |V momentum tendency from external forcing
170 |Um_AdvZ3| 50 | 171 |UUR MR|m/s^2 |U momentum tendency from Vorticity Advection
171 |Vm_AdvZ3| 50 | 170 |VVR MR|m/s^2 |V momentum tendency from Vorticity Advection
172 |Um_AdvRe| 50 | 173 |UUR MR|m/s^2 |U momentum tendency from vertical Advection (Explicit part)
173 |Vm_AdvRe| 50 | 172 |VVR MR|m/s^2 |V momentum tendency from vertical Advection (Explicit part)
174 |VISrI_Um| 50 | |WU LR|m^4/s^2 |Vertical Viscous Flux of U momentum (Implicit part)
175 |VISrI_Vm| 50 | |WV LR|m^4/s^2 |Vertical Viscous Flux of V momentum (Implicit part)
176 |EXFhs | 1 | |SM U1|W/m^2 |Sensible heat flux into ocean, >0 increases theta
177 |EXFhl | 1 | |SM U1|W/m^2 |Latent heat flux into ocean, >0 increases theta
178 |EXFlwnet| 1 | |SM U1|W/m^2 |Net upward longwave radiation, >0 decreases theta
179 |EXFswnet| 1 | |SM U1|W/m^2 |Net upward shortwave radiation, >0 decreases theta
180 |EXFlwdn | 1 | |SM U1|W/m^2 |Downward longwave radiation, >0 increases theta
181 |EXFswdn | 1 | |SM U1|W/m^2 |Downward shortwave radiation, >0 increases theta
182 |EXFqnet | 1 | |SM U1|W/m^2 |Net upward heat flux (turb+rad), >0 decreases theta
183 |EXFtaux | 1 | |UM U1|N/m^2 |zonal surface wind stress, >0 increases uVel
184 |EXFtauy | 1 | |VM U1|N/m^2 |meridional surface wind stress, >0 increases vVel
185 |EXFuwind| 1 | |UM U1|m/s |zonal 10-m wind speed, >0 increases uVel
186 |EXFvwind| 1 | |VM U1|m/s |meridional 10-m wind speed, >0 increases uVel
187 |EXFwspee| 1 | |SM U1|m/s |10-m wind speed modulus ( >= 0 )
188 |EXFatemp| 1 | |SM U1|degK |surface (2-m) air temperature
189 |EXFaqh | 1 | |SM U1|kg/kg |surface (2-m) specific humidity
190 |EXFevap | 1 | |SM U1|m/s |evaporation, > 0 increases salinity
191 |EXFpreci| 1 | |SM U1|m/s |precipitation, > 0 decreases salinity
192 |EXFsnow | 1 | |SM U1|m/s |snow precipitation, > 0 decreases salinity
193 |EXFempmr| 1 | |SM U1|m/s |net upward freshwater flux, > 0 increases salinity
194 |EXFpress| 1 | |SM U1|N/m^2 |atmospheric pressure field
195 |EXFroff | 1 | |SM U1|m/s |river runoff, > 0 decreases salinity
196 |EXFroft | 1 | |SM U1|deg C |river runoff temperature
197 |GGL90TKE| 50 | |SM LR|m^2/s^2 |GGL90 sub-grid turbulent kinetic energy
198 |GGL90Lmx| 50 | |SM LR|m |Mixing length scale
199 |GGL90Prl| 50 | |SM LR|1 |Prandtl number used in GGL90
------------------------------------------------------------------------------------
Num |<-Name->|Levs| mate |<- code ->|<-- Units -->|<- Tile (max=80c)
------------------------------------------------------------------------------------
200 |GGL90ArU| 50 | |SM LR|m^2/s |GGL90 eddy viscosity at U-point
201 |GGL90ArV| | |
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA
#
# https://github.com/CNES/Pandora_pandora
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions allowing to check the configuration given to Pandora pipeline.
"""
import json
from json_checker import Checker, And, Or
import rasterio
import numpy as np
import sys
from typing import Dict, List
import logging
import copy
from collections.abc import Mapping
from . import stereo
from . import optimization
from . import aggregation
from . import filter
from . import validation
from . import refinement
def gdal_can_open_mandatory(f: str) -> bool:
"""
    Test if file f can be opened by gdal
:param f: File to test
:type f: string
:returns: True if rasterio can open file and False otherwise
:rtype: bool
"""
try:
rasterio.open(f)
return True
except Exception as e:
logging.warning("Impossible to read file {}: {}".format(f, e))
return False
def gdal_can_open(f: str) -> bool:
"""
    Test if file f can be opened by gdal; 'none' and None are treated as "no file" and accepted
:param f: File to test
:type f: string
:returns: True if rasterio can open file and False otherwise
:rtype: bool
"""
if f == 'none' or f is None:
return True
else:
return gdal_can_open_mandatory(f)
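# Example (comments only; the .tif path is a placeholder):
#
#   gdal_can_open(None)                  # -> True (no mask provided)
#   gdal_can_open('none')                # -> True (explicitly disabled)
#   gdal_can_open('left_epipolar.tif')   # -> True only if rasterio can open the file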
def check_images(img_ref: str, img_sec: str, msk_ref: str, msk_sec: str) -> None:
"""
Check the images
:param img_ref: path to the reference image
:type img_ref: string
:param img_sec: path to the secondary image
:type img_sec: string
:param msk_ref: path to the mask of the reference image
:type msk_ref: string
:param msk_sec: path to the mask of the secondary image
:type msk_sec: string
"""
# verify that the images have 1 channel
ref_ = rasterio.open(img_ref)
if ref_.count != 1:
logging.error('The input images must be 1-channel grayscale images')
sys.exit(1)
sec_ = rasterio.open(img_sec)
if sec_.count != 1:
logging.error('The input images must be 1-channel grayscale images')
sys.exit(1)
# verify that the images have the same size
if (ref_.width != sec_.width) or \
(ref_.height != sec_.height):
logging.error('Images must have the same size')
sys.exit(1)
# verify that image and mask have the same size
if msk_ref is not None:
msk_ = rasterio.open(msk_ref)
if (ref_.width != msk_.width) or \
(ref_.height != msk_.height):
logging.error('Image and masks must have the same size')
sys.exit(1)
# verify that image and mask have the same size
if msk_sec is not None:
msk_ = rasterio.open(msk_sec)
# verify that the image and mask have the same size
if (sec_.width != msk_.width) or \
(sec_.height != msk_.height):
logging.error('Image and masks must have the same size')
sys.exit(1)
def check_disparity(disp_min: int, disp_max: int) -> None:
"""
Check the disparity
:param disp_min: minimal disparity
:type disp_min: int
:param disp_max: maximal disparity
:type disp_max: int
"""
# verify the disparity
if abs(disp_min) + abs(disp_max) == 0:
logging.error('Disparity range must be greater than 0')
sys.exit(1)
def get_config_pipeline(user_cfg: Dict[str, dict]) -> Dict[str, dict]:
"""
Get the pipeline configuration
:param user_cfg: user configuration
:type user_cfg: dict
:return cfg: partial configuration
:rtype cfg: dict
"""
cfg = {}
if 'invalid_disparity' in user_cfg:
cfg['invalid_disparity'] = user_cfg['invalid_disparity']
if 'stereo' in user_cfg:
cfg['stereo'] = user_cfg['stereo']
if 'aggregation' in user_cfg:
cfg['aggregation'] = user_cfg['aggregation']
if 'optimization' in user_cfg:
cfg['optimization'] = user_cfg['optimization']
if 'refinement' in user_cfg:
cfg['refinement'] = user_cfg['refinement']
if 'filter' in user_cfg:
cfg['filter'] = user_cfg['filter']
if 'validation' in user_cfg:
cfg['validation'] = user_cfg['validation']
return cfg
def get_config_input(user_cfg: Dict[str, dict]) -> Dict[str, dict]:
"""
Get the input configuration
:param user_cfg: user configuration
:type user_cfg: dict
:return cfg: partial configuration
:rtype cfg: dict
"""
cfg = {}
if 'input' in user_cfg:
cfg['input'] = user_cfg['input']
return cfg
def get_config_image(user_cfg: Dict[str, dict]) -> Dict[str, dict]:
"""
Get the image configuration
:param user_cfg: user configuration
:type user_cfg: dict
:return cfg: partial configuration
:rtype cfg: dict
"""
cfg = {}
if 'image' in user_cfg:
cfg['image'] = user_cfg['image']
return cfg
def check_pipeline_section(user_cfg: Dict[str, dict]) -> Dict[str, dict]:
"""
Complete and check if the pipeline dictionary is correct
:param user_cfg: user configuration
:type user_cfg: dict
:return cfg: global configuration
:rtype cfg: dict
"""
# Add missing steps and inputs defaults values in user_cfg
cfg = update_conf(default_short_configuration_pipeline, user_cfg)
# Initialize the plugins
stereo_ = stereo.AbstractStereo(**cfg["stereo"])
aggregation_ = aggregation.AbstractAggregation(**cfg["aggregation"])
optimization_ = optimization.AbstractOptimization(**cfg["optimization"])
refinement_ = refinement.AbstractRefinement(**cfg["refinement"])
filter_ = filter.AbstractFilter(**cfg["filter"])
validation_ = validation.AbstractValidation(**cfg["validation"])
# Load configuration steps
cfg_stereo = {'stereo': stereo_.cfg}
cfg_aggregation = {'aggregation': aggregation_.cfg}
cfg_optimization = {'optimization': optimization_.cfg}
cfg_refinement = {'refinement': refinement_.cfg}
cfg_filter = {'filter': filter_.cfg}
cfg_validation = {'validation': validation_.cfg}
# Update the configuration with steps configuration
cfg = update_conf(cfg, cfg_stereo)
cfg = update_conf(cfg, cfg_aggregation)
cfg = update_conf(cfg, cfg_optimization)
cfg = update_conf(cfg, cfg_refinement)
cfg = update_conf(cfg, cfg_filter)
cfg = update_conf(cfg, cfg_validation)
configuration_schema = {
"invalid_disparity": Or(int, lambda x: np.isnan(x)),
"stereo": dict,
"aggregation": dict,
"optimization": dict,
"refinement": dict,
"filter": dict,
"validation": dict
}
checker = Checker(configuration_schema)
checker.validate(cfg)
return cfg
def check_image_section(user_cfg: Dict[str, dict]) -> Dict[str, dict]:
"""
Complete and check if the dictionary is correct
:param user_cfg: user configuration
:type user_cfg: dict
:return cfg: global configuration
:rtype cfg: dict
"""
# Add missing steps and inputs defaults values in user_cfg
cfg = update_conf(default_short_configuration_image, user_cfg)
# check schema
configuration_schema = {
"image": image_configuration_schema
}
checker = Checker(configuration_schema)
checker.validate(cfg)
return cfg
def check_input_section(user_cfg: Dict[str, dict]) -> Dict[str, dict]:
"""
Complete and check if the dictionary is correct
:param user_cfg: user configuration
:type user_cfg: dict
:return cfg: global configuration
:rtype cfg: dict
"""
# Add missing steps and inputs defaults values in user_cfg
cfg = update_conf(default_short_configuration_input, user_cfg)
# check schema
configuration_schema = {
"input": input_configuration_schema
}
checker = Checker(configuration_schema)
checker.validate(cfg)
# custom checking
check_disparity(cfg['input']['disp_min'], cfg['input']['disp_max'])
check_images(cfg['input']['img_ref'], cfg['input']['img_sec'], cfg['input']['ref_mask'],
cfg['input']['sec_mask'])
return cfg
def check_conf(user_cfg: Dict[str, dict]) -> Dict[str, dict]:
"""
Complete and check if the dictionary is correct
:param user_cfg: user configuration
:type user_cfg: dict
:return cfg: global configuration
:rtype cfg: dict
"""
# check pipeline
user_cfg_pipeline = get_config_pipeline(user_cfg)
cfg_pipeline = check_pipeline_section(user_cfg_pipeline)
# check image
user_cfg_image = get_config_image(user_cfg)
cfg_image = check_image_section(user_cfg_image)
# check input
user_cfg_input = get_config_input(user_cfg)
cfg_input = check_input_section(user_cfg_input)
# concatenate updated config
cfg = concat_conf([cfg_image, cfg_input, cfg_pipeline])
return cfg
def concat_conf(cfg_list: List[Dict[str, dict]]) -> Dict[str, dict]:
"""
Concatenate dictionaries
:param cfg_list: list of configurations
:type cfg_list: List of dict
:return cfg: global configuration
:rtype cfg: dict
"""
# concatenate updated config
cfg = {}
for c in cfg_list:
cfg.update(c)
return cfg
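# Example (comments only): later dictionaries overwrite earlier ones on key collisions.
#
#   concat_conf([{"a": 1}, {"b": 2}, {"a": 3}])   # -> {"a": 3, "b": 2}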
input_configuration_schema = {
"img_ref": And(str, gdal_can_open_mandatory),
"img_sec": And(str, gdal_can_open_mandatory),
"ref_mask": And(Or(str, lambda x: x is None), gdal_can_open),
"sec_mask": And(Or(str, lambda x: x is None), gdal_can_open),
"disp_min": int,
"disp_max": int
}
image_configuration_schema = {
"nodata1": Or(int, lambda x: np.isnan(x)),
"nodata2": Or(int, lambda x: np.isnan(x)),
"valid_pixels": int,
"no_data": int
}
default_short_configuration_image = {
"image": {
"nodata1": 0,
"nodata2": 0,
"valid_pixels": 0,
"no_data": 1
}
}
default_short_configuration_input = {
"input": {
"ref_mask": None,
"sec_mask": None
}
}
default_short_configuration_pipeline = {
"invalid_disparity": -9999,
"stereo": {
"stereo_method": "ssd"
},
"aggregation": {
"aggregation_method": "none"
},
"optimization": {
"optimization_method": "none"
},
"refinement": {
"refinement_method": "none"
},
"filter": {
"filter_method": "none"
},
"validation": {
"validation_method": "none"
}
}
default_short_configuration = concat_conf([default_short_configuration_image, default_short_configuration_input,
default_short_configuration_pipeline])
def read_config_file(config_file: str) -> Dict[str, dict]:
"""
Read a json configuration file
:param config_file: path to a json file containing the algorithm parameters
:type config_file: string
:return user_cfg: configuration dictionary
:rtype: dict
"""
with open(config_file, 'r') as f:
user_cfg = json.load(f)
return user_cfg
def update_conf(def_cfg: Dict[str, dict], user_cfg: Dict[str, dict]) -> Dict[str, dict]:
"""
Update the default configuration with the user configuration,
:param def_cfg: default configuration
:type def_cfg: dict
:param user_cfg: user configuration
:type user_cfg: dict
:return: the user and default configuration
:rtype: dict
"""
config = copy.deepcopy(def_cfg)
for key, value in user_cfg.items():
if isinstance(value, Mapping):
config[key] = update_conf(config.get(key, {}), value)
else:
if value == "np.nan":
value = np.nan
config[key] = value
return config
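# Example (comments only; the keys below are illustrative, not necessarily real
# Pandora parameters): nested user values override the defaults key by key, and
# the literal string "np.nan" is converted to np.nan.
#
#   update_conf({"stereo": {"method": "ssd", "window": 5}},
#               {"stereo": {"method": "census"}})
#   # -> {"stereo": {"method": "census", "window": 5}}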
def is_method(s: str, methods: List[str]) -> bool:
"""
Test if s is a method in methods
:param s: String to test
| |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""This module provides classes that describes :class:`.Plot` content.
Instances of those classes are returned by :class:`.Plot` methods that give
access to its content such as :meth:`.Plot.getCurve`, :meth:`.Plot.getImage`.
"""
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "09/02/2017"
from collections import Sequence
from copy import deepcopy
import logging
import weakref
import numpy
from . import Colors
from ...third_party.six import string_types
from ...utils.decorators import deprecated
_logger = logging.getLogger(__name__)
class Item(object):
"""Description of an item of the plot"""
_DEFAULT_Z_LAYER = 0
"""Default layer for overlay rendering"""
_DEFAULT_LEGEND = ''
"""Default legend of items"""
_DEFAULT_SELECTABLE = False
"""Default selectable state of items"""
def __init__(self):
self._plotRef = None
self._visible = True
self._legend = self._DEFAULT_LEGEND
self._selectable = self._DEFAULT_SELECTABLE
self._z = self._DEFAULT_Z_LAYER
self._info = None
self._xlabel = None
self._ylabel = None
def getPlot(self):
"""Returns Plot this item belongs to.
:rtype: Plot or None
"""
return None if self._plotRef is None else self._plotRef()
def _setPlot(self, plot):
"""Set the plot this item belongs to.
WARNING: This should only be called from the Plot.
:param Plot plot: The Plot instance.
"""
if plot is not None and self._plotRef is not None:
raise RuntimeError('Trying to add a node at two places.')
# Alternative: remove it from previous children list
self._plotRef = None if plot is None else weakref.ref(plot)
def getBounds(self): # TODO return a Bounds object rather than a tuple
"""Returns the bounding box of this item in data coordinates
:returns: (xmin, xmax, ymin, ymax) or None
:rtype: 4-tuple of float or None
"""
return self._getBounds()
def _getBounds(self):
""":meth:`getBounds` implementation to override by sub-class"""
return None
def isVisible(self):
"""True if item is visible, False otherwise
:rtype: bool
"""
return self._visible
def _setVisible(self, visible):
"""Set visibility of item.
:param bool visible: True to display it, False otherwise
"""
self._visible = bool(visible)
def getLegend(self):
"""Returns the legend of this item (str)"""
return self._legend
def _setLegend(self, legend):
legend = str(legend) if legend is not None else self._DEFAULT_LEGEND
self._legend = legend
def isSelectable(self):
"""Returns true if item is selectable (bool)"""
return self._selectable
def _setSelectable(self, selectable):
self._selectable = bool(selectable)
def getZValue(self):
"""Returns the layer on which to draw this item (int)"""
return self._z
def _setZValue(self, z):
self._z = int(z) if z is not None else self._DEFAULT_Z_LAYER
def getInfo(self, copy=True):
"""Returns the info associated to this item
:param bool copy: True to get a deepcopy, False otherwise.
"""
return deepcopy(self._info) if copy else self._info
def _setInfo(self, info, copy=True):
if copy:
info = deepcopy(info)
self._info = info
# Mix-in classes ##############################################################
class LabelsMixIn(object):
"""Mix-in class for items with x and y labels"""
def __init__(self):
self._xlabel = None
self._ylabel = None
def getXLabel(self):
"""Return the X axis label associated to this curve
:rtype: str or None
"""
return self._xlabel
def _setXLabel(self, label):
"""Set the X axis label associated with this curve
:param str label: The X axis label
"""
self._xlabel = str(label)
def getYLabel(self):
"""Return the Y axis label associated to this curve
:rtype: str or None
"""
return self._ylabel
def _setYLabel(self, label):
"""Set the Y axis label associated with this curve
:param str label: The Y axis label
"""
self._ylabel = str(label)
class DraggableMixIn(object):
"""Mix-in class for draggable items"""
def __init__(self):
self._draggable = False
def isDraggable(self):
"""Returns true if image is draggable
:rtype: bool
"""
return self._draggable
def _setDraggable(self, draggable):
"""Set if image is draggable or not.
:param bool draggable:
"""
self._draggable = bool(draggable)
class ColormapMixIn(object):
"""Mix-in class for items with colormap"""
_DEFAULT_COLORMAP = {'name': 'gray', 'normalization': 'linear',
'autoscale': True, 'vmin': 0.0, 'vmax': 1.0}
"""Default colormap of the item"""
def __init__(self):
self._colormap = self._DEFAULT_COLORMAP
def getColormap(self):
"""Return the used colormap"""
return self._colormap.copy()
def _setColormap(self, colormap):
"""Set the colormap of this image
:param dict colormap: colormap description
"""
self._colormap = colormap.copy()
class SymbolMixIn(object):
"""Mix-in class for items with symbol type"""
_DEFAULT_SYMBOL = ''
"""Default marker of the item"""
def __init__(self):
self._symbol = self._DEFAULT_SYMBOL
def getSymbol(self):
"""Return the point marker type.
Marker type::
- 'o' circle
- '.' point
- ',' pixel
- '+' cross
- 'x' x-cross
- 'd' diamond
- 's' square
:rtype: str
"""
return self._symbol
def _setSymbol(self, symbol):
"""Set the marker type
See :meth:`getSymbol`.
:param str symbol: Marker type
"""
assert symbol in ('o', '.', ',', '+', 'x', 'd', 's', '', None)
if symbol is None:
symbol = self._DEFAULT_SYMBOL
self._symbol = symbol
class ColorMixIn(object):
"""Mix-in class for item with color"""
_DEFAULT_COLOR = (0., 0., 0., 1.)
"""Default color of the item"""
def __init__(self):
self._color = self._DEFAULT_COLOR
def getColor(self):
"""Returns the RGBA color of the item
:rtype: 4-tuple of float in [0, 1]
"""
return self._color
def _setColor(self, color, copy=True):
"""Set item color
:param color: color(s) to be used
:type color: str ("#RRGGBB") or (npoints, 4) unsigned byte array or
one of the predefined color names defined in Colors.py
:param bool copy: True (Default) to get a copy,
False to use internal representation (do not modify!)
"""
if isinstance(color, string_types):
self._color = Colors.rgba(color)
else:
color = numpy.array(color, copy=copy)
# TODO more checks
if color.ndim == 1: # Single RGBA color
color = Colors.rgba(color)
else: # Array of colors
assert color.ndim == 2
self._color = color
class YAxisMixIn(object):
"""Mix-in class for item with yaxis"""
_DEFAULT_YAXIS = 'left'
"""Default Y axis the item belongs to"""
def __init__(self):
self._yaxis = self._DEFAULT_YAXIS
def getYAxis(self):
"""Returns the Y axis this curve belongs to.
Either 'left' or 'right'.
:rtype: str
"""
return self._yaxis
def _setYAxis(self, yaxis):
"""Set the Y axis this curve belongs to.
:param str yaxis: 'left' or 'right'
"""
yaxis = str(yaxis)
assert yaxis in ('left', 'right')
self._yaxis = yaxis
class FillMixIn(object):
"""Mix-in class for item with fill"""
def __init__(self):
self._fill = False
def isFill(self):
"""Returns whether the item is filled or not.
:rtype: bool
"""
return self._fill
def _setFill(self, fill):
"""Set whether to fill the item or not.
:param bool fill:
"""
self._fill = bool(fill)
# Items #######################################################################
class Curve(Item, LabelsMixIn, SymbolMixIn, ColorMixIn, YAxisMixIn, FillMixIn):
"""Description of a curve"""
_DEFAULT_Z_LAYER = 1
"""Default overlay layer for curves"""
_DEFAULT_SELECTABLE = True
"""Default selectable state for curves"""
_DEFAULT_LINEWIDTH = 1.
"""Default line width of the curve"""
_DEFAULT_LINESTYLE = '-'
"""Default line style of the curve"""
_DEFAULT_HIGHLIGHT_COLOR = (0, 0, 0, 255)
"""Default highlight color of the item"""
def __init__(self):
Item.__init__(self)
LabelsMixIn.__init__(self)
SymbolMixIn.__init__(self)
ColorMixIn.__init__(self)
YAxisMixIn.__init__(self)
FillMixIn.__init__(self)
self._x = ()
self._y = ()
self._xerror = None
self._yerror = None
self._linewidth = self._DEFAULT_LINEWIDTH
self._linestyle = self._DEFAULT_LINESTYLE
self._histogram = None
self._highlightColor = self._DEFAULT_HIGHLIGHT_COLOR
self._highlighted = False
# Store filtered data for x > 0 and/or y > 0
self._filteredCache = {}
# Store bounds depending on axes filtering >0:
# key is (isXPositiveFilter, isYPositiveFilter)
self._boundsCache = {}
@deprecated
def __getitem__(self, item):
"""Compatibility with PyMca and silx <= 0.4.0"""
if isinstance(item, slice):
return [self[index] for index in range(*item.indices(5))]
elif item == 0:
return self.getXData(copy=False)
elif item == 1:
return self.getYData(copy=False)
elif item == 2:
return self.getLegend()
elif item == 3:
info = self.getInfo(copy=False)
return {} if info is None else info
elif item == 4:
params = {
'info': self.getInfo(),
| |
str
the sorting order of the ranked results. Should be either "asc" (ascending) or "desc" (descending)
Returns
-------
list
a list of the searched and matched model dictionaries containing metric and model_id, sorted by metric.
"""
ranked_models = []
matching_models = self.model_selector.find_matching_models_by_values(values=values,
target_values_operator=target_values_operator,
are_keys_also_matched=are_keys_also_matched,
is_case_sensitive=is_case_sensitive)
if len(matching_models) < 1:
logging.warning(
f'For your input, there were {len(matching_models)} matching models, while at least 1 is needed. '
                f'Please adjust either your metric or your search value inputs {values} to find at least one match.')
else:
matching_model_ids = [model.model_id for model in matching_models]
logging.debug(f"matching_model_ids: {matching_model_ids}")
ranked_models = self.model_selector.rank_models_by_performance(model_ids=matching_model_ids, metric=metric,
order=order)
if len(ranked_models) < 1:
logging.warning(
                f'None ({len(ranked_models)}) of the {len(matching_model_ids)} matching models found had a valid metric entry for {metric}. '
f'Please adjust your metric to enable ranking of the found models.')
return ranked_models
def find_models_rank_and_generate(self, values: list, target_values_operator: str = 'AND',
are_keys_also_matched: bool = False, is_case_sensitive: bool = False,
metric: str = 'SSIM', order: str = "asc", num_samples: int = 30,
output_path: str = None, is_gen_function_returned: bool = False, **kwargs):
""" Search for values (and keys) in model configs, rank results to generate samples with highest ranked model.
Parameters
----------
values: list
list of values used to search and find models corresponding to these `values`
target_values_operator: str
the operator indicating the relationship between `values` in the evaluation of model search results.
Should be either "AND", "OR", or "XOR".
are_keys_also_matched: bool
flag indicating whether, apart from values, the keys in the model config should also be searchable
is_case_sensitive: bool
flag indicating whether the search for values (and) keys in the model config should be case-sensitive.
metric: str
The key in the selection dict that corresponds to the metric of interest
order: str
the sorting order of the ranked results. Should be either "asc" (ascending) or "desc" (descending)
num_samples: int
the number of samples that will be generated
output_path: str
the path as str to the output folder where the generated samples will be stored
is_gen_function_returned: bool
flag indicating whether, instead of generating samples, the sample generation function will be returned
**kwargs
arbitrary number of keyword arguments passed to the model's sample generation function
Returns
-------
None
However, if `is_gen_function_returned` is True, it returns the internal generate function of the model.
"""
ranked_models = self.find_models_and_rank(values=values,
target_values_operator=target_values_operator,
are_keys_also_matched=are_keys_also_matched,
is_case_sensitive=is_case_sensitive, metric=metric, order=order)
assert ranked_models is not None and len(ranked_models) > 0, \
f'None of the models fulfilled both, the matching (values: {values}) AND ' \
f'ranking (metric: {metric}) criteria you provided.'
# Get the ID of the highest ranking model to generate() with that model
highest_ranking_model_id = ranked_models[0][MODEL_ID]
# Let's generate with the best-ranked model
logging.info(f'For your input, there were {len(ranked_models)} models found and ranked. '
f'The highest ranked model ({highest_ranking_model_id}) will now be used for generation: '
f'{ranked_models[0]}')
return self.generate(model_id=highest_ranking_model_id, num_samples=num_samples,
output_path=output_path,
is_gen_function_returned=is_gen_function_returned, **kwargs)
def find_model_and_generate(self, values: list, target_values_operator: str = 'AND',
are_keys_also_matched: bool = False, is_case_sensitive: bool = False,
num_samples: int = 30, output_path: str = None, is_gen_function_returned: bool = False,
**kwargs):
""" Search for values (and keys) in model configs to generate samples with the found model.
        Note that the number of found models should be exactly 1. Otherwise, no samples are generated and an error is
        logged to the console.
Parameters
----------
values: list
list of values used to search and find models corresponding to these `values`
target_values_operator: str
the operator indicating the relationship between `values` in the evaluation of model search results.
Should be either "AND", "OR", or "XOR".
are_keys_also_matched: bool
flag indicating whether, apart from values, the keys in the model config should also be searchable
is_case_sensitive: bool
            flag indicating whether the search for values (and keys) in the model config should be case-sensitive.
num_samples: int
the number of samples that will be generated
output_path: str
the path as str to the output folder where the generated samples will be stored
is_gen_function_returned: bool
flag indicating whether, instead of generating samples, the sample generation function will be returned
**kwargs
arbitrary number of keyword arguments passed to the model's sample generation function
Returns
-------
None
However, if `is_gen_function_returned` is True, it returns the internal generate function of the model.
"""
matching_models: list = self.model_selector.find_matching_models_by_values(values=values,
target_values_operator=target_values_operator,
are_keys_also_matched=are_keys_also_matched,
is_case_sensitive=is_case_sensitive)
if len(matching_models) > 1:
            logging.error(f'For your input, there was more than one matching model ({len(matching_models)}). '
f'Please choose one of the models (see model_ids below) or use find_models_rank_and_generate() instead.'
f'Alternatively, you may also further specify additional search values apart from the provided ones '
f'to find exactly one model: {values}. The matching models were the following: \n {matching_models}')
elif len(matching_models) < 1:
logging.error(f'For your input, there were {len(matching_models)} matching models, while 1 is needed. '
f'Please adjust your search value inputs {values} to find at least one match.')
else:
# Exactly one matching model. Let's generate with this model
logging.info(f'For your input, there was {len(matching_models)} model matched. '
f'This model will now be used for generation: {matching_models}')
matched_model_id = matching_models[0].model_id
return self.generate(model_id=matched_model_id, num_samples=num_samples, output_path=output_path,
is_gen_function_returned=is_gen_function_returned, **kwargs)
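    # Example usage (comments only; a sketch assuming this class is exposed as a
    # `Generators`-style entry point and that a model matching the illustrative
    # search values exists in the loaded config):
    #
    #   generators = Generators()
    #   generators.find_model_and_generate(
    #       values=["dcgan", "mammography"],
    #       target_values_operator="AND",
    #       num_samples=10,
    #       output_path="./generated_samples",
    #   )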
############################ MODEL EXECUTOR METHODS ############################
def add_all_model_executors(self):
""" Add `ModelExecutor` class instances for all models available in the config.
Returns
-------
None
"""
for model_id in self.config_manager.model_ids:
execution_config = self.config_manager.get_config_by_id(model_id=model_id,
config_key=CONFIG_FILE_KEY_EXECUTION)
self._add_model_executor(model_id=model_id,
execution_config=execution_config)
def add_model_executor(self, model_id: str):
""" Add one `ModelExecutor` class instance corresponding to the specified `model_id`.
Parameters
----------
model_id: str
The generative model's unique id
Returns
-------
None
"""
if not self.is_model_executor_already_added(model_id):
execution_config = self.config_manager.get_config_by_id(model_id=model_id,
config_key=CONFIG_FILE_KEY_EXECUTION)
self._add_model_executor(model_id=model_id, execution_config=execution_config)
def _add_model_executor(self, model_id: str, execution_config: dict):
""" Add one `ModelExecutor` class instance corresponding to the specified `model_id` and `execution_config`.
Parameters
----------
model_id: str
The generative model's unique id
execution_config: dict
The part of the config below the 'execution' key
Returns
-------
None
"""
if not self.is_model_executor_already_added(model_id):
model_executor = ModelExecutor(model_id=model_id, execution_config=execution_config,
download_package=True)
self.model_executors.append(model_executor)
def is_model_executor_already_added(self, model_id) -> bool:
""" Check whether the `ModelExecutor` instance of this model_id is already in `self.model_executors` list.
Parameters
----------
model_id: str
The generative model's unique id
Returns
-------
bool
            indicating whether this `ModelExecutor` has already been added to `self.model_executors`
"""
if self.find_model_executor_by_id(model_id=model_id) is None:
logging.debug(f"{model_id}: The model has not yet been added to the model_executor list.")
return False
return True
def find_model_executor_by_id(self, model_id: str) -> ModelExecutor:
""" Find and return the `ModelExecutor` instance of this model_id in the `self.model_executors` list.
Parameters
----------
model_id: str
The generative model's unique id
Returns
-------
ModelExecutor
`ModelExecutor` class instance corresponding to the `model_id`
"""
for idx, model_executor in enumerate(self.model_executors):
if model_executor.model_id == model_id:
return model_executor
return None
def get_model_executor(self, model_id: str) -> ModelExecutor:
""" Add and return the `ModelExecutor` instance of this model_id from the `self.model_executors` list.
Relies on `self.add_model_executor` and `self.find_model_executor_by_id` functions.
Parameters
----------
model_id: str
The generative model's unique id
Returns
-------
ModelExecutor
`ModelExecutor` class instance corresponding to the `model_id`
"""
try:
            self.add_model_executor(model_id=model_id)  # only adds the executor if it is not already in the list
return self.find_model_executor_by_id(model_id=model_id)
except Exception as e:
logging.error(f"{model_id}: This model could not be added to model_executor list: {e}")
raise e
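    # Usage sketch (comments only; `generators` stands for an instance of the enclosing class,
    # and the model id is a placeholder). The executor is created and cached on first access,
    # then reused by subsequent calls:
    #
    #   executor = generators.get_model_executor(model_id='<some_model_id>')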
def generate(self, model_id: str, num_samples: int = 30, output_path: str = None, save_images: bool = True,
is_gen_function_returned: bool = False, **kwargs):
""" Generate samples with the model corresponding to the `model_id` or return the model's generate function.
Parameters
----------
model_id: str
The generative model's unique id
num_samples: int
the number of samples that will be generated
output_path: str
the path as str to the output folder where the generated samples will be stored
save_images: bool
            flag indicating whether generated samples are returned as a list of numpy arrays or stored in the file system (i.e. in `output_path`)
is_gen_function_returned: bool
flag indicating whether, instead of generating samples, the sample generation function will be returned
**kwargs
arbitrary number of keyword arguments passed to the model's sample generation function
Returns
-------
list
Returns images as list of numpy arrays if `save_images` is False. However, if `is_gen_function_returned` is True, it returns the internal generate function of the model.
"""
model_executor | |
#!/usr/bin/env python3
def get_bs_vs_tn(in_file_name, out_file_name, best_config_list):
'''
Note: best case for dense int8 new log is:
batch_size data_parallel model_parallel thread_num
16 4 1 8
batch size: 1 2 4 8 16 32 64 128 256 512 1024
data parallel: 1 2 4 8 16 32
model parallel: 1 2 4 8 16 32
thread num: 1 2 4 8 16 32 64 128
    for 'batch size' vs 'thread num': 11 x 8 in the full sweep (the active index dicts below restrict this to 7 x 7)
'''
best_bs, best_dp, best_mp, best_tn = best_config_list[0], best_config_list[1], best_config_list[2], best_config_list[3]
#batch_size_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7, 128:8, 256:9, 512:10, 1024:11}
#thread_num_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7, 128:8}
batch_size_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7}
thread_num_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7}
data_parallel_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6}
model_parallel_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6}
file_reader = open(in_file_name, 'r')
file_writer = open(out_file_name, 'w')
res_list = [None for _ in range(len(batch_size_index_dict)*len(thread_num_index_dict))]
try:
text_lines = file_reader.readlines()
for line in text_lines:
line = line.rstrip('\n')
line = line.split('\t')
batch_size, data_parallel, model_parallel, thread_num, fifo_size, end2end_fps = int(line[0]), int(line[1]), int(line[2]), int(line[3]), int(line[4]), float(line[5])
if model_parallel == best_mp and data_parallel == best_dp:
index = (batch_size_index_dict[batch_size] - 1) * len(thread_num_index_dict) + thread_num_index_dict[thread_num] - 1
res_list[index] = end2end_fps
print(len(res_list), res_list)
        for i in range(len(batch_size_index_dict)):
            res_str = ''
            for j in range(len(thread_num_index_dict)):
                index = i * len(thread_num_index_dict) + j
                sep = '\n' if j == len(thread_num_index_dict) - 1 else ','
                if res_list[index] is None:
                    res_str += sep
                else:
                    res_str += str(res_list[index]) + sep
            print(res_str)
            file_writer.write(res_str)
finally:
if file_reader:
file_reader.close()
file_writer.close()
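# A minimal, self-contained sketch of the input the parsers in this script expect: one
# tab-separated line per run, laid out as <batch_size> <data_parallel> <model_parallel>
# <thread_num> <fifo_size> <end2end_fps>. The file names and values below are illustrative only.
def example_get_bs_vs_tn(tmp_log='example_perf.log', tmp_csv='bs_vs_tn.csv'):
    sample_lines = [
        '16\t4\t1\t8\t64\t1234.5',
        '32\t4\t1\t8\t64\t1500.0',
    ]
    with open(tmp_log, 'w') as f:
        f.write('\n'.join(sample_lines) + '\n')
    # best_config_list is [best_bs, best_dp, best_mp, best_tn]; dp and mp are held fixed
    # while the batch-size x thread-num grid is written out as CSV.
    get_bs_vs_tn(tmp_log, tmp_csv, [16, 4, 1, 8])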
def get_dp_vs_mp(in_file_name, out_file_name, best_config_list):
'''
Note: best case for dense int8 new log is:
batch_size data_parallel model_parallel thread_num
16 4 1 8
batch size: 1 2 4 8 16 32 64 128 256 512 1024
data parallel: 1 2 4 8 16 32
model parallel: 1 2 4 8 16 32
thread num: 1 2 4 8 16 32 64 128
    for 'data parallel' vs 'model parallel': 6 x 6
'''
best_bs, best_dp, best_mp, best_tn = best_config_list[0], best_config_list[1], best_config_list[2], best_config_list[3]
#batch_size_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7, 128:8, 256:9, 512:10, 1024:11}
#thread_num_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7, 128:8}
batch_size_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7}
thread_num_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7}
data_parallel_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6}
model_parallel_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6}
file_reader = open(in_file_name, 'r')
file_writer = open(out_file_name, 'w')
res_list = [None for _ in range(len(data_parallel_index_dict)*len(model_parallel_index_dict))]
try:
text_lines = file_reader.readlines()
for line in text_lines:
line = line.rstrip('\n')
line = line.split('\t')
batch_size, data_parallel, model_parallel, thread_num, fifo_size, end2end_fps = int(line[0]), int(line[1]), int(line[2]), int(line[3]), int(line[4]), float(line[5])
if batch_size == best_bs and thread_num == best_tn:
index = (data_parallel_index_dict[data_parallel] - 1) * len(model_parallel_index_dict) + model_parallel_index_dict[model_parallel] - 1
res_list[index] = end2end_fps
print(len(res_list), res_list)
        for i in range(len(data_parallel_index_dict)):
            res_str = ''
            for j in range(len(model_parallel_index_dict)):
                index = i * len(model_parallel_index_dict) + j
                sep = '\n' if j == len(model_parallel_index_dict) - 1 else ','
                if res_list[index] is None:
                    res_str += sep
                else:
                    res_str += str(res_list[index]) + sep
            print(res_str)
            file_writer.write(res_str)
finally:
if file_reader:
file_reader.close()
file_writer.close()
def get_heat_map_data(in_file_name, out_file_name, column_opt_dict, row_opt_dict, column_line_map, row_line_map, best_column, best_row, remain_opt_first, remain_opt_second, best_config_list):
'''
Note: best case for dense int8 new log is:
batch_size data_parallel model_parallel thread_num
16 4 1 8
batch size: 1 2 4 8 16 32 64 128 256 512 1024
data parallel: 1 2 4 8 16 32
model parallel: 1 2 4 8 16 32
thread num: 1 2 4 8 16 32 64 128
    for the selected 'column' option vs 'row' option (e.g. 'batch size' vs 'thread num': 11 x 8)
'''
best_bs, best_dp, best_mp, best_tn = 16, 4, 1, 8
best_bs, best_dp, best_mp, best_tn = best_config_list[0], best_config_list[1], best_config_list[2], best_config_list[3]
#batch_size_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7, 128:8, 256:9, 512:10, 1024:11}
#thread_num_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7, 128:8}
batch_size_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7}
thread_num_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7}
data_parallel_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6}
model_parallel_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6}
file_reader = open(in_file_name, 'r')
file_writer = open(out_file_name, 'w')
res_list = [None for _ in range(len(column_opt_dict)*len(row_opt_dict))]
try:
text_lines = file_reader.readlines()
for line in text_lines:
line = line.rstrip('\n')
line = line.split('\t')
batch_size, data_parallel, model_parallel, thread_num, fifo_size, end2end_fps = int(line[0]), int(line[1]), int(line[2]), int(line[3]), int(line[4]), float(line[5])
row_key = int(line[column_line_map])
column_key = int(line[row_line_map])
if row_key == best_column and column_key == best_row:
index = (data_parallel_index_dict[data_parallel] - 1) * len(model_parallel_index_dict) + model_parallel_index_dict[model_parallel] - 1
res_list[index] = end2end_fps
print(len(res_list), res_list)
        for i in range(len(data_parallel_index_dict)):
            res_str = ''
            for j in range(len(model_parallel_index_dict)):
                index = i * len(model_parallel_index_dict) + j
                sep = '\n' if j == len(model_parallel_index_dict) - 1 else ','
                if res_list[index] is None:
                    res_str += sep
                else:
                    res_str += str(res_list[index]) + sep
            print(res_str)
            file_writer.write(res_str)
finally:
if file_reader:
file_reader.close()
file_writer.close()
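# Note on the generic helper above (comments only): `column_line_map` / `row_line_map` appear to
# be intended as the tab-separated column indices to read the plotted options from, with
# `column_opt_dict` / `row_opt_dict` mapping option values to grid positions and `best_column` /
# `best_row` selecting the slice. As written, however, the cell indexing is still hard-coded to
# the data_parallel x model_parallel layout, so the function currently reproduces the
# get_dp_vs_mp slice rather than a fully generic heat map; `remain_opt_first` /
# `remain_opt_second` are accepted but unused.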
def get_bs_vs_dp(in_file_name, out_file_name, best_config_list):
'''
Note: best case for dense int8 new log is:
batch_size data_parallel model_parallel thread_num
16 4 1 8
batch size: 1 2 4 8 16 32 64 128 256 512 1024
data parallel: 1 2 4 8 16 32
model parallel: 1 2 4 8 16 32
thread num: 1 2 4 8 16 32 64 128
    for 'batch size' vs 'data parallel': 11 x 6
'''
best_bs, best_dp, best_mp, best_tn = 16, 4, 1, 8
best_bs, best_dp, best_mp, best_tn = best_config_list[0], best_config_list[1], best_config_list[2], best_config_list[3]
#batch_size_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7, 128:8, 256:9, 512:10, 1024:11}
#thread_num_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7, 128:8}
batch_size_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7}
thread_num_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7}
data_parallel_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6}
model_parallel_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6}
file_reader = open(in_file_name, 'r')
file_writer = open(out_file_name, 'w')
res_list = [None for _ in range(len(batch_size_index_dict)*len(data_parallel_index_dict))]
try:
text_lines = file_reader.readlines()
for line in text_lines:
line = line.rstrip('\n')
line = line.split('\t')
batch_size, data_parallel, model_parallel, thread_num, fifo_size, end2end_fps = int(line[0]), int(line[1]), int(line[2]), int(line[3]), int(line[4]), float(line[5])
if model_parallel == best_mp and thread_num == best_tn:
index = (batch_size_index_dict[batch_size] - 1) * len(data_parallel_index_dict) + data_parallel_index_dict[data_parallel] - 1
res_list[index] = end2end_fps
print(len(res_list), res_list)
for i in range(len(batch_size_index_dict)):
res_str = ''
for j in range(len(data_parallel_index_dict)):
index = i * len(data_parallel_index_dict) + j
                sep = '\n' if j == len(data_parallel_index_dict) - 1 else ','
                if res_list[index] is None:
                    res_str += sep
                else:
                    res_str += str(res_list[index]) + sep
print(res_str)
file_writer.write(res_str)
finally:
if file_reader:
file_reader.close()
file_writer.close()
def get_bs_vs_mp(in_file_name, out_file_name, best_config_list):
'''
Note: best case for dense int8 new log is:
batch_size data_parallel model_parallel thread_num
16 4 1 8
batch size: 1 2 4 8 16 32 64 128 256 512 1024
data parallel: 1 2 4 8 16 32
model parallel: 1 2 4 8 16 32
thread num: 1 2 4 8 16 32 64 128
    for 'batch size' vs 'model parallel': 11 x 6
'''
best_bs, best_dp, best_mp, best_tn = 16, 4, 1, 8
best_bs, best_dp, best_mp, best_tn = best_config_list[0], best_config_list[1], best_config_list[2], best_config_list[3]
#batch_size_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7, 128:8, 256:9, 512:10, 1024:11}
#thread_num_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7, 128:8}
batch_size_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7}
thread_num_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6, 64:7}
data_parallel_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6}
model_parallel_index_dict={1:1, 2:2, 4:3, 8:4, 16:5, 32:6}
file_reader = open(in_file_name, 'r')
file_writer = open(out_file_name, 'w')
res_list = [None for _ in range(len(batch_size_index_dict)*len(model_parallel_index_dict))]
try:
text_lines = file_reader.readlines()
for line in text_lines:
line = line.rstrip('\n')
line = line.split('\t')
batch_size, data_parallel, model_parallel, thread_num, fifo_size, end2end_fps = int(line[0]), int(line[1]), int(line[2]), int(line[3]), int(line[4]), float(line[5])
if data_parallel == best_dp and thread_num == best_tn:
index = (batch_size_index_dict[batch_size] - 1) * len(model_parallel_index_dict) + model_parallel_index_dict[model_parallel] - 1
res_list[index] = end2end_fps
print(len(res_list), res_list)
for i in range(len(batch_size_index_dict)):
res_str = ''
for j in range(len(model_parallel_index_dict)):
index = i * len(model_parallel_index_dict) + j
                sep = '\n' if j == len(model_parallel_index_dict) - 1 else ','
                if res_list[index] is None:
                    res_str += sep
                else:
                    res_str += str(res_list[index]) + sep
# rrlfd/residual/train.py
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Train a residual policy on top of a learned agent.
Usage:
Use case --> flags to set
1) Use base agent
a) Use feats from base agent --> network && bc_ckpt_to_load
b) Learn new feats --> network && bc_ckpt_to_load && rl_observation_network
c) Init feats from base agent but finetune
--> network && bc_ckpt_to_load && rl_observation_network
&& init_feats_from_bc && predict_residual
2) Use RL only
a) Learn new feats --> rl_observation_network (if input type is visual)
b) Init feats & policy from base agent but finetune
--> network && bc_ckpt_to_load && rl_observation_network && init_from_bc
c) Init feats from base agent but finetune
--> network && bc_ckpt_to_load && rl_observation_network
&& init_feats_from_bc
3) Use base controller + rl observation net from scratch
--> base_controller && rl_observation_network
"""
import os
from absl import app
from absl import flags
from acme import specs
import numpy as np
import tensorflow as tf
from rrlfd.residual import agents
from rrlfd.residual import eval_utils
from rrlfd.residual import setup
from tensorflow.io import gfile
flags.DEFINE_string('domain', None, 'Domain from which to load task.')
flags.DEFINE_string('task', None, 'Task to solve.')
flags.DEFINE_enum('input_type', 'depth', ['depth', 'rgb', 'rgbd', 'full_state'],
'Input modality.')
flags.DEFINE_integer('num_episodes', 10000, 'Number of episodes to run for.')
flags.DEFINE_integer('seed', 2, 'Experiment seed.')
flags.DEFINE_integer('eval_seed', 1, 'Environment seed for evaluation.')
flags.DEFINE_boolean('increment_eval_seed', False,
'If True, increment eval seed after each eval episode.')
flags.DEFINE_integer('num_eval_episodes', 100,
'Number of episodes to evaluate.')
flags.DEFINE_boolean('collapse_in_eval', True,
'If True, collapse RL policy to its mean in evaluation.')
flags.DEFINE_boolean('stop_if_stuck', False,
'If True, end episode if observations and actions are '
'stuck.')
flags.DEFINE_boolean('end_on_success', False,
                     'If True, end episode early if the success criterion is met.')
flags.DEFINE_integer('eval_freq', 100_000,
'Frequency (in environment training steps) with which to '
'evaluate policy.')
flags.DEFINE_boolean('eval_only', False,
'If True, evaluate policy ckpts of trained policy.')
# Flags for BC agent.
flags.DEFINE_boolean('binary_grip_action', True,
'If True, use open/close action space for gripper. Else '
'use gripper velocity.')
flags.DEFINE_enum('action_norm', 'unit', ['unit', 'zeromean_unitvar'],
'Which normalization to apply to actions.')
flags.DEFINE_enum('residual_action_norm', 'unit',
['none', 'unit', 'zeromean_unitvar', 'centered'],
'Which normalization to apply to residual actions.')
flags.DEFINE_float('residual_action_norm_scale', 1.0,
'Factor by which to scale residual actions. Applied to raw '
'predictions in none, unit and centered normalisation, and '
'to standard deviation in the case of zeromean_unitvar.')
flags.DEFINE_enum('signals_norm', 'none', ['none', 'unit', 'zeromean_unitvar'],
'Which normalization to apply to scalar observations.')
flags.DEFINE_string('original_demos_file', None,
'Dataset used to compute stats for action normalization.')
flags.DEFINE_integer('max_demos_to_load', None,
'Maximum number of demos from demos_file (in order) to '
'use to compute action stats.')
flags.DEFINE_integer('max_demo_length', None,
'If set, trim demonstrations to this length.')
flags.DEFINE_float('val_size', 0.05,
                   'Amount of data to exclude from action normalisation stats. '
'If < 1, the fraction of total loaded data points. Else the '
'number of data points.')
flags.DEFINE_boolean('val_full_episodes', True,
'If True, split data into train and validation on an '
'episode basis. Else split by individual time steps.')
flags.DEFINE_string('last_activation', None,
'Activation function to apply to network output, if any.')
flags.DEFINE_list('fc_layer_sizes', [],
'Sizes of fully connected layers to add on top of bottleneck '
'layer, if any.')
flags.DEFINE_integer('num_input_frames', 3,
'Number of frames to condition base policy on.')
flags.DEFINE_integer('image_size', None, 'Size of rendered images.')
flags.DEFINE_integer('crop_margin_size', 16,
'If crop_frames is True, the number of pixels to crop '
'from each dimension.')
flags.DEFINE_boolean('crop_frames', True,
'If True, crop input frames by 16 pixels in H and W.')
flags.DEFINE_list('target_offsets', [0, 10, 20, 30],
'Offsets in time for actions to predict in behavioral '
'cloning.')
flags.DEFINE_enum('network', None,
['resnet18', 'resnet18_narrow32', 'resnet50', 'simple_cnn',
'hand_vil'],
'Policy network of base policy.')
flags.DEFINE_boolean('bn_before_concat', False,
'If True, add a batch norm layer before concatenating '
                     'scalar features to visual features.')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight decay for training.')
flags.DEFINE_boolean('predict_residual', True,
'If True, train a residual agent. Else train RL from '
'scratch without base agent.')
flags.DEFINE_enum('rl_observation_network', None,
['resnet18', 'resnet18_narrow32', 'resnet50', 'simple_cnn',
'hand_vil'],
'Observation network of residual policy. If None, '
'observation network of base agent is reused.')
flags.DEFINE_boolean('late_fusion', False,
'If True, fuse stacked frames after convolutional layers. '
'If False, fuse at network input.')
flags.DEFINE_string('policy_init_path', None,
'If set, initialize network weights from a pickle file at '
'this path.')
flags.DEFINE_string('rl_observation_network_ckpt', None,
'If set, checkpoint from which to load observation network '
'weights.')
flags.DEFINE_string('base_controller', None,
'If set, a black-box controller to use for base actions.')
flags.DEFINE_string('bc_ckpt_to_load', None,
'If set, checkpoint from which to load base policy.')
flags.DEFINE_string('rl_ckpt_to_load', None,
'If set, checkpoint from which to load residual policy.')
flags.DEFINE_string('original_demos_path', None,
'If set, path to the original demonstration dataset (to '
'restore normalization statistics). If not set, inferred '
'from BC checkpoint path.')
flags.DEFINE_boolean('init_from_bc', False,
'If True, use BC agent loaded from bc_ckpt_to_load as '
'initialization for RL observation and policy nets.')
flags.DEFINE_boolean('init_feats_from_bc', False,
'If True, initialize RL observation network with BC.')
flags.DEFINE_string('logdir', None, 'Location to log results to.')
flags.DEFINE_boolean('load_saved', False,
'If True, load saved model from checkpoint. Else train '
'from scratch.')
flags.DEFINE_string('base_visible_state', 'robot',
'State features on which to condition the base policy.')
flags.DEFINE_string('residual_visible_state', 'robot',
'State features on which to condition the residual policy. '
'If using full state, the BC net features are replaced '
'with these true state features in input to RL policy.')
flags.DEFINE_float('bernoulli_rate', 0.,
'Fraction of time to use bernoulli exploration for gripper '
'action.')
flags.DEFINE_float('sticky_rate', 0.,
'Stickiness rate of bernoulli exploration for gripper '
'action.')
flags.DEFINE_string('job_id', None,
'Subdirectory to add to logdir to identify run. Set '
'automatically to XM id or datetime if None.')
flags.DEFINE_integer('base_policy_success', None,
'No-op flag used to identify base policy.')
flags.DEFINE_boolean('freeze_rl_observation_network', False,
'If True, do not update acme observation network weights. '
'Else train critic and observation net jointly.')
FLAGS = flags.FLAGS
def train_residual(
env_loop, num_episodes, logdir, eval_freq, num_eval_episodes,
collapse_in_eval, eval_seed, increment_eval_seed, stop_if_stuck):
"""Train residual for num_episodes episodes."""
# TODO(minttu): Should bernoulli rate and sticky rate be defined here instead?
total_steps = env_loop.run(
num_episodes=num_episodes,
out_dir=logdir,
ckpt_freq=min(50_000, eval_freq),
eval_freq=eval_freq,
num_eval_episodes=num_eval_episodes,
collapse_in_eval=collapse_in_eval,
eval_seed=eval_seed,
increment_eval_seed=increment_eval_seed,
stop_if_stuck=stop_if_stuck)
if logdir is not None:
setup.save_acme_agent(env_loop.actor, logdir)
return total_steps
def main(_):
np.random.seed(FLAGS.seed)
tf.random.set_seed(FLAGS.seed)
counter = setup.setup_counting()
logdir, env_logger, agent_logger, summary_writer, _ = setup.setup_logging(
FLAGS.logdir)
base_state = setup.set_visible_features(
FLAGS.domain, FLAGS.task, FLAGS.base_visible_state)
residual_state = setup.set_visible_features(
FLAGS.domain, FLAGS.task, FLAGS.residual_visible_state)
print('Base policy state features', base_state)
print('Residual policy state features', residual_state)
image_size = FLAGS.image_size
if image_size is None:
# Default sizes.
image_size = {
'adroit': 128,
'mime': 240,
}[FLAGS.domain]
# Whether BCAgent's network is used for visual features (expects frames in a
# certain shape).
use_base_agent_image_shape = (
FLAGS.predict_residual or FLAGS.freeze_rl_observation_network)
visible_state = (
list(set(base_state + residual_state)) if FLAGS.predict_residual
else residual_state)
env_loop = setup.make_environment_loop(
domain=FLAGS.domain,
task=FLAGS.task,
seed=FLAGS.seed,
input_type=FLAGS.input_type,
num_input_frames=FLAGS.num_input_frames,
visible_state=visible_state,
image_size=image_size,
use_base_agent_image_shape=use_base_agent_image_shape,
late_fusion=FLAGS.late_fusion,
max_train_episode_steps=FLAGS.max_episode_steps,
agent=None,
counter=counter,
env_logger=env_logger,
summary_writer=summary_writer)
env = env_loop._environment # pylint: disable=protected-access
environment_spec = specs.make_environment_spec(env)
print('Environment spec', environment_spec)
base_agent = None
# Create BC agent. In residual RL, it is used as the base agent, and in
# standalone RL it may be used for action and observation space normalization.
if FLAGS.bc_ckpt_to_load or FLAGS.original_demos_file:
base_agent = setup.load_saved_bc_agent(
ckpt_to_load=FLAGS.bc_ckpt_to_load,
network_type=FLAGS.network,
late_fusion=FLAGS.late_fusion,
input_type=FLAGS.input_type,
domain=FLAGS.domain,
binary_grip_action=FLAGS.binary_grip_action,
num_input_frames=FLAGS.num_input_frames,
crop_frames=FLAGS.crop_frames,
full_image_size=image_size,
crop_margin_size=FLAGS.crop_margin_size,
target_offsets=[int(t) for t in FLAGS.target_offsets],
visible_state_features=base_state,
action_norm=FLAGS.action_norm,
signals_norm=FLAGS.signals_norm,
last_activation=FLAGS.last_activation,
fc_layer_sizes=[int(i) for i in FLAGS.fc_layer_sizes],
weight_decay=FLAGS.weight_decay,
max_demos_to_load=FLAGS.max_demos_to_load,
max_demo_length=FLAGS.max_demo_length,
val_size=FLAGS.val_size,
val_full_episodes=FLAGS.val_full_episodes,
split_seed=FLAGS.split_seed,
env=env,
task=FLAGS.task)
print('action normalization mean\n', base_agent.action_space.mean)
print('action normalization std\n', base_agent.action_space.std)
obs_network_type = None
include_base_feats = True
if ((FLAGS.bc_ckpt_to_load is None and FLAGS.policy_init_path is None)
or (FLAGS.init_from_bc and not FLAGS.freeze_rl_observation_network)
or FLAGS.init_feats_from_bc):
obs_network_type = FLAGS.rl_observation_network
include_base_feats = False
if FLAGS.residual_visible_state == 'full':
include_base_feats = False
include_base_action = FLAGS.predict_residual
residual_spec = setup.define_residual_spec(
residual_state, env, base_agent,
action_norm=FLAGS.residual_action_norm,
action_norm_scale=FLAGS.residual_action_norm_scale,
include_base_action=include_base_action,
include_base_feats=include_base_feats,
base_network=FLAGS.network)
binary_grip_action = FLAGS.init_from_bc and FLAGS.binary_grip_action
residual_agent, eval_policy = setup.make_acme_agent(
environment_spec=environment_spec,
residual_spec=residual_spec,
obs_network_type=obs_network_type,
crop_frames=FLAGS.crop_frames,
full_image_size=image_size,
crop_margin_size=FLAGS.crop_margin_size,
late_fusion=FLAGS.late_fusion,
binary_grip_action=binary_grip_action,
input_type=FLAGS.input_type,
counter=counter,
logdir=logdir,
agent_logger=agent_logger)
if FLAGS.init_from_bc:
setup.init_policy_networks(base_agent.network, residual_agent)
if not FLAGS.freeze_rl_observation_network:
setup.init_observation_networks(base_agent.network, residual_agent)
if FLAGS.init_feats_from_bc:
setup.init_observation_networks(base_agent.network, residual_agent)
# agent_class = (
# agents.ResidualAgent if FLAGS.predict_residual else agents.RLAgent)
if FLAGS.predict_residual:
agent_class = agents.ResidualAgent
else:
if FLAGS.freeze_rl_observation_network:
agent_class = agents.FixedObservationAgent
else:
agent_class = agents.RLAgent
agent = agent_class(
base_agent=base_agent,
rl_agent=residual_agent,
action_space='tool_lin' if FLAGS.domain == 'mime' else FLAGS.task,
action_norm=FLAGS.residual_action_norm,
action_norm_scale=FLAGS.residual_action_norm_scale,
signals_norm=FLAGS.signals_norm,
rl_eval_policy=eval_policy,
feats_spec=residual_spec.observations,
state_keys=residual_state,
bernoulli_rate=FLAGS.bernoulli_rate,
sticky_rate=FLAGS.sticky_rate,
rl_observation_network_type=FLAGS.rl_observation_network,
rl_input_type=FLAGS.input_type,
rl_num_input_frames=FLAGS.num_input_frames,
base_controller=FLAGS.base_controller,
env=env)
| |
{1}").format(name,
response.reason)
e = exception.CephFailure(reason=msg)
LOG.error(e)
raise e
# TODO(CephPoolsDecouple): remove
def create_or_resize_osd_pool(self, pool_name, pg_num, pgp_num,
size, min_size):
"""Create or resize an osd pool as needed
:param pool_name: pool name
:param pg_num: number of placement groups
:param pgp_num: number of placement groups for placement
:param size: number of replicas for objects in the pool
:param min_size: minimum number of replicas required for I/O
"""
# ruleset 0: is the default ruleset if no crushmap is loaded or
# the ruleset for the backing tier if loaded:
# Name: storage_tier_ruleset
ruleset = 0
# Create the pool if not present
self._pool_create(pool_name, pg_num, pgp_num, ruleset, size, min_size)
def delete_osd_pool(self, pool_name):
"""Delete an osd pool
:param pool_name: pool name
"""
response, body = self._ceph_api.osd_pool_delete(
pool_name, pool_name,
sure='--yes-i-really-really-mean-it',
body='json')
if response.ok:
LOG.info(_("Deleted OSD pool {}").format(pool_name))
else:
e = exception.CephPoolDeleteFailure(
name=pool_name, reason=response.reason)
LOG.warn(e)
raise e
def list_osd_pools(self):
"""List all osd pools
"""
resp, pools = self._ceph_api.osd_pool_ls(body='json')
if not resp.ok:
e = exception.CephPoolListFailure(
reason=resp.reason)
LOG.error(e)
raise e
else:
return pools['output']
def get_ceph_object_pool_name(self):
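        # Probing an arbitrary property ('pg_num') doubles as an existence check: osd_pool_get
        # only succeeds if the pool exists, so the Jewel-named pool is tried first and the
        # Hammer-named pool is used as a fallback (present on systems upgraded from R3).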
response, body = self._ceph_api.osd_pool_get(
constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL,
"pg_num",
body='json')
if response.ok:
return constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL
response, body = self._ceph_api.osd_pool_get(
constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER,
"pg_num",
body='json')
if response.ok:
return constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER
return None
def update_ceph_object_pool_name(self, pool):
"""
Check whether JEWEL or HAMMER pool should be used
"""
if pool['pool_name'] == constants.CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL:
# Check if Hammer version pool exists. If it does, it means it is an
# upgrade from R3; otherwise, it is a fresh R4+ installation
response, body = self._ceph_api.osd_pool_get(
constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER,
"pg_num",
body='json')
if response.ok:
# Now check if Swift was enabled in R3. If it was, the Hammer pool
# will be kept; otherwise, the Hammer pool will be deleted and a
# Jewel pool will be created.
storage_ceph = self._db_api.storage_ceph_get_list()[0]
if storage_ceph['object_gateway'] is True:
# Make sure Swift/Radosgw is really enabled
response, body = self._ceph_api.osd_pool_get(
constants.CEPH_POOL_OBJECT_GATEWAY_ROOT_NAME,
"pg_num",
body='json')
if response.ok:
LOG.info("Hammer-->Jewel upgrade: keep Hammer object data pool %s",
constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER)
pool['pool_name'] = constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER
else:
if body['status'].find("unrecognized pool") != -1:
LOG.warn("Swift is enabled but pool %s does not exist.",
constants.CEPH_POOL_OBJECT_GATEWAY_ROOT_NAME)
LOG.info("Hammer-->Jewel upgrade: delete inactive Hammer object data pool %s",
constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER)
self.delete_osd_pool(constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER)
else:
LOG.warn("Failed to query pool %s ",
constants.CEPH_POOL_OBJECT_GATEWAY_ROOT_NAME)
else:
LOG.info("Hammer-->Jewel upgrade: delete inactive Hammer object data pool %s",
constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER)
self.delete_osd_pool(constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER)
def _configure_pool_key(self, pool_name):
"""Get CEPH key for a certain pool."""
response, body = ("", "")
caps_dict = {'mon': 'allow r',
'osd': 'allow rwx pool=%s' % pool_name}
entity = "client.%s" % pool_name
try:
response, body = ("", "")
response, body = self._ceph_api.auth_get_or_create(
entity, caps_dict, body='json', timeout=10)
auth_result = body['output']
rc = auth_result[0].get('key')
except Exception as e:
rc = None
LOG.info("CEPH auth exception: %s response: %s body: %s" %
(str(e), str(response), str(body)))
return rc
# TODO(CephPoolsDecouple): remove
def _configure_secondary_tier_pools(self, tier_obj, size, min_size):
"""Configure the service pools that are allowed for additional ceph tiers.
"""
# Get the backend object if there is one attached.
backend = None
if tier_obj.forbackendid:
backend = self._db_api.storage_ceph_get(tier_obj.forbackendid)
# Make sure OSD exist for this tier before creating ceph pools
LOG.info("Calling _configure_secondary_tier_pools "
"to create/update ceph pools for tier: %s" % tier_obj.name)
for p in constants.SB_TIER_CEPH_POOLS:
# If we have a backend for the tier, then set the quota
if backend:
# if the quota is not set, set the default value
quota_gib_value = backend.get(p['be_quota_attr'], None)
if quota_gib_value is None:
self._db_api.storage_ceph_update(backend.uuid,
{p['be_quota_attr']:
p['quota_default']})
quota_gib_value = p['quota_default']
# get the pool name
pool_name = "%s-%s" % (p['pool_name'], tier_obj.name)
rule_name = "{0}{1}{2}".format(
tier_obj.name,
constants.CEPH_CRUSH_TIER_SUFFIX,
"-ruleset").replace('-', '_')
# get the rule for the tier, if present then create the pool if
# required.
response, body = self._ceph_api.osd_crush_rule_dump(name=rule_name,
body='json')
if response.ok:
ruleset = body['output']['ruleset']
# create/update the pool
self._pool_create(pool_name, p['pg_num'], p['pgp_num'],
ruleset, size, min_size)
else:
e = exception.CephPoolRulesetFailure(
name=rule_name, reason=body['status'])
raise e
def _update_db_capabilities(self, bk, new_storceph):
        # Avoid updating the DB for all capabilities in new_storceph as we
        # don't manage them. Leave it to the callers to deal with it.
if (not new_storceph or
(new_storceph and bk['name'] != new_storceph['name'])):
self._db_api.storage_backend_update(
bk['id'],
{'capabilities': bk['capabilities']}
)
def get_osd_tree(self):
"""Get OSD tree info
return: list of nodes and a list of stray osds e.g.:
[{u'type_id': 10, u'type': u'root', u'id': -6, u'name': u'gold-tier',
u'children': [-7]},
{u'type_id': 2, u'type': u'chassis', u'id': -7, u'name': u'group-0-gold',
u'children': [-9, -8]},
{u'status': u'up', u'name': u'osd.2', u'exists': 1, u'type_id': 0,
u'reweight': 1.0, u'crush_weight': 0.008789, u'primary_affinity': 1.0,
u'depth': 3, u'type': u'osd', u'id': 2}, ...]
[{u'status': u'up', u'name': u'osd.1', u'exists': 1, u'reweight': 1.0,
u'type_id': 0, u'crush_weight': 0.0, u'primary_affinity': 1.0, u'depth': 0,
u'type': u'osd', u'id': 1}, ...]
"""
resp, body = self._ceph_api.osd_tree(body='json')
if not resp.ok:
LOG.error("Failed to get OSD tree info")
return resp, None, None
else:
return resp, body['output']['nodes'], body['output']['stray']
def set_osd_down(self, osdid):
"""Set an osd to down state
:param osdid: OSD id
"""
response, body = self._ceph_api.osd_down(
osdid, body='json')
if response.ok:
LOG.info("Set OSD %d to down state.", osdid)
else:
LOG.error("Set OSD down failed for OSD %d: %s",
osdid, response.reason)
response.raise_for_status()
def mark_osd_down(self, osdid):
"""Mark the object store device down
:param osdid: object based storage id
"""
to_mark_osd_down = False
resp, nodes, stray = self.get_osd_tree()
if not resp.ok:
# We would still try to mark the osd down
to_mark_osd_down = True
else:
osdid_str = "osd." + str(osdid)
for entry in nodes + stray:
if entry['name'] == osdid_str:
if entry['status'] == 'up':
LOG.info("OSD %s is still up. Mark it down.", osdid_str)
to_mark_osd_down = True
break
if to_mark_osd_down:
self.set_osd_down(osdid)
def osd_remove_crush_auth(self, osdid):
""" Remove the object store device from ceph
osdid: object based storage id
:param osdid:
"""
osdid_str = "osd." + str(osdid)
# Remove the OSD from the crush map
response, body = self._ceph_api.osd_crush_remove(
osdid_str, body='json')
if not response.ok:
LOG.error("OSD crush remove failed for OSD %s: %s",
osdid_str, response.reason)
response.raise_for_status()
# Remove the OSD authentication key
response, body = self._ceph_api.auth_del(
osdid_str, body='json')
if not response.ok:
LOG.error("Auth delete failed for OSD %s: %s",
osdid_str, response.reason)
response.raise_for_status()
def osd_remove(self, *args, **kwargs):
return self._ceph_api.osd_remove(*args, **kwargs)
def get_cluster_df_stats(self, timeout=10):
"""Get the usage information for the ceph cluster.
:param timeout:
"""
resp, body = self._ceph_api.df(body='json',
timeout=timeout)
if not resp.ok:
e = exception.CephGetClusterUsageFailure(reason=resp.reason)
LOG.error(e)
raise e
else:
return body["output"]["stats"]
def get_pools_df_stats(self, timeout=10):
resp, body = self._ceph_api.df(body='json',
timeout=timeout)
if not resp.ok:
e = exception.CephGetPoolsUsageFailure(reason=resp.reason)
LOG.error(e)
raise e
else:
return body["output"]["pools"]
# TODO(CephPoolsDecouple): remove
# This function is only called from audit_osd_quotas_for_tier() which
# will be removed by CephPoolsDecouple.
def get_osd_stats(self, timeout=30):
try:
resp, body = self._ceph_api.osd_stat(body='json',
timeout=timeout)
except ReadTimeout:
resp = type('Response', (),
dict(ok=False,
reason=('Ceph API osd_stat() timeout '
'after {} seconds').format(timeout)))
if not resp.ok:
e = exception.CephGetOsdStatsFailure(reason=resp.reason)
LOG.error(e)
raise e
else:
return body["output"]
def have_ceph_monitor_access(self, timeout=5):
""" Verify that ceph monitor access will not timeout.
:param timeout: Time in seconds to wait for the REST API request to
respond.
"""
available_mons = 0
monitors = self._db_api.ceph_mon_get_list()
for m in monitors:
try:
ihost = self._db_api.ihost_get_by_hostname(m.hostname)
except exception.NodeNotFound:
LOG.error("Monitor host %s not found" % m.hostname)
continue
if (ihost['administrative'] == constants.ADMIN_UNLOCKED and
ihost['operational'] == constants.OPERATIONAL_ENABLED):
available_mons += 1
# Avoid calling the ceph rest_api until we have a minimum configuration
check_access = False
if cutils.is_aio_system(self._db_api) and available_mons > 0:
# one monitor: need it available
check_access = True
elif available_mons > 1:
# three monitors: need two available
check_access = True
LOG.debug("Checking ceph monitors. Available: %s. Check cluster: "
"access %s" % (available_mons, check_access))
if check_access:
return True if self._get_fsid(timeout) else False
return False
def get_ceph_cluster_info_availability(self):
# TODO(CephPoolsDecouple): rework
# Check if the ceph cluster is ready to return statistics
storage_hosts = self._db_api.ihost_get_by_personality(
constants.STORAGE)
is_aio = tsc.system_type == constants.TIS_AIO_BUILD
if not storage_hosts and is_aio:
storage_hosts = self._db_api.ihost_get_by_personality(
constants.CONTROLLER)
# If there is no storage node present, ceph usage
# information is not relevant
if not storage_hosts:
return False
# At least one storage node must be in available state
for host in storage_hosts:
if host['availability'] == constants.AVAILABILITY_AVAILABLE:
break
else:
# No storage node is available
return False
return True
# TODO(CephPoolsDecouple): rework - determine the existing pools
def get_pools_config(self):
for pool in CEPH_POOLS:
# Here it is okay for | |
import os
import sys
import glob
import argparse
import pandas as pd
import numpy as np
from astropy.wcs import WCS
from astropy.io import fits, ascii
from astropy.visualization import ImageNormalize, ZScaleInterval
import matplotlib.pyplot as plt
import time
from tqdm import tqdm
from spacekit.analyzer.track import stopwatch
class DrawMosaics:
"""Class for generating machine-learning image inputs from drizzled total detection fits files and their associated catalog files. Primarily used for creating multiple images at once (batch) with capability for single images/single dataset also available."""
def __init__(
self,
input_path,
output_path=None,
fname=None,
visit=None,
pattern="",
gen=3,
size=(24, 24),
crpt=0,
):
"""Initializes a DrawMosaics class object.
Parameters
----------
input_path : str (path)
path to dataset subdirectories containing total or filter fits files
        output_path : str (path), optional
            where to save the pngs, by default None (a subdirectory named "img" is created in the current working directory and used)
fname : str (path), optional
csv (dataframe) fname for generating specific list of datasets, by default None
        visit : str, optional
            name of a specific subdirectory (typically the ipppssoot or visit name) containing .fits and .ecsv files, by default None
pattern : str, optional
            glob search pattern to restrict the image generator to only certain visit names (e.g. 'ia*'), by default ''
gen : int, optional
generator method to use: 0=generate original only; 1=point-segment, 2=gaia; 3=original, point-segment, and gaia (3 separate images), by default 3
size : tuple, optional
size to make the figures i.e. figsize=(size,size), by default (24, 24)
crpt : int, optional
modifies the input search pattern as well as output png file naming convention (so that a non-corrupt visit of the same name is not overwritten), by default 0
"""
self.input_path = input_path
self.output_path = output_path
self.check_output()
self.fname = fname
self.visit = visit
self.pattern = pattern
self.gen = gen
self.size = size
self.crpt = crpt
self.rgx = self.check_format()
self.status = {"new": [], "skip": [], "err": []}
self.datasets = self.get_datasets()
self.clip = True
self.manual = None
def check_output(self):
"""check if a custom output_path is set, otherwise create a subdirectory "img" in the current working directory and set as the output_path attribute.
Returns
-------
str (path)
path to subdirectory for saving png images.
"""
if self.output_path is None:
self.output_path = os.path.join(os.getcwd(), "img")
# TODO if permission error, write to /tmp
os.makedirs(self.output_path, exist_ok=True)
return self.output_path
def check_format(self, dname=None):
if dname is None:
dname = "??????"
if self.crpt == 1:
return f"{dname}_*_???_st??"
else:
return dname
def get_hapfiles(self, dataset):
if self.pattern:
subdir = f"{self.input_path}/{self.pattern}/{dataset}"
else:
subdir = f"{self.input_path}/{dataset}"
dname = dataset.split("_")[0]
hfiles = glob.glob(f"{subdir}/*total_{dname}_dr?.fits")
return subdir, hfiles
def get_datasets(self):
"""Locate inputs (fits file directories) to use for drawing the images. Search method used is based on parameters set when the DrawMosaics class object is instantiated. If multiple parameters are passed in, the order of search priority is 1) `fname`: only look for visits found in the csv file/dataframe (uses `load_from_file` method); 2) `visit` only look for subdirectories matching this specific visit name; 3) `local_search`: glob-based search for any visit subdirectories matching the pattern set in `pattern` attribute. (if crpt)
Returns
-------
list
list of datasets/visits found according to search method used
"""
if self.fname:
return self.load_from_file()
elif self.visit:
return [self.visit]
else:
return self.local_search()
def load_from_file(self):
"""only look for visits found in the csv file/dataframe.
Returns
-------
list
restricted list of inputs (visits) for which images will be drawn
"""
if not self.fname.endswith("csv"):
self.fname += ".csv"
df = pd.read_csv(self.fname, index_col="index")
idx = list(df.index)
self.datasets = []
skip = []
for i in idx:
impath = os.path.join(self.output_path, i)
visit = i.split("_")[6] if not self.crpt else "_".join(i.split("_")[6:])
if os.path.exists(impath):
num = len(glob.glob(f"{impath}/*"))
if num < 3:
self.datasets.append(visit)
else:
skip.append(visit)
else:
self.datasets.append(visit)
if len(skip) > 0:
self.status["skip"] = list(set(skip))
print("Skipping found images: ", len(self.status["skip"]))
print(f"\nFound {len(self.datasets)} new datasets.")
return list(set(self.datasets))
def local_search(self):
"""only look for visit names matching a glob-based search pattern.
Returns
-------
list
list of inputs (visits) for which images will be drawn
"""
search = self.pattern if self.pattern else self.rgx
inputs = glob.glob(f"{self.input_path}/{search}")
if len(inputs) == 0: # try one more directory down
print("None found - Checking subdirectories")
inputs = glob.glob(f"{self.input_path}/*/{search}")
if len(inputs) == 0: # fall back to wildcard
print("None found - using fallback (wildcard)")
inputs = glob.glob(f"{self.input_path}/*{self.rgx}")
try:
self.datasets = [i.split("/")[-1] for i in inputs]
print(f"\nFound {len(self.datasets)} datasets.")
return self.datasets
except Exception as e:
print(e)
print("No datasets found. Exiting.")
sys.exit(1)
def point_flag_color(self, x):
"""determines whether or not to draw a small red (or green) circle on top of the original image data depending on the value found in point source catalog. More info on the values associated with the "flag" color can be found in the Drizzlepac handbook at drizzlepac.readthedocs.io (Drizzlepac.catalog_generation api)
Parameters
----------
x : int
value pulled from point catalog file
Returns
-------
str
"red", "green" or None depending on input value
"""
if x <= 1:
return "red", "Flag <= 1"
elif x <= 5:
return "green", "2 <= Flag <= 5"
else:
return None, None # 'yellow', 'Flag > 5'
def segment_flag_color(self, x):
"""draw a small blue circle on top of the original image data depending on the value found in segment source catalog.
Parameters
----------
x : int
value pulled from segment catalog file
Returns
-------
str
"blue", "green" or None depending on input value
"""
if x <= 1:
return "blue", "Flag <= 1"
elif x <= 5:
return "green", "2 <= Flag <= 5"
else:
return None, None # 'yellow', 'Flag > 5'
def draw_catalogs(self, cfile, catalog):
"""Open and read .escv catalog file associated with the visit (if available) and map the appropriate values and coordinates to draw as an overlay on the original image. Credit: based in part on code by <NAME>
Parameters
----------
cfile : str (path)
path to source catalog file
catalog : str
"point", "segment" or "gaia"
Returns
-------
Pandas dataframe, lists of flag colors
table of catalog values and associated flag colors.
"""
cat, fcolor_, fcolor = None, None, None
if os.path.exists(cfile):
# cat = Table.read(catfile, format='ascii.ecsv')
cat = ascii.read(cfile).to_pandas()
else:
cat = ""
if len(cat) > 0:
if "Flags" in cat.columns:
flagcols = cat["Flags"]
else:
flagcols = [c for c in cat.columns if "Flags" in c]
if len(flagcols) > 0:
flags = (
cat.loc[:, flagcols]
.fillna(100, axis=0, inplace=False)
.apply(min, axis=1)
)
if catalog == "point":
fcolor_ = flags.apply(self.point_flag_color)
elif catalog == "segment":
fcolor_ = flags.apply(self.segment_flag_color)
fcolor = fcolor_.apply(lambda x: x[0]).values
return cat, fcolor_, fcolor
def create_image_name(self, name, dataset, P=0, S=0, G=0, fgroup=None):
"""Determines which suffix to append to the output png file based on which catalog(s) are used (if any).
Parameters
----------
name : str
visit name
        dataset : str
visit name (used to adjust `name` if crpt=1)
P : int, optional
draw point catalog overlay (if available), by default 0
S : int, optional
draw segment catalog overlay (if available), by default 0
G : int, optional
draw Gaia catalog overlay (if eDR3 or GSC242 available), by default 0
Returns
-------
str
path to png output for this image.
"""
if P == 1 and S == 1:
catstr = "_source"
elif P == 1 and S == 0:
catstr = "_point"
elif P == 0 and S == 1:
catstr = "_segment"
elif G == 1:
catstr = "_gaia"
else:
catstr = ""
if self.crpt:
sfx = "_".join(dataset.split("_")[1:])
name = f"{name}_{sfx}"
if fgroup: # rel filter images share same parent dir
img_out = fgroup
else:
img_out = f"{self.output_path}/{name}"
os.makedirs(img_out, exist_ok=True)
imgpath = os.path.join(img_out, f"{name}{catstr}")
return imgpath
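    # For example (illustrative visit name only, assuming `fgroup` is None and `crpt=0`):
    # with P=1 and S=1 the method returns '<output_path>/ia1s70/ia1s70_source', with G=1 it
    # returns '<output_path>/ia1s70/ia1s70_gaia', and with no catalogs '<output_path>/ia1s70/ia1s70'.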
def generate_total_images(self):
"""Batch image generation method for multiple datasets (and multiple catalog types)"""
base = os.path.dirname(os.path.abspath(self.output_path))
start = time.time()
stopwatch("DRAWING IMAGES", t0=start, out=base)
if self.datasets is None:
print("No datasets available. Exiting")
sys.exit(1)
print(f"Generating images for {len(self.datasets)} datasets.")
for dataset in tqdm(self.datasets):
if self.gen == 3: # original, point-segment, and GAIA
self.draw_total_images(dataset)
self.draw_total_images(dataset, P=1, S=1)
self.draw_total_images(dataset, G=1)
elif self.gen == 2: # GAIA
self.draw_total_images(dataset, G=1)
elif self.gen == 1: # point-segment
self.draw_total_images(dataset, P=1, S=1)
else: # original | |
import shutil, atexit, os, tempfile, logging
import numpy as np
import cv2
from scipy import ndimage
import ray
from src.focus_stack.utilities import Utilities
import src.focus_stack.RayFunctions as RayFunctions
# Setup logging
log = logging.getLogger(__name__)
class ImageHandler:
image_storage = {}
image_shape = ()
temp_dir_path = None
rgb_images_temp_files = {}
def __init__(self):
# Initialize algorithms
self.LaplacianPixelAlgorithm = LaplacianPixelAlgorithm(self)
self.PyramidAlgorithm = PyramidAlgorithm(self)
# Create tempdirectory (for storing all image data)
self.temp_dir_path = tempfile.mkdtemp(prefix="python_focus_stacking_")
# Remove folder on program exit
atexit.register(self.deleteTempFolder)
# Load a list of images in parallel
def loadImages(self, image_paths, update_func):
# Clear image_storage
self.image_storage = {}
# Start image loading in parallel
data = [
RayFunctions.loadImage.remote(path, self.temp_dir_path)
for path in image_paths
]
# Run update loop (wait for one item to finish and send update back to UI)
finished = []
while True:
ready_ref, remaining_refs = ray.wait(data, num_returns=1, timeout=None)
data = remaining_refs
ready_ref = ray.get(ready_ref) # Get value
finished.append(ready_ref[0]) # Add finished image to table
update_func(ready_ref[0][0]) # Send loaded image path to UI
if not data:
break # All images have been loaded
# Extract data and place references to files inside image_storage
image_paths = []
for info_table in finished:
image_path = info_table[0]
image_shape = info_table[1]
rgb_file_name = info_table[2]
grayscale_file_name = info_table[3]
image_paths.append(image_path)
self.rgb_images_temp_files[image_path] = rgb_file_name
self.image_storage[image_path] = {
"image_shape": image_shape,
"rgb_source": rgb_file_name,
"grayscale_source": grayscale_file_name,
}
del finished
return image_paths # Return loaded images to UI
# Align a list of images in parallel
def alignImages(self, image_paths, parameters, update_func):
data = [
RayFunctions.alignImage.remote(
path, parameters, self.image_storage, self.temp_dir_path
)
for path in image_paths
]
# Run update loop (wait for one item to finish and send update back to UI)
finished = []
while True:
ready_ref, remaining_refs = ray.wait(data, num_returns=1, timeout=None)
data = remaining_refs
ready_ref = ray.get(ready_ref) # Get value
finished.append(ready_ref[0]) # Add finished image to table
update_func(ready_ref[0][0]) # Send loaded image path to UI
if not data:
break # All images have been aligned
# Extract data and place references to files inside image_storage
image_paths = []
for info_table in finished:
image_path = info_table[0]
image_paths.append(image_path)
            # Append to image storage dictionary
self.image_storage[image_path]["rgb_aligned"] = info_table[1]
self.image_storage[image_path]["grayscale_aligned"] = info_table[2]
return image_paths
# Return image shape
def getImageShape(self):
return self.image_shape
# Export image to path
def exportImage(self, path):
if "stacked_image" in self.image_storage:
output = self.getImageFromPath(None, "stacked")
rgb = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
cv2.imwrite(path, rgb)
del output
return True
else:
return False # No stacked image
# Get image (specified type aka. RGB, grayscale, aligned, ...) from storage
def getImageFromPath(self, path, im_type):
if path in self.image_storage:
im_root = self.image_storage[path]
if im_type == "rgb_source" and im_type in im_root:
return np.memmap(
im_root[im_type], mode="r", shape=im_root["image_shape"]
)
elif im_type == "rgb_aligned" and im_type in im_root:
return np.memmap(
im_root[im_type], mode="r", shape=im_root["image_shape"]
)
elif im_type == "grayscale_gaussian" and im_type in im_root:
return np.memmap(
im_root[im_type], mode="r", shape=im_root["image_shape"]
)
elif im_type == "grayscale_laplacian" and im_type in im_root:
return np.memmap(
im_root[im_type],
mode="r",
shape=im_root["image_shape"],
dtype="float64",
)
elif im_type == "stacked" and "stacked_image" in self.image_storage:
im = self.image_storage["stacked_image"]
return np.memmap(im["file"], mode="r", shape=im["image_shape"])
# Downscale an image
def downscaleImage(self, image, scale_percent):
new_dim = (
round(image.shape[1] * scale_percent / 100),
round(image.shape[0] * scale_percent / 100),
) # New width and height
return cv2.resize(image, new_dim, interpolation=cv2.INTER_AREA)
def deleteTempFolder(self):
log.info("Removing tempfile directory")
shutil.rmtree(self.temp_dir_path)
def clearImages(self):
log.info("Clearing loaded images and their files")
self.image_storage = {}
# Remove all tempfiles inside directory
folder = self.temp_dir_path
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
log.error("Failed to delete %s. Reason: %s" % (file_path, e))
def rgbOrAligned(self, path, im_type):
if path in self.image_storage:
im = self.image_storage[path]
# Get image shape based on im_type
if im_type == "grayscale":
shape = (im["image_shape"][0], im["image_shape"][1])
else:
shape = im["image_shape"]
if im_type + "_aligned" in im:
return im[im_type + "_aligned"], shape # Return aligned image
elif im_type + "_source" in im:
return im[im_type + "_source"], shape # Return source image
class LaplacianPixelAlgorithm:
"""
Class that handles image stacking using a gaussian / laplacian pyramid.
Uses images from the "ImageHandler" class.
"""
def __init__(self, parent):
self.Parent = parent
log.info("Initialized Laplacian pixel algorithm.")
# Compute the laplacian edges of an image
@ray.remote
def computeLaplacianEdges(self, image_path, parameters):
        # Guard against missing entries/keys instead of indexing directly (avoids KeyError).
        if image_path not in self.Parent.image_storage:
            return
        elif "grayscale_source" not in self.Parent.image_storage[image_path]:
            return
        if "grayscale_aligned" in self.Parent.image_storage[image_path]:
            grayscale_image = self.Parent.image_storage[image_path]["grayscale_aligned"]
        else:
            grayscale_image = self.Parent.image_storage[image_path]["grayscale_source"]
blurred = grayscale_image
if parameters["GaussianBlur"] != 0:
# Blur image
blurred = cv2.GaussianBlur(
grayscale_image,
(parameters["GaussianBlur"], parameters["GaussianBlur"]),
0,
)
laplacian = cv2.Laplacian(
blurred, cv2.CV_64F, ksize=parameters["LaplacianKernel"]
)
del grayscale_image
# Save gaussian blurred grayscale
memmapped_blur = np.memmap(
tempfile.NamedTemporaryFile(),
mode="w+",
shape=(self.Parent.image_shape[0], self.Parent.image_shape[1]),
dtype=blurred.dtype,
)
memmapped_blur[:] = blurred
self.Parent.image_storage[image_path]["grayscale_gaussian"] = memmapped_blur
# Save laplacian grayscale
memmapped_laplacian = np.memmap(
tempfile.NamedTemporaryFile(),
mode="w+",
shape=(self.Parent.image_shape[0], self.Parent.image_shape[1]),
dtype=blurred.dtype,
)
memmapped_laplacian[:] = laplacian
self.Parent.image_storage[image_path][
"grayscale_laplacian"
] = memmapped_laplacian
return True
# Calculate output image (final stacking)
@ray.remote
def stackImages(self, image_paths):
"""
Load rgb images and laplacian gradients
Try using aligned RGB images (if there), or use source RGB images
"""
rgb_images = []
laplacian_images = []
for im_path in image_paths:
if "rgb_aligned" in self.Parent.image_storage[im_path]:
rgb_image = self.Parent.image_storage[im_path]["rgb_aligned"]
else:
rgb_image = self.Parent.image_storage[im_path]["rgb_source"]
rgb_images.append(
np.memmap(
rgb_image,
mode="r",
shape=self.Parent.image_shape,
)
)
if "grayscale_aligned" in self.Parent.image_storage[im_path]:
grayscale_image = self.Parent.image_storage[im_path][
"grayscale_aligned"
]
else:
grayscale_image = self.Parent.image_storage[im_path]["grayscale_source"]
laplacian_images.append(
np.memmap(
grayscale_image,
mode="r",
shape=(self.Parent.image_shape[0], self.Parent.image_shape[1]),
dtype="float64",
)
)
"""
Calculate output image
"""
# Create memmap (same size as rgb input)
stacked_memmap = np.memmap(
tempfile.NamedTemporaryFile(),
mode="w+",
shape=self.Parent.image_shape,
dtype=rgb_images[0].dtype,
)
for y in range(rgb_images[0].shape[0]): # Loop through vertical pixels (rows)
# Create holder for whole row
holder = np.memmap(
tempfile.NamedTemporaryFile(),
mode="w+",
shape=[1, stacked_memmap.shape[1], stacked_memmap.shape[2]],
dtype=stacked_memmap.dtype,
)
for x in range(
rgb_images[0].shape[1]
): # Loop through horizontal pixels (columns)
def get_abs():
values = []
for arr in laplacian_images: # noqa: F821
values.append(
abs(arr[y, x])
) # Insert (absolute) values of this pixel for each image
return np.asarray(values, dtype=np.uint8)
abs_val = get_abs() # Get absolute value of this pixel from every image
index = (np.where(abs_val == max(abs_val)))[0][
0
] # Get image that has highest value for this pixel
holder[0, x] = rgb_images[index][
y, x
] # Write pixel from "best image" to holder
stacked_memmap[y] = holder[
0
] # Write entire focused row to output (holder has only one row)
yield y # Send progress back to UI (every row)
yield rgb_images[0].shape[0] # Finished
# Store stacked image
self.Parent.image_storage["stacked image"] = stacked_memmap
class PyramidAlgorithm:
"""
Class that handles image stacking using a gaussian / laplacian pyramid.
Uses inherited images from the "ImageHandler" class.
"""
def __init__(self, parent):
self.Parent = parent
from src.focus_stack.RayFunctions import reduceLayer
self.reduceLayer = reduceLayer
def generating_kernel(a):
kernel = np.array([0.25 - a / 2.0, 0.25, a, 0.25, 0.25 - a / 2.0])
return np.outer(kernel, kernel)
def convolve(self, image, kernel=generating_kernel(0.4)):
return ndimage.convolve(image.astype(np.float64), kernel, mode="mirror")
def expand_layer(self, layer, kernel=generating_kernel(0.4)):
if len(layer.shape) == 2:
expand = np.memmap(
tempfile.NamedTemporaryFile(),
mode="w+",
shape=(2 * layer.shape[0], 2 * layer.shape[1]),
dtype=np.float64,
)
expand[::2, ::2] = layer
convolution = self.convolve(expand, kernel)
return 4.0 * convolution
ch_layer = self.expand_layer(layer[:, :, 0])
next_layer = np.memmap(
tempfile.NamedTemporaryFile(),
mode="w+",
shape=tuple(list(ch_layer.shape) + [layer.shape[2]]),
dtype=ch_layer.dtype,
)
next_layer[:, :, 0] = ch_layer
for channel in range(1, layer.shape[2]):
next_layer[:, :, channel] = self.expand_layer(layer[:, :, channel])
return next_layer
def gaussian_pyramid(self, images, levels):
log.info("Started calculation of Gaussian pyramid.")
# Convert images to float64 (rebuild the list; reassigning the loop variable would not persist)
images = [image.astype(np.float64, copy=False) for image in images]
pyramid = [images]
while levels > 0:
log.info("Start processing of level {}.".format(levels))
next_layer = ray.get(self.reduceLayer.remote(pyramid[-1][0]))
next_layer_size = [len(images)] + list(next_layer.shape)
pyramid.append(
np.memmap(
tempfile.NamedTemporaryFile(),
mode="w+",
shape=tuple(next_layer_size),
dtype=next_layer.dtype,
)
)
pyramid[-1][0] = next_layer
for image_index in range(1, len(images)):
print("Start processing of image {}.".format(image_index))
pyramid[-1][image_index] = ray.get(
self.reduceLayer.remote(pyramid[-2][image_index])
)
levels -= 1
return pyramid
def laplacian_pyramid(self, images, gaussian):
log.info("Started calculation of Laplacian pyramid.")
pyramid = [gaussian[-1]]
for level in range(len(gaussian) - 1, 0, -1):
log.info("Start processing of level {}.".format(level))
gauss = gaussian[level - 1]
d = gauss[0].shape
pyramid.append(
np.memmap(
tempfile.NamedTemporaryFile(),
mode="w+",
shape=(len(images), d[0], d[1], d[2]),
dtype=np.float64,
)
)
for image_index in range(len(images)):
print("Start processing of image {}.".format(image_index))
gauss_layer = gauss[image_index]
expanded = self.expand_layer(gaussian[level][image_index])
if expanded.shape != gauss_layer.shape:
expanded = expanded[: gauss_layer.shape[0], : gauss_layer.shape[1]]
pyramid[-1][image_index] = gauss_layer - expanded
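# The PyramidAlgorithm above is built around a separable 5-tap "generating kernel".
# Below is a self-contained sketch of the reduce/expand pair for a single 2-D image,
# written independently of the memmap/ray plumbing (the random test image and the
# helper names are illustrative, not taken from this project).
import numpy as np
from scipy import ndimage

def _gen_kernel(a=0.4):
    k = np.array([0.25 - a / 2.0, 0.25, a, 0.25, 0.25 - a / 2.0])
    return np.outer(k, k)

def reduce_once(layer):
    # Blur with the generating kernel, then keep every other row/column.
    return ndimage.convolve(layer.astype(np.float64), _gen_kernel(), mode="mirror")[::2, ::2]

def expand_once(layer):
    # Upsample by zero insertion, blur, and scale by 4 to preserve brightness.
    up = np.zeros((2 * layer.shape[0], 2 * layer.shape[1]))
    up[::2, ::2] = layer
    return 4.0 * ndimage.convolve(up, _gen_kernel(), mode="mirror")

if __name__ == "__main__":
    img = np.random.rand(64, 64)
    g1 = reduce_once(img)                 # one Gaussian pyramid step down
    lap0 = img - expand_once(g1)          # the matching Laplacian pyramid level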
dataf_spec.destroy()
self.updateTable()
return
dataf_spec=Toplevel()
dataf_spec.title('Specify type of data field')
top=self.master.winfo_toplevel()
rootx=top.winfo_rootx()
rooty=top.winfo_rooty()
dataf_spec.geometry('+%d+%d' %(rootx+100,rooty+100))
dataf_spec.transient(self.master)
dataf_spec.grab_set() # Grab all input
# Get the name
exclude=['Ekintype']
self.column_types=['text']+self.table.peatactions.keys()+self.table.ekin_actions.keys()
self.column_types.remove('Ekintype')
name_var=entry_field(dataf_spec,row=1,column=0,name='Name',ftype='text')
type_var=entry_field(dataf_spec,
row=2,
column=0,
name='Data field type',
ftype='menu',
items=self.column_types,
default_item='text')
self.default_var=entry_field(dataf_spec,row=3,column=0,name='Default value',ftype='text')
Button(dataf_spec,
command=add,
text='Add data field',fg='red').grid(row=5,column=0,columnspan=1,
padx=2,pady=2,sticky='news')
Button(dataf_spec,
command=close,
text='Cancel',fg='green').grid(row=5,column=1,columnspan=1,
padx=2,pady=2,sticky='news')
def addField(self):
"""Add a new data field to the database"""
return
def deleteColumn(self):
"""Delete selected field"""
col = self.table.getSelectedColumn()
colname = self.tablemodel.getColumnName(col)
if colname in self.DB.meta.staticfields:
tkMessageBox.showinfo('Cannot delete',
"This field can't be removed.\n"
'You may mark it as hidden.',
parent=self.master)
return
ans = tkMessageBox.askyesno("Delete", "Delete This Column?")
if ans:
#if self.DB.length() > 20:
from Dialogs import PEATDialog
pb=PEATDialog(self.master, option='progressbar',
message='Deleting this field for all records..')
pb.update_progress(0)
callback = pb.update_progress
self.DB.deleteField(colname, callback=callback)
self.DB.data._p_jar.cacheGC()
self.updateTable()
self.updateStatusPane()
if pb:
pb.close()
return
def undo(self):
"""Undo all changes since last commit to DB"""
if self.DB == None or self.DB.isChanged() == False:
return
self.DB.abort()
self.updateTable()
return
def showChanged(self):
"""Show list of changed records"""
if self.DB == None:
return
cframe = self.createChildFrame() #we should make sidepane a class
def close():
cframe.destroy()
self.resetSidePane(width=20)
return
def update():
items = self.DB.getChanged()
try:
self.chgeditemswin.destroy()
except:
pass
self.chgeditemswin = Pmw.ScrolledText(cframe,
labelpos = 'nw',
label_text='Changed Items Since last Save',
usehullsize = 1,
hull_width=self.MAIN_width,
hull_height=200)
self.chgeditemswin.pack(fill=BOTH, side=BOTTOM, padx=2,pady=2)
for i in items:
self.chgeditemswin.insert(END, i+'\n')
return
Button(cframe,text='Update',command=update).pack(fill=BOTH, side=BOTTOM, padx=2,pady=2)
Button(cframe,text='Close',command=close).pack(fill=BOTH, side=BOTTOM, padx=2,pady=2)
update()
return
def showUndoLog(self):
"""Show db commit log"""
if self.DB == None:
return
from datetime import datetime
self.changelogframe = self.createChildFrame()
cframe = self.changelogframe
def close():
cframe.destroy()
self.changelogframe = None
self.resetSidePane(width=20)
return
def errormessage():
tkMessageBox.showwarning("Undo Error",
'Could not undo this transaction.\n'
'Later commits probably altered the same data.')
def tryUndo():
i = self.ltable.get_selectedRecordNames()[0]
undoid = self.ulogs[i]['id']
comment = tkSimpleDialog.askstring('Log comment',
'Enter a comment for the log',
initialvalue='',
parent=self.main)
self.DB.undo(id=undoid, user=self.username,
note=comment,
callback=errormessage)
self.updateTable()
self.recordEvent('Undid transaction %s' %undoid)
self.updateChangelog()
if self.DB.supportsUndo():
Button(cframe,text='Update',command=self.updateChangelog).grid(row=3,column=0,columnspan=3,
sticky='news')
Button(cframe,text='Undo Selected',command=tryUndo).grid(row=4,column=0,columnspan=3,
sticky='news')
Button(cframe,text='Close',command=close).grid(row=5,column=0,columnspan=3,
sticky='news')
w='Use the undo feature carefully.\nYou should be aware of what data changes\n'
w+='are being undone. Provide a comment\nwith the undo to indicate what you reverted.'
Label(cframe,text=w,bg='lightyellow').grid(row=6,column=0,columnspan=3,sticky='news')
self.updateChangelog()
return
def updateChangelog(self):
if not hasattr(self, 'changelogframe') or self.changelogframe == None:
return
try:
self.changelog.destroy()
except:
pass
#show logs in a table
from Tables import TableCanvas
self.ulogs={}; l=self.DB.undoLog()
i=0
for r in range(len(l)-1,-1,-1):
l[r]['time'] = self.formatTime(l[r]['time'])
l[r]['user_name'] = l[r]['user_name'].strip('/ ')
self.ulogs[i] = l[r]; i+=1
self.ltable = TableCanvas(self.changelogframe, newdict=self.ulogs, namefield='rev',
cellwidth=50, cellbackgr='#E3F6CE',
thefont="Arial 10",rowheight=16, editable=False,
rowselectedcolor='yellow',reverseorder=1)
self.ltable.createTableFrame()
return
def formatTime(self, s):
x = datetime.fromtimestamp(s).strftime("%d-%m-%Y %H:%M:%S")
return x
def updateView(self):
"""Update the table if no changes pending"""
if self.DB==None:
return
if self.DB.isChanged() == True:
tkMessageBox.showinfo('Pending changes',
'You have pending changes and should save these first.',
parent=self.master)
return
self.DB.connection.sync()
self.updateTable()
self.updateStatusPane()
self.updateChangelog()
return
def get_field_label(self, field):
"""Return the field label"""
model = self.table.getModel()
return model.columnlabels[field]
def updateTable(self, protein=None, field_name=None):
"""To be called whenever a change to DB needs to be reflected in
table. """
if self.DB == None:
return
DB=self.DB
if hasattr(self, 'table') and self.table != None:
#Update the table model to reflect changes in DB and redraw the table
model = self.tablemodel
if self.table.rows < 10000:
sortcol = self.table.sortcol
else:
sortcol = None
model.update_reclist(DB.data.keys(), self.table.sortcol)
model.update_columnNames()
model.update_colors()
self.table.redrawTable()
return
def removeTable(self):
"""Remove the main table"""
self.closeSidePane()
if hasattr(self.table, 'filterframe') and self.table.filterframe != None:
self.table.filterframe.destroy()
self.table = None
try:
if self.tableframe != None:
self.tableframe.destroy()
self.tableframe = None
except:
print 'no tableframe'
return
def createTableView(self):
"""Create a new table view"""
if self.DB == None or not hasattr(self, 'table'):
return
DB=self.DB
newframe = Toplevel()
tableframe = Frame(newframe, bd=1,relief=RAISED)
tableframe.pack(fill=BOTH,expand=1)
table = PEATTable(tableframe, self.tablemodel, parentapp=self)
table.loadPrefs(self.preferences)
table.createTableFrame()
if self.preferences.get('thumbsize') == '':
self.preferences.set('thumbsize',200)
table.thumbsize = int(self.preferences.get('thumbsize'))
return
def pageView(self, event=None):
if self.pageviewvar.get() == False:
self.table.paging = 0
else:
self.table.paging = 1
self.table.redrawTable()
return
def resize(self,event):
"""Update the scrollbars"""
if event.widget==self.master:
if event.width>100 and event.height>100:
self.masterframe.configure(hull_width=event.width,
hull_height=event.height)
try:
self.tableframe.configure(width=event.width-50)
self.tableframe.configure(height=event.height-100)
except:
pass
try:
self.cv.configure(width=event.width-50)
self.cv.configure(height=event.height-100)
self.cv.configure(scrollregion=(0,0,self.canvas_x,self.canvas_y))
except:
pass
return
def showBlankCanvas(self, frame, row):
"""Create a blank canvas for introduction"""
width=900
height=750
cv=Canvas(frame,
width=self.canvas_x_size-self.canvas_border_x,
height=self.canvas_y-self.canvas_border_y,
scrollregion=(0,0,self.canvas_x_size,self.canvas_y),
bd=0, relief=GROOVE, bg='#9999CC',
highlightthickness=0)
cv.grid(row=row,column=0,columnspan=10,rowspan=1,
sticky='news',
pady=3,ipady=2)
#frame.add(cv)
cv.configure(width=width-50)
cv.configure(height=height-300)
return cv
def removeBlankCanvas(self):
self.cv.destroy()
return
def welcomeLogo(self):
"""Show the welcome logo and text"""
self.removeLogo()
self.logo=None
import tkFont
try:
logo = PEAT_images.logo_large_mono()
self.cv.create_image(self.canvas_x_size/2,self.canvas_y_size/2,image=logo)
self.cv.image=logo
except:
import tkFont
font=tkFont.Font(family='Arial',size=38)
self.cv.create_text(self.canvas_x_size/2,self.canvas_y_size/2-50,
text='P E A T',
font=font,anchor='n',
fill='black',
tag='logo')
text=['Welcome to Protein Engineering and Analysis Tool (PEAT)',
'Authors: <NAME>, <NAME> and <NAME>',
'Copyright <NAME>, University College Dublin 2003-, All rights reserved']
self.y=20
self.x=380
ifont=tkFont.Font(family='Arial',size=12)
for line in text:
self.cv.create_text(self.x,self.y,text=line,fill='white',font=ifont, tag='logo')
self.y=self.y+15
# Remove the logo after some time
#self.wait=self.after(4000, self.show_startinstructions)
return
def removeLogo(self,event=None):
"""Remove the logo and welcome message."""
if self.cv == None:
return
self.cv.delete('logo')
self.cv.image=None
self.master.unbind('<KeyPress>')
self.master.unbind('<Button-1>')
try:
self.after_cancel(self.wait)
except:
pass
return
def onlineDocumentation(self,event=None):
"""Open the online documentation"""
import webbrowser
link='http://enzyme.ucd.ie/main/index.php/PEAT_DB'
webbrowser.open(link,autoraise=1)
return
def gotoBugzilla(self, event=None):
"""Open bugzilla site"""
import webbrowser
link='http://peat.ucd.ie/bugzilla/'
webbrowser.open(link,autoraise=1)
return
def aboutPEAT(self):
self.ab_win=Toplevel()
self.ab_win.geometry('+100+350')
self.ab_win.title('About PEAT')
logo = PEAT_images.logo()
label = Label(self.ab_win,image=logo)
label.image = logo
label.grid(row=0,column=0,sticky='news',padx=4,pady=4)
text=['P E A T ','Protein Engineering and Analysis Tool',
'Version 2 ','A database tool for analysing the effect of point mutations',
'on the catalytic and structural characteristics of proteins and enzymes',
'Authors: <NAME>, <NAME> and <NAME>',
'University College Dublin','(C) Copyright 2003- <NAME> All rights reserved']
row=1
for line in text:
tmp=Label(self.ab_win,text=line)
tmp.grid(row=row,column=0,sticky='news',padx=4)
row=row+1
return
def recordEvent(self,message='Timestamp'):
"""Display a message in the eventlog"""
import time
tid=time.strftime('%H:%M:%S %d/%m',time.localtime(time.time()))
self.eventlog.insert('0.0','%s: %s\n' %(tid,message))
self.master.update_idletasks()
return
def findValue(self):
self.currenttable.findValue()
return
def dofindText(self, event=None):
"""Find the text in the table"""
if not hasattr(self,'currenttable'):
return
import string
if string.strip(self.findtext.get())=='':
return
searchstring=self.findtext.get()
if self.currenttable!=None:
self.currenttable.findValue(searchstring)
return
def dofindAgain(self, event=None):
"""Find again"""
if not hasattr(self,'currenttable'):
return
searchstring=self.findtext.get()
if self.currenttable!=None:
self.currenttable.findValue(searchstring, findagain=1)
return
def loginSetup(self):
"""Login setttings for authentication to ZEO"""
mpDlg = MultipleValDialog(title='Login settings',
initialvalues=(self.username,
self.password),
labels=('user name','password'),
types=('string','password'),
parent=self.main)
if mpDlg.result == True:
self.username = mpDlg.results[0]
self.password = mpDlg.results[1]
self.preferences.set('username',self.username)
self.preferences.set('password',<PASSWORD>)
else:
return
return
def showSettings(self):
"""Settings dialog"""
import Prefs
if self.showDialogsinSidePane == True and self.DB!=None:
self.resetSidePane(width=320)
X=Prefs.preferences_dialog(parent=self,parentframe=self.sidepane,
subset='PEAT',callback=self.getPrefs)
else:
X=Prefs.preferences_dialog(parent=self,subset='PEAT',callback=self.getPrefs)
return
def hideFieldsDialog(self):
"""Allow fields to be hidden"""
fr=Toplevel()
fr.geometry('300x350+300+200')
fr.title('Show/hide fields')
fr.grab_set()
fr.transient()
userfields = self.DB.meta.userfields
def apply():
show=list(checkbuttons.getcurselection())
for f in userfields:
if '_' in f: n=f.replace('_',' ')
else: n=f
print f,n, show
if n not in show:
self.DB.showField(f, False)
else:
self.DB.showField(f)
self.updateTable()
return
def close():
fr.destroy()
Button(fr, text='Apply', command=apply).grid(row=0,column=0,padx=2,pady=2)
Button(fr, text='Close', command=close).grid(row=1,column=0,padx=2,pady=2)
cbfr = Pmw.ScrolledFrame(fr)
cbfr.grid(row=2,column=0,padx=2,pady=2)
checkbuttons = Pmw.RadioSelect(cbfr.interior(),
buttontype = 'checkbutton',
orient = 'vertical',
labelpos = 'n',
label_text = 'Deselect fields to hide them:')
for f in userfields:
if '_' in f:
n=f.replace('_',' ')
else: n=f
checkbuttons.add(n)
if userfields[f]['show'] == True:
checkbuttons.invoke(n)
checkbuttons.pack(fill=BOTH,expand=1,padx=2,pady=2)
fr.columnconfigure(0,weight=1)
fr.rowconfigure(2,weight=1)
return
def quit(self,event=None):
"""Close DB and quit"""
answer = self.closeDB()
if answer == 'cancel':
return
#check is plugin running?
#if hasattr(self,'runningplugin'):
# print self.runningplugin
self.closeSidePane()
self.main.destroy()
self.preferences.save_prefs()
if not self.parent:
sys.exit()
return
def showtablePrefs(self):
if self.table:
self.table.showtablePrefs()
return
#
#move these to an actions class..?
#calls to updateTable could be replaced with an updateCell function
#
def importCSV(self):
"""Import CSV files dialog"""
if self.DB == None:
return
from PEATDB.IO import Importer
IM = Importer()
result = IM.getLines()
if result==None: return
IM.showimportDialog()
self.wait_window(IM.ask_csv)
#also get field for unique key
keys = IM.data[0].keys()
mpDlg = MultipleValDialog(title='Import Helper',
initialvalues=([keys]),
labels=(['Choose key field:']),
types=(['list']),
parent=self.main)
if mpDlg.result == True:
key = mpDlg.results[0]
else:
return
self.DB.importDict(importdata=IM.data, namefield=key)
self.updateTable()
return
def importSequences(self):
"""Import AA sequences"""
return
def importMutants(self):
"""Specialised import method for adding mutant data from a csv
file. Requires a wt structure to create mutant AA sequences"""
def getpdb():
return
def doimport():
#self.DB.importDict(importdata=IM.data, namefield='name')
#use mutations field to derive aa seq..
return
fr=Frame()
win=Toplevel()
Label(win, text='This dialog allows you to import a set of mutant data\n'
'along with some associated experimental values. You will\n'
'need to provide the structure for creating an AA\n'
'sequence for each mutant. Mutation codes should be of\n'
'the form chain:residuenumber:code',bg='#ccFFFF').pack(fill=BOTH,expand=1)
Button(win,text='Set a wt PDB',command=getpdb).pack(fill=BOTH,expand=1)
fr=Frame(win); fr.pack(fill=X,expand=1)
self.useref = IntVar()
Label(fr,text='Use current reference protein').pack(side=LEFT)
Checkbutton(fr,variable=self.useref).pack(side=LEFT)
self.set_centered_geometry(self.main,win)
Button(win,text='Continue',command=doimport).pack(fill=BOTH,expand=1)
return
def importFileset(self):
"""Use filehandler class to import a set of external files"""
if self.DB == None:
return
fh = FileHandler(parent=self)
or when the upstream device is performing a
RSS for connection based distribution.
type: str
dnsprofilename:
description:
- >-
Name of the DNS profile to be associated with the VServer. DNS profile properties will be applied to
transactions processed by a VServer. This parameter is valid only for DNS and DNS-TCP VServers.
- "Minimum length = 1"
- "Maximum length = 127"
type: str
lbprofilename:
description:
- "Name of the LB profile which is associated to the vserver."
type: str
redirectfromport:
description:
- "Port number for the virtual server, from which we absorb the traffic for http redirect."
- "Minimum value = C(1)"
- "Range 1 - 65535"
- "* in CLI is represented as 65535 in NITRO API"
type: int
httpsredirecturl:
description:
- "URL to which to redirect traffic if the traffic is recieved from redirect port."
type: str
retainconnectionsoncluster:
description:
- >-
This option enables you to retain existing connections on a node joining a Cluster system or when a node
is being configured for passive timeout. By default, this option is disabled.
type: bool
adfsproxyprofile:
description:
- "Name of the adfsProxy profile to be used to support ADFSPIP protocol for ADFS servers."
type: str
weight:
description:
- "Weight to assign to the specified service."
- "Minimum value = C(1)"
- "Maximum value = C(100)"
type: str
servicename:
description:
- "Service to bind to the virtual server."
- "Minimum length = 1"
type: str
redirurlflags:
description:
- "The redirect URL to be unset."
type: bool
disabled:
description:
- When set to C(true) the server state will be set to C(disabled).
- When set to C(false) the server state will be set to C(enabled).
type: bool
default: false
ssl_certkey:
type: str
description:
- The name of the ssl certificate that is bound to this service.
- The ssl certificate must already exist.
- Creating the certificate can be done with the M(citrix_adc_ssl_certkey) module.
- This option is only applicable only when C(servicetype) is C(SSL).
servicebindings:
type: list
elements: dict
description:
- List of services along with the weights that are load balanced.
- The following suboptions are available.
suboptions:
servicename:
description:
- "Service to bind to the virtual server."
- "Minimum length = 1"
type: str
weight:
description:
- "Weight to assign to the specified service."
- "Minimum value = C(1)"
- "Maximum value = C(100)"
type: str
servicegroupbindings:
type: list
elements: dict
description:
- List of services along with the weights that are load balanced.
- The following suboptions are available.
suboptions:
servicegroupname:
description:
- "The service group name bound to the selected load balancing virtual server."
type: str
weight:
description:
- >-
Integer specifying the weight of the service. A larger number specifies a greater weight. Defines the
capacity of the service relative to the other services in the load balancing configuration. Determines the
priority given to the service in load balancing decisions.
- "Minimum value = C(1)"
- "Maximum value = C(100)"
type: str
appfw_policybindings:
type: list
elements: dict
description:
- List of services along with the weights that are load balanced.
- The following suboptions are available.
suboptions:
policyname:
description:
- "Name of the policy bound to the LB vserver."
type: str
priority:
description:
- "Priority."
type: str
gotopriorityexpression:
description:
- >-
Expression specifying the priority of the next policy which will get evaluated if the current policy
evaluates to TRUE.
type: str
bindpoint:
choices:
- 'REQUEST'
- 'RESPONSE'
description:
- "The bindpoint to which the policy is bound."
type: str
invoke:
description:
- "Invoke policies bound to a virtual server or policy label."
type: bool
labeltype:
choices:
- 'reqvserver'
- 'resvserver'
- 'policylabel'
description:
- "The invocation type."
type: str
labelname:
description:
- "Name of the label invoked."
type: str
extends_documentation_fragment: citrix.adc.citrixadc
'''
EXAMPLES = '''
# Citrix ADC services service-http-1, service-http-2 must have been already created with the citrix_adc_service module
- name: Create a load balancing vserver bound to services
delegate_to: localhost
citrix_adc_lb_vserver:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
validate_certs: no
state: present
name: lb_vserver_1
servicetype: HTTP
timeout: 12
ipv46: 172.16.17.32
port: 80
servicebindings:
- servicename: service-http-1
weight: 80
- servicename: service-http-2
weight: 20
# Service group service-group-1 must have been already created with the citrix_adc_servicegroup module
- name: Create load balancing vserver bound to servicegroup
delegate_to: localhost
citrix_adc_lb_vserver:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: <PASSWORD>
validate_certs: no
state: present
name: lb_vserver_2
servicetype: HTTP
ipv46: 172.16.31.10
port: 80
timeout: 10
servicegroupbindings:
- servicegroupname: service-group-1
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: ['message 1', 'message 2']
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: { 'clttimeout': 'difference. ours: (float) 10.0 other: (float) 20.0' }
'''
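# Illustrative sketch (standalone, hypothetical values) of how the 'transforms'
# table defined further down maps playbook values onto the strings NITRO expects;
# the helper below is not used by the module itself.
def _example_apply_transforms(params):
    transforms = {
        'rtspnat': lambda v: 'ON' if v else 'OFF',
        'cacheable': lambda v: 'YES' if v else 'NO',
        'clttimeout': str,
    }
    configured = {}
    for key, value in params.items():
        if value is None:
            continue  # null playbook values are skipped, as in calculate_configured_lbvserver()
        transform = transforms.get(key)
        configured[key] = transform(value) if transform is not None else value
    return configured

# _example_apply_transforms({'rtspnat': True, 'cacheable': False, 'clttimeout': 10})
# -> {'rtspnat': 'ON', 'cacheable': 'NO', 'clttimeout': '10'}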
import copy
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.citrix.adc.plugins.module_utils.citrix_adc import (
NitroResourceConfig,
NitroException,
netscaler_common_arguments,
log,
loglines,
NitroAPIFetcher
)
class ModuleExecutor(object):
def __init__(self, module):
self.module = module
self.fetcher = NitroAPIFetcher(self.module)
self.main_nitro_class = 'lbvserver'
# Dictionary containing attribute information
# for each NITRO object utilized by this module
self.attribute_config = {
'lbvserver': {
'attributes_list': [
'name',
'servicetype',
'ipv46',
'ippattern',
'ipmask',
'port',
'ipset',
'range',
'persistencetype',
'timeout',
'persistencebackup',
'backuppersistencetimeout',
'lbmethod',
'hashlength',
'netmask',
'v6netmasklen',
'backuplbmethod',
'cookiename',
'rule',
'listenpolicy',
'listenpriority',
'resrule',
'persistmask',
'v6persistmasklen',
'pq',
'sc',
'rtspnat',
'm',
'tosid',
'datalength',
'dataoffset',
'sessionless',
'trofspersistence',
'connfailover',
'redirurl',
'cacheable',
'clttimeout',
'somethod',
'sopersistence',
'sopersistencetimeout',
'healththreshold',
'sothreshold',
'sobackupaction',
'redirectportrewrite',
'downstateflush',
'backupvserver',
'disableprimaryondown',
'insertvserveripport',
'vipheader',
'authenticationhost',
'authentication',
'authn401',
'authnvsname',
'push',
'pushvserver',
'pushlabel',
'pushmulticlients',
'tcpprofilename',
'httpprofilename',
'dbprofilename',
'comment',
'l2conn',
'oracleserverversion',
'mssqlserverversion',
'mysqlprotocolversion',
'mysqlserverversion',
'mysqlcharacterset',
'mysqlservercapabilities',
'appflowlog',
'netprofile',
'icmpvsrresponse',
'rhistate',
'newservicerequest',
'newservicerequestunit',
'newservicerequestincrementinterval',
'minautoscalemembers',
'maxautoscalemembers',
'persistavpno',
'skippersistency',
'td',
'authnprofile',
'macmoderetainvlan',
'dbslb',
'dns64',
'bypassaaaa',
'recursionavailable',
'processlocal',
'dnsprofilename',
'lbprofilename',
'redirectfromport',
'httpsredirecturl',
'retainconnectionsoncluster',
'adfsproxyprofile',
'weight',
'servicename',
'redirurlflags',
],
'transforms': {
'pq': lambda v: 'ON' if v else 'OFF',
'sc': lambda v: 'ON' if v else 'OFF',
'rtspnat': lambda v: 'ON' if v else 'OFF',
'sessionless': lambda v: v.upper(),
'trofspersistence': lambda v: v.upper(),
'cacheable': lambda v: 'YES' if v else 'NO',
'sopersistence': lambda v: v.upper(),
'redirectportrewrite': lambda v: v.upper(),
'downstateflush': lambda v: v.upper(),
'disableprimaryondown': lambda v: v.upper(),
'authentication': lambda v: 'ON' if v else 'OFF',
'authn401': lambda v: 'ON' if v else 'OFF',
'push': lambda v: v.upper(),
'pushmulticlients': lambda v: 'YES' if v else 'NO',
'l2conn': lambda v: 'ON' if v else 'OFF',
'appflowlog': lambda v: v.upper(),
'macmoderetainvlan': lambda v: v.upper(),
'dbslb': lambda v: v.upper(),
'dns64': lambda v: v.upper(),
'bypassaaaa': lambda v: 'YES' if v else 'NO',
'recursionavailable': lambda v: 'YES' if v else 'NO',
'processlocal': lambda v: v.upper(),
'retainconnectionsoncluster': lambda v: 'YES' if v else 'NO',
'clttimeout': str,
},
'get_id_attributes': [
'name',
],
'delete_id_attributes': [
'name',
],
'non_updateable_attributes': [
'servicetype',
'port',
'range',
'state',
'td',
'redirurlflags',
'newname',
],
},
'servicebindings': {
'attributes_list': [
'servicename',
'weight',
],
'transforms': {
},
'get_id_attributes': [
'name',
],
'delete_id_attributes': [
'servicename',
]
},
'servicegroupbindings': {
'attributes_list': [
'servicegroupname',
'weight',
],
'transforms': {
},
'get_id_attributes': [
'name',
],
'delete_id_attributes': [
'servicegroupname',
]
},
'appfw_policybindings': {
'attributes_list': [
'policyname',
'priority',
'gotopriorityexpression',
'bindpoint',
'invoke',
'labeltype',
'labelname',
],
'transforms': {
},
'get_id_attributes': [
'name',
],
'delete_id_attributes': [
'policyname',
'priority',
'bindpoint',
'name',
]
},
}
self.module_result = dict(
changed=False,
failed=False,
loglines=loglines,
)
self.prepared_list = []
# Calculate functions will apply transforms to values read from playbook
self.calculate_configured_lbvserver()
self.calculate_configured_service_bindings()
self.calculate_configured_servicegroup_bindings()
self.calculate_configured_appfwpolicy_bindings()
def calculate_configured_lbvserver(self):
log('ModuleExecutor.calculate_configured_lbvserver()')
self.configured_lbvserver = {}
for attribute in self.attribute_config['lbvserver']['attributes_list']:
value = self.module.params.get(attribute)
# Skip null values
if value is None:
continue
transform = self.attribute_config['lbvserver']['transforms'].get(attribute)
if transform is not None:
value = transform(value)
self.configured_lbvserver[attribute] = value
log('calculated configured lbvserver %s' % self.configured_lbvserver)
def calculate_configured_service_bindings(self):
log('ModuleExecutor.calculate_configured_service_bindings()')
self.configured_service_bindings = []
if self.module.params.get('servicebindings') is None:
return
for service in self.module.params['servicebindings']:
binding = {}
binding['name'] = self.module.params['name']
for attribute in self.attribute_config['servicebindings']['attributes_list']:
# Disregard null values
value = service.get(attribute)
if value is None:
continue
transform = self.attribute_config['servicebindings']['transforms'].get(attribute)
if transform is not None:
value = transform(value)
binding[attribute] = value
self.configured_service_bindings.append(binding)
log('calculated configured service bindings %s' % self.configured_service_bindings)
import asyncio, logging
from ..Exceptions import ClientExceptions
logger = logging.getLogger("Decorator")
class BASE_DECORATOR(object):
def __init__(self, parent:object, *args, **kwargs):
self.parent = parent
self.args = args
self.kwargs = kwargs
def __call__(self, func):
self.func = func
if self.validate():
setattr(self.parent, str(self), func)
logger.info("%s created on parent(server) is on %s" %(self, self.parent))
else:
logger.warning("%s is not valid so it hasn't added!" %self)
def validate(self) -> bool:
"""
[:validator:]
checks that the given function is coroutine
"""
return asyncio.iscoroutinefunction(self.func)
def __str__(self):
return self.__class__.__name__
class DecoratorUtils:
async def call(self, name:str, *args, **kwargs) -> None:
"""
[:Decorator util:]
the correct way to call decorated functions (not Requests)
[:params:]
name - decorator name like ('ready', 'close', etc...)
*args - parse args to the function
**kwargs - parse keyword arguments to the function
"""
logger.debug("Calling decorated function %s" %name)
if hasattr(self, name):
f = getattr(self, name)
if self.is_okay_function(f):
return (await f(*args, **kwargs))
logger.warning("Failed calling %s" %name)
return -1 # means there is no such decorated function
def is_okay_function(self, func) -> bool:
"""
[:Decorator util:]
called by the call util function to valid the function
[:params:]
func - function to valid
"""
try:
return asyncio.iscoroutinefunction(func)
except: return False # may raise error because most of the time
# the parameter is a class and the "iscoroutinefunction" checks for functions only
class ServerDecorators(DecoratorUtils):
"""
[:decorator(s):]
server decorators that you can access with the server object
do not import this file and start using it!!
[:to recv params:]
server - the running server object
[:example:]
@Server.ready()
async def foo(server):
print("Server is ready and running on (ip %s | port %d)" %(server.ip, server.port))
[:NOTE:]
do not import this file and start using it directly; it won't work.
The server object is a subclass of this class.
Make sure your method gets a server as the first parameter.
"""
@classmethod
class ready(BASE_DECORATOR):
"""
[:decorated:]
called when servers is started running
[:params:]
server - server object
"""
@classmethod
class Event(BASE_DECORATOR):
"""
[:decorator:]
if you want a function to appear every x time
use event decorator
[:to recv params:]
*args/**kwargs - all you enter in **kwargs and *args
should be recved in your function event
[:params:]
timer - when to appear after given time
*args - args to parse to your event
**kwargs - keyword args to parse to your event
[:example:]
@Server.Event(3, name="Daniel")
async def foo(name):
print("got name %s" %name)
"""
def __init__(self, parent:object, timer:int, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.timer = self.Timer(timer, *self.args, **self.kwargs)
self.loop = asyncio.get_event_loop()
self.future = None
self._create_future()
# just getting the running event and creating future
logger.info("Event %s created" % self)
def _create_and_run(self, *args, **kwargs):
"""
[:Event func safe:]
creates a future and run it after
this called when first created future finished and needs to create himself again
"""
_ = self._create_future()
if _ == 0: return
self.loop.create_task(self.start())
def _create_future(self, *args, **kwargs):
"""
[:Event func safe:]
this creates a new future and adds him a callback
and updating the Timer future
"""
if self.future is not None:
if self.future.cancelled(): # if you cancelled it wont come back
logger.info("%s cancelled aborting..." %self.timer)
return 0
self.future = self.loop.create_future()
self.future.add_done_callback(self._create_and_run)
self.timer.future = self.future
# the future refreshing himself
logger.debug("%s created future for the Event via callback" %self)
def __call__(self, func):
self.func = func
if self.validate():
self.timer.func = self.func
self.parent.events[str(self)] = self
async def start(self):
await self
def __await__(self): # awating the appended event starting it
return self.timer.start().__await__()
def __str__(self):
return 'undefined' if not hasattr(self, 'func') else self.func.__name__
class Timer(object):
"""
[:Event obj safe:] (!not to use directliy!)
this is sort of proxy to await for the given time
and activate the function after awaited
[:params:]
time - timer
*args - args to parse to the function
**kwargs - keyword args to parse to the function
"""
def __init__(self, time:int, *args, **kwargs):
self.time = time
self.future = None # added manually
self.args = args
self.kwargs = kwargs
async def start(self):
"""
[:Timer func:]
starting the sleep time and when finished
returnning the requested function
"""
await asyncio.sleep(self.time)
await self.func(*self.args, **self.kwargs)
self.future.set_result(1)
logger.debug("Event %s finished after waitng %ds" %(self.func.__name__, self.time))
@classmethod
class Request(object):
"""
[:decorator:]
to add methods for your server use this decorator every added function appended to the server as a Func object
make sure your function get server and client as paramters
[:to recv params:]
server - the running server object
client - client who requested the added request
custom - get what you want to get for this function
look at the second example
[:params:]
superuser(default:False) - if True the request be allowed only for superusers
allowed_groups(default:['*']) - list of group names; '*' means everyone. Only clients in one of these groups will be allowed to use the request
[:example:]
1.================================
@Server.Request()
async def foo(server, client):
print("method foo called from %d" %client.id)
2.================================
if you want the function to get parameters that the client needs to supply,
just add them normally
@Server.Request()
async def foo(server, client, foo, oof=False):
print("foo called (foo=%s, oof=%s)" %(foo, oof))
2.1================================
to catch your request's errors use
@Server.Request.foo.error
async def oof(foo, client, error):
print("my %s method raised %s" %(foo, error))
# the foo parameter is the Func object of the current function
"""
def __init__(self, parent:object, *, name=None, superuser:bool=False, allowed_groups:list=['*']):
self.server = parent
self.name = name
self.for_super = superuser
self.allowed_groups = allowed_groups
# this request is only allowed for clients in the given group names
def __call__(self, func):
self.func = self.Func(
name=self.name,
func=func,
superusers=self.for_super,
allowed_groups=self.allowed_groups
)
if self.func.valid():
setattr(self.__class__, str(self.func), self.func)
logger.info("%s added to the parent(server) object" %self.func)
else:
logger.warning("Failed adding %s to the parent(server)" %self.func)
@classmethod
async def remove(cls, func_name:str) -> None:
"""
[:Request classmethod func:]
deletes the given function name
[:params:]
func_name - function name to delete
[:example:]
await Server.Request.remove('foo')
"""
if hasattr(cls, func_name):
delattr(cls, func_name)
logger.warning("Request %s deleted from the parent(server)" %func_name)
class Func(object):
"""
[:Request obj:]
for every added request the Request decorator build a new
class from here to check validation everytime and more
[:params:]
name - request custome name (if none given the decorated function will be the name)
func - the added func as request
superusers - parsed from the Request object
allowed_groups - parsed from the Request object
"""
def __init__(self, name, func, superusers:bool, allowed_groups:list):
self.func = func
self.for_super = superusers
self.allowed_groups = allowed_groups
self.name = self.get_name(name)
self.server = None
self.client = None
self.args = ()
self.kwargs = {}
@property
def __doc__(self):
return self.func.__doc__
def get_name(self, name):
"""
[:validator/func:]
getting the given name if None (as default) the request
name will be the decorated function name
[:param:]
name - passed name to the Request decorator
"""
if name == None:
return self.func.__name__
ls = name.split(' ')
return '_'.join(ls) # removing the spaces
def valid(self):
return asyncio.iscoroutinefunction(self.func)
def check_permissions(self, client:object) -> bool:
"""
[:validator:]
checks if the function caller has access to call this function
[:params:]
client - client to check if he has access
"""
if client.is_superuser: return True
# superusers have access to everything in the server
if "*" not in self.allowed_groups:
return any(str(group) in self.allowed_groups for group in client.groups)
if self.for_super: return client.is_superuser
return True
def __call__(self, server:object, client:object, **kwargs):
self.server = server
self.client = client
self.kwargs = kwargs
client_has_access = self.check_permissions(self.client)
if client_has_access:
return self
logger.warning("Client %s id: %d tried to use method with no permissions to use it" %(client.addr, client.id))
return (self._call_error())(server=self.server, client=self.client, error=ClientExceptions.ClientDoesNotHasAccess)
def __await__(self):
try:
return self.func(
server=self.server,
client=self.client,
*self.args, **self.kwargs
).__await__()
except Exception as e:
return self._call_error()(
server=self.server,
client=self.client,
error=e
).__await__()
def __str__(self):
return self.name
def _call_error(self):
"""
[:Request Func func:]
checks if error_fetcher added
(error_fetcher - added via @Server.Request.foo.error)
"""
if hasattr(self, 'error_fetcher'):
return getattr(self, 'error_fetcher')
logger.warning('%s tried to call the error_fetcher but not found any' %self)
return self._anon
async def _anon(self, *args, **kwargs): pass  # fallback error handler; must accept the server/client/error kwargs
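# The ready/Event/Request decorators above are plain classes wrapped in @classmethod,
# so accessing them through the server class hands that class in as `parent`.  The
# self-contained sketch below demonstrates only that binding trick; _MiniServer and
# announce are made-up names and, unlike BASE_DECORATOR, the decorator here returns
# the function so the example stays easy to follow.
import asyncio

class _MiniDeco:
    def __init__(self, parent):
        self.parent = parent
    def __call__(self, func):
        # store the coroutine on the parent under the decorator's own name,
        # mirroring setattr(self.parent, str(self), func) in BASE_DECORATOR
        setattr(self.parent, type(self).__name__, func)
        return func

class _MiniServer:
    @classmethod
    class ready(_MiniDeco):
        pass

@_MiniServer.ready()          # _MiniServer.ready() -> ready(_MiniServer): the owning class becomes `parent`
async def announce(server):
    print("server %r is ready" % server)

if __name__ == "__main__":
    asyncio.run(_MiniServer.ready(_MiniServer))   # the registered coroutine has replaced the decorator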
'>' + 'ALIGNMENT COVERAGE (HIT SEQ)' + '</font></td>']
html_report_lines += ['<td style="border-right:solid 2px ' + border_head_color + '; border-bottom:solid 2px ' +
border_head_color + '"><font color="' + text_color + '" size=' + text_fontsize + '>' + 'GENE ID' + '</font></td>']
html_report_lines += ['<td style="border-right:solid 2px ' + border_head_color + '; border-bottom:solid 2px ' +
border_head_color + '"><font color="' + text_color + '" size=' + text_fontsize + '>' + 'FUNCTION' + '</font></td>']
html_report_lines += ['<td style="border-right:solid 2px ' + border_head_color + '; border-bottom:solid 2px ' +
border_head_color + '"><font color="' + text_color + '" size=' + text_fontsize + '>' + 'GENOME' + '</font></td>']
# html_report_lines += ['<td align=center style="border-right:solid 2px '+border_head_color+'; border-bottom:solid 2px '+border_head_color+'"><font color="'+text_color+'" size='+text_fontsize+'>'+'IDENT'+'%</font></td>']
html_report_lines += ['<td align=center style="border-right:solid 2px ' + border_head_color + '; border-bottom:solid 2px ' +
border_head_color + '"><font color="' + text_color + '" size=' + text_fontsize + '>' + 'ALN_LEN' + '</font></td>']
html_report_lines += ['<td align=center style="border-right:solid 2px ' + border_head_color + '; border-bottom:solid 2px ' +
border_head_color + '"><font color="' + text_color + '" size=' + text_fontsize + '>' + 'E-VALUE' + '</font></td>']
html_report_lines += ['<td align=center style="border-right:solid 2px ' + border_head_color + '; border-bottom:solid 2px ' +
border_head_color + '"><font color="' + text_color + '" size=' + text_fontsize + '>' + 'BIT SCORE' + '</font></td>']
html_report_lines += ['<td align=center style="border-right:solid 2px ' + border_head_color + '; border-bottom:solid 2px ' +
border_head_color + '"><font color="' + text_color + '" size=' + text_fontsize + '>' + '<nobr>H_BEG-H_END</nobr>' + '</font></td>']
# html_report_lines += ['<td align=center style="border-right:solid 2px '+border_head_color+'; border-bottom:solid 2px '+border_head_color+'"><font color="'+text_color+'" size='+text_fontsize+'>'+'MIS MATCH'+'</font></td>']
# html_report_lines += ['<td align=center style="border-right:solid 2px '+border_head_color+'; border-bottom:solid 2px '+border_head_color+'"><font color="'+text_color+'" size='+text_fontsize+'>'+'GAP OPEN'+'</font></td>']
html_report_lines += ['</tr>']
for hmm_i, hmm_id in enumerate(input_HMM_ids[hmm_group]):
html_report_lines += ['<tr><td colspan=table_col_width>Hits to <b>' +
str(hmm_id) + '</b></td></tr>']
if total_hit_cnts[hmm_id] == 0 or html_report_chunks[hmm_i] == None or html_report_chunks[hmm_i] == '':
html_report_lines += ['<tr><td colspan=table_col_width><blockquote><i>no hits found</i></td></tr>']
else:
#html_report_lines.extend(html_report_chunks[hmm_i])
html_report_lines += [html_report_chunks[hmm_i]]
html_report_lines += ['<tr><td colspan=table_col_width>' + sp + '</td></tr>']
html_report_lines += ['</table>']
html_report_lines += ['</body>']
html_report_lines += ['</html>']
# write html to file
html_path = html_search_path
html_report_str = "\n".join(html_report_lines)
with open(html_path, 'w', 0) as html_handle:
html_handle.write(html_report_str)
#### Build Profile output report
##
self.log(console, "BUILDING PROFILE REPORT ") # DEBUG
if len(invalid_msgs) == 0 and many_type_name == 'GenomeSet':
# calculate table
#
cats = all_HMM_ids_order
table_data = dict()
INSANE_VALUE = 10000000000000000
overall_low_val = INSANE_VALUE
overall_high_val = -INSANE_VALUE
cat_seen = dict()
for cat in cats:
cat_seen[cat] = False
# count raw
for genome_ref in genome_refs:
if genome_ref not in table_data:
table_data[genome_ref] = dict()
for cat in cats:
table_data[genome_ref][cat] = 0
if genome_ref not in hit_cnt_by_genome_and_model:
continue
for cat in cats:
if cat in hit_cnt_by_genome_and_model[genome_ref] and \
hit_cnt_by_genome_and_model[genome_ref][cat] != 0:
table_data[genome_ref][cat] = hit_cnt_by_genome_and_model[genome_ref][cat]
cat_seen[cat] = True
# determine high and low val
for genome_ref in genome_refs:
for cat in cats:
val = table_data[genome_ref][cat]
if val == 0:
continue
#self.log (console, "HIGH VAL SCAN CAT: '"+cat+"' VAL: '"+str(val)+"'") # DEBUG
if val > overall_high_val:
overall_high_val = val
if val < overall_low_val:
overall_low_val = val
if overall_high_val == -INSANE_VALUE:
raise ValueError("unable to find any counts")
# build html report
sp = ' '
text_color = "#606060"
text_color_2 = "#606060"
head_color_1 = "#eeeeee"
head_color_2 = "#eeeeee"
border_color = "#cccccc"
border_cat_color = "#ffccff"
#graph_color = "lightblue"
#graph_width = 100
#graph_char = "."
graph_char = sp
#color_list = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e']
color_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd']
max_color = len(color_list) - 1
cat_disp_trunc_len = 40
cell_width = '10px'
if len(genome_refs) > 20:
graph_gen_fontsize = "1"
# elif len(genome_refs) > 10:
# graph_gen_fontsize = "2"
else:
# graph_gen_fontsize = "3"
graph_gen_fontsize = "2"
if len(cats) > 20:
graph_cat_fontsize = "1"
# elif len(cats) > 5:
# graph_cat_fontsize = "2"
else:
# graph_cat_fontsize = "3"
graph_cat_fontsize = "2"
if int(graph_cat_fontsize) < int(graph_gen_fontsize):
cell_fontsize = graph_gen_fontsize = graph_cat_fontsize
else:
cell_fontsize = graph_cat_fontsize = graph_gen_fontsize
graph_padding = "5"
graph_spacing = "3"
#border = "1"
border = "0"
#row_spacing = "-2"
num_rows = len(genome_refs)
show_groups = False
show_blanks = False
if 'show_blanks' in params and int(params['show_blanks']) == 1:
show_blanks = True
# build html buffer
html_report_lines = []
html_report_lines += ['<html>']
html_report_lines += ['<head>']
html_report_lines += ['<title>KBase HMMER Custom Model Profile</title>']
html_report_lines += ['<style>']
html_report_lines += [
".vertical-text {\ndisplay: inline-block;\noverflow: hidden;\nwidth: 0.65em;\n}\n.vertical-text__inner {\ndisplay: inline-block;\nwhite-space: nowrap;\nline-height: 1.1;\ntransform: translate(0,100%) rotate(-90deg);\ntransform-origin: 0 0;\n}\n.vertical-text__inner:after {\ncontent: \"\";\ndisplay: block;\nmargin: 0.0em 0 100%;\n}"]
html_report_lines += [
".vertical-text_title {\ndisplay: inline-block;\noverflow: hidden;\nwidth: 1.0em;\n}\n.vertical-text__inner_title {\ndisplay: inline-block;\nwhite-space: nowrap;\nline-height: 1.0;\ntransform: translate(0,100%) rotate(-90deg);\ntransform-origin: 0 0;\n}\n.vertical-text__inner_title:after {\ncontent: \"\";\ndisplay: block;\nmargin: 0.0em 0 100%;\n}"]
html_report_lines += ['</style>']
html_report_lines += ['</head>']
html_report_lines += ['<body bgcolor="white">']
html_report_lines += ['<font color="' + header_tab_color + '" size=' +
header_tab_fontsize + '><b>TABULAR PROFILE</b></font> | ']
for this_hmm_group_i, this_hmm_group in enumerate(hmm_groups_used):
disp_hmm_group = this_hmm_group[0].upper() + this_hmm_group[1:]
this_html_search_file = search_tool_name + '_Search-' + \
str(this_hmm_group_i) + '-' + str(this_hmm_group) + '.html'
html_report_lines += [' <a href="' + this_html_search_file + '"><font color="' + header_tab_color +
'" size=' + header_tab_fontsize + '>' + str(disp_hmm_group) + ' HITS</font></a> ']
if this_hmm_group_i < len(hmm_groups_used) - 1:
html_report_lines += [' | ']
html_report_lines += ['<p>']
# genomes as rows
if 'vertical' in params and int(params['vertical']) == 1:
# table header
html_report_lines += ['<table cellpadding=' + graph_padding +
' cellspacing=' + graph_spacing + ' border=' + border + '>']
corner_rowspan = "1"
label = ''
html_report_lines += ['<tr>']
html_report_lines += ['<td valign=bottom align=right rowspan=' + corner_rowspan +
'><div class="vertical-text_title"><div class="vertical-text__inner_title"><font color="' + text_color + '">' + label + '</font></div></div></td>']
# column headers
for cat_i, cat in enumerate(cats):
if not cat_seen[cat] and not show_blanks:
continue
cat_disp = cat
cell_title = input_HMM_descs[cat]
if len(cat_disp) > cat_disp_trunc_len + 1:
cat_disp = cat_disp[0:cat_disp_trunc_len] + '*'
html_report_lines += ['<td style="border-right:solid 2px ' + border_cat_color + '; border-bottom:solid 2px ' +
border_cat_color + '" bgcolor="' + head_color_2 + '"title="' + cell_title + '" valign=bottom align=center>']
html_report_lines += ['<div class="vertical-text"><div class="vertical-text__inner">']
html_report_lines += ['<font color="' + text_color_2 + '" size=' + graph_cat_fontsize + '><b>']
#for c_i,c in enumerate(cat_disp):
# if c_i < len(cat_disp)-1:
# html_report_lines += [c+'<br>']
# else:
# html_report_lines += [c]
html_report_lines += [cat_disp]
html_report_lines += ['</b></font>']
html_report_lines += ['</div></div>']
html_report_lines += ['</td>']
html_report_lines += ['</tr>']
# rest of rows
for genome_ref in genome_refs:
genome_sci_name = genome_ref_to_sci_name[genome_ref]
html_report_lines += ['<tr>']
html_report_lines += ['<td align=right><font color="' + text_color + '" size=' +
graph_gen_fontsize + '><b><nobr>' + genome_sci_name + '</nobr></b></font></td>']
for cat in cats:
if not cat_seen[cat] and not show_blanks:
continue
val = table_data[genome_ref][cat]
if val == 0:
cell_color = 'white'
else:
# scale the count onto the colour ramp: higher counts pick lower hex digits, i.e. darker cells
cell_color_i = max_color - \
int(round(max_color * (val - overall_low_val) / float(overall_high_val - overall_low_val)))
c = color_list[cell_color_i]
cell_color = '#' + c + c + c + c + 'FF'
cell_val = str(table_data[genome_ref][cat]) # the key line
if 'heatmap' in params and params['heatmap'] == '1':
if table_data[genome_ref][cat] == 0:
this_text_color = text_color
#this_graph_char = "0"
this_graph_char = sp
else:
this_text_color = cell_color
this_graph_char = graph_char
html_report_lines += ['<td align=center valign=middle title="' + cell_val + '" style="width:' + cell_width + '" bgcolor="' +
cell_color + '"><font color="' + this_text_color + '" size=' + cell_fontsize + '>' + this_graph_char + '</font></td>']
else:
html_report_lines += ['<td align=center valign=middle style="' + cell_width + '; border-right:solid 2px ' + border_color +
'; border-bottom:solid 2px ' + border_color + '"><font color="' + text_color + '" size=' + cell_fontsize + '>' + cell_val + '</font></td>']
html_report_lines += ['</tr>']
html_report_lines += ['</table>']
# genomes as columns
else:
raise ValueError("Do not yet support Genomes as columns")
# key table
CAZy_server_addr = 'www.cazy.org'
html_report_lines += ['<p>']
html_report_lines += ['<table cellpadding=3 cellspacing=2 border=' + border + '>']
html_report_lines += ['<tr><td valign=middle align=left colspan=2 style="border-bottom:solid 4px ' +
border_color + '"><font color="' + text_color + '"><b>KEY</b></font></td></tr>']
for cat_i, cat in enumerate(cats):
cell_color = 'white'
if not cat_seen[cat] and not show_blanks:
cell_color = "#eeeeee"
desc = input_HMM_descs[cat]
cat_disp = cat
if len(cat_disp) > cat_disp_trunc_len + 1:
cat_disp = cat_disp[0:cat_disp_trunc_len] + '*'
if cat == 'GT2_Cellulose_synt':
# A utility script to clean up the tables and set annotations after running remap_vocab.py to remapping from 'data_commons' to 'ermrest_model' vocabulary tables.
#
# Example usage:
#
#
# $ python facebase_post_remap_vocab.py dev.facebase.org 11 'vocab' '.*$' -v --annotations --cleanup
# The command above would run this script on the facebase catalog 11 over the vocab schema and all of its tables
import argparse
import re
from deriva.core import DerivaServer, ErmrestCatalog, AttrDict, get_credential
from deriva.core.ermrest_model import builtin_types, Table, Column, Key, ForeignKey
import sys
import traceback
import json
description = """
A utility script to clean up the tables and set annotations after running remap_vocab.py to remapping from 'data_commons' to 'ermrest_model' vocabulary tables.
"""
class ReplacementPatternAction(argparse.Action):
"""Custom action for subheader."""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(ReplacementPatternAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
replacement = values.split('=')
if len(replacement) != 2:
parser.error("Invalid replacement pattern '{values}' for '{arg}'. The pattern must be 'old=new'.".format(
values=values,
arg=self.dest
))
setattr(namespace, self.dest, {'old': replacement[0], 'new': replacement[1]})
# Argument parser
parser = argparse.ArgumentParser(description=description)
parser.add_argument('hostname')
parser.add_argument('catalog', type=int, help='Catalog number')
parser.add_argument('schema_regex', help='A regular expression to filter the schemas')
parser.add_argument('exclude_table_regex', help='A regular expression to exclude the tables matching that pattern')
parser.add_argument('-n', '--dryrun', action='store_true', help='Dry run. No changes to the catalog.')
parser.add_argument('-v', '--verbose', action="count", default=0, help='Increase verbosity of output.')
parser.add_argument('--cleanup', action='store_true', help='Cleanup. Drop old tables w/ names ending in _terms, _relationship_types and _paths.')
parser.add_argument('--annotations', action='store_true', help='Annotations. Add basic level annotations to tables in selected schema')
args = parser.parse_args()
# Compile regex patterns
schema_pattern = re.compile(args.schema_regex)
table_pattern = re.compile(args.exclude_table_regex)
# Create/connect catalog and get various interfaces
credential = get_credential(args.hostname)
server = DerivaServer('https', args.hostname, credential)
catalog = server.connect_ermrest(args.catalog)
model = catalog.getCatalogModel()
config = catalog.getCatalogConfig()
def verbose(message):
"""Print message for verbose output"""
if args.verbose:
print(message)
def vverbose(message):
"""Print message for very verbose output"""
if args.verbose > 1:
print(message)
def clean_term(term):
"""Cleans up the term by monkey patching and returning it."""
# Use dbxref in curie if it does not match the blacklist
if not curie_blacklist_pattern.match(term['dbxref']):
term['id'] = term['dbxref'][:term['dbxref'].rindex(':')] # strip the trailing ':###'
else:
term['id'] = None
# Similarly, clean up the alternative_dbxrefs
if args.altids:
term['alternate_ids'] = []
if term['alternate_dbxrefs']:
for old_alt_dbxref in term['alternate_dbxrefs']:
if old_alt_dbxref.startswith('URL:'):
old_alt_dbxref = old_alt_dbxref[len('URL:'):old_alt_dbxref.rindex(':')] # strip 'URL:' prefix and the trailing ':###'
else:
old_alt_dbxref = old_alt_dbxref[:old_alt_dbxref.rindex(':')] # strip the trailing ':###'
term['alternate_ids'].append(old_alt_dbxref)
del term['alternate_dbxrefs']
# Description must be non-null but many existing terms have none
if not term['description']:
term['description'] = 'No description'
return term
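# Standalone illustration (hypothetical dbxref values, not taken from any catalog)
# of the trimming clean_term() applies to CURIEs and alternate ids.
def _demo_trim_dbxref():
    dbxref = 'EFO:0005168:3'
    term_id = dbxref[:dbxref.rindex(':')]          # -> 'EFO:0005168' (trailing ':###' dropped)
    alt = 'URL:http://purl.example.org/obo/ab:12:1'
    alt_id = alt[len('URL:'):alt.rindex(':')]      # -> 'http://purl.example.org/obo/ab:12'
    return term_id, alt_id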
def cleanup_old_vocab_tables():
"""Drop all _terms, _paths, _relationship_types tables in the vocabulary schema"""
verbose('Cleaning old tables ending on _terms, _relationship_types, _paths ....')
p1 = re.compile('.+_terms$')
p2 = re.compile('.+_relationship_types$')
p3 = re.compile('.+_paths$')
for schema_name in list(model.schemas):
if schema_pattern.match(schema_name):
schema = model.schemas[schema_name]
verbose('Schema "{sname}" matched "{pattern}"'.format(sname=schema_name, pattern=args.schema_regex))
for table_name in list(model.schemas[schema_name].tables):
#verbose('Processing table "{tname}" for deletion.....'.format(tname=table_name))
if p3.match(table_name):
#verbose(' --->Table "{tname}" matches delete pattern....'.format(tname=table_name))
if not args.dryrun:
verbose('Deleting Table="{tname}" ...'.format(tname=table_name))
schema.tables[table_name].delete(catalog, schema)
else:
verbose("Found {tname} but skipping delete in dry-run mode...".format(tname=table_name))
for table_name in list(model.schemas[schema_name].tables):
#verbose('Processing table "{tname}" for deletion.....'.format(tname=table_name))
if p2.match(table_name):
#verbose(' --->Table "{tname}" matches delete pattern....'.format(tname=table_name))
if not args.dryrun:
verbose('Deleting Table="{tname}" ...'.format(tname=table_name))
schema.tables[table_name].delete(catalog, schema)
else:
verbose("Found {tname} but skipping delete in dry-run mode...".format(tname=table_name))
for table_name in list(model.schemas[schema_name].tables):
#verbose('Processing table "{tname}" for deletion.....'.format(tname=table_name))
if p1.match(table_name):
#verbose(' --->Table "{tname}" matches delete pattern....'.format(tname=table_name))
if not args.dryrun:
verbose('Deleting Table="{tname}" ...'.format(tname=table_name))
schema.tables[table_name].delete(catalog, schema)
else:
verbose("Found {tname} but skipping delete in dry-run mode...".format(tname=table_name))
def update_annotations_vocab_tables(goal):
for schema_name in list(model.schemas):
if schema_pattern.match(schema_name):
schema = model.schemas[schema_name]
for table_name in list(model.schemas[schema_name].tables):
if not table_pattern.match(table_name):
verbose('Setting up annotations for table "{tname}" ...'.format(tname=table_name))
if not args.dryrun:
update_annotations_vocab_table(schema_name,table_name,goal)
verbose(' Done setting up annotations for table "{tname}" ...'.format(tname=table_name))
else:
verbose(" Found {tname} but skipping setting up annotations in dry-run mode...".format(tname=table_name))
def update_annotations_vocab_table(schema_name,table_name,goal):
verbose('Setting up annotations for table "{tname}" ...'.format(tname=table_name))
row_order = [{"column": "name"}]
if table_name == 'stage':
row_order = [{"column": "sort_key"}, {"column": "name"}]
    if table_name not in ('file_extension', 'gene_summary'):
goal.column('%s' % schema_name, '%s' % table_name, 'id').display.update({'name': 'ID'})
goal.column('%s' % schema_name, '%s' % table_name, 'uri').display.update({'name': 'URI'})
goal.table(
'%s' % schema_name, '%s' % table_name
).table_display.update({
"row_name": {"row_markdown_pattern": "{{name}}"},
"*": {"row_order": row_order}
})
goal.column(
'%s' % schema_name, '%s' % table_name, 'uri'
).column_display.update({
"detailed": {
"markdown_pattern": "[{{uri}}]({{uri}})"
}
})
goal.column(
'%s' % schema_name, '%s' % table_name, 'id'
).column_display.update({
"detailed": {
"markdown_pattern": "[{{id}}]({{uri}})"
}
})
goal.table(
'%s' % schema_name, '%s' % table_name
).visible_columns.update({
"filter": {"and": [{"source": "name"},
{"source": "id"},
{"source": "description"},
{"source": "synonyms"},
{"source": "alternate_ids"}
]
},
"entry": [
"id","name","uri","description"
],
"detailed": [["vocab","%s_RIDkey1" % table_name],
"id",
"name",
"uri",
"description",
"synonyms",
"alternate_ids"
],
"compact": [["vocab","%s_RIDkey1" % table_name],
"id",
"name",
"description",
"synonyms",
"alternate_ids"
]
})
if table_name == 'gene_summary':
goal.table(
'%s' % schema_name, '%s' % table_name
).visible_columns.update({
"filter": {"and": [
{"source":[{"outbound": ["vocab", "gene_summary_gene_fkey"]},"id"],"entity": True},
{"source":[{"outbound": ["vocab", "gene_summary_species_fkey"]}, "id"],"entity": True}
]
},
"entry": [
["vocab", "gene_summary_gene_fkey"],["vocab", "gene_summary_species_fkey"],["vocab","gene_summary_contributed_by_fkey"],"summary"
],
"detailed": [["vocab", "gene_summary_gene_fkey"],
["vocab", "gene_summary_species_fkey"],
["vocab","gene_summary_contributed_by_fkey"],
"summary"
],
"compact": [["vocab", "gene_summary_gene_fkey"],
["vocab", "gene_summary_species_fkey"],
["vocab","gene_summary_contributed_by_fkey"],
"summary"
]
})
if table_name == 'file_extension':
goal.table(
'%s' % schema_name, '%s' % table_name
).visible_columns.update({
"filter": {"and": [
{"source": "extension"},
{"source":[{"outbound": ["vocab", "file_extension_file_format_fkey"]},"id"],"entity": True}
]
},
"entry": [
"extension",["vocab", "file_extension_file_format_fkey"]
],
"detailed": [
["vocab","file_extension_rid_key"],
"extension",
["vocab", "file_extension_file_format_fkey"]
],
"compact": [
["vocab", "file_extension_rid_key"],
"extension",
["vocab", "file_extension_file_format_fkey"]
]
})
if table_name == 'phenotype':
goal.table(
'%s' % schema_name, '%s' % table_name
).visible_foreign_keys.update({
"*": [{"source": [{"inbound": ["isa", "dataset_%s_%s_fkey" % (table_name, table_name)]},
{"outbound": ["isa", "dataset_%s_dataset_fkey" % table_name]}, "id"]}]
})
elif table_name == 'species':
goal.table(
'%s' % schema_name, '%s' % table_name
).visible_foreign_keys.update({
"*": [{"source": [{"inbound": ["isa", "dataset_organism_organism_fkey"]},
{"outbound": ["isa", "dataset_organism_dataset_id_fkey"]}, "id"]}]
})
else:
goal.table(
'%s' % schema_name, '%s' % table_name
).visible_foreign_keys.update({
"*": [{"source": [{"inbound": ["isa", "dataset_%s_%s_fkey" % (table_name, table_name)]},
{"outbound": ["isa", "dataset_%s_dataset_id_fkey" % table_name]}, "id"]}]
})
def update_annotations_dataset_table(goal):
verbose('Setting up viz cols for Dataset')
goal.table('isa', 'dataset').visible_columns.update({
"filter": {
"and": [
{"source": [{"inbound": ["isa", "dataset_organism_dataset_id_fkey"]}, {"outbound": ["isa", "dataset_organism_organism_fkey"]}, "id"], "entity": True, "open": False},
{"source": [{"inbound": ["isa", "dataset_experiment_type_dataset_id_fkey"]}, {"outbound": ["isa", "dataset_experiment_type_experiment_type_fkey"]}, "id"], "entity": True, "open": False},
{"source": [{"inbound": ["isa", "dataset_data_type_data_type_fkey"]}, {"outbound": ["isa", "dataset_data_type_dataset_id_fkey"]}, "id"], "entity": True, "open": False},
{"source": [{"inbound": ["isa", "dataset_gene_dataset_id_fkey"]}, {"outbound": ["isa", "dataset_gene_gene_fkey"]}, "id"], "entity": True, "open": False},
{"source": [{"inbound": ["isa", "dataset_stage_dataset_id_fkey"]}, {"outbound": ["isa", "dataset_stage_stage_fkey"]}, "id"], "entity": True, "open": False},
{"source": [{"inbound": ["isa", "dataset_anatomy_dataset_id_fkey"]}, {"outbound": ["isa", "dataset_anatomy_anatomy_fkey"]}, "id"], "entity": True, "open": False},
{"source": [{"inbound": ["isa", "dataset_genotype_dataset_id_fkey"]}, {"outbound": ["isa", "dataset_genotype_genotype_fkey"]}, "id"], "entity": True, "open": False},
{"source": [{"inbound": ["isa", "dataset_phenotype_dataset_fkey"]}, {"outbound": ["isa", "dataset_phenotype_phenotype_fkey"]}, "id"], "entity": True, "open": False},
{"source": [{"inbound": ["isa", "dataset_chromosome_dataset_id_fkey"]}, "chromosome"], "entity": True, "open": False, "markdown_name": "Chromosome"},
{"source": [{"inbound": ["isa", "publication_dataset_fkey"]}, "pmid"], "entity": True, "open": False,"markdown_name": "Pubmed ID"},
{"source": [{"outbound": ["isa", "dataset_project_fkey"]},{"inbound": ["isa", "project_investigator_project_id_fkey"]},{"outbound": ["isa", "project_investigator_person_fkey"]},"RID"], "entity": True, "open": False,"markdown_name": "Project Investigator"},
{"source": "accession", "entity": False, "open": False},
{"source": "title", "entity": False, "open": False},
{"source": [{"outbound": ["isa", "dataset_project_fkey"]}, "id"], "entity": True, "open": False},
{"source": "release_date", "entity": False, "open": False},
{"source": [{"outbound": ["isa", "dataset_status_fkey"]}, "name"], "entity": True, "open": False}
]
},
"compact": [["isa","dataset_RID_key"],["isa","accession_unique"],"title",["isa","dataset_project_fkey"],"status","release_date"],
"entry": ["accession","title",["isa","dataset_project_fkey"],"description","study_design","released","release_date", "show_in_jbrowse"],
"detailed": [["isa","dataset_RID_key"],"accession","description","study_design",["isa","dataset_project_fkey"],["isa","dataset_status_fkey"],"funding","release_date","show_in_jbrowse",
["isa","publication_dataset_fkey"],
["isa","dataset_experiment_type_dataset_id_fkey"],
["isa","dataset_data_type_dataset_id_fkey"],
["isa","dataset_phenotype_dataset_fkey"],
["isa","dataset_organism_dataset_id_fkey"],
["isa","dataset_gene_dataset_id_fkey"],
["isa","dataset_stage_dataset_id_fkey"],
["isa","dataset_anatomy_dataset_id_fkey"],
["isa","dataset_mutation_dataset_id_fkey"],
["isa","dataset_enhancer_dataset_id_fkey"],
["isa","dataset_mouse_genetic_background_dataset_id_fkey"],
["isa","dataset_gender_dataset_id_fkey"],
["isa","dataset_genotype_dataset_id_fkey"],
["isa","dataset_instrument_dataset_id_fkey"],
["isa","dataset_geo_dataset_id_fkey"],
["isa","dataset_chromosome_dataset_id_fkey"]
]
})
def update_annotations_ebr(goal):
"""
Experiment
"""
verbose('Setting up viz cols for Experiment')
goal.table('isa', 'experiment').visible_columns.update({
"filter": {
"and": [
{"source": [{"outbound": ["isa", "experiment_experiment_type_fkey"]}, "id"], "entity": True,"open": True},
{"source": [{"outbound": ["isa", "experiment_dataset_fkey"]}, "RID"], "entity": True,"open": False},
{"source": [{"inbound": ["isa", "replicate_experiment_fkey"]},
{"outbound": ["isa", "replicate_biosample_fkey"]},
{"outbound": ["isa", "biosample_species_fkey"]}, "id"], "entity": True, "open": False},
{"source": [{"inbound": ["isa", "replicate_experiment_fkey"]},
{"outbound": ["isa", "replicate_biosample_fkey"]},
{"outbound": ["isa", "biosample_stage_fkey"]}, "id"], "entity": True, "open": False},
{"source": [{"inbound": ["isa", "replicate_experiment_fkey"]},
{"outbound": ["isa", "replicate_biosample_fkey"]},
{"outbound": ["isa", "biosample_anatomy_fkey"]}, "id"], "entity": True, "open": False},
{"source": [{"inbound": ["isa", "replicate_experiment_fkey"]},
{"outbound": ["isa", "replicate_biosample_fkey"]},
{"outbound": ["isa", "biosample_genotype_fkey"]}, "id"], "entity": True, "open": False},
{"source": [{"inbound": ["isa", "replicate_experiment_fkey"]},
{"inbound": ["isa", "track_data_replicate_fkey"]},
{"outbound": ["isa", "track_data_mapping_assembly_fkey"]},"id"], "entity": True, "open": False,"markdown_name": "Mapping Assembly"},
{"source": [{"inbound": ["isa", "replicate_experiment_fkey"]},
{"inbound": ["isa", "track_data_replicate_fkey"]},"RID"], "entity": True,"open": False, "markdown_name": "Track Data"}
]
},
"detailed": [["isa", "experiment_pkey"],
["isa", "experiment_dataset_fkey"],
"local_identifier",
["isa", "experiment_experiment_type_fkey"],
"biosample_summary",
["isa", "experiment_molecule_type_fkey"],
["isa", "experiment_strandedness_fkey"],
["isa", "experiment_rnaseq_selection_fkey"],
["isa", "experiment_target_of_assay_fkey"],
["isa", "experiment_chromatin_modifier_fkey"],
["isa", "experiment_transcription_factor_fkey"],
["isa", "experiment_histone_modification_fkey"],
["isa", "experiment_control_assay_fkey"],
["isa", "experiment_protocol_fkey"]
],
"compact": [["isa", "experiment_pkey"],
["isa", "experiment_dataset_fkey"],
["isa", "experiment_experiment_type_fkey"],
"biosample_summary",
["isa", "experiment_protocol_fkey"],
"local_identifier"],
"entry": [["isa", "experiment_dataset_fkey"],
"local_identifier",
"biosample_summary",
["isa", "experiment_experiment_type_fkey"],
["isa", "experiment_molecule_type_fkey"],
["isa", "experiment_strandedness_fkey"],
["isa", "experiment_rnaseq_selection_fkey"],
["isa", "experiment_target_of_assay_fkey"],
["isa", "experiment_chromatin_modifier_fkey"],
["isa", "experiment_transcription_factor_fkey"],
["isa", "experiment_histone_modification_fkey"],
["isa", "experiment_control_assay_fkey"],
["isa", "experiment_protocol_fkey"]]
})
"""
Biosample
"""
verbose('Setting up viz cols for Biosample')
goal.table('isa', 'biosample').visible_columns.update({
"filter": | |
from backend import *
from basement import *
from pausable import *
from kittyaccesscontrol import *
from kittybase import KittyBase
from erc721 import ERC721
from erc721metadata import ERC721Metadata
# @title The facet of the CryptoKitties core contract that manages ownership, ERC-721 (draft) compliant.
# @author <NAME> (https://www.axiomzen.co)
# @dev Ref: https://github.com/ethereum/EIPs/issues/721
# See the KittyCore contract documentation to understand how the various contract facets are arranged.
class KittyOwnership(KittyBase, ERC721):
def __init__(self):
KittyBase.__init__(self)
ERC721.__init__(self)
# @notice Name and symbol of the non fungible token, as defined in ERC721.
#string public constant name = "CryptoKitties";
#string public constant symbol = "CK";
self.name = "CryptoKitties"
self.symbol = "CK"
# The contract that will return kitty metadata
#ERC721Metadata public erc721Metadata;
self.erc721Metadata = ERC721Metadata()
        self.InterfaceSignature_ERC165 = bytes4(keccak256('supportsInterface(bytes4)'))
        # Placeholder: the real ERC-721 signature is the XOR of the selectors listed in the FIXME below
        self.InterfaceSignature_ERC721 = bytes4(keccak256('InterfaceSignature_ERC721(bytes4)'))
'''FIXME
bytes4 constant InterfaceSignature_ERC721 =
bytes4(keccak256('name()')) ^
bytes4(keccak256('symbol()')) ^
bytes4(keccak256('totalSupply()')) ^
bytes4(keccak256('balanceOf(address)')) ^
bytes4(keccak256('ownerOf(uint256)')) ^
bytes4(keccak256('approve(address,uint256)')) ^
bytes4(keccak256('transfer(address,uint256)')) ^
bytes4(keccak256('transferFrom(address,address,uint256)')) ^
bytes4(keccak256('tokensOfOwner(address)')) ^
bytes4(keccak256('tokenMetadata(uint256,string)'));
'''
# @notice Introspection interface as per ERC-165 (https://github.com/ethereum/EIPs/issues/165).
# Returns true for any standardized interfaces implemented by this contract. We implement
# ERC-165 (obviously!) and ERC-721.
def supportsInterface(self, _interfaceID: bytes) -> bool:
# DEBUG ONLY
#require((InterfaceSignature_ERC165 == 0x01ffc9a7) && (InterfaceSignature_ERC721 == 0x9a20483d));
return (_interfaceID == self.InterfaceSignature_ERC165) or (_interfaceID == self.InterfaceSignature_ERC721)
# @dev Set the address of the sibling contract that tracks metadata.
# CEO only.
@onlyCEO
def setMetadataAddress(self, _contractAddress: address):
self.erc721Metadata = ERC721Metadata(_contractAddress)
# Internal utility functions: These functions all assume that their input arguments
# are valid. We leave it to public methods to sanitize their inputs and follow
# the required logic.
# @dev Checks if a given address is the current owner of a particular Kitty.
# @param _claimant the address we are validating against.
# @param _tokenId kitten id, only valid when > 0
def _owns(self, _claimant: address, _tokenId: uint256) -> bool:
return self.kittyIndexToOwner[_tokenId] == _claimant
# @dev Checks if a given address currently has transferApproval for a particular Kitty.
# @param _claimant the address we are confirming kitten is approved for.
# @param _tokenId kitten id, only valid when > 0
def _approvedFor(self, _claimant: address, _tokenId: uint256) -> bool:
return self.kittyIndexToApproved[_tokenId] == _claimant
# @dev Marks an address as being approved for transferFrom(), overwriting any previous
# approval. Setting _approved to address(0) clears all transfer approval.
# NOTE: _approve() does NOT send the Approval event. This is intentional because
# _approve() and transferFrom() are used together for putting Kitties on auction, and
# there is no value in spamming the log with Approval events in that case.
def _approve(self, _tokenId: uint256, _approved: address):
self.kittyIndexToApproved[_tokenId] = _approved
# @notice Returns the number of Kitties owned by a specific address.
# @param _owner The owner address to check.
# @dev Required for ERC-721 compliance
def balanceOf(self, _owner: address) -> uint256:
return self.ownershipTokenCount[_owner]
# @notice Transfers a Kitty to another address. If transferring to a smart
# contract be VERY CAREFUL to ensure that it is aware of ERC-721 (or
# CryptoKitties specifically) or your Kitty may be lost forever. Seriously.
# @param _to The address of the recipient, can be a user or contract.
# @param _tokenId The ID of the Kitty to transfer.
# @dev Required for ERC-721 compliance.
def transfer(self, _to: address, _tokenId: uint256):
self.whenNotPaused()
# Safety check to prevent against an unexpected 0x0 default.
require(_to != address(0))
# Disallow transfers to this contract to prevent accidental misuse.
# The contract should never own any kitties (except very briefly
# after a gen0 cat is created and before it goes on auction).
require(_to != address(this))
# Disallow transfers to the auction contracts to prevent accidental
# misuse. Auction contracts should only take ownership of kitties
# through the allow + transferFrom flow.
require(_to != address(self.saleAuction))
require(_to != address(self.siringAuction))
# You can only send your own cat.
require(self._owns(msg.sender, _tokenId))
# Reassign ownership, clear pending approvals, emit Transfer event.
self._transfer(msg.sender, _to, _tokenId)
# @notice Grant another address the right to transfer a specific Kitty via
# transferFrom(). This is the preferred flow for transfering NFTs to contracts.
# @param _to The address to be granted transfer approval. Pass address(0) to
# clear all approvals.
# @param _tokenId The ID of the Kitty that can be transferred if this call succeeds.
# @dev Required for ERC-721 compliance.
@whenNotPaused
def approve(self, _to: address, _tokenId: uint256):
# Only an owner can grant transfer approval.
require(self._owns(msg.sender, _tokenId))
# Register the approval (replacing any previous approval).
self._approve(_tokenId, _to)
# Emit approval event.
self.Approval(msg.sender, _to, _tokenId)
# @notice Transfer a Kitty owned by another address, for which the calling address
# has previously been granted transfer approval by the owner.
# @param _from The address that owns the Kitty to be transfered.
# @param _to The address that should take ownership of the Kitty. Can be any address,
# including the caller.
# @param _tokenId The ID of the Kitty to be transferred.
# @dev Required for ERC-721 compliance.
@whenNotPaused
def transferFrom(self, _from: address, _to: address, _tokenId: uint256):
# Safety check to prevent against an unexpected 0x0 default.
require(_to != address(0))
# Disallow transfers to this contract to prevent accidental misuse.
# The contract should never own any kitties (except very briefly
# after a gen0 cat is created and before it goes on auction).
require(_to != address(this))
# Check for approval and valid ownership
require(self._approvedFor(msg.sender, _tokenId))
require(self._owns(_from, _tokenId))
# Reassign ownership (also clears pending approvals and emits Transfer event).
self._transfer(_from, _to, _tokenId)
# @notice Returns the total number of Kitties currently in existence.
# @dev Required for ERC-721 compliance.
def totalSupply(self) -> uint:
return self.kitties.length - 1
# @notice Returns the address currently assigned ownership of a given Kitty.
# @dev Required for ERC-721 compliance.
def ownerOf(self, _tokenId: uint256) -> address:
owner = self.kittyIndexToOwner[_tokenId]
require(owner != address(0))
return owner
# @notice Returns a list of all Kitty IDs assigned to an address.
# @param _owner The owner whose Kitties we are interested in.
# @dev This method MUST NEVER be called by smart contract code. First, it's fairly
# expensive (it walks the entire Kitty array looking for cats belonging to owner),
# but it also returns a dynamic array, which is only supported for web3 calls, and
# not contract-to-contract calls.
def tokensOfOwner(self, _owner: address) -> List:
tokenCount = self.balanceOf(_owner)
result = List([],uint256)
if tokenCount == 0:
# Return an empty array
return result
else:
#FIXME memory type
# uint256[] memory result = new uint256[](tokenCount);
# uint256 totalCats = self.totalSupply();
# uint256 resultIndex = 0;
result = List(size = tokenCount, value_type=uint256)
totalCats = self.totalSupply()
resultIndex = 0
# We count on the fact that all cats have IDs starting at 1 and increasing
# sequentially up to the totalCat count.
# uint256 catId;
for catId in range(1, totalCats+1):
if self.kittyIndexToOwner[catId] == _owner:
result[resultIndex] = catId
resultIndex+=1
return result
# @dev Adapted from memcpy() by @arachnid (<NAME> <<EMAIL>>)
# This method is licenced under the Apache License.
# Ref: https://github.com/Arachnid/solidity-stringutils/blob/2f6ca9accb48ae14c66f1437ec50ed19a0616f78/strings.sol
def _memcpy(self, _dest: uint, _src: uint, _len: uint):
pass
'''
def _memcpy(uint _dest, uint _src, uint _len) private view {
# Copy word-length chunks while possible
for(; _len >= 32; _len -= 32) {
assembly {
mstore(_dest, mload(_src))
}
_dest += 32;
_src += 32;
}
# Copy remaining bytes
uint256 mask = 256 ** (32 - _len) - 1;
assembly {
let srcpart := and(mload(_src), not(mask))
let destpart := and(mload(_dest), mask)
mstore(_dest, or(destpart, srcpart))
}
}
'''
# @dev Adapted from toString(slice) by @arachnid (<NAME> <<EMAIL>>)
# This method is licenced under the Apache License.
# Ref: https://github.com/Arachnid/solidity-stringutils/blob/2f6ca9accb48ae14c66f1437ec50ed19a0616f78/strings.sol
#FIXME
def _toString(self, _rawBytes, _stringLength) -> str:
assert False
'''
def _toString(bytes32[4] _rawBytes, uint256 _stringLength) private view returns (string) {
var outputString = new string(_stringLength);
uint256 outputPtr;
uint256 bytesPtr;
assembly {
outputPtr := add(outputString, 32)
bytesPtr := _rawBytes
}
_memcpy(outputPtr, bytesPtr, _stringLength);
return outputString;
'''
    # @notice Returns a URI pointing to a metadata package for
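# Illustrative sketch (separate from the class above): computing the ERC-165 / ERC-721
# interface signatures that the FIXME block in KittyOwnership.__init__ leaves unresolved.
# It assumes pycryptodome for keccak-256 (any keccak-256 implementation would do).
# Per the commented-out DEBUG check in supportsInterface, the ERC-165 value should come
# out to 0x01ffc9a7 and the ERC-721 value to 0x9a20483d.
def _interface_signatures_sketch():
    from Crypto.Hash import keccak  # pip install pycryptodome

    def selector(signature: str) -> int:
        """First 4 bytes of keccak256(signature), as an int (a Solidity function selector)."""
        digest = keccak.new(digest_bits=256, data=signature.encode()).digest()
        return int.from_bytes(digest[:4], "big")

    erc165 = selector("supportsInterface(bytes4)")
    erc721 = 0
    for sig in (
        "name()", "symbol()", "totalSupply()", "balanceOf(address)",
        "ownerOf(uint256)", "approve(address,uint256)", "transfer(address,uint256)",
        "transferFrom(address,address,uint256)", "tokensOfOwner(address)",
        "tokenMetadata(uint256,string)",
    ):
        erc721 ^= selector(sig)
    return hex(erc165), hex(erc721)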
#!/usr/bin/env python
__author__ = "<EMAIL>"
import shutil
import sys
from collections import OrderedDict, defaultdict
from csv import DictReader, DictWriter
from enum import Enum
from multiprocessing import Process
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import typer
from Bio import SeqIO
from bx.intervals.cluster import ClusterTree
from cupcake import version_callback
from cupcake import cupcake_logger as logger
from cupcake.sequence import GFF
from cupcake.tofu.counting import combine_abundance_across_samples as sp
# from BCBio import GFF as bGFF
app = typer.Typer(name="cupcake.tofu.counting.chain_samples")
class fl_fields(str, Enum):
norm_fl = "norm_fl"
count_fl = "count_fl"
def sample_sanity_check(
group_filename, gff_filename, count_filename, fastq_filename=None
) -> None:
"""
Double check that the formats are expected and all PBIDs are concordant across the files
:return: raise Exception if sanity check failed
"""
logger.info(
f"Sanity checking. Retrieving PBIDs from {group_filename},{gff_filename},{count_filename}..."
)
ids1 = [line.strip().split()[0] for line in open(group_filename)]
ids2 = [r.seqid for r in GFF.collapseGFFReader(gff_filename)]
f = open(count_filename)
while True:
# advance through the headers which start with #
cur = f.tell()
if (
not f.readline().startswith("#") or f.tell() == cur
): # first non-# seen or EOF
f.seek(cur)
break
ids3 = [r["pbid"] for r in DictReader(f, delimiter="\t")]
if len(set(ids2).difference(ids1)) > 0 or len(set(ids2).difference(ids3)) > 0:
raise Exception(
f"Sanity check failed! Please make sure the PBIDs listed in {gff_filename} are also in {group_filename} and {count_filename}"
)
if fastq_filename is not None:
ids4 = [r.id.split("|")[0] for r in SeqIO.parse(open(fastq_filename), "fastq")]
if len(set(ids2).difference(ids4)) > 0:
raise Exception(
f"Sanity check failed! Please make sure the PBIDs listed in {gff_filename} are also in {fastq_filename}"
)
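# Small standalone sketch (not from the original module) of the header-skipping idiom
# used in sample_sanity_check above and read_count_info below: advance past leading '#'
# comment lines but leave the file positioned at the first data line, so DictReader
# starts from the real header row.
def _skip_hash_comment_lines(f) -> None:
    while True:
        cur = f.tell()
        line = f.readline()
        if not line or not line.startswith("#"):  # EOF or first non-# line
            f.seek(cur)
            return
# Usage: with open(count_filename) as f: _skip_hash_comment_lines(f); reader = DictReader(f, delimiter="\t")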
def read_config(
filename: Union[str, Path]
) -> Tuple[Dict[str, Path], List[str], str, str, str, str]:
# Okay, why is this a thing? Why not just pass arguments?
"""
tmpSAMPLE=<name>;<path>
SAMPLE=<name>;<path>
must also have
GROUP_FILENAME=
GFF_FILENAME=
COUNT_FILENAME=
optional:
FASTQ_FILENAME=
"""
sample_dirs = {}
sample_names = []
group_filename, gff_filename, count_filename = None, None, None
fastq_filename = None
no_more_tmp = False
with open(filename) as f:
for line in f:
if line.startswith("tmpSAMPLE="):
if no_more_tmp:
logger.error(
"Cannot have tmp_ samples after non-tmp_ samples! Abort!"
)
sys.exit(-1)
name, path = line.strip()[len("tmpSAMPLE=") :].split(";")
if name.startswith("tmp_"):
logger.error(
f"Sample names are not allowed to start with tmp_! "
f"Please change {name} to something else."
)
sys.exit(-1)
sample_dirs[name] = Path(path).resolve()
sample_names.append(f"tmp_{name}")
elif line.startswith("SAMPLE="):
no_more_tmp = True
name, path = line.strip()[len("SAMPLE=") :].split(";")
if name.startswith("tmp_"):
logger.error(
f"Sample names are not allowed to start with tmp_! "
f"Please change {name} to something else."
)
sys.exit(-1)
sample_dirs[name] = Path(path).resolve()
sample_names.append(name)
elif line.startswith("GROUP_FILENAME="):
group_filename = line.strip()[len("GROUP_FILENAME=") :]
elif line.startswith("GFF_FILENAME="):
gff_filename = line.strip()[len("GFF_FILENAME=") :]
elif line.startswith("COUNT_FILENAME="):
count_filename = line.strip()[len("COUNT_FILENAME=") :]
elif line.startswith("FASTQ_FILENAME="):
fastq_filename = line.strip()[len("FASTQ_FILENAME=") :]
if group_filename is None:
raise FileNotFoundError(
f"Expected GROUP_FILENAME= but not in config file {filename}! Abort."
)
if count_filename is None:
raise FileNotFoundError(
f"Expected COUNT_FILENAME= but not in config file {filename}! Abort."
)
if gff_filename is None:
raise FileNotFoundError(
f"Expected GFF_FILENAME= but not in config file {filename}! Abort."
)
if len(sample_names) == 0:
logger.error("No samples given. Exit.")
sys.exit(-1)
# return signature is:
# sample_dirs = Dict[sample_name, Path(sample_path)]
# sample_names = List[sample_name]
# group_filename = str
# gff_filename = str
# count_filename = str
# fastq_filename = str
# so, for the test data, we get:
# sample_dirs = {
# 'A': Path('tests/test_data/chaining/A'),
# 'B': Path('tests/test_data/chaining/B')
# }
# sample_names = ["A", "B"]
# group_filename = touse.group.txt
# gff_filename = touse.gff
# count_filename = touse.count.txt
# fastq_filename = touse.rep.fq
return (
sample_dirs,
sample_names,
group_filename,
gff_filename,
count_filename,
fastq_filename,
)
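# Illustration only (not part of the original module): a config file in the format
# parsed by read_config above. Sample names and relative paths are hypothetical,
# mirroring the test-data comments inside read_config; `workdir` is any existing Path.
def _example_read_config(workdir: Path) -> None:
    config_text = (
        "SAMPLE=A;tests/test_data/chaining/A\n"
        "SAMPLE=B;tests/test_data/chaining/B\n"
        "GROUP_FILENAME=touse.group.txt\n"
        "GFF_FILENAME=touse.gff\n"
        "COUNT_FILENAME=touse.count.txt\n"
        "FASTQ_FILENAME=touse.rep.fq\n"
    )
    cfg = workdir / "sample.config"
    cfg.write_text(config_text)
    sample_dirs, sample_names, group_fn, gff_fn, count_fn, fastq_fn = read_config(cfg)
    assert sample_names == ["A", "B"]
    assert group_fn == "touse.group.txt"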
def read_count_info(
    count_filename: Union[str, Path], dirs: Dict[str, Union[str, Path]], field_to_use: str
) -> Dict[Tuple[str, str], int]:
count_info = {} # key: (sample, PB.1.1) --> count
count_header = ""
for name, d in dirs.items():
with Path(d, count_filename).open() as f:
while True:
cur = f.tell()
line = f.readline().strip()
if not line.startswith("#"):
break
count_header += line
f.seek(cur)
for r in DictReader(f, delimiter="\t"):
count_info[name, r["pbid"]] = r[field_to_use]
# count_info = {
# (sample_name, pbid): count,
# (sample_name, pbid): count,
# ...
# }
return count_info
def chain_split_file(
ref_gff: Path,
ref_group: Path,
ref_name: str,
addon_gff: Path,
addon_group: Path,
addon_name: str,
fuzzy_junction: int,
allow_5merge: bool,
max_3_diff: int,
n_chunks: int,
) -> Tuple[List[str], List[str]]:
"""
Organize entries in both a gff and transcript group file
and split both such that the original two files are split into chunks
where gff.chunk.n covers the same entries as group.chunk.n
"""
# read in the group_file as a dictionary in the form of
# {
# 'PB.1.1': ["transcript/1"],
# 'PB.1.2': ["transcript/2", "transcript/3"]
# }
addon_group_info = sp.MegaPBTree.read_group(addon_group, None)
# with addon_group.open('r') as ag:
# addon_group_info = {_.split('\t')[0]: _.split('\t')[1].split(",") for _ in ag.readlines()}
recs = []
tree = OrderedDict()
i = 0
# for r in HTSeq.GFF_Reader(addon_gff):
# if r.iv.chrom not in tree2:
# tree[r.iv.chrom] = {"+": ClusterTree(0, 0), "-": ClusterTree(0, 0)}
# tree[r.iv.chrom][r.iv.strand].insert(r.iv.start, r.iv.end, i)
# recs.append(r)
# i += 1
# This should build a structure in the form of:
# {"chrN":
# {
# "+" : bx.intervals.cluster.clusterTree,
# "-" : bx.intervals.cluster.clusterTree,
# },
# "chrN+1":
# {
# "+" : bx.intervals.cluster.clusterTree,
# "-" : bx.intervals.cluster.clusterTree,
# },
# }
# CusterTree objects have the form
# [(x,y,[z]), (a,b,[c]), (m,n,[o])]
# where each tuple is a range and a list of ids that lie within that range
# e.g. (from the bx-python docs):
# tree = ClusterTree(0, 0) Insert (6, 7, 1), (1, 2, 3), (9, 10, 2), (3, 4, 0), (3, 8, 4)
# tree.getregions() returns [(1, 2, [3]), (3, 8, [0, 1, 4]), (9, 10, [2])]
# NOTE: GFF.collapseGFFReader is a specialized GFF reader that in the attributes
# field stores a list of bx.intervals.intersection.Interval objects
# describing the exons
for r in GFF.collapseGFFReader(addon_gff):
if r.chr not in tree:
tree[r.chr] = {"+": ClusterTree(0, 0), "-": ClusterTree(0, 0)}
tree[r.chr][r.strand].insert(r.start, r.end, i)
recs.append(r)
i += 1
n = len(recs)
    chunk_size = (n // n_chunks) + (n % n_chunks > 0)  # ceiling division: ceil(n / n_chunks)
split_files = []
i = 0
counter = 0
f_gff = open(f"{addon_gff}.split{str(i)}", "w")
f_group = open(f"{addon_group}.split{str(i)}", "w")
# this loop is going to reorder everything
# so that we have a GFF with a transcript followed by all the exons that
# made up that transcript and a separate file with the matching
# transcript_id transcript/read_group#
# (see the sp.MegaPBTree above)
for v1 in tree.values():
for strand in ("+", "-"):
v2 = v1[strand]
for *_, _indices in v2.getregions():
for cur in _indices:
GFF.write_collapseGFF_format(f_gff, recs[cur])
f_group.write(
f"{recs[cur].seqid}\t{','.join(addon_group_info[recs[cur].seqid])}\n"
)
counter += 1
if counter >= (i + 1) * chunk_size:
i += 1
n = f_gff.tell()
f_gff.close()
f_group.close()
if n == 0: # didn't write any records, delete these
Path(f_gff.name).unlink()
Path(f_group.name).unlink()
else:
split_files.append((f_gff.name, f_group.name))
if i >= n_chunks or counter >= len(recs):
break
f_gff = open(f"{addon_gff}.split{str(i)}", "w")
f_group = open(f"{addon_group}.split{str(i)}", "w")
if not f_gff.closed:
n = f_gff.tell()
f_gff.close()
f_group.close()
if n == 0: # didn't write any records, delete these
Path(f_gff.name).unlink()
Path(f_group.name).unlink()
else:
split_files.append((f_gff.name, f_group.name))
result_prefixes = []
pools = []
for i, (split_gff, split_group) in enumerate(split_files):
p = Process(
target=chain_helper,
args=(
ref_gff,
ref_group,
split_gff,
split_group,
ref_name,
f"{addon_name}.{str(i)}",
fuzzy_junction,
allow_5merge,
max_3_diff,
),
)
p.start()
pools.append(p)
result_prefixes.append((ref_name, f"{addon_name}.{str(i)}"))
for p in pools:
p.join()
return result_prefixes, split_files
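# Illustrative sketch (not part of the original module): the bx-python ClusterTree
# behaviour described in the comment block inside chain_split_file, using the example
# values quoted there from the bx-python docs.
def _cluster_tree_demo():
    tree = ClusterTree(0, 0)  # same constructor arguments as chain_split_file uses above
    for start, end, idx in [(6, 7, 1), (1, 2, 3), (9, 10, 2), (3, 4, 0), (3, 8, 4)]:
        tree.insert(start, end, idx)
    # Expected (per the comments above): [(1, 2, [3]), (3, 8, [0, 1, 4]), (9, 10, [2])]
    return tree.getregions()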
def chain_helper(
ref_gff: Union[str, Path],
ref_group: Union[str, Path],
addon_gff: Union[str, Path],
addon_group: Union[str, Path],
name1: str,
name2: str,
fuzzy_junction: int,
allow_5merge: bool,
max_3_diff: int,
) -> None:
o = sp.MegaPBTree(
gff_filename=ref_gff,
group_filename=ref_group,
self_prefix=name1,
internal_fuzzy_max_dist=fuzzy_junction,
allow_5merge=allow_5merge,
max_3_diff=max_3_diff,
fastq_filename=None,
)
o.add_sample(
gff_filename=addon_gff,
group_filename=addon_group,
sample_prefix=name2,
output_prefix=f"tmp_{name2}",
fastq_filename=None,
)
def combine_split_chained_results(
output_prefixes,
final_prefix,
ref_gff,
ref_group,
ref_name,
ref_fq,
addon_gff,
addon_group,
addon_name,
addon_fq,
):
"""
Each <output_prefix> will have .gff, .group.txt, .mega_info.txt.
There should be NO overlap between the split files, so clean merge should be possible!
1. read the .gff files, record the group and mega (id-map) info
2. sort the total records so can properly put on a unified superPBID
3. write out the unified result
4. delete the split files
"""
# sanity check files are all there
split_files = [] # tuple of (gff, group, mega)
for ref_name, o in output_prefixes:
gff_file = Path(f"tmp_{o}.gff")
mega_file = Path(f"tmp_{o}.mega_info.txt")
group_file = Path(f"tmp_{o}.group.txt")
if not gff_file.exists() or not mega_file.exists() or not group_file.exists():
raise RuntimeError(
f"Expects to see {gff_file},{mega_file},{group_file} but one or |