##############################################################################
#
# Copyright (c) 2011 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""PJ Data Manager Tests"""
import contextlib
import doctest
import persistent
import unittest
import psycopg2
import psycopg2.errors
from pprint import pprint
import transaction
import mock
from pjpersist import interfaces, serialize, testing, datamanager
class Root(persistent.Persistent):
pass
class Foo(persistent.Persistent):
name = None
def __init__(self, name=None):
self.name = name
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.name)
class Super(persistent.Persistent):
_p_pj_table = 'Super'
def __init__(self, name=None):
self.name = name
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.name)
class Sub(Super):
pass
class Bar(persistent.Persistent):
_p_pj_sub_object = True
def __init__(self, name=None):
super(Bar, self).__init__()
self.name = name
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.name)
class FooItem(object):
def __init__(self):
self.bar = 6
class ComplexFoo(persistent.Persistent):
def __init__(self):
self.item = FooItem()
self.name = 'complex'
def doctest_Root():
r"""Root: General Test
This class represents the root(s) of the object tree. All roots are stored
in a specified table. Since the rooted object needs to immediately
provide a data manager (jar), the operations on the DB root are not part of
the transaction mechanism.
>>> root = datamanager.Root(dm, 'proot')
Initially the root is empty:
>>> root.keys()
[]
Let's now add an item:
>>> foo = Foo()
>>> root['foo'] = foo
>>> root.keys()
['foo']
>>> root['foo'] == foo
True
Root objects can be overridden:
>>> foo2 = Foo()
>>> root['foo'] = foo2
>>> root.keys()
['foo']
>>> root['foo'] == foo
False
And of course we can delete an item:
>>> del root['foo']
>>> root.keys()
[]
"""
def doctest_PJDataManager_get_table_from_object():
r"""PJDataManager: _get_table_from_object(obj)
Get the table for an object.
>>> foo = Foo('1')
>>> foo_ref = dm.insert(foo)
>>> dbname, table = dm._get_table_from_object(foo)
>>> dm.reset()
We are returning the database and table name pair.
>>> dbname, table
('pjpersist_test', 'pjpersist_dot_tests_dot_test_datamanager_dot_Foo')
"""
def doctest_PJDataManager_object_dump_load_reset():
r"""PJDataManager: dump(), load(), reset()
The PJ Data Manager is a persistent data manager that manages object
states in a PostgreSQL database across Python transactions.
There are several arguments to create the data manager, but only the
psycopg2 connection is required:
>>> dm = datamanager.PJDataManager(
... testing.DummyConnectionPool(conn),
... root_table = 'proot')
There are two convenience methods that let you serialize and de-serialize
objects explicitly:
>>> foo = Foo()
>>> dm.dump(foo)
DBRef('pjpersist_dot_tests_dot_test_datamanager_dot_Foo',
'0001020304050607080a0b0c',
'pjpersist_test')
When the object is modified, ``dump()`` will remove it from the list of
registered objects.
>>> foo.name = 'Foo'
>>> foo._p_changed
True
>>> list(dm._registered_objects.values())
[<Foo Foo>]
>>> foo_ref = dm.dump(foo)
>>> foo._p_changed
False
>>> dm._registered_objects
{}
>>> dm.commit(None)
Let's now reset the data manager, so we do not hit a cache while loading
the object again:
>>> dm.reset()
We can now load the object:
>>> foo2 = dm.load(foo._p_oid)
>>> foo == foo2
False
>>> foo._p_oid = foo2._p_oid
"""
def doctest_PJDataManager_insertWithExplicitId():
"""
Objects can be inserted by specifying new object id explicitly.
>>> foo = Foo('foo')
>>> foo_ref = dm.insert(foo, '000000000000000000000001')
>>> dm.tpc_finish(None)
Now, the Foo object should have the provided id:
>>> foo._p_oid.id
'000000000000000000000001'
"""
def doctest_PJDataManager_flush():
r"""PJDataManager: flush()
This method writes all registered objects to PostgreSQL. It can be used at
any time during the transaction when a dump is necessary, but is also used
at the end of the transaction to dump all remaining objects.
Let's now add an object to the database and reset the manager like it is
done at the end of a transaction:
>>> foo = Foo('foo')
>>> foo_ref = dm.dump(foo)
>>> dm.commit(None)
Let's now load the object again and make a modification:
>>> foo_new = dm.load(foo._p_oid)
>>> foo_new.name = 'Foo'
The object is now registered with the data manager:
>>> list(dm._registered_objects.values())
[<Foo Foo>]
Let's now flush the registered objects:
>>> dm.flush()
There are several side effects that should be observed:
* During a given transaction, we guarantee that the user will always receive
the same Python object. This requires that flush does not reset the object
cache.
>>> id(dm.load(foo._p_oid)) == id(foo_new)
True
* The object is removed from the registered objects and the ``_p_changed``
flag is set to ``False``.
>>> dm._registered_objects
{}
>>> foo_new._p_changed
False
"""
def doctest_PJDataManager_insert():
r"""PJDataManager: insert(obj)
This method inserts an object into the database.
>>> foo = Foo('foo')
>>> foo_ref = dm.insert(foo)
After insertion, the original is not changed:
>>> foo._p_changed
False
It is also added to the list of inserted objects:
>>> list(dm._inserted_objects.values())
[<Foo foo>]
Let's make sure it is really in PostgreSQL:
>>> dm.commit(None)
>>> foo_new = dm.load(foo_ref)
>>> foo_new
<Foo foo>
Notice that we cannot insert the object again:
>>> dm.insert(foo_new)
Traceback (most recent call last):
...
ValueError: ('Object._p_oid is already set.', <Foo foo>)
Finally, registering a new object will not trigger an insert, but only
schedule the object for writing. This is done because objects are sometimes
registered when we only want to store a stub, since we would otherwise end
up in endless recursion loops.
>>> foo2 = Foo('Foo 2')
>>> dm.register(foo2)
>>> list(dm._registered_objects.values())
[<Foo Foo 2>]
But storing works as expected (flush is implicit before find):
>>> dm.flush()
>>> dumpTable(dm._get_table_from_object(foo2)[1])
[{'data': {'_py_persistent_type': 'pjpersist.tests.test_datamanager.Foo',
'name': 'foo'},
'id': '0001020304050607080a0b0c0'},
{'data': {'_py_persistent_type': 'pjpersist.tests.test_datamanager.Foo',
'name': 'Foo 2'},
'id': '0001020304050607080a0b0c0'}]
"""
def doctest_PJDataManager_remove():
r"""PJDataManager: remove(obj)
This method removes an object from the database.
>>> foo = Foo('foo')
>>> foo_ref = dm.insert(foo)
>>> dm.commit(None)
Let's now load the object and remove it.
>>> foo_new = dm.load(foo_ref)
>>> dm.remove(foo_new)
The object is removed from the table immediately:
>>> dumpTable(dm._get_table_from_object(foo)[1])
[]
Also, the object is added to the list of removed objects:
>>> list(dm._removed_objects.values())
[<Foo foo>]
Note that you cannot remove objects that are not in the database:
>>> dm.remove(Foo('Foo 2'))
Traceback (most recent call last):
ValueError: ('Object._p_oid is None.', <Foo Foo 2>)
There is an edge case when the object is inserted and removed in the same
transaction:
>>> dm.commit(None)
>>> foo3 = Foo('Foo 3')
>>> foo3_ref = dm.insert(foo3)
>>> dm.remove(foo3)
In this case, the object is removed from PostgreSQL and from the inserted
objects list, but it is still added to the removed objects list, so that we
know it was removed.
>>> dm._inserted_objects
{}
>>> list(dm._removed_objects.values())
[<Foo Foo 3>]
"""
def doctest_PJDataManager_insert_remove():
r"""PJDataManager: insert and remove in the same transaction
Let's insert an object:
>>> foo = Foo('foo')
>>> foo_ref = dm.insert(foo)
And remove it ASAP:
>>> dm.remove(foo)
>>> dm._inserted_objects
{}
>>> list(dm._removed_objects.values())
[<Foo foo>]
>>> dumpTable(dm._get_table_from_object(foo)[1])
[]
"""
def doctest_PJDataManager_insert_remove_modify():
r"""PJDataManager: insert and remove in the same transaction
Let's insert an object:
>>> foo = Foo('foo')
>>> foo_ref = dm.insert(foo)
And remove it ASAP:
>>> dm.remove(foo)
>>> dm._inserted_objects
{}
>>> list(dm._removed_objects.values())
[<Foo foo>]
>>> foo.name = 'bar'
>>> list(dm._removed_objects.values())
[<Foo bar>]
>>> list(dm._registered_objects.values())
[]
>>> dumpTable(dm._get_table_from_object(foo)[1])
[]
>>> dm.reset()
"""
def doctest_PJDataManager_remove_modify_flush():
r"""PJDataManager: An object is modified after removal.
Let's insert an object:
>>> foo = Foo('foo')
>>> foo_ref = dm.insert(foo)
>>> dm.reset()
Let's now remove it:
>>> dm.remove(foo)
>>> list(dm._removed_objects.values())
[<Foo foo>]
Within the same transaction we modify the object. But the object should
not appear in the registered objects list.
>>> foo._p_changed = True
>>> dm._registered_objects
{}
Now, because of other lookups, the changes are flushed, which should not
restore the object.
>>> dm.flush()
>>> dumpTable(dm._get_table_from_object(foo)[1])
[]
>>> dm.reset()
"""
def doctest_PJDataManager_remove_flush_modify():
r"""PJDataManager: An object is removed, DM flushed, object modified
Let's insert an object:
>>> foo = Foo('foo')
>>> foo_ref = dm.insert(foo)
>>> dm.reset()
Let's now remove it:
>>> foo._p_changed = True
>>> dm.remove(foo)
>>> list(dm._removed_objects.values())
[<Foo foo>]
Now, because of other lookups, the changes are flushed, which should not
restore the object.
>>> dm.flush()
>>> dumpTable(dm._get_table_from_object(foo)[1])
[]
Within the same transaction we modify the object. But the object should
not appear in the registered objects list.
>>> foo._p_changed = True
>>> dm._registered_objects
{}
>>> dumpTable(dm._get_table_from_object(foo)[1])
[]
>>> dm.reset()
"""
def doctest_PJDataManager_setstate():
r"""PJDataManager: setstate()
This method loads and sets the
import os, decimal
import numpy as np
import csv, math, json, pprint
from sklearn.manifold import TSNE
import pandas as pd
import string
from operator import itemgetter
def pipeline():
# raw file : NBA Season Data.csv
# rawfile = "../../static/skyflow/data/original/NBA Season Data.csv"
# rawfile_rows = read(rawfile)
rows = read('../../static/skyflow/data/original/NBA_redundancy_erased.csv')
# Duplicate rows existed, so duplicates were removed -> NBA_redundancy_erased.csv
columns_needed = ['Year', 'Player', 'nameID', 'Tm', 'G', 'ORB%', 'DRB%', 'TRB%', 'AST%', 'STL%', 'BLK%', 'Shot%']
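# Order-preserving de-duplication of the Year column: set.add() returns None,
# so `(used.add(x) or True)` records x as seen while keeping it in the list.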
used = set()
tmp = [row['Year'] for row in rows]
years = [x for x in tmp if x not in used and (used.add(x) or True)]
# write players.json
tmp = [row['Player'] for row in rows]
players = list(set(tmp))
players.sort()
for row in rows:
row['nameID'] = players.index(row['Player'])
for i, player in enumerate(players):
players[i] = {'id': i, 'name': player}
playersJson = json.dumps({'players': players}, indent=4)
with open('players.json', 'w') as f:
f.write(playersJson)
f.flush()
# test_years = ['2016']
# Column picked
selected_rows = pick_column_needed(rows, columns_needed)
key_modified_rows = list()
cks = ['year', 'player', 'nameID', 'tm']
value_origin = ['G', 'ORB%', 'DRB%', 'TRB%', 'AST%', 'STL%', 'BLK%', 'Shot%']
values = ['g', 'orb', 'drb', 'trb', 'ast', 'stk', 'blk', 'shot']
joined = list(zip(cks, columns_needed))
print([(a, b) for a, b in joined])
for row in selected_rows:
krow = {a: row[b] for a, b in joined}
krow['values'] = {a: row[b] for a, b in list(zip(values, value_origin))}
key_modified_rows.append(krow)
#
#
# Normalize data
#
# normalized_rows = get_normalized_data(selected_rows)
# normalized_list = dict2list(normalized_rows, columns_needed)
for i, row in enumerate(key_modified_rows):
row['id'] = i
# Dominance graph build
#
# print(selected_list)
for year in years:
print(year)
data_list = [(x['id'], list(map(float, list(x['values'].values())))) for x in
list(filter(lambda x: x['year'] == year, key_modified_rows))]
nd_dom, nd_dom_by = find_dominating_list(data_list)
print(data_list)
for i, d in enumerate(data_list):
key_modified_rows[d[0]]['all_dom'] = nd_dom[i]
key_modified_rows[d[0]]['all_dom_by'] = nd_dom_by[i]
key_modified_rows[d[0]]['dir_dom'] = [x for x in nd_dom[i]]
# dominated_by_list = find_dominated_by_list(data_list)
# dominance_score = find_dominate_score(data_list)
#
# for i in dominating_list.keys():
# selected_rows[i]['dominating'] = dominating_list[i]
# selected_rows[i]['dominated_by'] = dominated_by_list[i]
# selected_rows[i]['dominance_score'] = dominance_score[i]
# skylines = classify_skylines(data_list)
# for i, skyline in enumerate(skylines):
# for id in skyline:
# selected_rows[id]['nth-skyline'] = i + 1
for row in key_modified_rows:
erase_list = list()
for dom_id in row['dir_dom']:
l = [dom_id in key_modified_rows[d]['dir_dom'] for d in row['dir_dom']]
if any(l):
erase_list.append(dom_id)
for i in erase_list:
row['dir_dom'].remove(i)
for row in key_modified_rows:
row['dir_dom_by'] = list()
for row in key_modified_rows:
for dom_id in row['dir_dom']:
key_modified_rows[dom_id]['dir_dom_by'].append(row['id'])
print()
# X_embedded = t_SNE(selected_rows, columns_needed)
# 1st skyline
print()
with open('../../static/skyflow/data/processed/NBA_dominance.json', 'w') as f:
f.write(json.dumps(key_modified_rows))
f.flush()
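# --- Illustrative sketch (not part of the original pipeline) ----------------
# `find_dominating_list` is called above but not defined in this excerpt. A
# minimal Pareto-dominance helper consistent with its call site (input: list
# of (row_id, value_vector) pairs; output: per-item lists of dominated ids
# and dominating ids, indexed like the input, assuming higher is better in
# every dimension) could look like this:
def find_dominating_list_sketch(data_list):
    n = len(data_list)
    dom = [[] for _ in range(n)]     # ids that item i dominates
    dom_by = [[] for _ in range(n)]  # ids that dominate item i
    for i, (_, a) in enumerate(data_list):
        for j, (jid, b) in enumerate(data_list):
            if i == j:
                continue
            # i dominates j: >= in every dimension, > in at least one
            if all(x >= y for x, y in zip(a, b)) and any(x > y for x, y in zip(a, b)):
                dom[i].append(jid)
                dom_by[j].append(data_list[i][0])
    return dom, dom_by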
def absent_list():
rows = readJSON('../../static/skyflow/data/processed/NBA_dominance.json')
years = list()
for year in range(1978, 2016):
years.append([x['nameID'] for x in list(filter(lambda x: int(x['year']) == year, rows))])
print(years)
absents = list()
for year in range(len(years)):
absents.append(list())
for year in range(len(years) - 1):
for x in years[year]:
if x not in years[year + 1] and x not in absents[year]:
absents[year + 1].append(x)
for year in range(1, len(years)):
for x in years[year]:
if x not in years[year - 1] and x not in absents[year - 1]:
absents[year - 1].append(x)
print(absents)
for a in absents:
print(len(a))
def nth_skyline():
rows = readJSON('../../static/skyflow/data/processed/NBA_dominance.json')
already_counted = []
for row in rows:
if len(row['all_dom_by']) == 0:
row['nth-skyline'] = 0
already_counted.append(row['id'])
for i in range(1, 8):
candidate = []
for row in rows:
if 'nth-skyline' not in row:
if all(x in already_counted for x in row['all_dom_by']):
row['nth-skyline'] = i
candidate.append(row['id'])
print(i, candidate)
already_counted.extend(candidate)
for row in rows:
keys = row['values'].keys()
for key in keys:
row['values'][key] = float(row['values'][key])
row['dominance'] = len(row['all_dom'])
if row['nth-skyline'] > 3:
print(row)
# print(rows.filter(lambda x: x if ))
with open('../../static/skyflow/data/processed/NBA_nth.json', 'w') as f:
f.write(json.dumps(rows))
f.flush()
print()
# 1st, 2nd, 3rd level skylines
# print('write_process started')
# list2file(dominating_list, '../data/dominating_list.csv')
# list2file(dominated_by_list, '../data/dominated_by_list.csv')
# list2file_1elem(dominance_score, '../data/dominance_score.csv')
# list2jsonfile(dominating_list, '../data/dominating_list.json')
# list2jsonfile(dominated_by_list, '../data/dominated_by_list.json')
# list2jsonfile(dominance_score, '../data/dominance_score.json')
#
# list2file(skylines, '../data/skylines.csv')
# list2jsonfile(skylines, '../data/skylines.json')
# print(skylines)
#
# then what?
#
# t-SNE
#
# X_embedded = t_SNE(normalized_rows, columns_needed)
# write_tsne_coordinate(X_embedded, '../data/tsne.json')
#
#
# Measure subspace size
#
#
#
pass
def compareline_id():
rows = readJSON('../../static/skyflow/data/processed/NBA_nth.json')
results = []
for row in rows:
id = row['id']
nth = row['nth-skyline']
dom = []
dom_by = []
conflict = []
for pid in row['all_dom']:
nth = rows[pid]['nth-skyline']
while (len(dom) <= nth):
dom.append([])
dom[nth].append(pid)
for pid in row['all_dom_by']:
nth = rows[pid]['nth-skyline']
while (len(dom_by) <= nth):
dom_by.append([])
dom_by[nth].append(pid)
# filtered = list(filter(lambda x: x['year'] == row['year'], rows))
# for p in filtered:
# pid = p['id']
# if pid not in row['all_dom'] and pid not in row['all_dom_by']:
# nth = rows[pid]['nth-skyline']
# while (len(conflict) <= nth):
# conflict.append([])
# conflict[nth].append(pid)
results.append({'id': id, 'dom': dom, 'dom_by': dom_by})
# with open('../../static/skyflow/data/processed/NBA_nth')
def layers():
rows = readJSON('../../static/skyflow/data/processed/NBA_nth.json')
layers = list()
for y in range(1978, 2016 + 1):
layers.append([])
for row in rows:
while len(layers[int(row['year']) - 1978]) <= row['nth-skyline']:
layers[int(row['year']) - 1978].append([])
layers[int(row['year']) - 1978][row['nth-skyline']].append(row['id'])
with open('../../static/skyflow/data/processed/NBA_layers.json', 'w') as f:
f.write(json.dumps(layers))
f.flush()
def tsne_json():
file = '../../static/skyflow/data/processed/NBA processed.json'
output = '../../static/skyflow/data/processed/NBA_tsne.json'
keys = ['g', 'orb', 'drb', 'trb', 'ast', 'stk', 'blk', 'shot']
tsne(file, keys, output)
def vector_sum_json():
file = '../../static/skyflow/data/processed/NBA processed.json'
output = '../../static/skyflow/data/processed/NBA_vector_sum.json'
keys = ['g', 'orb', 'drb', 'trb', 'ast', 'stk', 'blk', 'shot']
vector_sum(file, keys, output)
# def vector_sum(file, keys, output):
def vector_sum(file, keys, output):
rows = readJSON(file)['data']
l = list()
for row in rows:
l.append([float(row['values'][x]) for x in keys])
norm_values = get_normalized_data(l)
print(l)
print(norm_values)
vectors = get_vector_sum(norm_values)
print(vectors)
for i, row in enumerate(rows):
row['x'] = vectors[i][0]
row['y'] = vectors[i][1]
with open(output, 'w') as f:
f.write(json.dumps({'data': rows}))
f.flush()
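# Note: the second `vector_sum` definition below shadows the one above at
# import time; only the version that also computes `theta` is effective.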
def vector_sum(file, keys, output):
rows = readJSON(file)['data']
l = list()
for row in rows:
l.append([float(row['values'][x]) for x in keys])
norm_values = get_normalized_data(l)
print(l)
print(norm_values)
vectors = get_vector_sum(norm_values)
print(vectors)
for i, row in enumerate(rows):
row['x'] = vectors[i][0]
row['y'] = vectors[i][1]
x = vectors[i][0]
y = vectors[i][1]
theta = 0
if x > 0 and y > 0:  # quadrant 1
theta = math.asin(y)
elif x < 0 and y > 0:  # quadrant 2
theta = math.acos(x)
elif x < 0 and y < 0:
theta = -math.asin(y) + math.pi
elif x > 0 and y < 0:
theta = 2 * math.pi - math.acos(x)
else:
print('error', x, y)
row['theta'] = theta
# print(vectors[i][0], vectors[i][1], theta * 180 / math.pi)
with open(output, 'w') as f:
f.write(json.dumps({'data': rows}))
f.flush()
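# Note on the quadrant branches above: math.asin/math.acos only give the
# intended angle when (x, y) lies on the unit circle, which is assumed to
# hold for the output of get_vector_sum (not shown in this excerpt). A
# branch-free sketch that avoids that assumption uses atan2 instead:
def angle_of_vector(x, y):
    # map the vector direction to [0, 2*pi), handling all quadrants and axes
    return math.atan2(y, x) % (2 * math.pi)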
def tsne(file, keys, output):
rows = readJSON(file)['data']
l = list()
for row in rows:
l.append([row['values'][x] for x in keys])
x = np.array(l)
# print(X)
x_embedded = TSNE(n_components=2).fit_transform(x)
for i, row in enumerate(x_embedded):
rows[i]['x'] = float(row[0])
rows[i]['y'] = float(row[1])
with open(output, 'w') as f:
f.write(json.dumps({'data': rows}))
f.flush()
def test():
df = pd.read_csv('../../static/skyflow/data/processed/NBA.csv')
df = df.fillna(0)
df.to_csv('../../static/skyflow/data/processed/NBA_fillna.csv')
print(df['3P%'].value_counts(dropna=False))
def skyline_all():
columns = ['PTS', 'AST', 'STL', 'BLK', 'TRB', 'ORB', 'DRB', '3P%', '3P', 'FG%', 'FG', 'G']
rows = list()
with open('../../static/skyflow/data/processed/NBA_fillna.csv', 'r') as f:
reader = csv.DictReader(f)
for row in reader:
rows.append(row)
# column combinations: nC2 -> nCn
# tsne location
# skyline relation
# abcde.json
# json : id, nameid, year, x, y, dom, dom_by
idxs = list(range(0, 12))
r = [x for x in powerset(idxs)]
# r = [[0, 1, 2, 3, 4, 5, 6, 7]]
r.sort()
print(r)
for l in r:
if len(l) != len(columns) - 3:
continue
print(l)
# list(map(chr, range(97, 123))) # or list(map(chr, range(ord('a'), ord('z')+1)))
# [chr(i) for i in range(ord('a'), ord('z') + 1)]
filename = ''.join(chr(ord('a') + x) for x in l)
print(filename)
selected_columns = [columns[idx] for idx in l]
# full_data = []
for year in range(1978, 2016):
# output = list()
adj_matrix = list()
year_list = list(filter(lambda x: int(x['Year']) == year, rows))
year_values = list([float(x[c]) if c.strip() != '' else 0 for c in selected_columns] for x in year_list)
# print(year, year_values)
x = np.array(year_values)
x_embedded = TSNE(n_components=2).fit_transform(x)
# for i, row in enumerate(x_embedded):
# print(float(row[0]), float(row[1]))
for i, p in enumerate(year_values):
adj_matrix.append(list())
adj_matrix[i].append(float(x_embedded[i][0]))
adj_matrix[i].append(float(x_embedded[i][1]))
dom = list()
dom_by = list()
# output.append(dict())
# output[i]['id'] = year_list[i]['id']
# output[i]['Year'] = year_list[i]['Year']
# output[i]['PlayerID'] = year_list[i]['PlayerID']
for idx, v in enumerate(year_values):
# print(p, v)
arr = [p[idx] - v[idx] for idx in range(len(l))]
equal_or_greater = [True if x >= 0 else False
# Copyright 2018 <NAME>
# Licensed under the MIT license
"""
Async-compatible version of itertools standard library functions.
These functions build on top of the async builtins components,
enabling use of both standard iterables and async iterables, without
needing to use if/else clauses or awkward logic. Standard iterables
get wrapped in async generators, and all functions are designed for
use with `await`, `async for`, etc.
See https://docs.python.org/3/library/itertools.html for reference.
"""
import asyncio
import builtins
import itertools
import operator
from typing import Any, AsyncIterator, List, Optional, Tuple, overload
from .builtins import enumerate, iter, list, next, zip
from .helpers import maybe_await
from .types import (
Accumulator,
AnyFunction,
AnyIterable,
AnyIterableIterable,
AnyStop,
KeyFunction,
N,
Predicate,
R,
T,
)
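# Usage sketch (assumes this module is importable, e.g. as `aitertools`):
#
#     import asyncio
#
#     async def main():
#         async for total in accumulate([1, 2, 3, 4]):
#             print(total)  # 1, 3, 6, 10
#
#     asyncio.run(main())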
async def accumulate(
itr: AnyIterable[T], func: Accumulator[T] = operator.add
) -> AsyncIterator[T]:
"""
Yield the running accumulation of an iterable and operator.
Accepts both a standard function or a coroutine for accumulation.
Example:
data = [1, 2, 3, 4]
async def mul(a, b):
return a * b
async for total in accumulate(data, func=mul):
... # 1, 2, 6, 24
"""
itr = iter(itr)
try:
total: T = await next(itr)
except AnyStop:
return
yield total
async for item in itr:
total = await maybe_await(func(total, item))
yield total
class Chain:
def __call__(self, *itrs: AnyIterable[T]) -> AsyncIterator[T]:
"""
Yield values from one or more iterables in series.
Consumes the first iterable lazily, in entirety, then the second, and so on.
Example:
async for value in chain([1, 2, 3], [7, 8, 9]):
... # 1, 2, 3, 7, 8, 9
"""
return self.from_iterable(itrs)
async def from_iterable(self, itrs: AnyIterableIterable[T]) -> AsyncIterator[T]:
"""
Like chain, but takes an iterable of iterables.
Alias for chain(*itrs)
"""
async for itr in iter(itrs):
async for item in iter(itr):
yield item
chain = Chain()
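# Design note: ``chain`` is exposed as a callable instance of ``Chain`` rather
# than a plain function so that both ``chain(a, b)`` and
# ``chain.from_iterable(iterables)`` work, mirroring ``itertools.chain`` and
# ``itertools.chain.from_iterable``.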
async def combinations(itr: AnyIterable[T], r: int) -> AsyncIterator[Tuple[T, ...]]:
"""
Yield r length subsequences from the given iterable.
Simple wrapper around itertools.combinations for asyncio.
This will consume the entire iterable before yielding values.
Example:
async for value in combinations(range(4), 3):
... # (0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)
"""
pool: List[T] = await list(itr)
for value in itertools.combinations(pool, r):
yield value
async def combinations_with_replacement(
itr: AnyIterable[T], r: int
) -> AsyncIterator[Tuple[T, ...]]:
"""
Yield r length subsequences from the given iterable with replacement.
Simple wrapper around itertools.combinations_with_replacement.
This will consume the entire iterable before yielding values.
Example:
async for value in combinations_with_replacement("ABC", 2):
... # ("A", "A"), ("A", "B"), ("A", "C"), ("B", "B"), ...
"""
pool: List[T] = await list(itr)
for value in itertools.combinations_with_replacement(pool, r):
yield value
async def compress(
itr: AnyIterable[T], selectors: AnyIterable[Any]
) -> AsyncIterator[T]:
"""
Yield elements only when the corresponding selector evaluates to True.
Stops when either the iterable or the selectors have been exhausted.
Example:
async for value in compress(range(5), [1, 0, 0, 1, 1]):
... # 0, 3, 4
"""
async for value, selector in zip(itr, selectors):
if selector:
yield value
async def count(start: N = 0, step: N = 1) -> AsyncIterator[N]:
"""
Yield an infinite series, starting at the given value and increasing by step.
Example:
async for value in count(10, -1):
... # 10, 9, 8, 7, ...
"""
value = start
while True:
yield value
value += step
async def cycle(itr: AnyIterable[T]) -> AsyncIterator[T]:
"""
Yield a repeating series from the given iterable.
Lazily consumes the iterable when the next value is needed, caching the
values in memory for future iterations of the series.
Example:
async for value in cycle([1, 2]):
... # 1, 2, 1, 2, 1, 2, ...
"""
items = []
async for item in iter(itr):
yield item
items.append(item)
while True:
for item in items:
yield item
async def dropwhile(
predicate: Predicate[T], iterable: AnyIterable[T]
) -> AsyncIterator[T]:
"""
Drops all items until the predicate evaluates False; yields all items afterwards.
Accepts both standard functions and coroutines for the predicate.
Example:
def pred(x):
return x < 4
async for item in dropwhile(pred, range(6)):
... # 4, 5
"""
itr = iter(iterable)
async for item in itr:
if not await maybe_await(predicate(item)):
yield item
break
async for item in itr:
yield item
async def filterfalse(
predicate: Predicate[T], iterable: AnyIterable[T]
) -> AsyncIterator[T]:
"""
Yield items from the iterable only when the predicate evaluates to False.
Accepts both standard functions and coroutines for the predicate.
Example:
def pred(x):
return x < 4
async for item in filterfalse(pred, range(6)):
... # 4, 5
"""
async for item in iter(iterable):
if not await maybe_await(predicate(item)):
yield item
# pylint: disable=undefined-variable,multiple-statements
@overload
def groupby(itr: AnyIterable[T]) -> AsyncIterator[Tuple[T, List[T]]]: # pragma: nocover
pass
@overload
def groupby(
itr: AnyIterable[T], key: KeyFunction[T, R]
) -> AsyncIterator[Tuple[R, List[T]]]: # pragma: nocover
pass
# pylint: enable=undefined-variable,multiple-statements
async def groupby(
itr: AnyIterable[T], key: Optional[KeyFunction[T, R]] = None
) -> AsyncIterator[Tuple[Any, List[T]]]:
"""
Yield consecutive keys and groupings from the given iterable.
Items will be grouped based on the key function, which defaults to
the identity of each item. Accepts both standard functions and
coroutines for the key function. Suggest sorting by the key
function before using groupby.
Example:
data = ["A", "a", "b", "c", "C", "c"]
async for key, group in groupby(data, key=str.lower):
key # "a", "b", "c"
group # ["A", "a"], ["b"], ["c", "C", "c"]
"""
if key is None:
key = lambda x: x
grouping: List[T] = []
it = iter(itr)
try:
item = await next(it)
except StopAsyncIteration:
return
grouping = [item]
j = await maybe_await(key(item))
async for item in it:
k = await maybe_await(key(item))
if k != j:
yield j, grouping
grouping = [item]
else:
grouping.append(item)
j = k
yield j, grouping
# pylint: disable=undefined-variable,multiple-statements
@overload
def islice(
itr: AnyIterable[T], __stop: Optional[int]
) -> AsyncIterator[T]: # pragma: nocover
pass
@overload
def islice(
itr: AnyIterable[T], __start: int, __stop: Optional[int], __step: int = 1
) -> AsyncIterator[T]: # pragma: nocover
pass
# pylint: enable=undefined-variable,multiple-statements
async def islice(itr: AnyIterable[T], *args: Optional[int]) -> AsyncIterator[T]:
"""
Yield selected items from the given iterable.
islice(iterable, stop)
islice(iterable, start, stop[, step])
Starting from the start index (or zero), stopping at the stop
index (or until exhausted), skipping items if step > 1.
Example:
data = range(10)
async for item in islice(data, 5):
... # 0, 1, 2, 3, 4
async for item in islice(data, 2, 5):
... # 2, 3, 4
async for item in islice(data, 1, 7, 2):
... # 1, 3, 5
"""
start = 0
step = 1
if not args:
raise ValueError("must pass stop index")
if len(args) == 1:
(stop,) = args
elif len(args) == 2:
start, stop = args # type: ignore
elif len(args) == 3:
start, stop, step = args # type: ignore
else:
raise ValueError("too many arguments given")
assert start >= 0 and (stop is None or stop >= 0) and step >= 0
step = max(1, step)
if stop == 0:
return
async for index, item in enumerate(itr):
if index >= start and (index - start) % step == 0:
yield item
if stop is not None and index + 1 >= stop:
break
async def permutations(
itr: AnyIterable[T], r: Optional[int] = None
) -> AsyncIterator[Tuple[T, ...]]:
"""
Yield r length permutations of elements in the iterable.
Simple wrapper around itertools.permutations for asyncio.
This will consume the entire iterable before yielding values.
Example:
async for value in permutations(range(3)):
... # (0, 1, 2), (0, 2, 1), (1, 0, 2), ...
"""
pool: List[T] = await list(itr)
for value in itertools.permutations(pool, r):
yield value
async def product(
*itrs: AnyIterable[T], repeat: int = 1 # pylint: disable=redefined-outer-name
) -> AsyncIterator[Tuple[T, ...]]:
"""
Yield cartesian products of all iterables.
Simple wrapper around itertools.product for asyncio.
This will consume all iterables before yielding any values.
Example:
async for value in product("abc", "xy"):
... # ("a", "x"), ("a", "y"), ("b", "x"), ...
async for value in product(range(3), repeat=3):
... # (0, 0, 0), (0, 0, 1), (0, 0, 2), ...
"""
pools = await asyncio.gather(*[list(itr) for itr in itrs])
for value in itertools.product(*pools, repeat=repeat):
yield value
async def repeat(elem: T, n: int = -1) -> AsyncIterator[T]:
"""
Yield the given value repeatedly, forever or up to n times.
Example:
async for value in
# -*- coding: utf-8 -*-
import os
import sys
import time
import stat
from core.env import env
import core.colorconsole as cc
import core.utils as utils
class InstallerBase:
def __init__(self):
self._all_ok = True
self._err_msg = list()
self._is_installed = False
self._install_path = ''
self._config_path = ''
self._data_path = ''
self._log_path = ''
self._installed_ver_str = 'UNKNOWN'
self._current_ver = 'UNKNOWN'
self._def_install_path = ''
ver_file = os.path.join(env.root_path, 'data', 'www', 'tpyaudit', 'webroot', 'app', 'app_ver.py')
try:
with open(ver_file, 'r') as f:
x = f.readlines()
for i in x:
s = i.split('=', 1)
if 'TP_SERVER_VER' == s[0].strip():
self._current_ver = s[1].strip()[1:-1]
break
except FileNotFoundError:
raise RuntimeError('Cannot detect installer version.')
def _init(self):
_width = 79
cc.v('')
cc.v('[]{}[]'.format('=' * (_width - 4)))
_str = 'tpyaudit Server Installation'
cc.o((cc.CR_VERBOSE, ' | '), (cc.CR_VERBOSE, _str), (cc.CR_VERBOSE, '{}|'.format(' ' * (_width - 5 - len(_str)))))
cc.v(' |{}|'.format('=' * (_width - 4)))
cc.o((cc.CR_VERBOSE, ' | ver: '), (cc.CR_ERROR, self._current_ver),
(cc.CR_VERBOSE, '{}|'.format(' ' * (_width - 13 - len(self._current_ver)))))
_str = 'author: <EMAIL>'
cc.v(' | {}{}|'.format(_str, ' ' * (_width - 5 - len(_str))))
cc.v('[]{}[]'.format('=' * (_width - 4)))
cc.v('')
cc.v('Welcome to install tpyaudit Server!')
cc.v('')
cc.o((cc.CR_VERBOSE,
'NOTICE: There are a few steps where you need to enter information or make a choice,\n'
' if you want to use the DEFAULT choice, just press the `Enter` key.'))
cc.o((cc.CR_VERBOSE, ' Otherwise you need to enter the '), (cc.CR_ERROR, 'highlight character'),
(cc.CR_VERBOSE, ' to make choice.'))
cc.v('')
cc.v('')
cc.v('Prepare installation...')
self._check_installation()
self._check_installation_ver()
cc.v('')
def run(self):
self._init()
if not self._is_installed:
self._do_install()
else:
cc.v('')
cc.v('Found tpyaudit server already installed at `{}`.'.format(self._install_path))
while True:
x = self._prompt_choice('What do you want to do?',
[('upgrade', 2, True), ('uninstall', 0, False), ('quit', 0, False)])
if x in ['q', 'quit']:
break
elif x in ['u', 'uninstall']:
self._do_uninstall()
break
elif x in ['g', 'upgrade']:
self._do_upgrade()
break
def _do_install(self):
while True:
cc.v('')
self._install_path = self._prompt_input('Set installation path', self._def_install_path)
_use_anyway = False
if os.path.exists(self._install_path):
while True:
cc.v('')
x = self._prompt_choice(
'The target path `{}` has already exists,\ndo you want to use it anyway?'.format(
self._install_path), [('Yes', 0, True), ('No', 0, False)])
if x in ['y', 'yes']:
_use_anyway = True
break
elif x in ['n', 'no']:
break
if _use_anyway:
break
else:
break
self._fix_path()
utils.make_dirs(self._install_path)
self._copy_files()
self._install_service()
self._start_service()
time.sleep(2)
self._check_service()
def _do_uninstall(self):
if not self._is_installed:
return
_del_settings = False
while True:
cc.v('')
x = self._prompt_choice('Do you want to keep your database and settings?',
[('Yes', 0, True), ('No', 0, False)])
if x in ['y', 'yes']:
break
elif x in ['n', 'no']:
_del_settings = True
break
if _del_settings:
while True:
cc.v('')
x = self._prompt_choice('Seriously!! Are you sure to remove all data and settings?',
[('Yes', 0, False), ('No', 0, True)])
if x in ['y', 'yes']:
break
elif x in ['n', 'no']:
_del_settings = False
break
self._stop_service()
time.sleep(2)
self._uninstall_service()
self._delete_files(_del_settings)
def _do_upgrade(self):
x = self._ver_compare(self._current_ver, self._installed_ver_str)
if x == 0:
while True:
cc.v('')
x = self._prompt_choice(
'The same version `{}` is already installed, are you sure you want to overwrite it?'.format(self._current_ver),
[('Yes', 0, False), ('No', 0, True)])
if x in ['y', 'yes']:
break
elif x in ['n', 'no']:
return
elif x < 0:
while True:
cc.v('')
x = self._prompt_choice(
'A newer version `{}` is already installed; rolling back to the old version `{}` may cause tpyaudit Server to not function properly.\nAre you sure you want to roll back to the old version?'.format(
self._installed_ver_str, self._current_ver), [('Yes', 0, False), ('No', 0, True)])
if x in ['y', 'yes']:
break
elif x in ['n', 'no']:
return
else:
while True:
cc.v('')
x = self._prompt_choice(
'Now upgrading from version `{}` to `{}`.\nAre you sure you want to upgrade to the new version?'.format(
self._installed_ver_str, self._current_ver), [('Yes', 0, False), ('No', 0, True)])
if x in ['y', 'yes']:
break
elif x in ['n', 'no']:
return
while True:
cc.v('')
x = self._prompt_choice('Make sure you have backed up your database and settings.\nAre you sure you want to continue?',
[('Yes', 0, False), ('No', 0, True)])
x = x.lower()
if x in ['y', 'yes']:
break
elif x in ['n', 'no']:
return
self._stop_service()
time.sleep(2)
self._uninstall_service()
self._delete_files(False)
time.sleep(1)
self._copy_files()
self._install_service()
self._start_service()
time.sleep(2)
self._check_service()
@staticmethod
def _prompt_choice(message, choices):
cc.v('{} ['.format(message), end='')
def_choice = ''
for i in range(len(choices)):
if i > 0:
cc.v('/', end='')
msg = choices[i][0]
idx = choices[i][1]
if choices[i][2]:
msg = msg.upper()
def_choice = msg[idx]
cc.v(msg[:idx], end='')
cc.e(msg[idx], end='')
cc.v(msg[idx + 1:], end='')
else:
msg = msg.lower()
cc.v(msg[:idx], end='')
cc.e(msg[idx], end='')
cc.v(msg[idx + 1:], end='')
cc.v(']: ', end='')
try:
x = input().strip()
if len(x) == 0:
x = def_choice
except EOFError:
x = def_choice
return x.lower()
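# Each entry of `choices` is a (label, highlight_index, is_default) tuple;
# e.g. ('upgrade', 2, True) upper-cases the label, highlights its third
# character ('G'), and uses it as the answer when the user just presses Enter.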
@staticmethod
def _prompt_input(message, def_value):
cc.v('{} ['.format(message), end='')
cc.w(def_value, end='')
cc.v(']: ', end='')
try:
x = input().strip()
if len(x) == 0:
x = def_value
except EOFError:
x = def_value
return x
@staticmethod
def _ver_compare(left, right):
l = left.split('.')
r = right.split('.')
len_l = len(l)
len_r = len(r)
if len_l < len_r:
for i in range(len_r - len_l):
l.append('0')
elif len_l > len_r:
for i in range(len_l - len_r):
r.append('0')
cnt = len(l)
for i in range(cnt):
if int(l[i]) < int(r[i]):
return -1
elif int(l[i]) > int(r[i]):
return 1
return 0
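# Worked examples of the comparison above (computed by hand for illustration):
#   _ver_compare('2.1', '2.0.3') ->  1  ('2.1' is padded to '2.1.0')
#   _ver_compare('3.0', '3.0.0') ->  0  (missing components count as 0)
#   _ver_compare('1.9', '1.10')  -> -1  (components compare numerically, not lexically)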
def _check_installation(self):
raise RuntimeError('`check_installation` not implement.')
def _check_installation_ver(self):
if not self._is_installed:
return
# try to get the installed version from www/tpyaudit/app/eom_ver.py
cc.o(' - check installed version ... ', end='')
ver_file = os.path.join(self._install_path, 'www', 'tpyaudit', 'webroot', 'app', 'app_ver.py')
try:
with open(ver_file) as f:
x = f.readlines()
for i in x:
s = i.split('=', 1)
if 'TP_SERVER_VER' == s[0].strip():
self._installed_ver_str = s[1].strip()[1:-1]
cc.i('[{}]'.format(self._installed_ver_str))
# self._installed_ver = self._ver_str_to_ver(self._installed_ver_str)
break
except FileNotFoundError:
cc.e('[failed]')
cc.e(' the installation may be broken')
def _fix_path(self):
raise RuntimeError('`_fix_path` not implement.')
def _copy_files(self):
raise RuntimeError('`copy_files` not implement.')
def _delete_files(self, del_settings):
raise RuntimeError('`delete_files` not implement.')
def _install_service(self):
raise RuntimeError('`install_service` not implement.')
def _start_service(self):
raise RuntimeError('`start_service` not implement.')
def _stop_service(self):
raise RuntimeError('`stop_service` not implement.')
def _uninstall_service(self):
raise RuntimeError('`uninstall_service` not implement.')
def _check_service(self):
raise RuntimeError('`check_service` not implement.')
class InstallerWin(InstallerBase):
def __init__(self):
super().__init__()
self._core_service_name = 'tpyaudit Core Service'
self._web_service_name = 'tpyaudit Web Service'
self._old_core_service_name = 'EOM tpyaudit Core Service'
self._old_web_service_name = 'EOM tpyaudit Web Service'
self._def_install_path = r'{}\tpyaudit-server'.format(os.environ['SystemDrive'])
def _get_service_exec(self, service_name):
_err, _ = utils.sys_exec(r'sc query "{}"'.format(service_name))
if 1060 == _err:
return None
else:
_err, _o = utils.sys_exec(r'sc qc "{}"'.format(service_name))
if _err != 0:
raise RuntimeError('Can not get execute file path of service `{}`.'.format(service_name))
for i in _o:
_x = i.split(':', 1)
if 'BINARY_PATH_NAME' == _x[0].strip():
_path = _x[1].strip()
return _path
return None
def _check_installation(self):
cc.o(' - check local installation ... ', end='')
_check_service_name = [self._old_core_service_name, self._old_web_service_name, self._core_service_name,
self._web_service_name]
for _service_name in _check_service_name:
_exec_file = self._get_service_exec(_service_name)
if _exec_file is not None:
self._is_installed = True
self._install_path = os.path.abspath(os.path.join(os.path.dirname(_exec_file), '..'))
break
if self._is_installed:
cc.i('[{}]'.format(self._install_path))
self._fix_path()
else:
cc.i('[not exists]')
return
def _fix_path(self):
self._data_path = os.path.join(self._install_path, 'data')
self._config_path = os.path.join(self._data_path, 'etc')
self._log_path = os.path.join(self._data_path, 'log')
def _copy_files(self):
utils.copy_ex(os.path.join(env.src_path, 'bin'), os.path.join(self._install_path, 'bin'))
utils.copy_ex(os.path.join(env.src_path, 'www'), os.path.join(self._install_path, 'www'))
if not os.path.exists(self._config_path):
utils.copy_ex(os.path.join(env.src_path, 'tmp', 'etc'), self._config_path)
def _delete_files(self, del_settings):
utils.remove(os.path.join(self._install_path, 'bin'))
utils.remove(os.path.join(self._install_path, 'www'))
if del_settings:
utils.remove(self._data_path)
# utils.remove(self._config_path)
# utils.remove(self._log_path)
# only remove the installation path when it empty.
try:
os.rmdir(self._install_path)
except OSError:
pass
def _install_service(self):
cc.o(' - install tpyaudit core service ... ', end='')
_core = os.path.join(self._install_path, 'bin', 'tp_core.exe')
_err, _ = utils.sys_exec(r'"{}" -i'.format(_core))
if _err == 0 or _err == 1:
cc.i('[done]')
else:
cc.e('[failed]')
raise RuntimeError('Install core service failed. error code: {}'.format(_err))
cc.o(' - install tpyaudit web service ... ', end='')
_core = os.path.join(self._install_path, 'bin', 'tp_web.exe')
_err, _ = utils.sys_exec(r'"{}" -i'.format(_core))
if _err == 0 or _err == 1:
cc.i('[done]')
else:
cc.e('[failed]')
raise RuntimeError('Install web service failed. error code: {}'.format(_err))
return True
def _start_service(self):
cc.o(' - start tpyaudit core service ... ', end='')
_err, _o = utils.sys_exec(r'sc start "{}"'.format(self._core_service_name))
# print('start core', _err, _o)
if _err == 0:
cc.i('[done]')
else:
cc.e('[failed]')
raise RuntimeError('Can not start core service.')
cc.o(' - start tpyaudit web service ...', end='')
_err, _ = utils.sys_exec(r'sc start "{}"'.format(self._web_service_name))
if _err == 0:
cc.i('[done]')
else:
cc.e('[failed]')
raise RuntimeError('Can not start web service.')
def _stop_service(self):
_check_service_name = [self._old_core_service_name, self._old_web_service_name, self._core_service_name,
self._web_service_name]
for _service_name in _check_service_name:
cc.o(' - stop service [{}] ... '.format(_service_name), end='')
_err, _ = utils.sys_exec(r'sc stop "{}"'.format(_service_name))
if _err == 1060 or _err == 1062 or _err == 0:
cc.i('[done]')
elif _err == 1072:
"""Exception-catching middleware that allows interactive debugging.
This middleware catches all unexpected exceptions. A normal
traceback, like produced by
``weberror.exceptions.errormiddleware.ErrorMiddleware`` is given, plus
controls to see local variables and evaluate expressions in a local
context.
This can only be used in single-process environments, because
subsequent requests must go back to the same process that the
exception originally occurred in. Threaded or non-concurrent
environments both work.
This shouldn't be used in production in any way. That would just be
silly.
If calling from an XMLHttpRequest call, if the GET variable ``_`` is
given then it will make the response more compact (and less
Javascripty), since if you use innerHTML it'll kill your browser. You
can look for the header X-Debug-URL in your 500 responses if you want
to see the full debuggable traceback. Also, this URL is printed to
``wsgi.errors``, so you can open it up in another browser window.
"""
import httplib
import sys
import os
import cgi
import traceback
from cStringIO import StringIO
import pprint
import itertools
import time
import re
import types
import urllib
from pkg_resources import resource_filename
from paste import fileapp
from paste import registry
from paste import request
from paste import urlparser
from paste.util import import_string
import evalcontext
from weberror import errormiddleware, formatter, collector
from weberror.util import security
from tempita import HTMLTemplate
from webob import Request, Response
from webob import exc
limit = 200
def html_quote(v):
"""
Escape HTML characters, plus translate None to ''
"""
if v is None:
return ''
return cgi.escape(str(v), 1)
def preserve_whitespace(v, quote=True):
"""
Quote a value for HTML, preserving whitespace (translating
newlines to ``<br>`` and multiple spaces to use ``&nbsp;``).
If ``quote`` is true, then the value will be HTML quoted first.
"""
if quote:
v = html_quote(v)
v = v.replace('\n', '<br>\n')
v = re.sub(r'()(  +)', _repl_nbsp, v)
v = re.sub(r'(\n)( +)', _repl_nbsp, v)
v = re.sub(r'^()( +)', _repl_nbsp, v)
return '<code>%s</code>' % v
def _repl_nbsp(match):
if len(match.group(2)) == 1:
return '&nbsp;'
return match.group(1) + '&nbsp;' * (len(match.group(2))-1) + ' '
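# Assuming the ``&nbsp;`` entities above match the upstream paste/weberror
# code, a run of N >= 2 spaces is rendered as N-1 non-breaking spaces followed
# by one normal space, so long lines inside the <code> wrapper can still wrap.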
def simplecatcher(application):
"""
A simple middleware that catches errors and turns them into simple
tracebacks.
"""
def simplecatcher_app(environ, start_response):
try:
return application(environ, start_response)
except:
out = StringIO()
traceback.print_exc(file=out)
start_response('500 Server Error',
[('content-type', 'text/html')],
sys.exc_info())
res = out.getvalue()
return ['<h3>Error</h3><pre>%s</pre>'
% html_quote(res)]
return simplecatcher_app
def wsgiapp():
"""
Turns a function or method into a WSGI application.
"""
def decorator(func):
def wsgiapp_wrapper(*args):
# we get 3 args when this is a method, two when it is
# a function :(
if len(args) == 3:
environ = args[1]
start_response = args[2]
args = [args[0]]
else:
environ, start_response = args
args = []
def application(environ, start_response):
form = request.parse_formvars(environ,
include_get_vars=True)
status = '200 OK'
form['environ'] = environ
try:
res = func(*args, **form.mixed())
except ValueError, ve:
status = '500 Server Error'
res = '<html>There was an error: %s</html>' % \
html_quote(ve)
start_response(status, [('content-type', 'text/html')])
return [res]
app = simplecatcher(application)
return app(environ, start_response)
wsgiapp_wrapper.exposed = True
return wsgiapp_wrapper
return decorator
def get_debug_info(func):
"""
A decorator (meant to be used under ``wsgiapp()``) that resolves
the ``debugcount`` variable to a ``DebugInfo`` object (or gives an
error if it can't be found).
"""
def debug_info_replacement(self, req):
if 'debugcount' not in req.params:
return exc.HTTPBadRequest(
"You must provide a debugcount parameter")
debugcount = req.params['debugcount']
try:
debugcount = int(debugcount)
except ValueError, e:
return exc.HTTPBadRequest(
"Invalid value for debugcount (%r): %s"
% (debugcount, e))
if debugcount not in self.debug_infos:
return exc.HTTPServerError(
"Debug %s not found (maybe it has expired, or the server was restarted)"
% debugcount)
req.debug_info = self.debug_infos[debugcount]
return func(self, req)
debug_info_replacement.exposed = True
return debug_info_replacement
def check_csrf_token(func):
"""
A decorator to verify that the sender is same-origin with the debug app
"""
def new_fn(self, req):
if 'csrf_token' not in req.params:
return exc.HTTPForbidden("You must provide a CSRF token")
csrf_token = req.params['csrf_token']
if not security.valid_csrf_token(csrf_secret, csrf_token):
return exc.HTTPForbidden("Invalid CSRF token")
return func(self, req)
new_fn.exposed = True
return new_fn
debug_counter = itertools.count(int(time.time()))
csrf_secret = security.gen_csrf_secret()
def get_debug_count(req):
"""
Return the unique debug count for the current request
"""
if hasattr(req, 'environ'):
environ = req.environ
else:
environ = req
# XXX: Legacy support for Paste restorer
if 'paste.evalexception.debug_count' in environ:
return environ['paste.evalexception.debug_count']
elif 'weberror.evalexception.debug_count' in environ:
return environ['weberror.evalexception.debug_count']
else:
next = debug_counter.next()
environ['weberror.evalexception.debug_count'] = next
environ['paste.evalexception.debug_count'] = next
return next
class InvalidTemplate(Exception):
pass
class EvalException(object):
"""Handles capturing an exception and turning it into an interactive
exception explorer"""
def __init__(self, application, global_conf=None,
error_template_filename=None,
xmlhttp_key=None, media_paths=None,
templating_formatters=None, head_html='', footer_html='',
reporters=None, libraries=None,
debug_url_prefix=None,
**params):
self.libraries = libraries or []
self.application = application
self.debug_infos = {}
self.templating_formatters = templating_formatters or []
self.head_html = HTMLTemplate(head_html)
self.footer_html = HTMLTemplate(footer_html)
if error_template_filename is None:
error_template_filename = resource_filename( "weberror",
"eval_template.html" )
if xmlhttp_key is None:
if global_conf is None:
xmlhttp_key = '_'
else:
xmlhttp_key = global_conf.get('xmlhttp_key', '_')
self.xmlhttp_key = xmlhttp_key
if debug_url_prefix is None:
if global_conf is None:
debug_url_prefix = '_debug'
else:
debug_url_prefix = global_conf.get('debug_url_prefix', '_debug')
self.debug_url_prefix = debug_url_prefix.split('/')
self.media_paths = media_paths or {}
self.error_template = HTMLTemplate.from_filename(error_template_filename)
if reporters is None:
reporters = []
self.reporters = reporters
def __call__(self, environ, start_response):
## FIXME: print better error message (maybe fall back on
## normal middleware, plus an error message)
assert not environ['wsgi.multiprocess'], (
"The EvalException middleware is not usable in a "
"multi-process environment")
# XXX: Legacy support for Paste restorer
environ['weberror.evalexception'] = environ['paste.evalexception'] = \
self
req = Request(environ)
req_path = req.path_info.split('/')[1:len(self.debug_url_prefix) + 1]
if req_path == self.debug_url_prefix:
return self.debug(req)(environ, start_response)
else:
return self.respond(environ, start_response)
def debug(self, req):
for path_part in self.debug_url_prefix:
assert req.path_info_pop() == path_part
next_part = req.path_info_pop()
method = getattr(self, next_part, None)
if method is None:
return exc.HTTPNotFound('Nothing could be found to match %r' % next_part)
if not getattr(method, 'exposed', False):
return exc.HTTPForbidden('Access to %r is forbidden' % next_part)
return method(req)
def post_traceback(self, req):
"""Post the long XML traceback to the host and path provided"""
debug_info = req.debug_info
long_xml_er = formatter.format_xml(debug_info.exc_data,
show_hidden_frames=True, show_extra_data=False,
libraries=self.libraries)[0]
host = req.GET['host']
headers = req.headers
conn = httplib.HTTPConnection(host)
headers = {'Content-Length':len(long_xml_er),
'Content-Type':'application/xml'}
conn.request("POST", req.GET['path'], long_xml_er, headers=headers)
resp = conn.getresponse()
res = Response()
for header, value in resp.getheaders():
if header.lower() in ['server', 'date']: continue
res.headers[header] = value
res.body = resp.read()
return res
post_traceback = check_csrf_token(get_debug_info(post_traceback))
def media(self, req):
"""Static path where images and other files live"""
first_part = req.path_info_peek()
if first_part in self.media_paths:
req.path_info_pop()
path = self.media_paths[first_part]
else:
path = resource_filename("weberror", "eval-media")
app = urlparser.StaticURLParser(path)
return app
media.exposed = True
def summary(self, req):
"""
Returns a JSON-format summary of all the cached
exception reports
"""
res = Response(content_type='text/x-json')
data = [];
items = self.debug_infos.values()
items.sort(lambda a, b: cmp(a.created, b.created))
data = [item.json() for item in items]
res.body = repr(data)
return res
summary.exposed = True
def view(self, req):
"""
View old exception reports
"""
id = int(req.path_info_pop())
if id not in self.debug_infos:
return exc.HTTPServerError(
"Traceback by id %s does not exist (maybe "
"the server has been restarted?)" % id)
debug_info = self.debug_infos[id]
return debug_info.wsgi_application
view.exposed = True
def make_view_url(self, environ, base_path, count):
return base_path + '/view/%s' % count
#@get_debug_info
def show_frame(self, req):
tbid = int(req.params['tbid'])
frame = req.debug_info.frame(tbid)
vars = frame.tb_frame.f_locals
if vars:
registry.restorer.restoration_begin(req.debug_info.counter)
try:
local_vars = make_table(vars)
finally:
registry.restorer.restoration_end()
else:
local_vars = 'No local vars'
res = Response(content_type='text/html')
res.body = input_form.substitute(tbid=tbid, debug_info=req.debug_info) + local_vars
return res
show_frame = get_debug_info(show_frame)
#@get_debug_info
def exec_input(self, req):
input = req.params.get('input')
if not input.strip():
return ''
input = input.rstrip() + '\n'
frame = req.debug_info.frame(int(req.params['tbid']))
vars = frame.tb_frame.f_locals
glob_vars = frame.tb_frame.f_globals
context = evalcontext.EvalContext(vars, glob_vars)
registry.restorer.restoration_begin(req.debug_info.counter)
try:
output = context.exec_expr(input)
finally:
registry.restorer.restoration_end()
input_html = formatter.str2html(input)
res = Response(content_type='text/html')
res.write(
'<code style="color: #060">>>></code> '
'%s<br>\n%s'
% (preserve_whitespace(input_html, quote=False),
preserve_whitespace(output)))
return res
exec_input = check_csrf_token(get_debug_info(exec_input))
def source_code(self, req):
location = req.params['location']
module_name, lineno = location.split(':', 1)
module = sys.modules.get(module_name)
if module is None:
# Something weird indeed
res = Response(content_type='text/html', charset='utf8')
res.unicode_body = 'The module <code>%s</code> does not have an entry in sys.modules' % html_quote(module_name)
return res
filename = module.__file__
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
elif filename.endswith('$py.class'):
filename = '%s.py' % filename[:-9]
f = open(filename, 'rb')
source = f.read()
f.close()
html = (
('<div>Module: <b>%s</b> file: %s</div>'
'<style type="text/css">%s</style>'
% (html_quote(module_name), html_quote(filename), formatter.pygments_css))
+ formatter.highlight(filename, source, linenos=True))
source_lines = len(source.splitlines())
if source_lines < 60:
html += '\n<br>'*(60-source_lines)
res
"""Module for classification using quantum machine learning models.
"""
import numpy as np
import pickle
import json
from tqdm.auto import tqdm
from scipy.optimize import minimize
from .circuitML import circuitML
from .utility import CE_loss
SCIPY_METHODS = {
'bfgs', 'nelder-mead', 'powell', 'cg',
'newton-cg', 'l-bfgs-b', 'tnc', 'cobyla',
'slsqp', 'trust-constr', 'dogleg',
}
class Classifier():
"""Class for quantum classifiers. Defines the API using the scikit-learn
format.
Parameters
----------
circuit : circuitML
Quantum circuit to simulate, how to use and store is defined in child
classes.
bitstr : list of int or list of str
Which bitstrings should correspond to each class. The number of
classes for the classification is defined by the number of elements.
params : vector, optional
Initial model paramters. If ``None`` (default) uses
:meth:`circuitML.random_params`.
nbshots : int, optional
Number of shots for the quantum circuit. If 0, negative or None, then
exact probabilities are computed, by default ``None``.
nbshots_increment : float, int or callable, optional
How to increase the number of shots as optimization progress. If float
or int, the increment arise every `nbshots_incr_delay` iterations: if
float, then the increment is multiplicative; if int, then it is added.
If callable, the new nbshots is computed by calling
`nbshots_increment(nbshots, n_iter, loss_value)`.
nbshots_incr_delay : int, optional
After how many iterations nbshots has to increase. By default 20, if
nbshots_increment is given.
loss : callable, optional
Loss function, by default Negative LogLoss (Cross entropy).
job_size : int, optional
Number of runs for each circuit job, by default the number of
observations.
budget : int, optional
Maximum number of optimization steps, by default 100
name : str, optional
Name to identify this classifier.
save_path : str, optional
Where to save intermediate training results, by default None. If
``None``, intermediate results are not saved.
Attributes
----------
bitstr : list[int]
Bitstrings (as int) on which to read the classes
nbshots : int
Number of shots to run circuit
job_size : int
Number of circuits to run in each backend job
nfev : int
Number of times the circuit has been run
"""
def __init__(self, circuit, bitstr, **kwargs):
super().__init__()
# Retrieve keyword arguments
params = kwargs.get('params')
nbshots = kwargs.get('nbshots')
nbshots_increment = kwargs.get('nbshots_increment')
nbshots_incr_delay = kwargs.get('nbshots_incr_delay')
loss = kwargs.get('loss', CE_loss)
job_size = kwargs.get('job_size')
budget = kwargs.get('budget', 100)
name = kwargs.get('name')
save_path = kwargs.get('save_path')
# Testing circuit and setting it
self.set_circuit(circuit)
# Setting bitstrings
self.set_bitstr(bitstr)
# Setting parameters
if params is None:
self.set_params(circuit.random_params())
else:
self.set_params(params)
# Testing for nbshots type
if not (isinstance(nbshots, int) or (nbshots is None)):
raise TypeError("Invalid `nbshots` type")
if nbshots is not None and nbshots < 1:
nbshots = None
self.nbshots = nbshots
# Testing for nbshots_incr_delay
if not (
isinstance(nbshots_incr_delay, int) or (nbshots_incr_delay is None)
):
raise TypeError("Invalid `nbshots_incr_delay` type")
self.nbshots_incr_delay = 20
if nbshots_incr_delay is not None:
self.nbshots_incr_delay = nbshots_incr_delay
self.__set_nbshots_increment__(nbshots_increment)
if not isinstance(budget, int):
raise TypeError("Invalid `budget` type")
self.__budget__ = budget
self.job_size = job_size
self.__loss__ = loss
self.__min_loss__ = np.inf
self.__fit_conv__ = False
self.__last_loss_value__ = None
self.__last_output__ = None
self.__last_params__ = None
self.__loss_progress__ = []
self.__output_progress__ = []
self.__params_progress__ = []
self.__name__ = name
self.__save_path__ = save_path
self.nfev = 0
def __verify_circuit__(self, circuit):
Test whether a circuit is valid and raise TypeError if it is not.
Parameters
----------
circuit : circuitML
QML circuit
Raises
------
TypeError
If the circuit is not a circuitML
        ValueError
            If self has a circuit and the new circuit does not use the same
            make_circuit function
"""
if not isinstance(circuit, circuitML):
raise TypeError(
f"Circuit was type {type(circuit)} while circuitML was \
expected."
)
if hasattr(self, 'circuit'):
if self.circuit != circuit:
raise ValueError(
"Given circuit is different from previous circuit"
)
def set_circuit(self, circuit):
"""Set the circuit after testing for validity.
For a circuit to be valid, it has to be an instance of circuitML and,
in case self already has a circuit, to use the same make_circuit
function.
Parameters
----------
circuit : circuitML
QML circuit
Raises
------
Union[TypeError, ValueError]
If the circuit is invalid.
"""
self.__verify_circuit__(circuit)
self.circuit = circuit
def set_params(self, params):
"""Parameters setter
Parameters
----------
params : vector
Parameters vector
"""
self.params = params
def set_bitstr(self, bitstr):
"""Bitstring setter
Parameters
----------
bitstr : list[str] or list[int]
Bitstrings on which to read the class predictions.
Raises
------
        TypeError
            If bitstrings are of the wrong type or have heterogeneous types
"""
if isinstance(bitstr[0], int):
for i in bitstr:
if not isinstance(i, int):
raise TypeError("All bitstrings must have the same type")
self.bitstr = bitstr
elif isinstance(bitstr[0], str):
for i in bitstr:
if not isinstance(i, str):
raise TypeError("All bitstrings must have the same type")
self.bitstr = [int(bit, 2) for bit in bitstr]
else:
raise TypeError("Bitstrings must be either int or binary strings")
def __set_nbshots_increment__(self, nbshots_increment):
__incr__ = nbshots_increment
if nbshots_increment is None:
def __incr__(nbshots, n_iter, loss_value):
return nbshots
elif isinstance(nbshots_increment, float):
def __incr__(nbshots, n_iter, loss_value):
if n_iter % self.nbshots_incr_delay == 0:
return int(nbshots_increment * nbshots)
else:
return nbshots
elif isinstance(nbshots_increment, int):
def __incr__(nbshots, n_iter, loss_value):
if n_iter % self.nbshots_incr_delay == 0:
return nbshots + nbshots_increment
else:
return nbshots
self.nbshots_increment = __incr__
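    # Illustration (not from the original source): how the shot-increment schedule
    # behaves for the two non-callable cases, assuming nbshots_incr_delay = 20.
    #   - float increment, e.g. 1.5: every 20 iterations, nbshots -> int(1.5 * nbshots)
    #   - int increment,   e.g. 100: every 20 iterations, nbshots -> nbshots + 100
    # A callable is used as-is and receives (nbshots, n_iter, loss_value).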
def run_circuit(self, X, params=None):
"""Run the circuit with input `X` and parameters `params`.
Parameters
----------
X : array-like
Input matrix of shape (nb_samples, nb_features).
params : vector-like, optional
Parameter vector, by default uses the model
:attr:`~polyadicqml.Classifier.params`
Returns
-------
array
Bitstring counts as an array of shape (nb_samples, 2**nbqbits)
"""
if params is None:
params = self.params
self.nfev += 1
return self.circuit.run(
X, params, self.nbshots, job_size=self.job_size
)
def predict_proba(self, X, params=None):
"""Compute the bitstring probabilities associated to each input point
of the design matrix.
Parameters
----------
X : array
Design matrix of n samples
params : vector, optional
Circuit parameters, by default None. If not given, model
parameters are used.
Returns
-------
array
Predicted bitstring probabilities. Rows correspond to samples and
columns to bitstrings, whose order is defined in
:attr:`~polyadicqml.quantumClassifier.bitstr`.
"""
out = self.run_circuit(X, params)
if self.nbshots:
out = out / float(self.nbshots)
return out[:, self.bitstr]
def proba_to_label(self, proba) -> np.ndarray:
"""Transforms a matrix of real values in integer labels.
Parameters
----------
proba : array
Real valued array
Returns
-------
vector
Labels vector
"""
return np.argmax(proba, axis=1)
def predict(self, X):
"""Compute the predicted class for each input point of the design
matrix.
Parameters
----------
X : array
Design matrix of n samples
Returns
-------
vector
Labels vector
"""
return self.proba_to_label(self.predict_proba(X))
def __call__(self, X):
"""Compute the predicted class for each input point of the design
matrix.
Equivalent to :meth:`~polyadicqml.quantumClassifier.predict`
Parameters
----------
X : array
Design matrix of n samples
Returns
-------
vector
Labels vector
"""
return self.predict(X)
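    # Hypothetical usage sketch (the circuit instance `circ`, the bitstrings and the
    # training helper are illustrative assumptions, not taken from this file):
    #
    #     clf = Classifier(circ, bitstr=['00', '11'], nbshots=300, budget=50)
    #     clf.fit(X_train, y_train)            # assuming a fit() method is defined elsewhere
    #     y_proba = clf.predict_proba(X_test)  # bitstring probabilities, one row per sample
    #     y_pred = clf(X_test)                 # same as clf.predict(X_test)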
def set_loss(self, loss=None):
"""Loss function setter.
Parameters
----------
loss : callable, optional
Loss function of the form loss(y_true, y_pred, labels), by default
None. If None is given, nothing happens.
"""
if loss is not None:
self.__loss__ = loss
def __callback__(self, params, loss=False, output=False, ):
"""Callback function for optimization. It is called after each step.
Parameters
----------
params : vector
Current parameter vector
        loss : bool, optional
            Whether to store the loss value, by default False
        output : bool, optional
            Whether to store the current output and parameters, by default
            False
"""
self.__n_iter__ += 1
self.pbar.update()
if loss or output:
self.__loss_progress__.append(self.__last_loss_value__)
if output:
self.__output_progress__.append(self.__last_output__.tolist())
self.__params_progress__.append(params.tolist())
if self.__save_path__ and self.__n_iter__ % 10 == 0:
self.save()
        # We randomize the indices only after the callback;
        # this is necessary to estimate the gradient by finite differences (FD).
        self.__rnd_indices = np.random.choice(
            self.__indices, size=self.__batch_size, replace=False)
def __scipy_minimize__(
self, input_train, target_train, labels, method,
save_loss_progress, save_output_progress,
**kwargs
):
def to_optimize(params):
self.nbshots = self.nbshots_increment(
self.nbshots, self.__n_iter__, self.__min_loss__)
probas = self.predict_proba(
input_train[self.__rnd_indices], params
)
loss_value = self.__loss__(
target_train[self.__rnd_indices], probas, labels=labels
)
self.__last_loss_value__ = loss_value
self.__last_output__ = probas[np.argsort(self.__rnd_indices)]
if loss_value < self.__min_loss__:
self.__min_loss__ = loss_value
self.set_params(params.copy())
if method.lower() == "cobyla":
self.__callback__(
params, save_loss_progress, save_output_progress
)
return loss_value
# SCIPY.MINIMIZE IMPLEMENTATION
options = kwargs.get('options', {'maxiter': self.__budget__})
bounds = kwargs.get('bounds')
if method == 'L-BFGS-B' and bounds is | |
long > > >
    Create a vector of length p with matrices with symbolic primitives of
sym(str name, int nrow, int ncol, int p) -> std::vector< casadi::Matrix< long long >,std::allocator< casadi::Matrix< long long > > >
Create a vector of length p with nrow-by-ncol symbolic primitives.
sym(str name, Sparsity sp, int p, int r) -> [[IM]]
Create a vector of length r of vectors of length p with symbolic primitives
sym(str name, int nrow, int ncol, int p, int r) -> [[IM]]
symbolic primitives.
> sym(str name, (int,int) rc)
------------------------------------------------------------------------
Construct a symbolic primitive with given dimensions.
> sym(str name, int nrow, int ncol, int p)
------------------------------------------------------------------------
Create a vector of length p with nrow-by-ncol symbolic primitives.
> sym(str name, Sparsity sp, int p, int r)
------------------------------------------------------------------------
Create a vector of length r of vectors of length p with symbolic primitives
with given sparsity.
> sym(str name, int nrow, int ncol, int p, int r)
------------------------------------------------------------------------
Create a vector of length r of vectors of length p with nrow-by-ncol
symbolic primitives.
> sym(str name, Sparsity sp)
------------------------------------------------------------------------
Create symbolic primitive with a given sparsity pattern.
> sym(str name, int nrow, int ncol)
------------------------------------------------------------------------
Create an nrow-by-ncol symbolic primitive.
> sym(str name, Sparsity sp, int p)
------------------------------------------------------------------------
    Create a vector of length p with matrices with symbolic primitives of
given sparsity.
"""
return _casadi.GenIM_sym(*args)
sym = staticmethod(sym)
def zeros(*args):
"""
Create a dense matrix or a matrix with specified sparsity with all entries
zeros(int nrow, int ncol) -> IM
zeros((int,int) rc) -> IM
zeros(Sparsity sp) -> IM
zero.
"""
return _casadi.GenIM_zeros(*args)
zeros = staticmethod(zeros)
def ones(*args):
"""
Create a dense matrix or a matrix with specified sparsity with all entries
ones(int nrow, int ncol) -> IM
ones((int,int) rc) -> IM
ones(Sparsity sp) -> IM
one.
"""
return _casadi.GenIM_ones(*args)
ones = staticmethod(ones)
def __init__(self, *args):
"""
GenIM()
GenIM(GenIM other)
"""
this = _casadi.new_GenIM(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _casadi.delete_GenIM
GenIM_swigregister = _casadi.GenIM_swigregister
GenIM_swigregister(GenIM)
def GenIM_sym(*args):
"""
Create a vector of length r of vectors of length p with nrow-by-ncol
sym(str name, int nrow, int ncol) -> IM
Create an nrow-by-ncol symbolic primitive.
sym(str name, (int,int) rc) -> IM
Construct a symbolic primitive with given dimensions.
sym(str name, Sparsity sp) -> IM
Create symbolic primitive with a given sparsity pattern.
sym(str name, Sparsity sp, int p) -> std::vector< casadi::Matrix< long long >,std::allocator< casadi::Matrix< long long > > >
    Create a vector of length p with matrices with symbolic primitives of
sym(str name, int nrow, int ncol, int p) -> std::vector< casadi::Matrix< long long >,std::allocator< casadi::Matrix< long long > > >
Create a vector of length p with nrow-by-ncol symbolic primitives.
sym(str name, Sparsity sp, int p, int r) -> [[IM]]
Create a vector of length r of vectors of length p with symbolic primitives
sym(str name, int nrow, int ncol, int p, int r) -> [[IM]]
symbolic primitives.
> sym(str name, (int,int) rc)
------------------------------------------------------------------------
Construct a symbolic primitive with given dimensions.
> sym(str name, int nrow, int ncol, int p)
------------------------------------------------------------------------
Create a vector of length p with nrow-by-ncol symbolic primitives.
> sym(str name, Sparsity sp, int p, int r)
------------------------------------------------------------------------
Create a vector of length r of vectors of length p with symbolic primitives
with given sparsity.
> sym(str name, int nrow, int ncol, int p, int r)
------------------------------------------------------------------------
Create a vector of length r of vectors of length p with nrow-by-ncol
symbolic primitives.
> sym(str name, Sparsity sp)
------------------------------------------------------------------------
Create symbolic primitive with a given sparsity pattern.
> sym(str name, int nrow, int ncol)
------------------------------------------------------------------------
Create an nrow-by-ncol symbolic primitive.
> sym(str name, Sparsity sp, int p)
------------------------------------------------------------------------
    Create a vector of length p with matrices with symbolic primitives of
given sparsity.
"""
return _casadi.GenIM_sym(*args)
def GenIM_zeros(*args):
"""
Create a dense matrix or a matrix with specified sparsity with all entries
zeros(int nrow, int ncol) -> IM
zeros((int,int) rc) -> IM
zeros(Sparsity sp) -> IM
zero.
"""
return _casadi.GenIM_zeros(*args)
def GenIM_ones(*args):
"""
Create a dense matrix or a matrix with specified sparsity with all entries
ones(int nrow, int ncol) -> IM
ones((int,int) rc) -> IM
ones(Sparsity sp) -> IM
one.
"""
return _casadi.GenIM_ones(*args)
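# Illustrative sketch (not part of the generated wrapper): typical calls to these
# static constructors, assuming the package has been imported as `casadi`.
#
#     x = casadi.IM.sym('x', 2, 3)                   # 2-by-3 symbolic integer matrix
#     z = casadi.DM.zeros(3, 3)                      # dense 3-by-3 matrix of zeros
#     o = casadi.DM.ones(casadi.Sparsity.lower(3))   # ones on a lower-triangular pattern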
class GenDM(GenericMatrixCommon, SparsityInterfaceCommon):
"""
"""
__swig_setmethods__ = {}
for _s in [GenericMatrixCommon, SparsityInterfaceCommon]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, GenDM, name, value)
__swig_getmethods__ = {}
for _s in [GenericMatrixCommon, SparsityInterfaceCommon]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, GenDM, name)
__repr__ = _swig_repr
def nnz(self, *args):
"""
Get the number of (structural) non-zero elements.
nnz(self) -> int
"""
return _casadi.GenDM_nnz(self, *args)
def nnz_lower(self, *args):
"""
Get the number of non-zeros in the lower triangular half.
nnz_lower(self) -> int
"""
return _casadi.GenDM_nnz_lower(self, *args)
def nnz_upper(self, *args):
"""
Get the number of non-zeros in the upper triangular half.
nnz_upper(self) -> int
"""
return _casadi.GenDM_nnz_upper(self, *args)
def nnz_diag(self, *args):
"""
Get get the number of non-zeros on the diagonal.
nnz_diag(self) -> int
"""
return _casadi.GenDM_nnz_diag(self, *args)
def numel(self, *args):
"""
Get the number of elements.
numel(self) -> int
"""
return _casadi.GenDM_numel(self, *args)
def size1(self, *args):
"""
Get the first dimension (i.e. number of rows)
size1(self) -> int
"""
return _casadi.GenDM_size1(self, *args)
def rows(self, *args):
"""
Get the number of rows, Octave-style syntax.
rows(self) -> int
"""
return _casadi.GenDM_rows(self, *args)
def size2(self, *args):
"""
Get the second dimension (i.e. number of columns)
size2(self) -> int
"""
return _casadi.GenDM_size2(self, *args)
def columns(self, *args):
"""
Get the number of columns, Octave-style syntax.
columns(self) -> int
"""
return _casadi.GenDM_columns(self, *args)
def dim(self, *args):
"""
Get string representation of dimensions. The representation is e.g. "4x5"
dim(self, bool with_nz) -> str
or "4x5,10nz".
"""
return _casadi.GenDM_dim(self, *args)
def size(self, *args):
"""
Get the size along a particular dimensions.
size(self) -> (int,int)
Get the shape.
size(self, int axis) -> int
> size(self)
------------------------------------------------------------------------
Get the shape.
> size(self, int axis)
------------------------------------------------------------------------
Get the size along a particular dimensions.
"""
return _casadi.GenDM_size(self, *args)
def is_empty(self, *args):
"""
Check if the sparsity is empty, i.e. if one of the dimensions is zero (or
is_empty(self, bool both) -> bool
optionally both dimensions)
"""
return _casadi.GenDM_is_empty(self, *args)
def is_dense(self, *args):
"""
Check if the matrix expression is dense.
is_dense(self) -> bool
"""
return _casadi.GenDM_is_dense(self, *args)
def is_scalar(self, *args):
"""
Check if the matrix expression is scalar.
is_scalar(self, bool scalar_and_dense) -> bool
"""
return _casadi.GenDM_is_scalar(self, *args)
def is_square(self, *args):
"""
Check if the matrix expression is square.
is_square(self) -> bool
"""
return _casadi.GenDM_is_square(self, *args)
def is_vector(self, *args):
"""
Check if the matrix is a row or column vector.
is_vector(self) -> bool
"""
return _casadi.GenDM_is_vector(self, *args)
def is_row(self, *args):
"""
Check if the matrix is a row vector (i.e. size1()==1)
is_row(self) -> bool
"""
return _casadi.GenDM_is_row(self, *args)
def is_column(self, *args):
"""
Check if the matrix is a column vector (i.e. size2()==1)
is_column(self) -> bool
"""
return _casadi.GenDM_is_column(self, *args)
def is_triu(self, *args):
"""
Check if the matrix is upper triangular.
is_triu(self) -> bool
"""
return _casadi.GenDM_is_triu(self, *args)
def is_tril(self, *args):
"""
Check if the matrix is lower triangular.
is_tril(self) -> bool
"""
return _casadi.GenDM_is_tril(self, *args)
def row(self, *args):
"""
Get the sparsity pattern. See the Sparsity class for details.
row(self) -> [int]
row(self, int el) -> int
"""
return _casadi.GenDM_row(self, *args)
def colind(self, *args):
"""
Get the sparsity pattern. See the Sparsity class for details.
colind(self) -> [int]
colind(self, int col) -> int
"""
return _casadi.GenDM_colind(self, *args)
def sparsity(self, *args):
"""
Get the sparsity pattern.
sparsity(self) -> Sparsity
"""
return _casadi.GenDM_sparsity(self, *args)
def sym(*args):
"""
Create a vector of length r of vectors of length p with nrow-by-ncol
sym(str name, int nrow, int ncol) -> DM
Create an nrow-by-ncol symbolic primitive.
sym(str name, (int,int) rc) -> DM
Construct a symbolic primitive with given dimensions.
sym(str name, Sparsity sp) -> DM
Create symbolic primitive with a given sparsity pattern.
| |
import imp
import numpy
import torch
from torch._C import set_flush_denormal
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
import copy
from typing import Optional
from einops import rearrange
import math
# DropPath is used by the encoder/decoder layers below; timm is assumed to be its source.
from timm.models.layers import DropPath
"""
TranST: spatial_encoder, temporal_encoder,
Args:
"""
class TranST(nn.Module):
def __init__(self,
d_temporal_branch=512,
d_spatial_branch=512,
n_head=8,
fusion=False,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
drop_path_rate=0.1,
activation="relu",
normalize_before=False,
return_intermediate_dec=False,
rm_first_self_attn=False,
rm_res_self_attn=False,
t_only = False
):
super().__init__()
self.t_only = t_only
self.num_encoder_layers = num_encoder_layers
self.rm_first_self_attn = rm_first_self_attn
self.rm_res_self_attn = rm_res_self_attn
if num_encoder_layers > 0:
if not t_only:
spatial_encoder_layer = TransformerEncoderLayer(d_spatial_branch, n_head, dim_feedforward,
dropout, drop_path_rate, activation, normalize_before)
spatial_encoder_norm = nn.LayerNorm(d_spatial_branch) if normalize_before else None
self.spatial_encoder = TransformerEncoder(spatial_encoder_layer, num_encoder_layers, spatial_encoder_norm)
temporal_encoder_layer = TransformerEncoderLayer(d_temporal_branch, n_head, dim_feedforward,
dropout, drop_path_rate, activation, normalize_before)
temporal_encoder_norm = nn.LayerNorm(d_temporal_branch) if normalize_before else None
self.temporal_encoder = TransformerEncoder(temporal_encoder_layer, num_encoder_layers, temporal_encoder_norm)
if not t_only:
spatial_decoder_layer = TransformerDecoderLayer(d_spatial_branch, n_head, dim_feedforward,
dropout, drop_path_rate, activation, normalize_before)
spatial_decoder_norm = nn.LayerNorm(d_spatial_branch)
else:
spatial_decoder_layer = None
spatial_decoder_norm = None
temporal_decoder_layer = TransformerDecoderLayer(d_temporal_branch, n_head, dim_feedforward,
dropout, drop_path_rate, activation, normalize_before)
temporal_decoder_norm = nn.LayerNorm(d_temporal_branch)
self.STLD = STLD(spatial_decoder_layer, temporal_decoder_layer, num_decoder_layers,
spatial_decoder_norm, temporal_decoder_norm,
d_spatial_branch=d_spatial_branch, d_temporal_branch=d_temporal_branch,
return_intermediate=return_intermediate_dec, fusion=fusion, temporal_only=t_only
)
# self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.rm_self_attn_dec_func()
self._reset_parameters()
def rm_self_attn_dec_func(self):
        total_modified_layer_num = 0
rm_list = []
layer_stack = zip(self.STLD.temporal_layers) if self.t_only else zip(self.STLD.spatial_layers, self.STLD.temporal_layers)
for idx, layer in enumerate(layer_stack):
if idx == 0 and not self.rm_first_self_attn:
continue
if idx != 0 and not self.rm_res_self_attn:
continue
layer_t = layer[0]
if not self.t_only:
(layer_s, layer_t) = layer
layer_s.omit_selfattn = True
del layer_s.self_attn
del layer_s.norm1
layer_t.omit_selfattn = True
del layer_t.self_attn
del layer_t.norm1
            total_modified_layer_num += 1
rm_list.append(idx)
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src_s, src_t, query_embed, pos_s, pos_t, mask=None):
# flatten NxCxHxW to HWxNxC
# bs, c, t = src_t.shape
#src = src.flatten(2).permute(2, 0, 1)
#pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
# bs, _, _ = src_t.shape
# query_embed = query_embed.unsqueeze(0).repeat(bs, 1, 1)
if not self.t_only:
src_s = src_s.permute(2, 0, 1)
pos_s = pos_s.permute(2, 0, 1)
src_t = src_t.permute(2, 0, 1)
pos_t = pos_t.permute(2, 0, 1)
query_embed = query_embed.transpose(0, 1)
# query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
if mask is not None:
mask = mask.flatten(1)
if self.num_encoder_layers > 0:
if not self.t_only:
memory_s = self.spatial_encoder(src_s, src_key_padding_mask=mask, pos=pos_s)
memory_t = self.temporal_encoder(src_t, src_key_padding_mask=mask, pos=pos_t)
else:
if not self.t_only:
memory_s = src_s
memory_t = src_t
tgt = torch.zeros_like(query_embed)
if not self.t_only:
hs, ht = self.STLD(tgt=tgt,
memory_s=memory_s, memory_t=memory_t,
pos_s=pos_s, pos_t=pos_t,
query_pos_s=query_embed,
query_pos_t=query_embed
)
# torch.save(attn_list, "visualize/charades/attn_map/attn.pkl")
# hs = self.drop_path(hs)
# ht = self.drop_path(ht)
return hs.transpose(1, 2), ht.transpose(1, 2)
else:
ht = self.STLD( tgt=tgt,
memory_s=None, memory_t=memory_t,
pos_s=None, pos_t=pos_t,
query_pos_s=None,
query_pos_t=query_embed
)
# ht = self.drop_path(ht)
return ht.transpose(1, 2)
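# The function below is a hypothetical usage sketch, not part of the original module.
# It illustrates the tensor layouts TranST.forward expects, inferred from the permute()
# calls above; all sizes are illustrative assumptions. drop_path_rate=0. keeps the
# stochastic-depth branch on nn.Identity so the sketch has no extra dependencies.
def _transt_usage_sketch():
    bs, d, hw, t, nq = 2, 512, 49, 8, 16
    model = TranST(d_spatial_branch=d, d_temporal_branch=d, n_head=8,
                   num_encoder_layers=1, num_decoder_layers=1, drop_path_rate=0.)
    src_s = torch.randn(bs, d, hw)        # spatial features  (batch, channels, H*W)
    src_t = torch.randn(bs, d, t)         # temporal features (batch, channels, frames)
    pos_s = torch.randn(bs, d, hw)        # positional encodings, same layout as src_s
    pos_t = torch.randn(bs, d, t)
    query_embed = torch.randn(bs, nq, d)  # label queries, one set per sample
    hs, ht = model(src_s, src_t, query_embed, pos_s, pos_t)
    return hs.shape, ht.shape             # each roughly (1, batch, nq, d) here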
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
output = src
for layer in self.layers:
output = layer(output, src_mask=mask,
src_key_padding_mask=src_key_padding_mask, pos=pos)
if self.norm is not None:
output = self.norm(output)
return output
# For now there should be two fusion schemes: one is add, the other is concat + Conv.
# SlowFast is not considered for now; it is too special a case.
class STLD(nn.Module):
def __init__(self, spatial_decoder_layer, temporal_decoder_layer,
num_layers, spatial_norm, temporal_norm,
return_intermediate=False,
d_temporal_branch=512,
d_spatial_branch=512,
fusion = False,
temporal_only = False
):
super().__init__()
self.t_only = temporal_only
self.fusion = fusion
if not self.t_only:
self.spatial_layers = _get_clones(spatial_decoder_layer, num_layers)
self.spatial_norm = spatial_norm
self.d_spatial = d_spatial_branch
self.temporal_layers = _get_clones(temporal_decoder_layer, num_layers)
self.num_layers = num_layers
self.temporal_norm = temporal_norm
self.d_temporal = d_temporal_branch
self.return_intermediate = return_intermediate
def forward(self, tgt, memory_s, memory_t,
tgt_mask_s: Optional[Tensor] = None,
tgt_mask_t: Optional[Tensor] = None,
memory_mask_s: Optional[Tensor] = None,
memory_mask_t: Optional[Tensor] = None,
tgt_key_padding_mask_s: Optional[Tensor] = None,
tgt_key_padding_mask_t: Optional[Tensor] = None,
memory_key_padding_mask_s: Optional[Tensor] = None,
memory_key_padding_mask_t: Optional[Tensor] = None,
pos_s: Optional[Tensor] = None,
pos_t: Optional[Tensor] = None,
query_pos_s: Optional[Tensor] = None,
query_pos_t: Optional[Tensor] = None):
if self.t_only:
output_t = tgt
intermediate_t = []
layer_num = len(self.temporal_layers)
current_layer = 0
for layer_t in self.temporal_layers:
current_layer+=1
output_t = layer_t(output_t, memory_t, tgt_mask=tgt_mask_t,
memory_mask=memory_mask_t,
tgt_key_padding_mask=tgt_key_padding_mask_t,
memory_key_padding_mask=memory_key_padding_mask_t,
pos=pos_t, query_pos=query_pos_t)
if self.return_intermediate:
                    intermediate_t.append(self.temporal_norm(output_t))
if self.temporal_norm is not None:
output_t = self.temporal_norm(output_t)
if self.return_intermediate:
intermediate_t.pop()
intermediate_t.append(output_t)
if self.return_intermediate:
return torch.stack(intermediate_t)
return output_t.unsqueeze(0)
else:
output_s = tgt
output_t = tgt
intermediate_s = []
intermediate_t = []
layer_num = len(self.temporal_layers)
current_layer = 0
for layer_s, layer_t in zip(self.spatial_layers, self.temporal_layers):
current_layer+=1
output_s = layer_s(output_s, memory_s, tgt_mask=tgt_mask_s,
memory_mask=memory_mask_s,
tgt_key_padding_mask=tgt_key_padding_mask_s,
memory_key_padding_mask=memory_key_padding_mask_s,
pos=pos_s, query_pos=query_pos_s)
output_t = layer_t(output_t, memory_t, tgt_mask=tgt_mask_t,
memory_mask=memory_mask_t,
tgt_key_padding_mask=tgt_key_padding_mask_t,
memory_key_padding_mask=memory_key_padding_mask_t,
pos=pos_t, query_pos=query_pos_t)
if self.fusion:
if not (current_layer == 1 or current_layer == layer_num):
                        output_s, output_t = output_t, output_s
                if self.return_intermediate:
                    intermediate_t.append(self.temporal_norm(output_t))
                    intermediate_s.append(self.spatial_norm(output_s))
if self.spatial_norm is not None:
output_s = self.spatial_norm(output_s)
output_t = self.temporal_norm(output_t)
if self.return_intermediate:
intermediate_s.pop()
intermediate_t.pop()
intermediate_s.append(output_s)
intermediate_t.append(output_t)
if self.return_intermediate:
return torch.stack(intermediate_s), torch.stack(intermediate_t)
return output_s.unsqueeze(0), output_t.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, drop_path=0.1,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
# self.dropout1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self.debug_mode = False
self.debug_name = None
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
src2, _ = self.self_attn(q, k, value=src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)
src = src + self.drop_path(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.drop_path(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2, _ = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)
src = src + self.drop_path(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.drop_path(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, drop_path=0.1,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
# self.dropout1 = nn.Dropout(dropout) if dropout > 0. else nn.Identity()
# self.dropout2 = nn.Dropout(dropout) if dropout > 0. else nn.Identity()
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self.debug_mode = False
self.debug_name = None
self.omit_selfattn = False
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
# @get_local('cross_attn')
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
if not self.omit_selfattn:
tgt2, _ = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)
tgt = tgt + self.drop_path(tgt2)
tgt = self.norm1(tgt)
# print(self_attn_map)
tgt2, _ = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
# print(cross_attn.shape)
# attn_list.append(cross_attn)
# print(len(cross_attn))
tgt = tgt + self.drop_path(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.drop_path(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2, _ = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)
tgt = tgt + self.drop_path(tgt2)
tgt2 = self.norm2(tgt)
tgt2, _ = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
tgt = tgt + self.drop_path(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.drop_path(tgt2)
return tgt
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
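# Illustrative example (not from the original source):
#     _get_clones(nn.Linear(4, 4), 3) -> nn.ModuleList of three independent copies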
def _get_activation_fn(activation):
| |
Ltd", "CED", datetime.date(1996, 11, 29)),
"CMR": pnp.Vendor("Cambridge Research Systems Ltd", "CMR", datetime.date(2002, 4, 25)),
"CNN": pnp.Vendor("Canon Inc", "CNN", datetime.date(1996, 11, 29)),
"CAI": pnp.Vendor("Canon Inc.", "CAI", datetime.date(2001, 11, 6)),
"UBU": pnp.Vendor("Canonical Ltd.", "UBU", datetime.date(2013, 5, 24)),
"CAN": pnp.Vendor("Canopus Company Ltd", "CAN", datetime.date(1996, 11, 29)),
"CPM": pnp.Vendor("Capella Microsystems Inc.", "CPM", datetime.date(2012, 5, 9)),
"CCP": pnp.Vendor("Capetronic USA Inc", "CCP", datetime.date(1996, 11, 29)),
"DJE": pnp.Vendor("Capstone Visua lProduct Development", "DJE", datetime.date(2008, 10, 9)),
"CAR": pnp.Vendor("Cardinal Company Ltd", "CAR", datetime.date(1996, 11, 29)),
"CRD": pnp.Vendor("Cardinal Technical Inc", "CRD", datetime.date(1996, 11, 29)),
"CLX": pnp.Vendor("CardLogix", "CLX", datetime.date(2001, 3, 15)),
"CKJ": pnp.Vendor("Carina System Co., Ltd.", "CKJ", datetime.date(2010, 9, 3)),
"CZE": pnp.Vendor("<NAME>", "CZE", datetime.date(2009, 6, 3)),
"CAS": pnp.Vendor("CASIO COMPUTER CO.,LTD", "CAS", datetime.date(1998, 10, 6)),
"CAA": pnp.Vendor("Castles Automation Co., Ltd", "CAA", datetime.date(2000, 1, 13)),
"CAV": pnp.Vendor("Cavium Networks, Inc", "CAV", datetime.date(2011, 2, 2)),
"FVX": pnp.Vendor("C-C-C Group Plc", "FVX", datetime.date(1998, 5, 4)),
"CCL": pnp.Vendor("CCL/ITRI", "CCL", datetime.date(1997, 3, 31)),
"CCC": pnp.Vendor("C-Cube Microsystems", "CCC", datetime.date(1996, 11, 29)),
"CEP": pnp.Vendor("C-DAC", "CEP", datetime.date(1996, 11, 29)),
"CBR": pnp.Vendor("Cebra Tech A/S", "CBR", datetime.date(1996, 11, 29)),
"CEF": pnp.Vendor("Cefar Digital Vision", "CEF", datetime.date(1997, 2, 19)),
"CEN": pnp.Vendor("Centurion Technologies P/L", "CEN", datetime.date(2000, 10, 23)),
"TCE": pnp.Vendor("Century Corporation", "TCE", datetime.date(1996, 11, 29)),
"CRV": pnp.Vendor("Cerevo Inc.", "CRV", datetime.date(2010, 7, 13)),
"CER": pnp.Vendor("Ceronix", "CER", datetime.date(2008, 9, 2)),
"TOM": pnp.Vendor("Ceton Corporation", "TOM", datetime.date(2014, 5, 8)),
"CHP": pnp.Vendor("CH Products", "CHP", datetime.date(1997, 4, 24)),
"CHD": pnp.Vendor("ChangHong Electric Co.,Ltd", "CHD", datetime.date(2001, 11, 30)),
"CHA": pnp.Vendor("Chase Research PLC", "CHA", datetime.date(1996, 11, 29)),
"CHY": pnp.Vendor("Cherry GmbH", "CHY", datetime.date(1999, 5, 16)),
"CMO": pnp.Vendor("Chi Mei Optoelectronics corp.", "CMO", datetime.date(2001, 3, 15)),
"CHM": pnp.Vendor("CHIC TECHNOLOGY CORP.", "CHM", datetime.date(1999, 7, 16)),
"CEC": pnp.Vendor("Chicony Electronics Company Ltd", "CEC", datetime.date(1996, 11, 29)),
"CMN": pnp.Vendor("Chimei Innolux Corporation", "CMN", datetime.date(2010, 9, 2)),
"HLG": pnp.Vendor("China Hualu Group Co., Ltd.", "HLG", datetime.date(2013, 5, 13)),
"CHL": pnp.Vendor("Chloride-R&D", "CHL", datetime.date(1996, 11, 29)),
"CDG": pnp.Vendor("Christie Digital Systems Inc", "CDG", datetime.date(2001, 4, 24)),
"CVP": pnp.Vendor("Chromatec Video Products Ltd", "CVP", datetime.date(2013, 8, 9)),
"CHI": pnp.Vendor("Chrontel Inc", "CHI", datetime.date(1996, 11, 29)),
"CHT": pnp.Vendor("Chunghwa Picture Tubes,LTD.", "CHT", datetime.date(2001, 3, 15)),
"CTE": pnp.Vendor("Chunghwa Telecom Co., Ltd.", "CTE", datetime.date(2002, 5, 16)),
"KCD": pnp.Vendor("Chunichi Denshi Co.,LTD.", "KCD", datetime.date(2010, 12, 23)),
"QQQ": pnp.Vendor("Chuomusen Co., Ltd.", "QQQ", datetime.date(2002, 8, 7)),
"CGS": pnp.Vendor("Chyron Corp", "CGS", datetime.date(2008, 11, 13)),
"CNE": pnp.Vendor("Cine-tal", "CNE", datetime.date(2007, 6, 13)),
"PTG": pnp.Vendor("Cipher Systems Inc", "PTG", datetime.date(1996, 11, 29)),
"CIP": pnp.Vendor("Ciprico Inc", "CIP", datetime.date(1996, 11, 29)),
"CPC": pnp.Vendor("Ciprico Inc", "CPC", datetime.date(1996, 11, 29)),
"FPX": pnp.Vendor("Cirel Systemes", "FPX", datetime.date(1996, 11, 29)),
"CRQ": pnp.Vendor("Cirque Corporation", "CRQ", datetime.date(1996, 11, 29)),
"CIR": pnp.Vendor("Cirrus Logic Inc", "CIR", datetime.date(1996, 11, 29)),
"CLI": pnp.Vendor("Cirrus Logic Inc", "CLI", datetime.date(1996, 11, 29)),
"SNS": pnp.Vendor("Cirtech (UK) Ltd", "SNS", datetime.date(1997, 8, 20)),
"WSC": pnp.Vendor("CIS Technology Inc", "WSC", datetime.date(1996, 11, 29)),
"CIS": pnp.Vendor("Cisco Systems Inc", "CIS", datetime.date(1996, 11, 29)),
"CIL": pnp.Vendor("Citicom Infotech Private Limited", "CIL", datetime.date(2000, 8, 10)),
"CIT": pnp.Vendor("Citifax Limited", "CIT", datetime.date(1997, 7, 16)),
"CIN": pnp.Vendor("Citron GmbH", "CIN", datetime.date(2005, 7, 28)),
"CLA": pnp.Vendor("Clarion Company Ltd", "CLA", datetime.date(1996, 11, 29)),
"CVS": pnp.Vendor("Clarity Visual Systems", "CVS", datetime.date(2000, 1, 13)),
"CLE": pnp.Vendor("<NAME>", "CLE", datetime.date(2006, 2, 16)),
"CLV": pnp.Vendor("Clevo Company", "CLV", datetime.date(1998, 1, 30)),
"PPM": pnp.Vendor("Clinton Electronics Corp.", "PPM", datetime.date(2003, 10, 1)),
"CLO": pnp.Vendor("Clone Computers", "CLO", datetime.date(1996, 11, 29)),
"CSL": pnp.Vendor("Cloudium Systems Ltd.", "CSL", datetime.date(2013, 2, 14)),
"CMC": pnp.Vendor("CMC Ltd", "CMC", datetime.date(1996, 11, 29)),
"CMI": pnp.Vendor("C-Media Electronics", "CMI", datetime.date(1996, 11, 29)),
"JQE": pnp.Vendor("CNet Technical Inc", "JQE", datetime.date(1996, 11, 29)),
"COB": pnp.Vendor("COBY Electronics Co., Ltd", "COB", datetime.date(2007, 6, 13)),
"COD": pnp.Vendor("CODAN Pty. Ltd.", "COD", datetime.date(2000, 10, 23)),
"COI": pnp.Vendor("Codec Inc.", "COI", datetime.date(2001, 11, 30)),
"CDN": pnp.Vendor("Codenoll Technical Corporation", "CDN", datetime.date(1996, 11, 29)),
"CNT": pnp.Vendor("COINT Multimedia Systems", "CNT", datetime.date(1999, 3, 20)),
"CDE": pnp.Vendor("Colin.de", "CDE", datetime.date(2005, 1, 18)),
"CMD": pnp.Vendor("Colorado MicroDisplay, Inc.", "CMD", datetime.date(1999, 3, 20)),
"CVI": pnp.Vendor("Colorado Video, Inc.", "CVI", datetime.date(2012, 8, 15)),
"MVX": pnp.Vendor("COM 1", "MVX", datetime.date(1996, 11, 29)),
"CMX": pnp.Vendor("Comex Electronics AB", "CMX", datetime.date(2004, 5, 28)),
"CIC": pnp.Vendor("Comm. Intelligence Corporation", "CIC", datetime.date(1996, 11, 29)),
"CLD": pnp.Vendor("COMMAT L.t.d.", "CLD", datetime.date(2000, 8, 10)),
"SDH": pnp.Vendor("Communications Specialies, Inc.", "SDH", datetime.date(2005, 9, 6)),
"INX": pnp.Vendor("Communications Supply Corporation (A division of WESCO)", "INX", datetime.date(2012, 11, 7)),
"CPL": pnp.Vendor("Compal Electronics Inc", "CPL", datetime.date(1996, 11, 29)),
"CPQ": pnp.Vendor("Compaq Computer Company", "CPQ", datetime.date(1996, 11, 29)),
"CPP": pnp.Vendor("Compound Photonics", "CPP", datetime.date(2013, 10, 1)),
"CPD": pnp.Vendor("CompuAdd", "CPD", datetime.date(1996, 11, 29)),
"CMS": pnp.Vendor("CompuMaster Srl", "CMS", datetime.date(1999, 2, 22)),
"CDS": pnp.Vendor("Computer Diagnostic Systems", "CDS", datetime.date(2001, 3, 15)),
"CPI": pnp.Vendor("Computer Peripherals Inc", "CPI", datetime.date(1996, 11, 29)),
"CTP": pnp.Vendor("Computer Technology Corporation", "CTP", datetime.date(1998, 3, 26)),
"CBI": pnp.Vendor("ComputerBoards Inc", "CBI", datetime.date(1998, 2, 3)),
"CTM": pnp.Vendor("Computerm Corporation", "CTM", datetime.date(1996, 11, 29)),
"CTN": pnp.Vendor("Computone Products", "CTN", datetime.date(1996, 11, 29)),
"COX": pnp.Vendor("Comrex", "COX", datetime.date(2011, 10, 18)),
"CTS": pnp.Vendor("Comtec Systems Co., Ltd.", "CTS", datetime.date(2002, 4, 25)),
"CMM": pnp.Vendor("Comtime GmbH", "CMM", datetime.date(2002, 9, 23)),
"COM": pnp.Vendor("Comtrol Corporation", "COM", datetime.date(1996, 11, 29)),
"CDI": pnp.Vendor("Concept Development Inc", "CDI", datetime.date(1996, 11, 29)),
"CSE": pnp.Vendor("Concept Solutions & Engineering", "CSE", datetime.date(1996, 12, 11)),
"DCI": pnp.Vendor("Concepts Inc", "DCI", datetime.date(1996, 11, 29)),
"CXT": pnp.Vendor("Conexant Systems", "CXT", datetime.date(1999, 1, 20)),
"CGT": pnp.Vendor("congatec AG", "CGT", datetime.date(2011, 6, 16)),
"CNI": pnp.Vendor("Connect Int'l A/S", "CNI", datetime.date(1996, 11, 29)),
"CWR": pnp.Vendor("Connectware Inc", "CWR", datetime.date(1996, 11, 29)),
"CRC": pnp.Vendor("CONRAC GmbH", "CRC", datetime.date(2004, 4, 20)),
"CAT": pnp.Vendor("Consultancy in Advanced Technology", "CAT", datetime.date(1997, 9, 19)),
"CEA": pnp.Vendor("Consumer Electronics Association", "CEA", datetime.date(2006, 9, 5)),
"CCJ": pnp.Vendor("CONTEC CO.,LTD.", "CCJ", datetime.date(2000, 8, 10)),
"CON": pnp.Vendor("Contec Company Ltd", "CON", datetime.date(1996, 11, 29)),
"CRH": pnp.Vendor("Contemporary Research Corp.", "CRH", datetime.date(2015, 2, 24)),
"CTR": pnp.Vendor("Control4 Corporation", "CTR", datetime.date(2014, 5, 28)),
"CDD": pnp.Vendor("Convergent Data Devices", "CDD", datetime.date(2004, 2, 27)),
"CDV": pnp.Vendor("Convergent Design Inc.", "CDV", datetime.date(2006, 9, 5)),
"CDC": pnp.Vendor("Core Dynamics Corporation", "CDC", datetime.date(1996, 11, 29)),
"ART": pnp.Vendor("Corion Industrial Corporation", "ART", datetime.date(1996, 11, 29)),
"COT": pnp.Vendor("Core Technology Inc", "COT", datetime.date(2000, 4, 19)),
"CLG": pnp.Vendor("CoreLogic", "CLG", datetime.date(1998, 11, 27)),
"CRN": pnp.Vendor("Cornerstone Imaging", "CRN", datetime.date(1996, 11, 29)),
"COR": pnp.Vendor("Corollary Inc", "COR", datetime.date(1996, 12, 13)),
"CSM": pnp.Vendor("Cosmic Engineering Inc.", "CSM", datetime.date(2012, 4, 18)),
"COS": pnp.Vendor("CoStar Corporation", "COS", datetime.date(1996, 11, 29)),
"CTA": pnp.Vendor("CoSystems Inc", "CTA", datetime.date(1998, 10, 24)),
"CVA": pnp.Vendor("Covia Inc.", "CVA", datetime.date(2010, 5, 11)),
"CPT": pnp.Vendor("cPATH", "CPT", datetime.date(1998, 3, 9)),
"CRA": pnp.Vendor("CRALTECH ELECTRONICA, S.L.", "CRA", datetime.date(2015, 3, 24)),
"CDK": pnp.Vendor("Cray Communications", "CDK", datetime.date(1996, 11, 29)),
"IOA": pnp.Vendor("CRE Technology Corporation", "IOA", datetime.date(1997, 6, 30)),
"CRE": pnp.Vendor("Creative Labs Inc", "CRE", datetime.date(1996, 11, 29)),
"CRL": pnp.Vendor("Creative Logic", "CRL", datetime.date(1997, 10, 16)),
"CTL": pnp.Vendor("Creative Technology Ltd", "CTL", datetime.date(1996, 11, 29)),
"CTX": pnp.Vendor("Creatix Polymedia GmbH", "CTX", datetime.date(1996, 11, 29)),
"CRS": pnp.Vendor("Crescendo Communication Inc", "CRS", datetime.date(1996, 11, 29)),
"CSD": pnp.Vendor("Cresta Systems Inc", "CSD", datetime.date(1997, 8, 1)),
"CEI": pnp.Vendor("Crestron Electronics, Inc.", "CEI", datetime.date(2006, 5, 8)),
"CRI": pnp.Vendor("Crio Inc.", "CRI", datetime.date(1999, 9, 13)),
"CII": pnp.Vendor("Cromack Industries Inc", "CII", datetime.date(1997, 1, 22)),
"XTL": pnp.Vendor("Crystal Computer", "XTL", datetime.date(1996, 11, 29)),
"CSC": pnp.Vendor("Crystal Semiconductor", "CSC", datetime.date(1996, 11, 29)),
"CLM": pnp.Vendor("CrystaLake Multimedia", "CLM", datetime.date(1996, 11, 29)),
"CSS": pnp.Vendor("CSS Laboratories", "CSS", datetime.date(1997, 1, 2)),
"CST": pnp.Vendor("CSTI Inc", "CST", datetime.date(1996, 11, 29)),
"CTC": pnp.Vendor("CTC Communication Development Company Ltd", "CTC", datetime.date(1997, 10, 21)),
"CUB": pnp.Vendor("Cubix Corporation", "CUB", datetime.date(1996, 11, 29)),
"CWC": pnp.Vendor("Curtiss-Wright Controls, Inc.", "CWC", datetime.date(2013, 4, 5)),
"CYL": pnp.Vendor("Cyberlabs", "CYL", datetime.date(1998, 4, 14)),
"CYB": pnp.Vendor("CyberVision", "CYB", datetime.date(1997, 5, 13)),
"CYW": pnp.Vendor("Cyberware", "CYW", datetime.date(2000, 2, 21)),
"CBX": pnp.Vendor("Cybex Computer Products Corporation", "CBX", datetime.date(1999, 11, 8)),
"CYD": pnp.Vendor("Cyclades Corporation", "CYD", datetime.date(2001, 5, 7)),
"CYC": pnp.Vendor("Cylink Corporation", "CYC", datetime.date(1996, 11, 29)),
"CYX": pnp.Vendor("Cyrix Corporation", "CYX", datetime.date(1997, 10, 21)),
"CRX": pnp.Vendor("Cyrix Corporation", "CRX", datetime.date(1997, 3, 21)),
"CYT": pnp.Vendor("Cytechinfo Inc", "CYT", datetime.date(1998, 3, 13)),
"CYV": pnp.Vendor("Cyviz AS", "CYV", datetime.date(2002, 4, 25)),
"DMP": pnp.Vendor("D&M Holdings Inc, Professional Business Company", "DMP", datetime.date(2006, 9, 5)),
"OPI": pnp.Vendor("D.N.S. Corporation", "OPI", datetime.date(1996, 11, 29)),
"DDA": pnp.Vendor("DA2 Technologies Corporation", "DDA", datetime.date(2006, 3, 13)),
"DAW": pnp.Vendor("DA2 Technologies Inc", "DAW", datetime.date(2005, 9, 6)),
"DWE": pnp.Vendor("Daewoo Electronics Company Ltd", "DWE", datetime.date(1996, 11, 29)),
"TLT": pnp.Vendor("Dai Telecom S.p.A.", "TLT", datetime.date(2003, 6, 4)),
"DIN": pnp.Vendor("Daintelecom Co., Ltd", "DIN", datetime.date(1999, 11, 8)),
"DAI": pnp.Vendor("DAIS SET Ltd.", "DAI", datetime.date(2000, 2, 21)),
"DAK": pnp.Vendor("Daktronics", "DAK", datetime.date(2004, 6, 23)),
"DCC": pnp.Vendor("Dale Computer Corporation", "DCC", datetime.date(1996, 11, 29)),
"DCT": pnp.Vendor("Dancall Telecom A/S", "DCT", datetime.date(1997, 8, 12)),
"DAN": pnp.Vendor("D<NAME> A/S", "DAN", datetime.date(2009, 12, 24)),
"DDD": pnp.Vendor("Danka Data Devices", "DDD", datetime.date(1996, | |
a tank wall.")
def checkClosure(start_point, end_point):
if start_point == end_point:
return True
def addCorner(corner_flag):
if corner_flag == 'x-y-':
corner = [[self.x0, self.y0]]
elif corner_flag == 'x+y-':
corner = [[self.x1, self.y0]]
elif corner_flag == 'x+y+':
corner = [[self.x1, self.y1]]
elif corner_flag == 'x-y+':
corner = [[self.x0, self.y1]]
# vertex flags
if corner_flag in ['x-y-', 'x+y-']:
corner_tag = [self.boundaryTags['y-']]
else:
corner_tag = [self.boundaryTags['y+']]
return corner, corner_tag
def addIntermediateCorners(first, last):
"""
Returns corner vertices (and flags) in between two segments
"""
ordering = getClockwiseOrder(first)
corners = [x for x in ordering
if x in self.corners.keys()
and ordering.index(x) < ordering.index(last)
]
corner_vertices = []
corner_flags = []
for corner in corners:
self.corners[corner] = True
vertex, flag = addCorner(corner)
corner_vertices += vertex
corner_flags += flag
return corner_vertices, corner_flags
def addRemainingCorners(first, last):
if first == last:
if self.full_circle:
return []
else:
return addAllCorners(first)
else:
return addIntermediateCorners(first, last)
def addAllCorners(starting_point):
"""
Returns all corners and flags.
"""
corner_vertices = []
corner_flags = []
ordering = getClockwiseOrder(starting_point)
for potential_corner in ordering:
if potential_corner in self.corners.keys():
self.corners[potential_corner] = True
vertex, flag = addCorner(potential_corner)
corner_vertices += vertex
corner_flags += flag
return corner_vertices, corner_flags
def addSpongeVertices():
sponge_vertices = []
sponge_vertexFlags = []
if self.spongeLayers['x-']:
sponge_vertices += [[v[0] - self.spongeLayers['x-'], v[1]]
for v in [self.x0y0, self.x0y1]]
sponge_vertexFlags += [self.boundaryTags['y-'],
self.boundaryTags['y+']]
if self.spongeLayers['x+']:
sponge_vertices += [[v[0] + self.spongeLayers['x+'], v[1]]
for v in [self.x1y0, self.x1y1]]
sponge_vertexFlags += [self.boundaryTags['y-'],
self.boundaryTags['y+']]
return sponge_vertices, sponge_vertexFlags
#--------------------------------------------------------#
vertices = []
vertexFlags = []
former_end = None
first_start = None
for obstacle in self.obstacles:
start = findLocation(obstacle[0])
end = findLocation(obstacle[-1])
if start == end and checkClosure(obstacle[0],obstacle[-1]):
raise ValueError("Obstacles must be open (start and end"
" vertices must be distinct)")
if start == former_end and checkClosure(obstacle[0], vertices[-1]):
vertices.pop()
vertexFlags.pop()
# ---- In-Between Corner Vertices ---- #
if former_end is not None:
new_vertices, new_flags = addIntermediateCorners(former_end, start)
vertices += new_vertices
vertexFlags += new_flags
# ---- Obstacle ---- #
vertices += obstacle
vertexFlags += [self.boundaryTags[start]
for i in range(len(obstacle))]
# ---- Paperwork ---- #
former_end = end
if first_start is None:
first_start = start
# ---- Remaining Corner Vertices ---- #
if first_start is not None:
new_vertices, new_flags = addRemainingCorners(former_end,
first_start)
else:
new_vertices, new_flags = addAllCorners('x-')
vertices += new_vertices
vertexFlags += new_flags
# ---- Check for Special Conditions ---- #
for vertex in self.special_BC_vertices:
flag_index = vertices.index(vertex)
boundary_index = self.special_BC_vertices.index(vertex)
boundary_name = self.special_boundaries[boundary_index]
vertexFlags[flag_index] = self.boundaryTags[boundary_name]
# ---- Adjustments for Sponge Zones ---- #
self._findSpongeLayerCorners(vertices=vertices)
# ---- Add Sponge Zone Vertices ---- #
new_vertices, new_flags = addSpongeVertices()
vertices += new_vertices
vertexFlags += new_flags
return vertices, vertexFlags
def _constructSegments(self, vertices, vertexFlags):
# VertexFlag --> SegmentFlag logic:
#
# if EITHER are x+ --> segment is x+
# UNLESS the other is x- --> y+
# if EITHER are x- --> segment is x-
# UNLESS the other is x+ --> y-
# if it STARTS y- --> segment is y-
# UNLESS they are vertical --> x+
# if it STARTS y+ --> segment is y+
# UNLESS they are vertical --> x-
# if BOTH are *** --> segment is ***
# (if two different *** are around, it takes the first)
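        # Worked example (illustrative): a segment starting at a 'y-' vertex and
        # ending at a 'y+' vertex on the same x coordinate (a vertical wall) is
        # tagged 'x+'; if the two vertices differ in x, the segment keeps the
        # 'y-' tag of its starting vertex.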
segments = []
segmentFlags = []
on_sponge_edge = {'x-': False, 'x+': False}
sponge_edges_covered = {'x-': False, 'x+': False}
def checkSpongeStatus(start_index, end_index):
start_vertex = vertices[start_index]
if self.spongeLayers['x-']:
if not on_sponge_edge['x-']:
if start_vertex in (self.x0y0, self.x0y1):
on_sponge_edge['x-'] = True
elif not sponge_edges_covered['x-']:
if start_vertex in (self.x0y0, self.x0y1):
on_sponge_edge['x-'] = False
sponge_edges_covered['x-'] = True
else:
vertexFlags[start_index] = self.boundaryTags['sponge']
else:
pass
if self.spongeLayers['x+']:
if not on_sponge_edge['x+']:
if start_vertex in (self.x1y0, self.x1y1):
on_sponge_edge['x+'] = True
elif not sponge_edges_covered['x+']:
if start_vertex in (self.x1y0, self.x1y1):
on_sponge_edge['x+'] = False
sponge_edges_covered['x+'] = True
else:
vertexFlags[start_index] = self.boundaryTags['sponge']
else:
pass
end_vertex = vertices[end_index]
if on_sponge_edge['x-']:
if end_vertex not in (self.x0y0, self.x0y1):
vertexFlags[end_index] = self.boundaryTags['sponge']
if on_sponge_edge['x+']:
if end_vertex not in (self.x1y0, self.x1y1):
vertexFlags[end_index] = self.boundaryTags['sponge']
def getSegmentFlag(start, end):
if ((self.spongeLayers['x-'] and not sponge_edges_covered['x-']) or
(self.spongeLayers['x+'] and not sponge_edges_covered['x+'])):
checkSpongeStatus(start, end)
if on_sponge_edge['x-'] or on_sponge_edge['x+']:
return [self.boundaryTags['sponge'], ]
else:
if vertexFlags[start] == self.boundaryTags['x+']:
if vertexFlags[end] == self.boundaryTags['x-']:
return [self.boundaryTags['y+'], ]
else:
return [self.boundaryTags['x+'], ]
elif vertexFlags[start] == self.boundaryTags['x-']:
if vertexFlags[end] == self.boundaryTags['x+']:
return [self.boundaryTags['y-'], ]
else:
return [self.boundaryTags['x-'], ]
elif vertexFlags[end] == self.boundaryTags['x+']:
if vertexFlags[start] in [self.boundaryTags['y-'],
self.boundaryTags['y+']]:
return [self.boundaryTags['x+'], ]
elif vertexFlags[end] == self.boundaryTags['x-']:
if vertexFlags[start] in [self.boundaryTags['y-'],
self.boundaryTags['y+']]:
return [self.boundaryTags['x-'], ]
elif vertexFlags[start] == self.boundaryTags['y-']:
if (vertexFlags[end] == self.boundaryTags['y+']
and np.isclose(vertices[start][0], vertices[end][0])
):
return [self.boundaryTags['x+'], ]
else:
return [self.boundaryTags['y-'], ]
elif vertexFlags[start] == self.boundaryTags['y+']:
if (vertexFlags[end] == self.boundaryTags['y-']
and np.isclose(vertices[start][0], vertices[end][0])
):
return [self.boundaryTags['x-'], ]
else:
return [self.boundaryTags['y+'], ]
else:
return [vertexFlags[start], ]
# ---- Initial Sponge Logic ---- #
sponge_vertex_count = 0
if self.spongeLayers['x-']:
sponge_vertex_count += 2
if self.spongeLayers['x+']:
sponge_vertex_count += 2
# ---- Build Main Segments ---- #
for i in range(len(vertices) - 1 - sponge_vertex_count):
segments += [[i, i + 1], ]
segmentFlags += getSegmentFlag(i, i + 1)
segments += [[len(vertices) - 1 - sponge_vertex_count, 0], ]
segmentFlags += getSegmentFlag(len(vertices) - 1 - sponge_vertex_count,
0)
# ---- Build Sponge Segments ---- #
if self.spongeLayers['x-']:
segments += [[vertices.index(self.x0y0),
len(vertices) - sponge_vertex_count],
[len(vertices) - sponge_vertex_count,
len(vertices) - sponge_vertex_count + 1],
[len(vertices) - sponge_vertex_count + 1,
vertices.index(self.x0y1)]
]
segmentFlags += [self.boundaryTags['y-'],
self.boundaryTags['x-'],
self.boundaryTags['y+']]
if self.spongeLayers['x+']:
segments += [[vertices.index(self.x1y0), len(vertices) - 2],
[len(vertices) - 2, len(vertices) - 1],
[len(vertices) - 1, vertices.index(self.x1y1)]
]
segmentFlags += [self.boundaryTags['y-'],
self.boundaryTags['x+'],
self.boundaryTags['y+']]
return segments, segmentFlags
def _constructRegions(self, vertices, vertexFlags, segments, segmentFlags):
if True in self.corners.values():
regions = self._getCornerRegion()
else:
regions = self._getRandomRegion(vertices, segments)
ind_region = 1
regionFlags = [ind_region,]
self.regionIndice = {'tank': ind_region - 1}
sponge_half_height_x0 = 0.5 * (self.x0y0[1] + self.x0y1[1])
sponge_half_height_x1 = 0.5 * (self.x1y0[1] + self.x1y1[1])
sponge_x0 = self.x0y0[0]
sponge_x1 = self.x1y0[0]
if self.spongeLayers['x-']:
regions += [[sponge_x0 - 0.5 * self.spongeLayers['x-'],
sponge_half_height_x0]]
ind_region += 1
regionFlags += [ind_region]
self.regionIndice['x-'] = ind_region - 1
if self.spongeLayers['x+']:
regions += [[sponge_x1 + 0.5 * self.spongeLayers['x+'],
sponge_half_height_x1]]
ind_region += 1
regionFlags += [ind_region]
self.regionIndice['x+'] = ind_region - 1
return regions, regionFlags
def _findExtrema(self, points):
"""
Return the extrema of a series of points in n dimensions in the form:
max(x1), max(x2), ... , max(xn), min(x1), ... , min(xn)
"""
points = np.array(points)
return np.max(points,0).tolist() + np.min(points,0).tolist()
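    # Illustrative example (not from the original source):
    #     self._findExtrema([[0., 1.], [2., -3.]])  ->  [2.0, 1.0, 0.0, -3.0]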
def _getCornerRegion(self):
eps = np.finfo(float).eps
if self.corners['x-y-']:
return [[self.x0 + eps, self.y0 + eps], ]
elif self.corners['x+y-']:
return [[self.x1 - eps, self.y0 + eps], ]
elif self.corners['x+y+']:
return [[self.x1 - eps, self.y1 - eps], ]
elif self.corners['x-y+']:
return [[self.x0 + eps, self.y1 - eps], ]
def _getRandomRegion(self, vertices, segments):
x_p, y_p, x_n, y_n = self._findExtrema(vertices)
if self.spongeLayers['x-']:
x_n += self.spongeLayers['x-']
if self.spongeLayers['x+']:
x_p -= self.spongeLayers['x+']
count = 0
allowed_tries = 100
while True:
count += 1
vertical_line = np.random.uniform(x_n, x_p)
if True in [np.isclose(vertical_line, vertex[0]) for vertex in
vertices]:
continue
lowest_intersect = second_intersect = y_p
for segment in segments:
line_x0 = vertices[segment[0]][0]
line_y0 = vertices[segment[0]][1]
line_x1 = vertices[segment[1]][0]
line_y1 = vertices[segment[1]][1]
if (line_x0 < vertical_line < line_x1
or line_x0 > vertical_line > line_x1):
                    # (the strict inequality check and our choice of vertical_line
                    # guarantee line_x1 != line_x0 here, so the division is safe)
intersection_height = line_y0 + (
(line_y1 - line_y0)
* (vertical_line - line_x0)
/ (line_x1 - line_x0)
)
if intersection_height < lowest_intersect:
second_intersect = lowest_intersect
lowest_intersect = intersection_height
elif intersection_height < second_intersect:
second_intersect = intersection_height
interior_point = 0.5 * (lowest_intersect + second_intersect)
if lowest_intersect < interior_point < second_intersect:
break
            if count > allowed_tries:
                raise ValueError(
                    "Cannot find a proper interior point of the defined "
                    "shape after " + str(count) + " tries.")
return [[vertical_line, interior_point], ]
def setAbsorptionZones(self, x_n=False, x_p=False, dragAlpha=0.5/1.005e-6,
dragBeta=0., porosity=1.):
"""
Sets regions (x+, x-) to absorption zones
Parameters
----------
x_p: bool
If True, x+ region is converted to absorption zone.
x_n: bool
If True, x- region is converted to absorption zone.
dragAlpha: Optional[float]
Porous module parameter.
dragBeta: Optional[float]
Porous module parameter.
| |
# Source repository: rperrin22/FEHM_supplementary
import numpy as np
import pandas as pd
from pylagrit import PyLaGriT
from matplotlib import pyplot as plt
from scipy import interpolate
from scipy.interpolate import griddata
class create_FEHM_run:
def __init__(self,test_number,param_file):
# read in the parameter file
temp = pd.read_csv(param_file)
params = temp[temp.test_num==test_number]
# initialize the geometry
file_prefix = params['prefix'].values[0]
self.upper_numlayers = int(params['upper_numlayers'].values[0])
self.middle_numlayers = int(params['middle_numlayers'].values[0])
self.lower_numlayers = int(params['lower_numlayers'].values[0])
self.min_x = float(params['min_x'].values[0])
self.min_y = float(params['min_y'].values[0])
self.min_z = float(params['min_z'].values[0])
self.max_x = float(params['max_x'].values[0])
self.max_y = float(params['max_y'].values[0])
self.max_z = float(params['max_z'].values[0])
self.dx = float(params['dx'].values[0])
self.dy = float(params['dy'].values[0])
self.dz = (self.max_z - self.min_z + 1)/(self.upper_numlayers + self.middle_numlayers + self.lower_numlayers)
self.crust_thickness = float(params['crust_thickness'].values[0])
self.temp_dz = self.crust_thickness / self.middle_numlayers
self.fault_heat = float(params['fault_heat'].values[0] * self.temp_dz * self.dy / 1e9)
self.xvec = np.arange(self.min_x,self.max_x,self.dx)
self.yvec = np.arange(self.min_y,self.max_y,self.dy)
self.zvec = np.arange(self.min_z,self.max_z,self.dz)
self.XX,self.YY,self.ZZ = np.meshgrid(self.xvec,self.yvec,self.zvec)
# create output filenames
self.csv_filename = '%s_grid_coords.csv' % file_prefix
self.inp_filename = '%s_grid_coords.inp' % file_prefix
self.fehm_filename = '%s_grid_coords' % file_prefix
self.material_zones_filename = '%s_materials.zone' % file_prefix
self.boundary_zones_filename = '%s_boundary.zone' % file_prefix
self.input_filename = '%s_input.dat' % file_prefix
self.control_filename = 'fehmn.files'
self.prefix_name = '%s' % file_prefix
self.lagrit_exec_filename = params['lagrit_exec'].values[0]
# populate model parameters
self.title = '%s_input.dat' % file_prefix
self.grad_cond1 = float(params['grad_cond1'].values[0])
self.grad_cond2 = float(params['grad_cond2'].values[0])
self.grad_cond3 = float(params['grad_cond3'].values[0])
self.grad_ref_loc = float(params['grad_ref_loc'].values[0])
self.grad_direction = float(params['grad_direction'].values[0])
self.grad_ref_sat = float(params['grad_ref_sat'].values[0])
self.grad_sat_slope = float(params['grad_sat_slope'].values[0])
self.grad_ref_temp = float(params['grad_ref_temp'].values[0])
self.grad_temp_slope = float(params['grad_temp_slope'].values[0])
self.grad_ref_pres = float(params['grad_ref_pres'].values[0])
self.grad_pres_slope = float(params['grad_pres_slope'].values[0])
self.perm_lower = float(params['perm_lower'].values[0])
self.perm_middle_ocean = float(params['perm_middle_ocean'].values[0])
self.perm_middle_continental = float(params['perm_middle_continental'].values[0])
self.perm_upper = float(params['perm_upper'].values[0])
self.mult_lower = float(params['mult_lower'].values[0])
self.mult_upper = float(params['mult_upper'].values[0])
# convert from mW/m^2 to MW if mult upper and lower are 0
if self.mult_lower == 0:
self.temp_lower = float(params['temp_lower'].values[0] * self.dx * self.dy / 1e9)
else:
self.temp_lower = float(params['temp_lower'].values[0])
if self.mult_upper ==0:
self.temp_upper = float(params['temp_upper'].values[0] * self.dx * self.dy / 1e9)
else:
self.temp_upper = float(params['temp_upper'].values[0])
self.cond_lower = float(params['cond_lower'].values[0])
self.cond_middle_ocean = float(params['cond_middle_ocean'].values[0])
self.cond_middle_continental = float(params['cond_middle_continental'].values[0])
self.cond_upper = float(params['cond_upper'].values[0])
self.rock_density = float(params['rock_density'].values[0])
self.rock_spec_heat = float(params['rock_spec_heat'].values[0])
self.rock_porosity = float(params['rock_porosity'].values[0])
self.solids_cond = 2
self.water_cond = 0.604
self.init_time_step = float(params['init_time_step'].values[0])
self.final_sim_time = float(params['final_sim_time'].values[0])
self.max_time_steps = float(params['max_time_steps'].values[0])
self.info_print_int = float(params['info_print_int'].values[0])
self.rp_mult = float(params['athy_multiplier'].values[0])
self.rp_exp = float(params['athy_exp'].values[0])
self.max_iterations = float(params['max_iterations'].values[0])
self.newton_tol = float(params['newton_tol'].values[0])
self.num_orth = float(params['num_orth'].values[0])
self.max_solve = float(params['max_solve'].values[0])
self.acc_method = params['acc_method'].values[0]
self.ja = float(params['ja'].values[0])
self.jb = float(params['jb'].values[0])
self.jc = float(params['jc'].values[0])
self.nards = float(params['nards'].values[0])
self.implicitness_factor = float(params['implicitness_factor'].values[0])
self.grav_direction = float(params['grav_direction'].values[0])
self.upstream_weight = float(params['upstream_weight'].values[0])
self.max_iter_mult = float(params['max_iter_mult'].values[0])
self.time_step_mult = float(params['time_step_mult'].values[0])
self.max_time_step_size = float(params['max_time_step_size'].values[0])
self.min_time_step_size = float(params['min_time_step_size'].values[0])
self.geom_id = float(params['geom_id'].values[0])
self.lda = float(params['lda'].values[0])
self.G1 = float(params['G1'].values[0])
self.G2 = float(params['G2'].values[0])
self.G3 = float(params['G3'].values[0])
self.TMCH = float(params['TMCH'].values[0])
self.OVERF = float(params['OVERF'].values[0])
self.IRDOF = float(params['IRDOF'].values[0])
self.ISLORD = float(params['ISLORD'].values[0])
self.IBACK = float(params['IBACK'].values[0])
self.ICOUPL = float(params['ICOUPL'].values[0])
self.RNMAX = float(params['RNMAX'].values[0])
self.zbulk = float(params['zbulk'].values[0])
self.surf_filename = params['surf_filename'].values[0]
self.boundary_filename = params['boundary_filename'].values[0]
def build_surfaces_real(self):
# Brings in an externally-generated surface for the top of the crust.
# The surface is space-delimited ascii file with 3 columns - X,Y,Z
# in the future, add an option here to generate within the script
# based on some function like a sine-wave
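# Expected layout of the surface file (hypothetical values): one space-delimited
# "X Y Z" triple per line, no header row, e.g.
# 500000.0 4100000.0 -2450.0
# 500100.0 4100000.0 -2462.5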
self.XXtemp,self.YYtemp = np.meshgrid(self.xvec,self.yvec)
header_list = ["X","Y","Z"]
self.D = pd.read_csv(self.surf_filename,sep=' ',names=header_list)
# interpolate onto the meshed grid
self.ZZtemp = griddata((self.D['X'], self.D['Y']),self.D['Z'],(self.XXtemp.flatten(),self.YYtemp.flatten()),method='linear')
self.surf_upper = self.ZZtemp + self.zbulk
self.surf_upper = np.reshape(self.surf_upper,self.XXtemp.shape)
self.surf_lower = self.surf_upper - self.crust_thickness
self.bound_upper = np.ones(self.XXtemp.shape)*self.max_z
self.bound_lower = np.ones(self.XXtemp.shape)*self.min_z
self.bottom_zone = np.linspace(self.bound_lower,self.surf_lower - self.dz/4,self.lower_numlayers)
self.middle_zone = np.linspace(self.surf_lower + self.dz/4,self.surf_upper - self.dz/4,self.middle_numlayers)
self.upper_zone = np.linspace(self.surf_upper + self.dz/4,self.bound_upper,self.upper_numlayers)
self.bottom_zone = np.transpose(self.bottom_zone,(1,2,0))
self.middle_zone = np.transpose(self.middle_zone,(1,2,0))
self.upper_zone = np.transpose(self.upper_zone,(1,2,0))
self.ZZ = np.concatenate((self.bottom_zone,self.middle_zone,self.upper_zone),axis=2)
def plot_upper_surface(self):
plt.imshow(self.surf_upper)
plt.colorbar()
plt.show()
def plot_lower_surface(self):
plt.imshow(self.surf_lower)
plt.colorbar()
plt.show()
def build_mat_prop_files(self):
self.mat_prop_filename = '%s.rock' % self.prefix_name
self.cond_filename = '%s.cond' % self.prefix_name
PZ = open(self.mat_prop_filename,'w+')
CZ = open(self.cond_filename,'w+')
PZ.write('rock\n')
CZ.write('cond\n')
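# Sediment (upper zone) nodes: porosity decays exponentially with depth below the
# top of the model (Athy-style, via athy_multiplier/athy_exp), and bulk conductivity
# is the porosity-weighted geometric mean of the solid and water conductivities.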
for x in self.node_nums_upper:
temp_depth = self.max_z - self.ZZ_out[x-1]
temp_porosity = self.rp_mult*np.exp(self.rp_exp*temp_depth/1000)
temp_cond = self.solids_cond**(1 - temp_porosity) * self.water_cond**(temp_porosity)
PZ.write(' %d %d 1 %d %d %.1f\n' % (x,x,self.rock_density,self.rock_spec_heat,temp_porosity))
CZ.write(' %d %d 1 %.2f %.2f %.2f\n' % (x,x,temp_cond,temp_cond,temp_cond))
for x in self.node_nums_middle_ocean:
PZ.write(' %d %d 1 %d %d %.1f\n' % (x,x,self.rock_density,self.rock_spec_heat,self.rock_porosity))
CZ.write(' %d %d 1 %.2f %.2f %.2f\n' % (x,x,self.cond_middle_ocean,self.cond_middle_ocean,self.cond_middle_ocean))
for x in self.node_nums_middle_continental:
PZ.write(' %d %d 1 %d %d %.1f\n' % (x,x,self.rock_density,self.rock_spec_heat,self.rock_porosity))
CZ.write(' %d %d 1 %.2f %.2f %.2f\n' % (x,x,self.cond_middle_continental,self.cond_middle_continental,self.cond_middle_continental))
for x in self.node_nums_bottom:
PZ.write(' %d %d 1 %d %d %.1f\n' % (x,x,self.rock_density,self.rock_spec_heat,self.rock_porosity))
CZ.write(' %d %d 1 %.2f %.2f %.2f\n' % (x,x,self.cond_lower,self.cond_lower,self.cond_lower))
PZ.write('\nstop')
PZ.close()
CZ.write('\nstop')
CZ.close()
def save_coords_csv(self):
# export a csv with the XYZ's for the mesh nodes. This will be used later
# for running LaGriT but can also be used to externally plot the mesh nodes
# for QC purposes.
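# Each row is written as "x,y,z" with no header or index, e.g. (hypothetical):
# 1250.0,3400.0,-2875.0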
self.XX_out = self.XX.flatten()
self.YY_out = self.YY.flatten()
self.ZZ_out = self.ZZ.flatten()
self.DF = pd.DataFrame({'x':self.XX_out, 'y':self.YY_out, 'z':self.ZZ_out})
self.DF.to_csv(self.csv_filename,index=False,header=False)
def read_boundary_file(self):
# load in the boundary file describing the location of the border between
# the oceanic and continental crust
colnames = ['Easting','Northing','Elevation']
self.FF = pd.read_csv(self.boundary_filename,skiprows=1,names=colnames,header=None)
self.FF.Elevation = self.FF.Elevation + self.zbulk
def build_zones(self):
self.fsurf = interpolate.interp2d(self.xvec,self.yvec,self.surf_upper,kind='linear')
self.DF['upp_surf'] = 0
self.DF['low_surf'] = 0
for index,row in self.DF.iterrows():
self.DF.upp_surf[index] = self.fsurf(self.DF.x[index].copy(),self.DF.y[index].copy())
self.DF.low_surf = self.DF.upp_surf - self.crust_thickness
self.x_boun_vec = np.zeros(self.xvec.shape)
self.f = interpolate.interp1d(self.FF.Northing,self.FF.Easting, fill_value=(self.FF.Easting.iloc[-1]+10,self.FF.Easting.iloc[0]-10), bounds_error=False)
# first make an x-boundary column
self.DF['x_boun'] = self.f(self.DF['x'])
# initialize a zone column to zeros
# zones will be as follows:
# 1 - below the crust
# 2 - oceanic crust
# 3 - continental crust
# 4 - sediments
# 11 - crustal fault
self.DF['mat_zone'] = self.DF['x_boun']*0
self.DF.mat_zone[self.DF.x[:].copy() > self.DF.x_boun[:].copy()] = 11 # setting the crustal boundary cells
self.DF.mat_zone[(self.DF.x[:].copy() > self.DF.x_boun[:].copy()) & ((self.DF.y[:].copy() == max(self.DF.y[:].copy())) | (self.DF.y[:].copy() == min(self.DF.y[:].copy())))] = 12
self.DF.mat_zone[self.DF.x[:].copy() > (self.DF.x_boun[:].copy() + self.dx)] = 3 # setting continental crust
self.DF.mat_zone[self.DF.x[:].copy() <= self.DF.x_boun[:].copy()] = 2 # setting oceanic crust
self.DF.mat_zone[self.DF.z[:].copy() < self.DF.low_surf[:].copy()] = 1 # setting below the crust
self.DF.mat_zone[self.DF.z[:].copy() > self.DF.upp_surf[:].copy()] = 4 # setting the sediment zone
self.DF['orig_ind'] = self.DF.index*1
# create materials zone
# check the .copy() part
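# Node numbers written to the zone files are 1-based (hence the +1 applied to the
# 0-based DataFrame indices below), matching FEHM's node numbering.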
testerbob = self.DF[self.DF.mat_zone==1].copy()
self.node_nums_bottom = testerbob[['orig_ind']].to_numpy()+1
testerbob = self.DF[self.DF.mat_zone==2].copy()
self.node_nums_middle_ocean = testerbob[['orig_ind']].to_numpy()+1
testerbob = self.DF[(self.DF.mat_zone==3) | (self.DF.mat_zone==11) | (self.DF.mat_zone==12)].copy()
self.node_nums_middle_continental = testerbob[['orig_ind']].to_numpy()+1
testerbob = self.DF[self.DF.mat_zone==4].copy()
self.node_nums_upper = testerbob[['orig_ind']].to_numpy()+1
MZ = open(self.material_zones_filename,'w+')
zonecounter = 1
MZ.write('zone\n')
MZ.write('%05d\n' % zonecounter)
zonecounter = zonecounter + 1
MZ.write('nnum\n')
MZ.write(' %d\n' % self.node_nums_bottom.size)
col_ind = 1
for x in range(self.node_nums_bottom.size):
if x == self.node_nums_bottom.size-1:
MZ.write(' %d\n' % self.node_nums_bottom[x])
elif col_ind % 10 != 0:
MZ.write(' %d' % self.node_nums_bottom[x])
else:
MZ.write(' %d\n' % self.node_nums_bottom[x])
col_ind = col_ind + 1
MZ.write('%05d\n' % zonecounter)
zonecounter = zonecounter + 1
MZ.write('nnum\n')
MZ.write(' %d\n' % self.node_nums_middle_ocean.size)
col_ind = 1
for x in range(self.node_nums_middle_ocean.size):
if x == self.node_nums_middle_ocean.size-1:
MZ.write(' %d\n' % self.node_nums_middle_ocean[x])
elif col_ind % 10 != 0:
MZ.write(' %d' % self.node_nums_middle_ocean[x])
else:
MZ.write(' %d\n' % self.node_nums_middle_ocean[x])
col_ind = col_ind + 1
MZ.write('%05d\n' % zonecounter)
zonecounter = zonecounter + 1
MZ.write('nnum\n')
MZ.write(' %d\n' % self.node_nums_middle_continental.size)
col_ind = 1
for x in range(self.node_nums_middle_continental.size):
if x == self.node_nums_middle_continental.size-1:
MZ.write(' %d\n' % self.node_nums_middle_continental[x])
elif col_ind % 10 != 0:
MZ.write(' %d' % self.node_nums_middle_continental[x])
else:
MZ.write(' %d\n' % self.node_nums_middle_continental[x])
col_ind = col_ind + 1
MZ.write('%05d\n' % zonecounter)
zonecounter = zonecounter + 1
MZ.write('nnum\n')
MZ.write(' %d\n' % self.node_nums_upper.size)
col_ind = 1
for x in range(self.node_nums_upper.size):
if x == self.node_nums_upper.size-1:
MZ.write(' %d\n' % self.node_nums_upper[x])
elif col_ind % 10 != 0:
MZ.write(' %d' % self.node_nums_upper[x])
else:
MZ.write(' %d\n' % self.node_nums_upper[x])
col_ind = col_ind + 1
MZ.write('\n')
MZ.write('stop')
MZ.close()
# create boundary zones
# boundary zones will be:
# 00005 - bottom boundary
# 00006 - top boundary
# 00007 - top edges
# 00008 - bottom edges
# 00009 - top corners
# 00010 - bottom corners
# 00011 - fault zone
# 00012 - fault zone edges
# in the future add one here to create a vertical internal boundary
# that will be used to generate heat along the fault.
testerbob = self.DF[self.DF.mat_zone==11].copy()
self.node_nums_fault = testerbob[['orig_ind']].to_numpy()+1
testerbob = self.DF[self.DF.mat_zone==12].copy()
self.node_nums_fault_edges = testerbob[['orig_ind']].to_numpy()+1
self.node_nums_domain_bottom = np.where(self.ZZ_out == min(self.ZZ_out))
self.node_nums_domain_bottom = np.asarray(self.node_nums_domain_bottom).flatten()+1
self.node_nums_domain_top = np.where(self.ZZ_out == max(self.ZZ_out))
self.node_nums_domain_top = np.asarray(self.node_nums_domain_top).flatten()+1
self.node_nums_bottom_edges = np.where(
((self.XX_out == max(self.XX_out)) |
(self.YY_out == max(self.YY_out)) |
(self.XX_out == min(self.XX_out)) |
(self.YY_out == min(self.YY_out)))
&
(self.ZZ_out == min(self.ZZ_out))
)
self.node_nums_bottom_edges = np.asarray(self.node_nums_bottom_edges).flatten()+1
self.node_nums_top_edges = np.where(
((self.XX_out == max(self.XX_out)) |
(self.YY_out == max(self.YY_out)) |
(self.XX_out == min(self.XX_out)) |
l.area = 3 and wn_id is not null) )) UNION (SELECT h.wn_id FROM (SELECT l.wn_id, COUNT(*) as job FROM location as l WHERE l.area = 1 GROUP BY l.wn_id) as h WHERE h.job < 4)"
elif areaa == 2:
sql9 = "(SELECT wn_id FROM ward_nurse WHERE wn_id NOT IN ((SELECT wn_id FROM location l WHERE l.area = 1 and wn_id is not null) UNION (SELECT wn_id FROM location l WHERE l.area = 2 and wn_id is not null) UNION (SELECT wn_id FROM location l WHERE l.area = 3 and wn_id is not null) )) UNION (SELECT h.wn_id FROM (SELECT l.wn_id, COUNT(*) as job FROM location as l WHERE l.area = 2 GROUP BY l.wn_id) as h WHERE h.job < 2)"
elif areaa == 3:
sql9 = "(SELECT wn_id FROM ward_nurse WHERE wn_id NOT IN ((SELECT wn_id FROM location l WHERE l.area = 1 and wn_id is not null) UNION (SELECT wn_id FROM location l WHERE l.area = 2 and wn_id is not null) UNION (SELECT wn_id FROM location l WHERE l.area = 3 and wn_id is not null) )) UNION (SELECT h.wn_id FROM (SELECT l.wn_id, COUNT(*) as job FROM location as l WHERE l.area = 3 GROUP BY l.wn_id) as h WHERE h.job < 1)"
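# sql9 picks ward nurses that either have no assignment in areas 1-3 yet, or whose
# patient count in the requested area is still below its limit
# (fewer than 4 patients in area 1, 2 in area 2, 1 in area 3)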
cursor.execute(sql9)
db.commit()
res2 = cursor.fetchall()
# Cannot place the patient because no ward nurse is available
if len(res2) == 0:
msg = 'done'
return render_template('en_newp.html', messages=msg, username=username)
a = res[0][0]
b = res[0][1]
c = res[0][2]
sql5 = "UPDATE location as l SET l.p_id = '%s',l.wn_id = '%s' WHERE l.area = '%s' and l.room_no = '%s' and l.bed_no = '%s'" % (
p_id, res2[0][0], a, b, c)
print(sql5)
cursor.execute(sql5)
db.commit()
return render_template('en_newp.html', messages=msg, username=username)
return render_template('en_newp.html', username=username, userRole=userRole)
# Emergency nurse selection
@app.route('/en_viewp_i', methods=['GET', 'POST'])
def en_viewp_i():
msg = ""
if request.method == 'GET':
return render_template('en_viewp_i.html')
if request.method == 'POST':
area = request.form.get('area')
print(area)
return render_template('en_viewp_i.html', messages=area, username=username, userRole=userRole)
@app.route('/en_viewp0', methods=['GET', 'POST'])
def en_viewp0():
msg = ""
if request.method == 'GET':
db = pymysql.connect("localhost", "root", password="<PASSWORD>", db="Hospital", charset='utf8')
cursor = db.cursor()
try:
cursor.execute("use Hospital")
except:
print("Error: unable to use database!")
# Query
sql = "SELECT * FROM patient as p WHERE p_id = some(SELECT p_id FROM location WHERE Location.area = 0)"
cursor.execute(sql)
res = cursor.fetchall()
db.commit()
if len(res) != 0:
msg = "done"
print(msg)
print(len(res))
return render_template('en_viewp0.html', username=username, result=res, messages=msg,
userRole=userRole)
else:
print("NULL")
msg = "none"
return render_template('en_viewp0.html', username=username, messages=msg, userRole=userRole)
@app.route('/en_viewp1', methods=['GET', 'POST'])
def en_viewp1():
msg = ""
if request.method == 'GET':
db = pymysql.connect("localhost", "root", password="<PASSWORD>", db="Hospital", charset='utf8')
cursor = db.cursor()
try:
cursor.execute("use Hospital")
except:
print("Error: unable to use database!")
# Query
sql = "SELECT * FROM patient as p WHERE p_id = some(SELECT p_id FROM location WHERE Location.area = 1)"
cursor.execute(sql)
res = cursor.fetchall()
db.commit()
if len(res) != 0:
msg = "done"
print(msg)
print(len(res))
return render_template('en_viewp1.html', username=username, result=res, messages=msg,
userRole=userRole)
else:
print("NULL")
msg = "none"
return render_template('en_viewp1.html', username=username, messages=msg, userRole=userRole)
@app.route('/en_viewp2', methods=['GET', 'POST'])
def en_viewp2():
msg = ""
if request.method == 'GET':
db = pymysql.connect("localhost", "root", password="<PASSWORD>", db="Hospital", charset='utf8')
cursor = db.cursor()
try:
cursor.execute("use Hospital")
except:
print("Error: unable to use database!")
# Query
sql = "SELECT * FROM patient as p WHERE p_id = some(SELECT p_id FROM location WHERE Location.area = 2)"
cursor.execute(sql)
res = cursor.fetchall()
db.commit()
if len(res) != 0:
msg = "done"
print(msg)
print(len(res))
return render_template('en_viewp2.html', username=username, result=res, messages=msg,
userRole=userRole)
else:
print("NULL")
msg = "none"
return render_template('en_viewp2.html', username=username, messages=msg, userRole=userRole)
@app.route('/en_viewp3', methods=['GET', 'POST'])
def en_viewp3():
msg = ""
if request.method == 'GET':
db = pymysql.connect("localhost", "root", password="<PASSWORD>0106", db="Hospital", charset='utf8')
cursor = db.cursor()
try:
cursor.execute("use Hospital")
except:
print("Error: unable to use database!")
# Query
sql = "SELECT * FROM patient as p WHERE p_id = some(SELECT p_id FROM location WHERE Location.area = 3)"
cursor.execute(sql)
res = cursor.fetchall()
db.commit()
if len(res) != 0:
msg = "done"
print(msg)
print(len(res))
return render_template('en_viewp3.html', username=username, result=res, messages=msg,
userRole=userRole)
else:
print("NULL")
msg = "none"
return render_template('en_viewp3.html', username=username, messages=msg, userRole=userRole)
@app.route('/d_viewp_m', methods=['GET', 'POST'])
def d_viewp_m():
msg = ""
if request.form["action"] == "确认条件":
recover = request.form['recover']
right = request.form['right']
print(recover)
print(right)
db = pymysql.connect("localhost", "root", password="<PASSWORD>", db="Hospital", charset='utf8')
cursor = db.cursor()
cursor.execute("use Hospital")
if right == "1" and recover == "0":
sql3 = "SELECT distinct area FROM location WHERE location.d_id = '%s'" % username
cursor.execute(sql3)
db.commit()
res = cursor.fetchall()
print(res[0])
# Check the last 3 days of body temperature and the last 2 COVID test results
# Loop over patient IDs
# patient.severity values are stored in Chinese: '轻症' (mild), '重症' (severe), '危重症' (critical)
areaa = res[0][0]
if areaa == '1':
pos = '轻症'
elif areaa == '2':
pos = '重症'
else:
pos = '危重症'
sql2 = "SELECT * FROM patient WHERE patient.p_id = some(SELECT p_id FROM location WHERE location.area = '%s' and location.area != 0) and patient.severity != '%s'" % (
areaa, pos)
cursor.execute(sql2)
db.commit()
all_p_id = cursor.fetchall()
the_list = []
for p_id in all_p_id:
sql1 = "SELECT temperature FROM daily_info WHERE daily_info.p_id = %s ORDER BY the_date DESC" % p_id[0]
cursor.execute(sql1)
db.commit()
print("查询成功")
res = cursor.fetchall()
print(res)
good = 0
counter = 0
all = 0
if len(res) < 3:
all += 1
for i in res:
if counter > 2:
break
counter += 1
if i[0] < '37.3':
good += 1
if good != 3:
all += 1
sql2 = "SELECT result FROM covid_test WHERE covid_test.p_id = '%s' ORDER BY the_date DESC" % p_id[0]
cursor.execute(sql2)
db.commit()
print("查询成功2")
res = cursor.fetchall()
print(res)
good = 0
counter = 0
if len(res) < 2:
all += 1
for i in res:
if counter > 1:
break
counter += 1
if not i[0]:
good += 1
if good != 2:
all += 1
if all != 0:
the_list.append(p_id)
if len(the_list) != 0:
msg = "done"
print(msg)
print(len(the_list))
return render_template('d_viewp_m.html', username=username, result=the_list, messages=msg,
userRole=userRole)
else:
print("NULL")
msg = "none"
return render_template('d_viewp_m.html', username=username, messages=msg, userRole=userRole)
if right == "0" and recover == "0":
sql3 = "SELECT distinct area FROM location WHERE location.d_id = '%s'" % username
cursor.execute(sql3)
db.commit()
res = cursor.fetchall()
print(res[0])
# Check the last 3 days of body temperature and the last 2 COVID test results
# Loop over patient IDs
areaa = res[0][0]
if areaa == '1':
pos = '轻症'
elif areaa == '2':
pos = '重症'
else:
pos = '危重症'
sql2 = "SELECT * FROM patient WHERE patient.p_id = some(SELECT p_id FROM location WHERE location.area = '%s' and location.area != 0) and patient.severity = '%s'" % (
areaa, pos)
cursor.execute(sql2)
db.commit()
all_p_id = cursor.fetchall()
the_list = []
for p_id in all_p_id:
sql1 = "SELECT temperature FROM daily_info WHERE daily_info.p_id = %s ORDER BY the_date DESC" % p_id[0]
cursor.execute(sql1)
db.commit()
print("查询成功")
res = cursor.fetchall()
print(res)
good = 0
counter = 0
all = 0
if len(res) < 3:
all += 1
for i in res:
if counter > 2:
break
counter += 1
if i[0] < '37.3':
good += 1
if good != 3:
all += 1
sql2 = "SELECT result FROM covid_test WHERE covid_test.p_id = '%s' ORDER BY the_date DESC" % p_id[0]
cursor.execute(sql2)
db.commit()
print("查询成功2")
res = cursor.fetchall()
print(res)
good = 0
counter = 0
if len(res) < 2:
all += 1
for i in res:
if counter > 1:
break
counter += 1
if not i[0]:
good += 1
if good != 2:
all += 1
if all != 0:
the_list.append(p_id)
if len(the_list) != 0:
msg = "done"
print(msg)
print(len(the_list))
return render_template('d_viewp_m.html', username=username, result=the_list, messages=msg,
userRole=userRole)
else:
print("NULL")
msg = "none"
return render_template('d_viewp_m.html', username=username, messages=msg, userRole=userRole)
if right == "1" and recover == "1":
sql3 = "SELECT distinct area FROM location WHERE location.d_id = '%s'" % username
cursor.execute(sql3)
db.commit()
res = cursor.fetchall()
print(res[0])
if res[0][0] != '1':
msg = "none"
return render_template('d_viewp_m.html', messages=msg, username=username)
# Check the last 3 days of body temperature and the last 2 COVID test results
# Loop over patient IDs
areaa = res[0][0]
if areaa == '1':
pos = '轻症'
elif areaa == '2':
pos = '重症'
else:
pos = '危重症'
sql2 = "SELECT * FROM patient WHERE patient.p_id = some(SELECT p_id FROM location WHERE location.area = '%s' and location.area != 0) and patient.severity != '%s'" % (
areaa, pos)
cursor.execute(sql2)
db.commit()
all_p_id = cursor.fetchall()
the_list = []
for p_id in all_p_id:
sql1 = "SELECT temperature FROM daily_info WHERE daily_info.p_id = %s ORDER BY the_date DESC" % p_id[0]
cursor.execute(sql1)
db.commit()
print("查询成功")
res = cursor.fetchall()
| |
a review is public, it cannot be made '
'private again.',
},
'summary': {
'type': str,
'description': 'The new review request summary.',
},
'target_groups': {
'type': str,
'description': 'A comma-separated list of review groups '
'that will be on the reviewer list.',
},
'target_people': {
'type': str,
'description': 'A comma-separated list of users that will '
'be on a reviewer list.',
},
'testing_done': {
'type': str,
'description': 'The new testing done text.',
},
},
)
def create(self, *args, **kwargs):
"""Creates a draft of a review request.
If a draft already exists, this will just reuse the existing draft.
"""
# A draft is a singleton. Creating and updating it are the same
# operations in practice.
result = self.update(*args, **kwargs)
if isinstance(result, tuple):
if result[0] == 200:
return (201,) + result[1:]
return result
@webapi_check_local_site
@webapi_login_required
@webapi_request_fields(
optional={
'branch': {
'type': str,
'description': 'The new branch name.',
},
'bugs_closed': {
'type': str,
'description': 'A comma-separated list of bug IDs.',
},
'changedescription': {
'type': str,
'description': 'The change description for this update.',
},
'description': {
'type': str,
'description': 'The new review request description.',
},
'public': {
'type': bool,
'description': 'Whether or not to make the changes public. '
'The new changes will be applied to the '
'review request, and the old draft will be '
'deleted.',
},
'summary': {
'type': str,
'description': 'The new review request summary.',
},
'target_groups': {
'type': str,
'description': 'A comma-separated list of review groups '
'that will be on the reviewer list.',
},
'target_people': {
'type': str,
'description': 'A comma-separated list of users that will '
'be on a reviewer list.',
},
'testing_done': {
'type': str,
'description': 'The new testing done text.',
},
},
)
def update(self, request, always_save=False, local_site_name=None,
*args, **kwargs):
"""Updates a draft of a review request.
This will update the draft with the newly provided data.
Most of the fields correspond to fields in the review request, but
there is one special one, ``public``. When ``public`` is set to ``1``,
the draft will be published, moving the new content to the
Review Request itself, making it public, and sending out a notification
(such as an e-mail) if configured on the server. The current draft will
then be deleted.
"""
try:
review_request = review_request_resource.get_object(
request, local_site_name=local_site_name, *args, **kwargs)
except ReviewRequest.DoesNotExist:
return DOES_NOT_EXIST
try:
draft = self.prepare_draft(request, review_request)
except PermissionDenied:
return _no_access_error(request.user)
modified_objects = []
invalid_fields = {}
for field_name, field_info in self.fields.iteritems():
if (field_info.get('mutable', True) and
kwargs.get(field_name, None) is not None):
field_result, field_modified_objects, invalid = \
self._set_draft_field_data(draft, field_name,
kwargs[field_name],
local_site_name)
if invalid:
invalid_fields[field_name] = invalid
elif field_modified_objects:
modified_objects += field_modified_objects
if always_save or not invalid_fields:
for obj in modified_objects:
obj.save()
draft.save()
if invalid_fields:
return INVALID_FORM_DATA, {
'fields': invalid_fields,
self.item_result_key: draft,
}
if request.POST.get('public', False):
review_request.publish(user=request.user)
return 200, {
self.item_result_key: draft,
}
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
def delete(self, request, *args, **kwargs):
"""Deletes a draft of a review request.
This is equivalent to pressing :guilabel:`Discard Draft` in the
review request's page. It will simply erase all the contents of
the draft.
"""
# Make sure this exists. We don't want to use prepare_draft, or
# we'll end up creating a new one.
try:
review_request = \
review_request_resource.get_object(request, *args, **kwargs)
draft = review_request.draft.get()
except ReviewRequest.DoesNotExist:
return DOES_NOT_EXIST
except ReviewRequestDraft.DoesNotExist:
return DOES_NOT_EXIST
if not self.has_delete_permissions(request, draft, *args, **kwargs):
return _no_access_error(request.user)
draft.delete()
return 204, {}
@webapi_check_local_site
@webapi_login_required
@augment_method_from(WebAPIResource)
def get(self, request, review_request_id, *args, **kwargs):
"""Returns the current draft of a review request."""
pass
def _set_draft_field_data(self, draft, field_name, data, local_site_name):
"""Sets a field on a draft.
This will update a draft's field based on the provided data.
It handles transforming the data as necessary to put it into
the field.
If there is a problem with the data, then a validation error
is returned.
This returns a tuple of (data, modified_objects, invalid_entries).
``data`` is the transformed data.
``modified_objects`` is a list of objects (screenshots or change
description) that were affected.
``invalid_entries`` is a list of validation errors.
"""
modified_objects = []
invalid_entries = []
if field_name in ('target_groups', 'target_people'):
values = re.split(r",\s*", data)
target = getattr(draft, field_name)
target.clear()
for value in values:
# Prevent problems if the user leaves a trailing comma,
# generating an empty value.
if not value:
continue
try:
local_site = _get_local_site(local_site_name)
if field_name == "target_groups":
obj = Group.objects.get((Q(name__iexact=value) |
Q(display_name__iexact=value)) &
Q(local_site=local_site))
elif field_name == "target_people":
obj = self._find_user(username=value,
local_site=local_site)
target.add(obj)
except:
invalid_entries.append(value)
elif field_name == 'bugs_closed':
data = list(self._sanitize_bug_ids(data))
setattr(draft, field_name, ','.join(data))
elif field_name == 'changedescription':
if not draft.changedesc:
invalid_entries.append('Change descriptions cannot be used '
'for drafts of new review requests')
else:
draft.changedesc.text = data
modified_objects.append(draft.changedesc)
else:
if field_name == 'summary' and '\n' in data:
invalid_entries.append('Summary cannot contain newlines')
else:
setattr(draft, field_name, data)
return data, modified_objects, invalid_entries
def _sanitize_bug_ids(self, entries):
"""Sanitizes bug IDs.
This will remove any excess whitespace before or after the bug
IDs, and remove any leading ``#`` characters.
"""
for bug in entries.split(','):
bug = bug.strip()
if bug:
# RB stores bug numbers as numbers, but many people have the
# habit of prepending #, so filter it out:
if bug[0] == '#':
bug = bug[1:]
yield bug
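# Example: "#123, 456 , #789" yields "123", "456" and "789".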
def _find_user(self, username, local_site):
"""Finds a User object matching ``username``.
This will search all authentication backends, and may create the
User object if the authentication backend knows that the user exists.
"""
username = username.strip()
if local_site:
return local_site.users.get(username=username)
try:
return User.objects.get(username=username)
except User.DoesNotExist:
for backend in auth.get_backends():
try:
user = backend.get_or_create_user(username)
except:
# if this backend fails, make sure `user` is defined before
# the check below (otherwise a NameError is possible)
user = None
if user:
return user
return None
review_request_draft_resource = ReviewRequestDraftResource()
class BaseScreenshotCommentResource(BaseCommentResource):
"""A base resource for screenshot comments."""
model = ScreenshotComment
name = 'screenshot_comment'
fields = dict({
'id': {
'type': int,
'description': 'The numeric ID of the comment.',
},
'screenshot': {
'type': 'reviewboard.webapi.resources.ScreenshotResource',
'description': 'The screenshot the comment was made on.',
},
'text': {
'type': str,
'description': 'The comment text.',
},
'timestamp': {
'type': str,
'description': 'The date and time that the comment was made '
'(in YYYY-MM-DD HH:MM:SS format).',
},
'public': {
'type': bool,
'description': 'Whether or not the comment is part of a public '
'review.',
},
'user': {
'type': 'reviewboard.webapi.resources.UserResource',
'description': 'The user who made the comment.',
},
'x': {
'type': int,
'description': 'The X location of the comment region on the '
'screenshot.',
},
'y': {
'type': int,
'description': 'The Y location of the comment region on the '
'screenshot.',
},
'w': {
'type': int,
'description': 'The width of the comment region on the '
'screenshot.',
},
'h': {
'type': int,
'description': 'The height of the comment region on the '
'screenshot.',
},
}, **BaseCommentResource.fields)
uri_object_key = 'comment_id'
allowed_methods = ('GET',)
def get_queryset(self, request, *args, **kwargs):
review_request = \
review_request_resource.get_object(request, *args, **kwargs)
return self.model.objects.filter(
screenshot__review_request=review_request,
review__isnull=False)
def serialize_public_field(self, obj):
return obj.review.get().public
def serialize_timesince_field(self, obj):
return timesince(obj.timestamp)
def serialize_user_field(self, obj):
return obj.review.get().user
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def get(self, *args, **kwargs):
"""Returns information on the comment.
This contains the comment text, time the comment was made,
and the location of the comment region on the screenshot, amongst
other information. It can be used to reconstruct the exact
position of the comment for use as an overlay on the screenshot.
"""
pass
class ScreenshotCommentResource(BaseScreenshotCommentResource):
"""Provides information on screenshots comments made on a review request.
The list of comments cannot be modified from this resource. It's meant
purely as a way to see existing comments that were made on a diff. These
comments will span all public reviews.
"""
model_parent_key = 'screenshot'
uri_object_key = None
def get_queryset(self, request, review_request_id, screenshot_id,
*args, **kwargs):
q = super(ScreenshotCommentResource, self).get_queryset(
request, review_request_id, *args, **kwargs)
q = q.filter(screenshot=screenshot_id)
return q
@webapi_check_local_site
@augment_method_from(BaseScreenshotCommentResource)
def get_list(self, *args, **kwargs):
"""Returns the list of screenshot comments on a screenshot.
This list of comments will cover all comments made on this
screenshot from all reviews.
"""
pass
screenshot_comment_resource = ScreenshotCommentResource()
class ReviewScreenshotCommentResource(BaseScreenshotCommentResource):
"""Provides information on screenshots comments made on a review.
If the review is a draft, | |
"Offsetdrucker", "<NAME>", "Zugverkehrsleiter",
"Regulatory Affairs Specialist", "Redaktionsassistent",
"Kleiderverkäufer", "Leiter E-Marketing", "Schulratspräsident",
"<NAME>", "Schulassistent", "Fahrzeug-Aufbereiter",
"Schleifer", "<NAME>", "Musiklehrer",
"Sachbearbeiter Soziale Dienste", "<NAME>",
"Hauswirtschaftslehrer", "Integrationsbeauftragter",
"Leiter Wohngruppe", "Medizinischer Codierer",
"Zerspanungsfachmann", "<NAME>", "Baukoordinator",
"Energieingenieur", "Statiker", "Trockenbauer",
"Leiter Werkzeugtechnik", "Reifenmonteur", "Dammingenieur",
"Leittechnik-Ingenieur", "Sendetechniker", "Medizinphysiker",
"Bahnpolizist", "Papeterist", "Python-Entwickler", "Leiter SAP",
"Steuerrechtsspezialist", "Host/PL1 Spezialist",
"SPS Software-Ingenieur", "Leiter Projekt-Controlling",
"Bibliotheksleiter", "<NAME>", "Zeitungsverteiler",
"Sachbearbeiter Valorenstammdaten", "TYPO3-Entwickler",
"Assistent Facility Management", "Assistent Produktentwicklung",
"Assistent Asset Management", "Assistent der Bankleitung",
"Assistent Disposition", "Assistent Stab", "Ausgrabungsleiter",
"Bauarbeiter", "Cobol-Entwickler", "Didaktiker",
"Leiter Release Management", "Sachbearbeiter Versand",
"Konstruktionsassistent", "Leiter Finanzberatung",
"Leiter Fachbereich Gesundheit", "<NAME>",
"Leiter psychologischer Fachdienst", "<NAME>",
"Leiter Stadtplanung", "Leiter Hypothekargeschäft",
"Leiter Telefon und Empfang", "Leiter Mechanik",
"Systemingenieur Tivoli", "Mitarbeiter IT-Zugriffsmanagement",
"Mitarbeiter Patientenadministration", "Mitarbeiter Werkstatt",
"<NAME>", "Dokumentenmanagementsystem-Spezialist",
"Leiter Category Management", "Assistent Brand Management",
"Unterhaltungschef", "Leiter Telematik",
"Intellectual Property Assistent", "Leiter Dokumentation",
"Sachbearbeiter Berufliche Vorsorge", "Chiropraktor",
"Wirtschaftsförderer", "Staatsanwalt",
"Mitarbeiter Corporate Actions", "Leiter Kostencontrolling",
"Einkaufscontroller", "Assistent Fallberater",
"Leiter IT-Sicherheit", "Infrastruktur-Spezialist",
"Fachmann für die berufliche Eingliederung", "Leiter After Sales",
"Filialleiter-Assistent", "Leiter Verpackung", "Kinetiker",
"Verwaltungsratspräsident", "<NAME>",
"Bijoutier", "<NAME>", "Event-Assistent", "Förster",
"Leiter Energy Trading", "Assistent Energy Trading",
"Sachbearbeiter Kollektivleben", "UVG-Spezialist",
"Sachbearbeiter PK-Verwaltung", "Vertragsadministrator",
"Delegierter der Tochtergesellschaften",
"Assistent der Spitaldirektion", "Wissenschaftlicher Redaktor",
"Fenstermonteur", "Ersatzteilverkäufer",
"Versicherungssachbearbeiter", "HR Business Developer",
"Supply Chain Assistant", "Anzeigendisponent", "Berater-Assistent",
"Treasury Accountant", "Kulturredaktor", "<NAME>igenverkauf",
"Leiter Standortförderung", "Leiter IT Operations",
"Zytotechnischer Assistent", "Kraftwerkingenieur",
"<NAME>", "Galeniklaborant", "Fahrzeugreiniger",
"Personaldisponent", "Speditionsassistent", "Crewmitarbeiter",
"Freizeitwissenschafter", "Geophysiker", "Wasserbautechniker",
"Spezialist Leistungen Kollektivleben",
"Leiter Informationsmanagement", "Cash Management Assistent",
"Leiter Offertwesen", "Leiter Network Engineering",
"SAP Security Administrator", "Zügelmann", "Schreinermeister",
"Sachbearbeiter öffentlicher Verkehr",
"Head of International Assignment", "Leiter Konkursamt",
"Leiter Lebensmittelinspektorat", "Philosoph",
"Leiter Kreditorenbuchhaltung", "Kardiotechniker",
"Studienkoordinator", "<NAME>sychotherapie",
"Leiter Telekommunikation", "Leiter Publishing",
"Assistent HR Consulting", "Telefonmonteur", "Abbrucharbeiter",
"Abdichtungsfachmann", "Agrobiologe", "Betriebspraktiker",
"Diätkoch", "Fotofachangestellter", "Textiltechnologe",
"<NAME>", "<NAME>",
"Projektleiter Strassenbau", "Assistent Messe",
"<NAME>", "Geoinformatiker", "Brunnenbauer",
"Chirurgiemechaniker", "Facharzt Arbeitsmedizin", "Gleisbaupolier",
"Abschlepper", "Akkordarbeiter", "Dispatchleiter", "Aerodynamiker",
"RAMS-Spezialist", "z/OS Spezialist", "Arzneimittelchemiker",
"Berufsfischer", "Automatikmonteur", "Fachmann Betreuung",
"Bestatter", "Fahrlehrer", "Gemüsegärtner", "Baupolier",
"Maltherapeut", "Maskenbildner", "Datenmigrationsfachmann",
"Obstgärtner", "Reitlehrer", "Schneesportlehrer", "Steinsetzer",
"Tanzlehrer", "Taxichauffeur", "Detailhandelsassistent",
"Osteopath", "Konditor-Confiseur", "Heizungsinstallateur",
"Küchenverkäufer", "Call Center Administrator",
"Elektro- und Elektronikverkäufer", "Metzgermeister",
"Kosmetikberater", "Haushaltartikelverkäufer",
"Branchenspezialist Reformprodukte",
"Branchenspezialist Teppiche, Boden-, Wandbeläge",
"Antriebsspezialist", "Normeningenieur",
"Produktsicherheitsingenieur", "Prüfstandsingenieur",
"Qualifizierungsingenieur", "Simulationsingenieur",
"Techniker Holzindustrie", "Betriebssanitäter", "EEG-Assistent",
"Assistent Mikrobiologie", "Assistent Molekularbiologie",
"Unterrichtsassistent", "Fachkrankenpfleger Klinische Geriatrie",
"Fachkrankenpfleger Nephrologie", "Fachkrankenpfleger Onkologie",
"Fachkrankenpfleger Palliativ- und Hospizpflege",
"Fachkrankenpfleger Rehabilitation und Langzeitpflege",
"Hippotherapeut", "Kieferorthopäde", "Nachtwache Krankenhaus",
"Rehabilitationslehrer", "Schulischer Heilpädagoge",
"Sicherheitsbeauftragter im Spital und Heim",
"Sozialpädagogischer Assistent", "Sporttherapeut", "Tanztherapeut",
"Tennislehrer", "Verhaltenstherapeut", "Yogalehrer",
"Baustoffverkäufer", "Bergführer", "Berufssoldat", "Imker",
"Bildhauer", "Bildungswissenschafter", "Bote", "Bügler",
"Bäckerei-Konditorei-Verkäufer", "Chemie-Assistent", "<NAME>",
"Giesser", "Forstarbeiter", "Gerber", "Gerichtsdiener",
"Giessereimechaniker", "Hutmacher", "Kabelmonteur",
"Kioskverkäufer", "Kirchendiener", "Kommunikationswissenschafter",
"Kunstlehrer", "Kunststoffverarbeiter", "Maschinenbaukonstrukteur",
"Medizinischer Dokumentationsassistent", "Oberflächentechniker",
"Rechtswissenschafter", "Religionslehrer", "Segellehrer",
"Sicherheitstechniker", "Solartechniker", "Sonderschullehrer",
"Sportwissenschafter", "Studiotechniker", "Sänger",
"Textilchemiker", "Tierzüchter", "Tänzer",
"Elektro-Sicherheitsberater", "Gardemanger",
"Tankstellenmitarbeiter", "Abacus-Supporter", "Leiter HLK",
"Leiter Automation", "Oracle Administrator", "Facharzt Radiologie",
"Facharzt für Chinesische Medizin", "Facharzt Dermatologie",
"Facharzt Infektiologie", "Facharzt Innere Medizin",
"Facharzt Kardiologie", "Facharzt Neurochirurgie",
"Facharzt Ophthalmologie", "Facharzt Orthopädie",
"Facharzt Pathologie", "Facharzt Präventivmedizin", "Butler",
"<NAME>", "Ofenbauer", "Technischer Operationsfachmann",
"Materialwissenschaftler", "Leiter Elektroplanung",
"Cognos CPM Spezialist", "Leiter Kundenarbeiten",
"Leiter Consulting", "<NAME>",
"Leiter Aktivierungstherapie", "Leiter Schuladministration",
"Leiter Spenglerei", "Leiter Physiotherapie", "Mineur",
"Oracle Systemingenieur", "Pianist", "Pizzaiolo",
"Sachbearbeiter Einwohnerkontrolle", "Sterilisationsfachmann",
"Assistenzarzt", "Laborwissenschaftler",
"Director Drug Safety Services",
"Leiter wissenschaftliche Publikationen",
"Regulatory Documentation Specialist", "Schwimmlehrer",
"Netzplaner", "Schuhverkäufer", "Field Force Manager",
"Feuerungsfachmann", "Medizinischer Direktor",
"Leiter Leitungsbau", "Leiter Abfallwirtschaft",
"Bauland-Entwickler", "Konkurssachbearbeiter", "Metallhändler",
"Leiter Elektroinstallation", "Leiter Wirtschaftsamt",
"Leiter Statistik", "Operationsdisponent", "Sozialinspektor",
"Bewegungswissenschafter", "Facharzt Pädiatrie",
"Facharzt Rheumatologie", "Leiter Systementwicklung",
"Gefängnisleiter", "Leiter Produktions-Controlling",
"Systemelektroniker", "Assistent Investment Management",
"Head Talent Management", "Leiter Betriebsplanung",
"Leiter Amtsvormundschaft", "Leiter Detailhandel",
"Lingerie-Mitarbeiter", "Altentherapeut", "Taucher",
"Edelmetallprüfer", "Messaging Architekt", "Websphere-Spezialist",
"Microsoft Dynamics NAV Entwickler", "VMWare Spezialist",
"Projektleiter MS Windows", "Leiter Informatik-Revision",
"Departements-Koordinator", "Center-Leiter", "Auslandredaktor",
"Verkehrsassistent", "Leiter Lernwerk",
"Leiter Operationsabteilung", "Fürsorger", "Fachberater Computer",
"Regionalvertreter", "Leiter Codierung ",
"Head of Monetary Policy", "TV-Produzent", "Leiter CRM",
"Leiter Vermögensverwaltung",
"Fachspezialist Personenversicherungen", "Coldfusion Entwickler",
"Getränkeberater", "Immobilien-Controller", "Tour Operator",
"Business Process Application Manager",
"Leiter Business Application SAP", "Leiter Umweltmanagement",
"Heizöl-Tankrevisor", "Importfachmann", "Fonds Produktentwickler",
"Leiter Software-Testing", "Bahntechniker", "Pharmatechniker",
"Lagerungspfleger", "Pflegefachmann Notfall", "Urologe",
"Sportarzt", "Patrouilleur", "Bank-Controller",
"Leiter des Amtssekretariats", "Verkäufer Food",
"Webmaschinenspezialist", "Schmuckverkäufer", "Bausekretär",
"Vormundschaftssekretär", "Web Analyse Spezialist", "Kutscher",
"Forstingenieur", "Leiter Handelsregisteramt", "Delphi-Entwickler",
"Dosimetrist", "Eisenbahningenieur", "Elektromontageleiter",
"Wärmemonteur", "<NAME>", "Löter", "Maurerpolier",
"Leiter Business Intelligence", "Freelancer", "Handwerker",
"Freiwilliger Mitarbeiter", "Polybauer", "Social Media Manager",
"Eisenleger", "Leiter Einwohnerkontrolle", "Pferdewart",
"Leiter Cash Management", "Casino-Host", "Gastroenterologe",
"Banksachbearbeiter", "Matrose", "<NAME>",
"Middle Office Spezialist", "Molekularbiologe", "Museumsaufseher",
"Neurologe", "Hundeführer", "Netztechnik-Ingenieur",
"Leiter Bauverwaltung", "Leiter Eingliederungsmanagement",
"Scrum Master", "Leiter Anästhesie",
"Microsoft Dynamics AX Entwickler", "Eisenwarenverkäufer",
"Assistent After Sales", "Automobil-Fachmann", "Plakatkleber",
"Biophysiker", "Klimatologe", "Pflegehelfer", "Dentalsekretär",
"Mitarbeiter auf Abruf", "Guest Relation Manager", "Scout",
"Personalassistent", "Personalmarketer", "Trainee",
"Software-Entwickler", "Elektrotechniker", "Hilfsarbeiter",
"Anwalt", "Ausbilder", "Verkaufsförderer", "Personalrekrutierer",
"Informatik-Berater", "Webentwickler", "Administrator",
"IT-Supporter", "Programmierer", "Systembetreuer",
"Systemadministrator", "IC-Techniker", "Netzwerkadministrator",
"EDV-Allrounder", "Datenbankadministrator", "Webmaster", "Berater",
"Texter", "Scientist", "Barangestellter", "Social Media Manager",
"Aussendienstmitarbeiter", "Entwicklungsingenieur", "Ingenieur",
"Informatiker", "Marketingfachmann",
"Verkaufsinnendienstmitarbeiter", "Musiklehrer", "Controller",
"Internet-Spezialist", "Online Redaktor", "Interviewer",
"Administrativer Assistent", "Java-Entwickler", "Manager",
"Journalist", "Associate", "Junior Business Analyst",
"Data Mining Spezialist", "Techniker", "Firmenkundenberater",
"Projektassistent", "Prorektor", "Key Account Manager",
"Kleinkinderzieher", "Lehrer", "Koch", "Konstrukteur",
"Contract Manager", "Lektor", "Webdesigner", "Marktanalyst",
"Kundenberater", "Dozent", "Allround-Assistent", "Laborant",
"Lagerist", "Landschaftsgärtner", "Beleuchter", "LINUX Spezialist",
"Logistiker", ".NET Entwickler", "Datenbankspezialist",
"Telefonverkäufer", "Direktionsassistent", "Marktforscher",
"Marketingassistent", "Marketingkommunikations-Spezialist",
"PR-Assistent", "Marketingberater", "Mediengestalter", "Redaktor",
"Microsoft-Spezialist", "Service Delivery Manager",
"Software-Spezialist", "Projektmanager", "Verlagsfachmann",
"Bauprojektleiter", "Chefredaktor", "PR-Manager", "Schreiner",
"Bankfachexperte", "Datatypist", "Assistent Produktentwicklung",
"Servicefachangestellter", "Call Center Agent",
"Detailhandelsverkäufer", "Vertriebskaufmann",
"Redaktionsassistent", "Barmitarbeiter",
"Customer Service Manager", "Buchhalter", "Leiter Entwicklung",
"Masseur", "Fotomodell", "Moderator", "Monteur", "Künstler",
"Verkaufsberater", "Musiktherapeut", "Qualitätsmanager",
"Messtechniker", "Absolvent",
"Fachspezialist für Personalvorsorge", "Campaigner",
"Schauspieler", "Office Manager", "E-Marketer", "Mediaexperte",
"Foto-Journalist", "Produktmanager", "Software-Qualitätsmanager",
"Einkäufer", "PHP-Entwickler", "PR-Berater", "Partner",
"PC/LAN-Supporter", "HR-Manager", "Personal Assistant", "Arzt",
"Personalberater", "Pflegefachmann", "Physiotherapeut",
"Wellness-Trainer", "Reporting Specialist", "Grafiker",
"Design-Ingenieur", "Produktionsassistent", "Produktionsleiter",
"C++ Entwickler", "Architekt", "Projektberater",
"E-Commerce-Manager", "Informations- und Dokumentationsassistent",
"Kosmetiker", "Prozessberater", "Visualizer", "Pädagoge",
"Verkaufskoordinator", "Reporter", "Berater-Assistent",
"Leiter Rechnungswesen", "Anwaltssekretär", "Jurist",
"Assistent Compliance Officer", "Assistent Personalrekrutierung",
"Giesser", "Freelancer", "Country Manager", "Reiseberater",
"Reiseleiter", "Supporter", "Reifenmonteur", "Repräsentant",
"Empfangsmitarbeiter", "Kaufmännischer Angestellter",
"Lastwagenführer", "Auftragsabwickler", "Verkaufsassistent",
"IT-Sales-Manager", "Sekretär", "Software Testmanager",
"Kundensupporter", "Auslieferer", "Service-Leiter",
"Leiter Sponsoring & Events", "Filialleiter",
"Sicherheitsmitarbeiter", "Kapitän", "Sales Agent", "Schlosser",
"Abteilungsleiter", "IT-Projektmanager",
"Mitarbeiter Patientenadministration", "Produktionsplaner",
"Stellvertretender Leiter", "Steuerberater",
"Leiter Systementwicklung", "Lehrling", "Stylist",
"Systemingenieur", "Unternehmensberater", "Dispatcher",
"Marketing Controller", "TYPO3-Entwickler", "Teamleiter",
"Technischer Supporter", "Vertriebsassistent", "Büroangestellte",
"Telefonist", "Operator", "Mystery Shopper", "Konzeptioner",
"Restaurant Operations Manager", "Übersetzer",
"Speditionsfachmann", "Head Corporate Communications",
"Usability Product Specialist", "Verkaufsleiter",
"Vermessungszeichner", "Vermögensberater", "Verwaltungsassistent",
"Glaser", "Volontär", "User Interface Designer",
"Gesundheitsberater", "Category Manager", "Innenarchitekt",
"IT-Assistent", "Leiter SAP", "Prototypenbauer",
"Wirtschaftsinformatiker", "Yogalehrer", "Bauzeichner", "Bote",
"Gouvernante", "Mechaniker", "E-Learning Spezialist",
"Leiter Telekommunikation", "Content Manager", "Analyst",
"Prozessmanager", "Organisationsentwickler", "Mathematiker",
"Commissions Specialist", "Dokumentalist", "CAD-Konstrukteur",
"Energieberater", "Case Manager", "VR-Assistent",
"Kommunikations-Designer", "Designer", "Multimedia-Designer",
"Geschäftsführer", "Keramik-Modelleur", "Art Director", "Layouter",
"Convention Sales Manager", "Werbeleiter", "Brand Manager",
"Business Analyst", "IT-Leiter", "Cash Manager",
"Chief Operating Officer", "Beteiligungscontroller",
"Business Development Manager", "Finanzcontroller", "Finanzleiter",
"Regulatory Affairs Specialist", "Personalleiter",
"Internal Auditor", "Leiter Management Support",
"Leiter Management-System", "Riskmanager", "Operationsdisponent",
"Treasurer", "Leiter Administration", "Detailhandelsfachmann",
"Informatik-Kaufmann", "Privatkundenberater", "Küchenbauer",
"Brauer", "Steuerungsfachmann", "Berufsschullehrer",
"Immobilienverkäufer", "Software-Supporter",
"Leiter Anzeigenverkauf", "Anzeigendisponent", "Mediaberater",
"Immobilienbewirtschafter", "Führungskraft", "Anzeigenverkäufer",
"Lagerungspfleger", "Facharzt Anästhesiologie",
"Pflegefachmann Anästhesie", "Ärzteberater",
"Leiter Verkaufsinnendienst", "International OTC Spezialist",
"Apotheker", "Entwickler Mobile Apps", "Release Manager",
"Anlagenbetreiber", "Leiter IT-Support", "Bautechniker", "Ökologe",
"Programmverantwortlicher", "Datenadministrator", "Chemiker",
"SAP-Berater", "Informatik-Controller",
"Oracle Applikationsentwickler", "SAP Applikationsentwickler",
"Integrationsmanager", "Life Cycle Manager",
"SAP Systemspezialist", "IT Sicherheitsberater",
"IT-System-Architekt", "Applikationsverantwortlicher", "Physiker",
"Antriebsspezialist", "Verkaufsingenieur",
"Applikations-Integrator", "Werkstoffingenieur",
"Hardwareentwickler", "Leiter IT-Sicherheit", "Exportfachmann",
"Kontakter", "Projektleiter", "Regionalverkaufsleiter",
"Koordinator", "Unternehmer", "Partnermanager",
"IT Solution Architect", "<NAME>",
"Distribution Manager", "SAP HR Berater", "Allrounder",
"Organisator", "Sachbearbeiter", "Fahrzeug-Elektriker",
"Elektroniker", "Automechaniker", "Leiter Mechanik",
"Mechatroniker", "Leiter Technischer Support",
"Leiter After Sales", "Werkstattchef", "Testingenieur",
"Service-Techniker", "Service-Ingenieur", "Jugendbetreuer",
"Assistenzarzt", "<NAME>", "Lehrer Vorschulstufe",
"Kinderkrankenschwester", "Tierpfleger", "Erzieher",
"<NAME>", "Klavierbauer",
"Administrativer Klinikleiter", "Steuerkommissär",
"Clinical Research Assistant", "Neuropsychologe", "Wissensmanager",
"Netzwerk-Ingenieur", "Gastronomie-Fachmann", "Kreditberater",
"Werbekaufmann", "Kommunikationschef",
"Online Kommunikationsmanager", "Kommunikationstechniker",
"Planer Marketingkommunikation", "Verkaufstrainer",
"SAP-Projektleiter", "Kommunikationswissenschafter",
"Supply Chain Manager", "Kompetenzzenter Manager",
"Komponentenentwickler", "Maschineningenieur", "Komponist",
"Konditor-Confiseur", "Konferenzmanager", "Event-Manager",
"Veranstaltungstechniker", "Change Manager",
"Finanz- und Anlage-Experte",
"Dokumentenmanagementsystem-Spezialist", "Strategieberater",
"Informatik-Ingenieur", "Data Warehouse Analyst", "CRM-Spezialist",
"Cash Service Berater", "Oracle-Berater", "Private Banker",
"SAP CRM Berater", "Treuhänder", "SAP SCM Berater",
"SAP BI Berater", "Beratungsingenieur", "Leiter Consulting",
"Zahnarzt", "Simulationsingenieur", "Berechnungsingenieur",
"Finanzbuchhalter", "Business Unit Manager", "Assistent CxO",
"Business Unit Controller", "Marketingleiter",
"Bereichspersonalleiter", "Montageleiter", "Kunststofftechnologe",
"Projektingenieur", "Produktionsingenieur", "Automobilingenieur",
"Naturwissenschafter", "Elektroingenieur", "Prozessingenieur",
"HLK-Techniker", "Gebäudetechniker", "Projektkoordinator",
"Technischer Berater", "Wirtschaftsingenieur",
"SPS Software-Ingenieur", "Konstruktionsingenieur", "Richter",
"Gewerkschaftsfunktionär", "ABAP-Entwickler",
"Organisationsberater", "SAP FI/CO Berater",
"Microsoft Dynamics NAV Entwickler", "Compliance Officer",
"Disponent", "Produktionsmanager", "Inkasso-Spezialist",
"Wohnberater", "Traffic Manager", "Innovationsmanager",
"Automations-Ingenieur", "Betriebsleiter",
"Fremdsprachen-Assistent", "Leiter Marketing und Verkauf",
"Vertriebsleiter", "Kommerzsachbearbeiter",
"Schadensachbearbeiter", "Inspektor", "Leiter Telefon und Empfang",
"Schleifer", "Gründungsberater", "Prüffeld-Techniker",
"Facharzt | |
= mt.atan2(temp1,temp2)
return trim
class trig_atan2d2(funcionestrig):
def __init__(self, exp1, exp2 ):
self.exp1 = exp1
self.exp2 = exp2
def ejecutar(self):
try:
temp1 = float(self.exp1)
temp2 = float(self.exp2)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.atan2d(temp1,temp2)
return trim
class trig_cos2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.cos(float(temp))
return trim
class trig_cosd2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.cosd(float(temp))
return trim
class trig_cot2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.cot(float(temp))
return trim
class trig_cotd2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.cotd(float(temp))
return trim
class trig_sin2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.sin(float(temp))
return trim
class trig_sind2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.sind(float(temp))
return trim
class trig_tan2(funcionestrig):
def __init__(self, exp ):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.tan(float(temp))
return trim
class trig_tand2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.tand(float(temp))
return trim
class trig_sinh2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.sinh(float(temp))
return trim
class trig_cosh2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.cosh(float(temp))
return trim
class trig_tanh2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.tanh(float(temp))
return trim
class trig_asinh2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.asinh(float(temp))
return trim
class trig_acosh2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.acosh(float(temp))
return trim
class trig_atanh2(funcionestrig):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
try:
temp = float(self.exp)
except ValueError:
e = errores.CError(0,0,"Error en funcion trigonometrica",'Semantico')
errores.insert_error(e)
return e
trim = mt.atanh(float(temp))
return trim
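# Usage sketch for the wrappers above (assuming `mt` aliases Python's math module or
# a compatible helper): trig_sin2(0.5).ejecutar() returns sin(0.5) as a float, while a
# non-numeric argument returns a CError object instead of raising.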
#GENERAL FUNCTIONS
class funciongen():
'Abstract class'
class fun_length2(funciongen):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
#convert the value to a string and return its length
temp = str(self.exp )
trim = len(temp)
return trim
class fun_trim2(funciongen):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
temp = str(self.exp)
trim = temp.strip()
return trim
class fun_md52(funciongen):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
#compute the MD5 hex digest of the string value
temp = str(self.exp )
crypt = hashlib.md5()
crypt.update(temp.encode('utf-8'))
r = crypt.hexdigest()
return r
class fun_sha2562(funciongen):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
#compute the SHA-256 hex digest of the string value
temp = str(self.exp)
crypt = hashlib.sha256()
crypt.update(temp.encode('utf-8'))
r = crypt.hexdigest()
return r
class fun_substr2(funciongen):
def __init__ (self,exp,min,max):
self.exp = exp
self.min = min
self.max = max
def ejecutar(self):
#take the substring [min:max] and return it
temp = str(self.exp)
sub = temp[self.min:self.max]
return sub
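# Example: fun_substr2("palabra", 0, 3).ejecutar() returns "pal" (Python slice
# semantics, so `max` is exclusive).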
class fun_greatest2(funciongen):
def __init__ (self,lexps):
self.lexps = lexps
def ejecutar(self):
try:
maximo = float(self.lexps[0])
for dato in self.lexps:
temp = float(dato)
if maximo < temp:
maximo = temp
return maximo
except:
e = errores.CError(0,0,"Funcion least necesita una lista",'Semantico')
errores.insert_error(e)
return e
class fun_least2(funciongen):
def __init__ (self,lexps):
self.lexps = lexps
def ejecutar(self):
try:
maximo = float(self.lexps[0])
for dato in self.lexps:
temp = float(dato)
if maximo > temp:
maximo = temp
return maximo
except:
e = errores.CError(0,0,"Funcion least necesita una lista",'Semantico')
errores.insert_error(e)
return e
class dato2(funciongen):
def __init__ (self,val):
self.val = val
class fun_now2(funciongen):
def __init__ (self,exp):
self.exp = exp
def ejecutar(self):
# current date formatted as YYYY-MM-DD HH:MM:SS (date.today() has no time component, so the time part is always 00:00:00)
today = date.today()
d1 = today.strftime("%Y-%m-%d %H:%M:%S")
return d1
def VerificarTipo(TipoColumna,ValorColumna):
"""try:
if float(ValorColumna):
TipoRegistro = definir_tipo(float(ValorColumna))
elif int(ValorColumna):
TipoRegistro = definir_tipo(int(ValorColumna))
except:
TipoRegistro = definir_tipo(ValorColumna)
if TipoRegistro == "smallint" and TipoColumna == "integer":
TipoRegistro = "integer"
try:
if TipoColumna.restipo.lower() == TipoRegistro:
return True
else:
return False
except:
if TipoColumna.lower() == TipoRegistro:
return True
else:
return False"""
return True
def definir_tipo(entrada):
"""if isinstance(entrada,int) or isinstance(entrada,float):
if entrada < 32767 and entrada > -32768:
return "smallint"
elif entrada < 214783648 and entrada > -214783648:
return "integer"
elif entrada < 9223372036854775808 and entrada > -9223372036854775808:
return "bigint"
elif entrada < 92233720368547758.08 and entrada > -92233720368547758.08 :
return "money"
else:
return "decimal"
elif isinstance(entrada,bool):
return "boolean"
else:
g = entrada.count('-')
dp = entrada.count(':')
if len(entrada) == 1:
return "char"
elif g == 3 and dp == 3:
return "time"
elif g == 3 and dp == 0:
return "date"
else:
return "varchar"""
#UPDATE-----------------------------------------
class update(instruccion):
def __init__(self,iden, cond, wherecond):
self.iden = iden
self.cond = cond
self.wherecond = wherecond
def traducir(self):
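# Builds the generated-code line for this UPDATE: it emits a
# sql.execute("UPDATE <table> SET <column> = <value> WHERE ...") call, quoting string
# values, evaluating expression nodes through their ejecutar() method, and rendering
# the WHERE part either as a simple comparison (wherecond1) or as a BETWEEN range (wherecond).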
traduccion = '\t'
traduccion += 'sql.execute("UPDATE'
traduccion += ' ' + self.iden
NombreColumna = self.cond.iden
traduccion += ' SET ' + NombreColumna
traduccion += ' = '
if isinstance(self.cond.tipo , (int, float, complex)):
traduccion += str(self.cond.tipo)
elif isinstance(self.cond.tipo , str):
traduccion += "'" + self.cond.tipo + "'"
elif isinstance(self.cond.tipo, bool):
traduccion += str(self.cond.tipo )
else:
try:
temp = self.cond.tipo.ejecutar()
if isinstance(temp, (int, float, complex)):
traduccion += str(temp)
elif isinstance(temp, str):
traduccion += temp
elif isinstance(temp, bool):
traduccion += str(temp)
except:
'''error'''
traduccion += ' WHERE '
tempwherw = self.wherecond
if isinstance(tempwherw,wherecond1):
traduccion += ' ' + tempwherw.iden
traduccion += ' ' + tempwherw.signo
if isinstance(tempwherw.tipo, str):
traduccion += " '" + tempwherw.tipo + "'"
elif isinstance(tempwherw.tipo, (int, float, complex)):
traduccion += ' ' + str(tempwherw.tipo)
if "ejecutar" in dir(self.wherecond.tipo):
traduccion += ' ' + str(self.wherecond.tipo.ejecutar())
if isinstance(tempwherw, wherecond):
traduccion += ' ' + tempwherw.iden + ' BETWEEN'
try:
traduccion += ' ' + str(tempwherw.tipo.ejecutar())
except:
traduccion += ' ' + tempwherw.tipo
traduccion += ' AND '
try:
traduccion += ' ' + str(tempwherw.tipo2.ejecutar()) + ' '
except:
traduccion += ' ' + str(tempwherw.tipo2) + ' '
traduccion += ';")'
traduccion += '\n'
return traduccion
def ejecutar(self):
global resultadotxt
global cont
global tabla
global NombreDB
resultadotxt = ""
try:
TuplasTabla = []
ColumnasTabla = []
#GET THE TUPLES AND COLUMNS OF THE TABLE
TablaActual = tabla.BuscarNombre(self.iden)
for simbolo in tabla.simbolos:
if tabla.simbolos[simbolo].ambito == TablaActual.id and tabla.simbolos[simbolo].tipo == TS.TIPO.TUPLA:
TuplasTabla.append(tabla.simbolos[simbolo])
if tabla.simbolos[simbolo].ambito == TablaActual.id and tabla.simbolos[simbolo].tipo == TS.TIPO.COLUMN:
ColumnasTabla.append(tabla.simbolos[simbolo])
#GET THE CONDITION FIELD
#Condicion = self.wherecond.tipo
try:
if self.wherecond.tipo.exp:
Condicion = self.wherecond.tipo.ejecutar()
if isinstance(Condicion,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
except:
try:
if self.wherecond.tipo.exp1:
Condicion = self.wherecond.tipo.ejecutar()
if isinstance(Condicion,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
except:
Condicion = self.wherecond.tipo
NombreColumna = self.cond.iden
columnacond = self.wherecond.iden
try:
#cond2 = self.wherecond.tipo2
try:
if self.wherecond.tipo2.exp:
cond2 = self.wherecond.tipo2.ejecutar()
if isinstance(cond2,errores.CError):
e = errores.CError(0,0,"Funcion Erroneo",'Semantico')
errores.insert_error(e)
return "Funcion Erronea"
except:
try:
if self.wherecond.tipo2.exp1:
cond2 = self.wherecond.tipo2.ejecutar()
if isinstance(cond2,errores.CError):
e = | |
<filename>flarepy/utils/database_utils.py
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 29 12:30:07 2017
@author: alex_
"""
from sunpy.io import fits, file_tools as sunpy_filetools
import sunpy.database
from astropy.units import Unit, nm, equivalencies
from sunpy.database.tables import *
from sunpy.time import parse_time, TimeRange, is_time_in_given_format
from sunpy.extern import six
from datetime import datetime, timedelta
import os
import sunpy.timeseries as ts
from sunpy.util import MetaDict
def entries_from_goes_file(file, default_waveunit=None, source=None):
"""Use the headers of a FITS file to generate an iterator of
:class:`sunpy.database.tables.DatabaseEntry` instances. Gathered
information will be saved in the attribute `fits_header_entries`. If the
key INSTRUME, WAVELNTH or DATE-OBS / DATE_OBS is available, the attribute
`instrument`, `wavemin` and `wavemax` or `observation_time_start` is set,
respectively. If the wavelength unit can be read, the values of `wavemin`
and `wavemax` are converted to nm (nanometres). The value of the `file`
parameter is used to set the attribute `path` of each generated database
entry.
Parameters
----------
file : str or file-like object
Either a path pointing to a FITS file or a an opened file-like object.
If an opened file object, its mode must be one of the following rb,
rb+, or ab+.
default_waveunit : str, optional
The wavelength unit that is used for a header if it cannot be
found.
Raises
------
sunpy.database.WaveunitNotFoundError
If `default_waveunit` is not given and the wavelength unit cannot
be found in one of the FITS headers
sunpy.WaveunitNotConvertibleError
If a wavelength unit could be found but cannot be used to create an
instance of the type ``astropy.units.Unit``. This can be the case
for example if a FITS header has the key `WAVEUNIT` with the value
`nonsense`.
Examples
--------
>>> from sunpy.database.tables import entries_from_file
>>> import sunpy.data
>>> sunpy.data.download_sample_data(overwrite=False) # doctest: +SKIP
>>> import sunpy.data.sample
>>> entries = list(entries_from_file(sunpy.data.sample.SWAP_LEVEL1_IMAGE))
>>> len(entries)
1
>>> entry = entries.pop()
>>> entry.instrument
'SWAP'
>>> entry.observation_time_start, entry.observation_time_end
(datetime.datetime(2012, 1, 1, 0, 16, 7, 836000), None)
>>> entry.wavemin, entry.wavemax
(17.400000000000002, 17.400000000000002)
>>> len(entry.fits_header_entries)
111
"""
headers = fits.get_header(file)
if isinstance(file, (str, six.text_type)):
filename = file
else:
filename = getattr(file, 'name', None)
for header in headers[0:1]:
statinfo = os.stat(file)
#print('a header')
entry = DatabaseEntry(path=filename)
entry.size = statinfo.st_size
"""
for key, value in six.iteritems(header):
# Yes, it is possible to have an empty key in a FITS file.
# Example: sunpy.data.sample.EIT_195_IMAGE
# Don't ask me why this could be a good idea.
if key == '':
value = str(value)
elif key == 'KEYCOMMENTS':
for k, v in six.iteritems(value):
entry.fits_key_comments.append(FitsKeyComment(k, v))
continue
entry.fits_header_entries.append(FitsHeaderEntry(key, value))
#
if key == 'TELESCOP': # Not 'INSTRUME'
entry.instrument = value # So E.G. 'GOES 6' instead 'X-ray Detector'
# NOTE: the key DATE-END or DATE_END is not part of the official
# FITS standard, but many FITS files use it in their header
elif key in ('DATE-END', 'DATE_END'):
entry.observation_time_end = parse_time(value)
elif key in ('DATE-OBS', 'DATE_OBS'):
entry.observation_time_start = parse_time(value)
"""
# Add/tweak start/end entries for GOES
if header.get('TELESCOP','') != '':
#header['INSTRUME'] = header['TELESCOP']# So E.G. 'GOES 6' instead 'X-ray Detector'
entry.instrument = header['TELESCOP']
if (header.get('DATE-OBS','') != '') and (header.get('DATE-END','') != ''):
if is_time_in_given_format(header['DATE-OBS'], '%d/%m/%Y'):
start_time = datetime.strptime(header['DATE-OBS'], '%d/%m/%Y')
elif is_time_in_given_format(header['DATE-OBS'], '%d/%m/%y'):
start_time = datetime.strptime(header['DATE-OBS'], '%d/%m/%y')
end_time = start_time + timedelta(days=1,seconds=-1)
#header['DATE-OBS'] = start_time.strftime('%Y/%m/%d')#'%d/%m/%Y')
#header['TIME-OBS'] = start_time.strftime('%H:%M:%S')
#header['DATE-END'] = end_time.strftime('%Y/%m/%d')#'%d/%m/%Y')
#header['TIME-END'] = end_time.strftime('%H:%M:%S')
# Add these to the entry
entry.observation_time_start = start_time
entry.observation_time_end = end_time
#print('')
#print(dir(entry))
#print('')
#entry.wavemax = 0.8 * nm # XRSB '1.0--8.0 $\AA$'
#entry.wavemin = 0.05 * nm # XRSA '0.5--4.0 $\AA$'
entry.wavemax = 0.8 # XRSB '1.0--8.0 $\AA$'
entry.wavemin = 0.05 # XRSA '0.5--4.0 $\AA$'
"""
waveunit = fits.extract_waveunit(header)
if waveunit is None:
waveunit = default_waveunit
unit = None
if waveunit is not None:
try:
unit = Unit(waveunit)
except ValueError:
raise WaveunitNotConvertibleError(waveunit)
"""
"""
for header_entry in entry.fits_header_entries:
key, value = header_entry.key, header_entry.value
if key == 'INSTRUME':
entry.instrument = value
elif key == 'WAVELNTH':
if unit is None:
raise WaveunitNotFoundError(file)
# use the value of `unit` to convert the wavelength to nm
entry.wavemin = entry.wavemax = unit.to(
nm, value, equivalencies.spectral())
"""
yield entry
def entries_from_goes_ts_files(*files, default_waveunit=None, source=None):
"""Use the headers of a FITS file to generate an iterator of
:class:`sunpy.database.tables.DatabaseEntry` instances. Gathered
information will be saved in the attribute `fits_header_entries`. If the
key INSTRUME, WAVELNTH or DATE-OBS / DATE_OBS is available, the attribute
`instrument`, `wavemin` and `wavemax` or `observation_time_start` is set,
respectively. If the wavelength unit can be read, the values of `wavemin`
and `wavemax` are converted to nm (nanometres). The value of the `file`
parameter is used to set the attribute `path` of each generated database
entry.
Parameters
----------
file : str or file-like object
Either a path pointing to a FITS file or a an opened file-like object.
If an opened file object, its mode must be one of the following rb,
rb+, or ab+.
default_waveunit : str, optional
The wavelength unit that is used for a header if it cannot be
found.
Raises
------
sunpy.database.WaveunitNotFoundError
If `default_waveunit` is not given and the wavelength unit cannot
be found in one of the FITS headers
sunpy.WaveunitNotConvertibleError
If a wavelength unit could be found but cannot be used to create an
instance of the type ``astropy.units.Unit``. This can be the case
for example if a FITS header has the key `WAVEUNIT` with the value
`nonsense`.
Examples
--------
>>> from sunpy.database.tables import entries_from_file
>>> import sunpy.data
>>> sunpy.data.download_sample_data(overwrite=False) # doctest: +SKIP
>>> import sunpy.data.sample
>>> entries = list(entries_from_file(sunpy.data.sample.SWAP_LEVEL1_IMAGE))
>>> len(entries)
1
>>> entry = entries.pop()
>>> entry.instrument
'SWAP'
>>> entry.observation_time_start, entry.observation_time_end
(datetime.datetime(2012, 1, 1, 0, 16, 7, 836000), None)
>>> entry.wavemin, entry.wavemax
(17.400000000000002, 17.400000000000002)
>>> len(entry.fits_header_entries)
111
"""
"""
ts_goes = ts.TimeSeries(file)
statinfo = os.stat(file)
entry = DatabaseEntry(path=file)
entry.size = statinfo.st_size
#header['INSTRUME'] = header['TELESCOP']# So E.G. 'GOES 6' instead 'X-ray Detector'
entry.instrument = ts_goes.meta.get('TELESCOP').values()
entry.wavemax = 0.8 # XRSB '1.0--8.0 $\AA$'
entry.wavemin = 0.05 # XRSA '0.5--4.0 $\AA$'
#
entry.observation_time_start = ts_goes.meta.get('date-beg').values()[0]
entry.observation_time_end = ts_goes.meta.get('date-end').values()[0]
entry.metadata = ts_goes.meta.metadata[0][2]
#entry.tags = [ sunpy.database.attrs.Tag('raw') ]
"""
for file in files:
headers = fits.get_header(file)
if isinstance(file, (str, six.text_type)):
filename = file
else:
filename = getattr(file, 'name', None)
statinfo = os.stat(file)
#print('a header')
entry = DatabaseEntry(path=filename)
entry.size = statinfo.st_size
# Add/tweak start/end entries for GOES
if headers[0].get('TELESCOP','') != '':
#header['INSTRUME'] = header['TELESCOP']# So E.G. 'GOES 6' instead 'X-ray Detector'
entry.instrument = headers[0]['TELESCOP']
elif headers[1].get('TELESCOP','') != '':
entry.instrument = headers[1]['TELESCOP']
if (headers[0].get('DATE-OBS','') != ''):
if is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%Y'):
start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%Y')
elif is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%y'):
start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%y')
else:
start_time = parse_time(headers[0]['DATE-OBS'])
elif (headers[1].get('DATE-OBS','') != ''):
if is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%Y'):
start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%Y')
elif is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%y'):
start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%y')
else:
start_time = parse_time(headers[1]['DATE-OBS'])
if (headers[0].get('DATE-END','') != ''):
if is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%Y'):
end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%Y')
elif is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%y'):
end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%y')
else:
end_time = parse_time(headers[0]['DATE-END'])
elif (headers[1].get('DATE-END','') != ''):
if is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%Y'):
end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%Y')
elif is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%y'):
end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%y')
else:
end_time = parse_time(headers[1]['DATE-END'])
else:
end_time = start_time + timedelta(days=1,seconds=-1)
# Add these to the entry
entry.observation_time_start = start_time
entry.observation_time_end = end_time
entry.wavemax = 0.8 # XRSB '1.0--8.0 $\AA$'
entry.wavemin = 0.05 # XRSA '0.5--4.0 $\AA$'
if source:
entry.source = source
entry.metadata = MetaDict(headers[1])
#entry.tags = sunpy.database.attrs.Tag('raw')
#entry = DatabaseEntry(instrument='EIT', wavemin=25.0)
#return entry
yield entry
def add_entries_from_goes_ts_files(database, *files, tags=[], default_waveunit=None, source=None):
for entry in entries_from_goes_ts_files(*files, default_waveunit=default_waveunit, source=source):
database.add(entry)
for tag in tags:
database.tag(entry, tag)
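# A minimal usage sketch for the helper above (illustrative only; the database
# URL, FITS file names and tag are assumptions, not part of this module):
#
#   import sunpy.database
#   db = sunpy.database.Database('sqlite:///goes_entries.sqlite')
#   add_entries_from_goes_ts_files(db, 'go1520170906.fits', 'go1520170907.fits',
#                                  tags=['raw'], source='GOES')
#   db.commit()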
def entries_from_goes_ts_file2(file, default_waveunit=None):
"""Use the headers of a FITS file to generate an iterator of
:class:`sunpy.database.tables.DatabaseEntry` instances. Gathered
information will be saved in the attribute `fits_header_entries`. If the
key INSTRUME, WAVELNTH or DATE-OBS / DATE_OBS is available, the attribute
`instrument`, `wavemin` and `wavemax` or `observation_time_start` is set,
respectively. If the wavelength unit can be read, the values of `wavemin`
and `wavemax` are converted to nm (nanometres). The value of the `file`
parameter is used to set the attribute `path` of each generated database
entry.
Parameters
----------
file : str or file-like object
Either a path pointing to a FITS file or a an opened file-like object.
If an opened file object, its mode must be one of the | |
DZITA',
66388: 'OLD PERMIC LETTER E',
66411: 'OLD PERMIC LETTER EF',
66386: 'OLD PERMIC LETTER GAI',
66412: 'OLD PERMIC LETTER HA',
66393: 'OLD PERMIC LETTER I',
66421: 'OLD PERMIC LETTER IA',
66418: 'OLD PERMIC LETTER IE',
66394: 'OLD PERMIC LETTER KOKE',
66395: 'OLD PERMIC LETTER LEI',
66396: 'OLD PERMIC LETTER MENOE',
66397: 'OLD PERMIC LETTER NENOE',
66409: 'OLD PERMIC LETTER O',
66410: 'OLD PERMIC LETTER OO',
66399: 'OLD PERMIC LETTER PEEI',
66400: 'OLD PERMIC LETTER REI',
66406: 'OLD PERMIC LETTER SHCHOOI',
66405: 'OLD PERMIC LETTER SHOOI',
66401: 'OLD PERMIC LETTER SII',
66402: 'OLD PERMIC LETTER TAI',
66413: 'OLD PERMIC LETTER TSIU',
66403: 'OLD PERMIC LETTER U',
66414: 'OLD PERMIC LETTER VER',
66398: 'OLD PERMIC LETTER VOOI',
66420: 'OLD PERMIC LETTER YA',
66417: 'OLD PERMIC LETTER YAT',
66415: 'OLD PERMIC LETTER YER',
66416: 'OLD PERMIC LETTER YERI',
66408: 'OLD PERMIC LETTER YERU',
66407: 'OLD PERMIC LETTER YRY',
66419: 'OLD PERMIC LETTER YU',
66391: 'OLD PERMIC LETTER ZATA',
66389: 'OLD PERMIC LETTER ZHOI',
128435: 'OLD PERSONAL COMPUTER',
128116: 'OLDER MAN',
128117: 'OLDER WOMAN',
128329: 'OM SYMBOL',
128283: 'ON WITH EXCLAMATION MARK WITH LEFT RIGHT ARROW ABOVE',
128664: 'ONCOMING AUTOMOBILE',
128653: 'ONCOMING BUS',
128753: 'ONCOMING FIRE ENGINE',
128660: 'ONCOMING POLICE CAR',
128662: 'ONCOMING TAXI',
128431: 'ONE BUTTON MOUSE',
128214: 'OPEN BOOK',
128194: 'OPEN FILE FOLDER',
128449: 'OPEN FOLDER',
128080: 'OPEN HANDS SIGN',
128275: 'OPEN LOCK',
128237: 'OPEN MAILBOX WITH LOWERED FLAG',
128236: 'OPEN MAILBOX WITH RAISED FLAG',
983190: 'OPERATING SYSTEM COMMAND',
9934: 'OPHIUCHUS',
128191: 'OPTICAL DISC',
128440: 'OPTICAL DISC ICON',
128217: 'ORANGE BOOK',
2934: 'ORIYA FRACTION ONE EIGHTH',
2931: 'ORIYA FRACTION ONE HALF',
2930: 'ORIYA FRACTION ONE QUARTER',
2933: 'ORIYA FRACTION ONE SIXTEENTH',
2932: 'ORIYA FRACTION THREE QUARTERS',
2935: 'ORIYA FRACTION THREE SIXTEENTHS',
983191: 'OSC',
128228: 'OUTBOX TRAY',
128471: 'OVERLAP',
128002: 'OX',
128230: 'PACKAGE',
983121: 'PAD',
983120: 'PADDING CHARACTER',
128463: 'PAGE',
128196: 'PAGE FACING UP',
128479: 'PAGE WITH CIRCLED TEXT',
128195: 'PAGE WITH CURL',
128223: 'PAGER',
128464: 'PAGES',
93067: 'PAHAWH HMONG CLAN SIGN FAJ',
93065: 'PAHAWH HMONG CLAN SIGN HAM',
93059: 'PAHAWH HMONG CLAN SIGN HAWJ',
93064: 'PAHAWH HMONG CLAN SIGN KHAB',
93058: 'PAHAWH HMONG CLAN SIGN KOO',
93070: 'PAHAWH HMONG CLAN SIGN KWM',
93056: 'PAHAWH HMONG CLAN SIGN LAUJ',
93055: 'PAHAWH HMONG CLAN SIGN LIS',
93060: 'PAHAWH HMONG CLAN SIGN MUAS',
93063: 'PAHAWH HMONG CLAN SIGN PHAB',
93061: 'PAHAWH HMONG CLAN SIGN THOJ',
93062: 'PAHAWH HMONG CLAN SIGN TSAB',
93053: 'PAHAWH HMONG CLAN SIGN TSHEEJ',
93069: 'PAHAWH HMONG CLAN SIGN TSWB',
93066: 'PAHAWH HMONG CLAN SIGN VAJ',
93071: 'PAHAWH HMONG CLAN SIGN VWJ',
93057: 'PAHAWH HMONG CLAN SIGN XYOOJ',
93068: 'PAHAWH HMONG CLAN SIGN YAJ',
93054: 'PAHAWH HMONG CLAN SIGN YEEG',
92973: 'PAHAWH HMONG CONSONANT AU',
92975: 'PAHAWH HMONG CONSONANT CAU',
92967: 'PAHAWH HMONG CONSONANT CHAU',
92959: 'PAHAWH HMONG CONSONANT HAU',
92965: 'PAHAWH HMONG CONSONANT HLAU',
92969: 'PAHAWH HMONG CONSONANT HNAU',
92958: 'PAHAWH HMONG CONSONANT LAU',
92966: 'PAHAWH HMONG CONSONANT MAU',
92972: 'PAHAWH HMONG CONSONANT NAU',
92968: 'PAHAWH HMONG CONSONANT NCHAU',
92962: 'PAHAWH HMONG CONSONANT NKAU',
92960: 'PAHAWH HMONG CONSONANT NLAU',
92971: 'PAHAWH HMONG CONSONANT NTHAU',
92957: 'PAHAWH HMONG CONSONANT NTSAU',
92970: 'PAHAWH HMONG CONSONANT PLHAU',
92963: 'PAHAWH HMONG CONSONANT QHAU',
92961: 'PAHAWH HMONG CONSONANT RAU',
92956: 'PAHAWH HMONG CONSONANT VAU',
92974: 'PAHAWH HMONG CONSONANT XAU',
92964: 'PAHAWH HMONG CONSONANT YAU',
93016: 'PAHAWH HMONG DIGIT EIGHT',
93013: 'PAHAWH HMONG DIGIT FIVE',
93012: 'PAHAWH HMONG DIGIT FOUR',
93017: 'PAHAWH HMONG DIGIT NINE',
93009: 'PAHAWH HMONG DIGIT ONE',
93015: 'PAHAWH HMONG DIGIT SEVEN',
93014: 'PAHAWH HMONG DIGIT SIX',
93011: 'PAHAWH HMONG DIGIT THREE',
93010: 'PAHAWH HMONG DIGIT TWO',
93008: 'PAHAWH HMONG DIGIT ZERO',
92981: 'PAHAWH HMONG MARK CIM HOM',
92978: 'PAHAWH HMONG MARK CIM KES',
92979: 'PAHAWH HMONG MARK CIM KHAV',
92977: 'PAHAWH HMONG MARK CIM SO',
92980: 'PAHAWH HMONG MARK CIM SUAM',
92982: 'PAHAWH HMONG MARK CIM TAUM',
92976: 'PAHAWH HMONG MARK CIM TUB',
93023: 'PAHAWH HMONG NUMBER HUNDRED MILLIONS',
93020: 'PAHAWH HMONG NUMBER HUNDREDS',
93022: 'PAHAWH HMONG NUMBER MILLIONS',
93024: 'PAHAWH HMONG NUMBER TEN BILLIONS',
93021: 'PAHAWH HMONG NUMBER TEN THOUSANDS',
93019: 'PAHAWH HMONG NUMBER TENS',
93025: 'PAHAWH HMONG NUMBER TRILLIONS',
93036: 'PAHAWH HMONG SIGN AV',
92985: 'PAHAWH HMONG SIGN CIM CHEEM',
93043: 'PAHAWH HMONG SIGN CIM CUAM TSHOOJ',
93042: 'PAHAWH HMONG SIGN CIM HAIS LUS NTOG NTOG',
93047: 'PAHAWH HMONG SIGN CIM NRES TOS',
93046: 'PAHAWH HMONG SIGN CIM PUB DAWB',
92997: 'PAHAWH HMONG SIGN CIM TSOV ROG',
93044: 'PAHAWH HMONG SIGN CIM TXWV',
93045: 'PAHAWH HMONG SIGN CIM TXWV CHWV',
93029: 'PAHAWH HMONG SIGN HLI',
93032: 'PAHAWH HMONG SIGN HNUB',
92995: 'PAHAWH HMONG SIGN IB YAM',
93040: 'PAHAWH HMONG SIGN LOS',
92993: 'PAHAWH HMONG SIGN MEEJ SUAB',
93038: 'PAHAWH HMONG SIGN MEEJ TSEEB',
93041: 'PAHAWH HMONG SIGN MUS',
93033: 'PAHAWH HMONG SIGN NQIG',
93035: 'PAHAWH HMONG SIGN NTUJ',
93039: 'PAHAWH HMONG SIGN TAU',
93030: 'PAHAWH HMONG SIGN THIRD-STAGE HLI',
93037: 'PAHAWH HMONG SIGN TXHEEJ CEEV',
92987: 'PAHAWH HMONG SIGN VOS FEEM',
93027: 'PAHAWH HMONG SIGN VOS LUB',
92994: 'PAHAWH HMONG SIGN VOS NRUA',
92992: 'PAHAWH HMONG SIGN VOS SEEV',
92986: 'PAHAWH HMONG SIGN VOS THIAB',
92983: 'PAHAWH HMONG SIGN VOS THOM',
92984: 'PAHAWH HMONG SIGN VOS TSHAB CEEB',
92996: 'PAHAWH HMONG SIGN XAUS',
93034: 'PAHAWH HMONG SIGN XIAB',
92991: 'PAHAWH HMONG SIGN XYEEM FAIB',
92988: 'PAHAWH HMONG SIGN XYEEM NTXIV',
92989: 'PAHAWH HMONG SIGN XYEEM RHO',
92990: 'PAHAWH HMONG SIGN XYEEM TOV',
93028: 'PAHAWH HMONG SIGN XYOO',
93031: 'PAHAWH HMONG SIGN ZWJ THAJ',
92954: 'PAHAWH HMONG VOWEL KAAB',
92955: 'PAHAWH HMONG VOWEL KAAV',
92950: 'PAHAWH HMONG VOWEL KAB',
92938: 'PAHAWH HMONG VOWEL KAIB',
92939: 'PAHAWH HMONG VOWEL KAIV',
92932: 'PAHAWH HMONG VOWEL KAUB',
92933: 'PAHAWH HMONG VOWEL KAUV',
92951: 'PAHAWH HMONG VOWEL KAV',
92942: 'PAHAWH HMONG VOWEL KAWB',
92943: 'PAHAWH HMONG VOWEL KAWV',
92936: 'PAHAWH HMONG VOWEL KEB',
92928: 'PAHAWH HMONG VOWEL KEEB',
92929: 'PAHAWH HMONG VOWEL KEEV',
92937: 'PAHAWH HMONG VOWEL KEV',
92948: 'PAHAWH HMONG VOWEL KIAB',
92949: 'PAHAWH HMONG VOWEL KIAV',
92930: 'PAHAWH HMONG VOWEL KIB',
92931: 'PAHAWH HMONG VOWEL KIV',
92946: 'PAHAWH HMONG VOWEL KOB',
92940: 'PAHAWH HMONG VOWEL KOOB',
92941: 'PAHAWH HMONG VOWEL KOOV',
92947: 'PAHAWH HMONG VOWEL KOV',
92944: 'PAHAWH HMONG VOWEL KUAB',
92945: 'PAHAWH HMONG VOWEL KUAV',
92934: 'PAHAWH HMONG VOWEL KUB',
92935: 'PAHAWH HMONG VOWEL KUV',
92952: 'PAHAWH HMONG VOWEL KWB',
92953: 'PAHAWH HMONG VOWEL KWV',
127796: 'PALM TREE',
67703: 'PALMYRENE LEFT-POINTING FLEURON',
67680: 'PALMYRENE LETTER ALEPH',
67696: 'PALMYRENE LETTER AYIN',
67681: 'PALMYRENE LETTER BETH',
67683: 'PALMYRENE LETTER DALETH',
67693: 'PALMYRENE LETTER FINAL NUN',
67682: 'PALMYRENE LETTER GIMEL',
67684: 'PALMYRENE LETTER HE',
67687: 'PALMYRENE LETTER HETH',
67690: 'PALMYRENE LETTER KAPH',
67691: 'PALMYRENE LETTER LAMEDH',
67692: 'PALMYRENE LETTER MEM',
67694: 'PALMYRENE LETTER NUN',
67697: 'PALMYRENE LETTER PE',
67699: 'PALMYRENE LETTER QOPH',
67700: 'PALMYRENE LETTER RESH',
67698: 'PALMYRENE LETTER SADHE',
67695: 'PALMYRENE LETTER SAMEKH',
67701: 'PALMYRENE LETTER SHIN',
67702: 'PALMYRENE LETTER TAW',
67688: 'PALMYRENE LETTER TETH',
67685: 'PALMYRENE LETTER WAW',
67689: 'PALMYRENE LETTER YODH',
67686: 'PALMYRENE LETTER ZAYIN',
67709: 'PALMYRENE NUMBER FIVE',
67708: 'PALMYRENE NUMBER FOUR',
67705: 'PALMYRENE NUMBER ONE',
67710: 'PALMYRENE NUMBER TEN',
67707: 'PALMYRENE NUMBER THREE',
67711: 'PALMYRENE NUMBER TWENTY',
67706: 'PALMYRENE NUMBER TWO',
67704: 'PALMYRENE RIGHT-POINTING FLEURON',
128060: 'PANDA FACE',
128206: 'PAPERCLIP',
983148: 'PARTIAL LINE BACKWARD',
983146: 'PARTIAL LINE DOWN',
983145: 'PARTIAL LINE FORWARD',
983149: 'PARTIAL LINE UP',
127881: 'PARTY POPPER',
128755: 'PASSENGER SHIP',
128706: 'PASSPORT CONTROL',
72437: 'PAU CIN HAU GLOTTAL STOP',
72440: 'PAU CIN HAU GLOTTAL STOP FINAL',
72432: 'PAU CIN HAU GLOTTAL STOP VARIANT',
72405: 'PAU CIN HAU LETTER A',
72396: 'PAU CIN HAU LETTER BA',
72397: 'PAU CIN HAU LETTER CA',
72404: 'PAU CIN HAU LETTER CHA',
72388: 'PAU CIN HAU LETTER DA',
72406: 'PAU CIN HAU LETTER E',
72403: 'PAU CIN HAU LETTER FA',
72413: 'PAU CIN HAU LETTER FINAL K',
72417: 'PAU CIN HAU LETTER FINAL L',
72415: 'PAU CIN HAU LETTER FINAL M',
72416: 'PAU CIN HAU LETTER FINAL N',
72419: 'PAU CIN HAU LETTER FINAL NG',
72412: 'PAU CIN HAU LETTER FINAL P',
72414: 'PAU CIN HAU LETTER FINAL T',
72418: 'PAU CIN HAU LETTER FINAL W',
72420: 'PAU CIN HAU LETTER FINAL Y',
72393: 'PAU CIN HAU LETTER GA',
72392: 'PAU CIN HAU LETTER HA',
72407: 'PAU CIN HAU LETTER I',
72411: 'PAU CIN HAU LETTER IA',
72385: 'PAU CIN HAU LETTER KA',
72394: 'PAU CIN HAU LETTER KHA',
72386: 'PAU CIN HAU LETTER LA',
72387: 'PAU CIN HAU LETTER MA',
72400: 'PAU CIN HAU LETTER NA',
72391: 'PAU CIN HAU LETTER NGA',
72408: 'PAU CIN HAU LETTER O',
72384: 'PAU CIN HAU LETTER PA',
72401: 'PAU CIN HAU LETTER PHA',
72402: 'PAU CIN HAU LETTER RA',
72395: 'PAU CIN HAU LETTER SA',
72398: 'PAU CIN HAU LETTER TA',
72399: 'PAU CIN HAU LETTER THA',
72409: 'PAU CIN HAU LETTER U',
72410: 'PAU CIN HAU LETTER UA',
72390: 'PAU CIN HAU LETTER VA',
72389: 'PAU CIN HAU LETTER ZA',
72436: 'PAU CIN HAU LOW-FALLING TONE',
72439: 'PAU CIN HAU LOW-FALLING TONE FINAL',
72435: 'PAU CIN HAU LOW-FALLING TONE LONG',
72438: 'PAU CIN HAU LOW-FALLING TONE LONG FINAL',
72431: 'PAU CIN HAU MID-LEVEL TONE',
72434: 'PAU CIN HAU MID-LEVEL TONE FINAL',
72433: 'PAU CIN HAU MID-LEVEL TONE LONG FINAL',
72422: 'PAU CIN HAU RISING TONE',
72425: 'PAU CIN HAU RISING TONE FINAL',
72421: 'PAU CIN HAU RISING TONE LONG',
72424: 'PAU CIN HAU RISING TONE LONG FINAL',
72423: 'PAU CIN HAU SANDHI GLOTTAL STOP',
72426: 'PAU CIN HAU SANDHI GLOTTAL STOP FINAL',
72428: 'PAU CIN HAU SANDHI TONE',
72430: 'PAU CIN HAU SANDHI TONE FINAL',
72427: 'PAU CIN HAU SANDHI TONE LONG',
72429: 'PAU CIN HAU SANDHI TONE LONG FINAL',
128062: 'PAW PRINTS',
983220: 'PDF',
983229: 'PDI',
127825: 'PEACH',
127824: 'PEAR',
128694: 'PEDESTRIAN',
128390: 'PEN OVER STAMPED ENVELOPE',
128039: 'PENGUIN',
128532: 'PENSIVE FACE',
9956: 'PENTAGRAM',
127917: 'PERFORMING ARTS',
128547: 'PERSEVERING FACE',
128583: 'PERSON BOWING DEEPLY',
128589: 'PERSON FROWNING',
128588: 'PERSON RAISING BOTH HANDS IN CELEBRATION',
128113: 'PERSON WITH BLOND HAIR',
128591: | |
field `Relative Humidity Fraction 14`"""
self["Relative Humidity Fraction 14"] = value
@property
def water_vapor_diffusion_resistance_factor_14(self):
"""field `Water Vapor Diffusion Resistance Factor 14`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 14`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `water_vapor_diffusion_resistance_factor_14` or None if not set
"""
return self["Water Vapor Diffusion Resistance Factor 14"]
@water_vapor_diffusion_resistance_factor_14.setter
def water_vapor_diffusion_resistance_factor_14(self, value=None):
"""Corresponds to IDD field `Water Vapor Diffusion Resistance Factor
14`"""
self["Water Vapor Diffusion Resistance Factor 14"] = value
@property
def relative_humidity_fraction_15(self):
"""field `Relative Humidity Fraction 15`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 15`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_15` or None if not set
"""
return self["Relative Humidity Fraction 15"]
@relative_humidity_fraction_15.setter
def relative_humidity_fraction_15(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 15`"""
self["Relative Humidity Fraction 15"] = value
@property
def water_vapor_diffusion_resistance_factor_15(self):
"""field `Water Vapor Diffusion Resistance Factor 15`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 15`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `water_vapor_diffusion_resistance_factor_15` or None if not set
"""
return self["Water Vapor Diffusion Resistance Factor 15"]
@water_vapor_diffusion_resistance_factor_15.setter
def water_vapor_diffusion_resistance_factor_15(self, value=None):
"""Corresponds to IDD field `Water Vapor Diffusion Resistance Factor
15`"""
self["Water Vapor Diffusion Resistance Factor 15"] = value
@property
def relative_humidity_fraction_16(self):
"""field `Relative Humidity Fraction 16`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 16`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_16` or None if not set
"""
return self["Relative Humidity Fraction 16"]
@relative_humidity_fraction_16.setter
def relative_humidity_fraction_16(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 16`"""
self["Relative Humidity Fraction 16"] = value
@property
def water_vapor_diffusion_resistance_factor_16(self):
"""field `Water Vapor Diffusion Resistance Factor 16`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 16`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `water_vapor_diffusion_resistance_factor_16` or None if not set
"""
return self["Water Vapor Diffusion Resistance Factor 16"]
@water_vapor_diffusion_resistance_factor_16.setter
def water_vapor_diffusion_resistance_factor_16(self, value=None):
"""Corresponds to IDD field `Water Vapor Diffusion Resistance Factor
16`"""
self["Water Vapor Diffusion Resistance Factor 16"] = value
@property
def relative_humidity_fraction_17(self):
"""field `Relative Humidity Fraction 17`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 17`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_17` or None if not set
"""
return self["Relative Humidity Fraction 17"]
@relative_humidity_fraction_17.setter
def relative_humidity_fraction_17(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 17`"""
self["Relative Humidity Fraction 17"] = value
@property
def water_vapor_diffusion_resistance_factor_17(self):
"""field `Water Vapor Diffusion Resistance Factor 17`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 17`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `water_vapor_diffusion_resistance_factor_17` or None if not set
"""
return self["Water Vapor Diffusion Resistance Factor 17"]
@water_vapor_diffusion_resistance_factor_17.setter
def water_vapor_diffusion_resistance_factor_17(self, value=None):
"""Corresponds to IDD field `Water Vapor Diffusion Resistance Factor
17`"""
self["Water Vapor Diffusion Resistance Factor 17"] = value
@property
def relative_humidity_fraction_18(self):
"""field `Relative Humidity Fraction 18`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 18`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_18` or None if not set
"""
return self["Relative Humidity Fraction 18"]
@relative_humidity_fraction_18.setter
def relative_humidity_fraction_18(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 18`"""
self["Relative Humidity Fraction 18"] = value
@property
def water_vapor_diffusion_resistance_factor_18(self):
"""field `Water Vapor Diffusion Resistance Factor 18`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 18`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `water_vapor_diffusion_resistance_factor_18` or None if not set
"""
return self["Water Vapor Diffusion Resistance Factor 18"]
@water_vapor_diffusion_resistance_factor_18.setter
def water_vapor_diffusion_resistance_factor_18(self, value=None):
"""Corresponds to IDD field `Water Vapor Diffusion Resistance Factor
18`"""
self["Water Vapor Diffusion Resistance Factor 18"] = value
@property
def relative_humidity_fraction_19(self):
"""field `Relative Humidity Fraction 19`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 19`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_19` or None if not set
"""
return self["Relative Humidity Fraction 19"]
@relative_humidity_fraction_19.setter
def relative_humidity_fraction_19(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 19`"""
self["Relative Humidity Fraction 19"] = value
@property
def water_vapor_diffusion_resistance_factor_19(self):
"""field `Water Vapor Diffusion Resistance Factor 19`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 19`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `water_vapor_diffusion_resistance_factor_19` or None if not set
"""
return self["Water Vapor Diffusion Resistance Factor 19"]
@water_vapor_diffusion_resistance_factor_19.setter
def water_vapor_diffusion_resistance_factor_19(self, value=None):
"""Corresponds to IDD field `Water Vapor Diffusion Resistance Factor
19`"""
self["Water Vapor Diffusion Resistance Factor 19"] = value
@property
def relative_humidity_fraction_20(self):
"""field `Relative Humidity Fraction 20`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 20`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_20` or None if not set
"""
return self["Relative Humidity Fraction 20"]
@relative_humidity_fraction_20.setter
def relative_humidity_fraction_20(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 20`"""
self["Relative Humidity Fraction 20"] = value
@property
def water_vapor_diffusion_resistance_factor_20(self):
"""field `Water Vapor Diffusion Resistance Factor 20`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 20`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `water_vapor_diffusion_resistance_factor_20` or None if not set
"""
return self["Water Vapor Diffusion Resistance Factor 20"]
@water_vapor_diffusion_resistance_factor_20.setter
def water_vapor_diffusion_resistance_factor_20(self, value=None):
"""Corresponds to IDD field `Water Vapor Diffusion Resistance Factor
20`"""
self["Water Vapor Diffusion Resistance Factor 20"] = value
@property
def relative_humidity_fraction_21(self):
"""field `Relative Humidity Fraction 21`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 21`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_21` or None if not set
"""
return self["Relative Humidity Fraction 21"]
@relative_humidity_fraction_21.setter
def relative_humidity_fraction_21(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 21`"""
self["Relative Humidity Fraction 21"] = value
@property
def water_vapor_diffusion_resistance_factor_21(self):
"""field `Water Vapor Diffusion Resistance Factor 21`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 21`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `water_vapor_diffusion_resistance_factor_21` or None if not set
"""
return self["Water Vapor Diffusion Resistance Factor 21"]
@water_vapor_diffusion_resistance_factor_21.setter
def water_vapor_diffusion_resistance_factor_21(self, value=None):
"""Corresponds to IDD field `Water Vapor Diffusion Resistance Factor
21`"""
self["Water Vapor Diffusion Resistance Factor 21"] = value
@property
def relative_humidity_fraction_22(self):
"""field `Relative Humidity Fraction 22`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 22`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_22` or None if not set
"""
return self["Relative Humidity Fraction 22"]
@relative_humidity_fraction_22.setter
def relative_humidity_fraction_22(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 22`"""
self["Relative Humidity Fraction 22"] = value
@property
def water_vapor_diffusion_resistance_factor_22(self):
"""field `Water Vapor Diffusion Resistance Factor 22`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 22`
Raises:
ValueError: if | |
# in best case integrated into netzob later on
# netzob/src/netzob/Inference/Vocabulary/PayloadFinder.py or alike
# netzob import
from netzob.Common.Utils.Decorators import typeCheck
from netzob.Inference.Vocabulary.Format import Format
from netzob.Model.Vocabulary.Messages.RawMessage import RawMessage
from netzob.Model.Vocabulary.Messages.L2NetworkMessage import L2NetworkMessage
# internal import
from WithPayloadMessage import WithPayloadMessage
# external import
from scapy.all import Ether, IP, IPv6, IPv46, load_layer
class PayloadFinder(object):
"""This utility class finds known protocols that are included in a payload
to define the actual payload of a message that should get reversed and to
get additional context information.
This might be handy in cases where the protocol is on a low layer (i.e.
layer 2) and it is known that internet traffic (i.e. HTTP GET requests) are
encapsulated. This class should identify and cut off this underlying traffic.
"""
# TODO example usage above
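# A minimal usage sketch (illustrative only; PCAPImporter and the capture file
# name are assumptions about the surrounding netzob workflow):
#
#   from netzob.all import PCAPImporter
#   messages = PCAPImporter.readFile("capture.pcap").values()
#   finder = PayloadFinder(known_IPs={"192.168.1.10"})
#   offsets, big_messages = finder._offsetCandidates(messages)
#   good_offsets = finder._testOffsets(offsets, big_messages, debug=True)
#   parsed = finder._parsePayloads(messages, good_offsets)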
def __init__(self, known_IPs=None, known_MACs=None):
# avoid shared mutable default arguments; each instance gets its own set
self.known_IPs = set(known_IPs) if known_IPs is not None else set()
self.known_MACs = set(known_MACs) if known_MACs is not None else set()
@typeCheck(list, float)
def _offsetCandidates(self, messages, separator=0.2):
"""Create a dict of possible payload offsets based on small messages and return list of
biggest messages.
We cluster all messages based on their size. Assuming that smaller messages probably have
no payload in it, we might be lucky and find the typical header offset (=the size of small
messages). We should try these offset candidates for the bigger messages that actually
contain a payload. Thus, we return the biggest messages and a dict of possible offsets for
the payload of this big messages.
The separator defines where we separate "big" and "small" messages for this metric (e.g.
0.2 will return the biggest 20% of messages and return the sizes of the smallest 80% of messages as
offsets).
"""
candidates = {}
testMessages = []
# create cluster based on message sizes
cluster = Format.clusterBySize(messages)
# create dict {offset candidate: occurrences} of all sizes below separator
for i in range(0,int(separator * len(cluster))):
_, candidate = cluster[i].name.split('_')  # discard the name prefix, keep the message size
candidates[int(candidate)] = 0
# create list of all messages of size above separator
for i in range(int(separator * len(cluster)), len(cluster)):
testMessages.extend(cluster[i].messages)
return candidates, testMessages
@typeCheck(dict,list,int,bool,bool)
def _testOffsets(self, offsets, messages, msgsToTest=50, debug=False, omit_ether=False):
# TODO A description would be great
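# Overview (descriptive comment): load scapy's HTTP/DNS/TLS/DHCP dissectors, then
# call runTest() with progressively weaker evidence: first application-layer
# protocols, then IP/IPv6 restricted to known addresses, then Ethernet restricted
# to known MACs. Every offset that ever yields a successful parse is counted in
# `offsets`; the method finally returns the offsets that worked at least once.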
load_layer("http")
load_layer("dns")
load_layer("tls")
load_layer("dhcp")
def runTest(msgs, protos):
"""Try for all given messages if any protocol included in protos can be found.
At first we try the known/given offsets, beginning by the most used. If these fail,
we try all possible offsets (bytewise). To limit processing time, we only try for
msgsToTest messages.
"""
# assume all message have no payload, remove below if payload found
nopayload = msgs.copy()
# counter for messages in a row for which we did not found an offset
notFoundCnt = 0
def tryAndStore(packet):
"""Check if any protocol of protos is included in packet.
If new IPs or MACs are found, store these in self.known_IPs/self.known_MACs
Returns True if a protocol was found, False if none was found or a false positive
is likely
"""
# check for protocols in protos
if any(proto in packet for proto in protos):
# only trust layer3/2 parsing if known IP/MAC is involved
if IP in protos and IP in packet and \
not packet[IP].src in self.known_IPs and \
not packet[IP].dst in self.known_IPs:
return False # maybe a false positive
if IPv6 in protos and IPv6 in packet and \
not packet[IPv6].src in self.known_IPs and \
not packet[IPv6].dst in self.known_IPs:
return False # maybe a false positive
if Ether in protos and \
not packet[Ether].src in self.known_MACs and \
not packet[Ether].dst in self.known_MACs:
return False # maybe a false positive
# build sets of known IPs and MACs based on found payloads
if IP in packet:
self.known_IPs.add(packet[IP].src)
self.known_IPs.add(packet[IP].dst)
if IPv6 in packet:
self.known_IPs.add(packet[IPv6].src)
self.known_IPs.add(packet[IPv6].dst)
if Ether in packet:
self.known_MACs.add(packet[Ether].src)
self.known_MACs.add(packet[Ether].dst)
return True # we found a protocol and/or a new IP/MAC
else:
return False # we did not find a protocol
# start with biggest messages
for m in sorted(msgs, key=lambda x: len(x.data), reverse=True):
not_found = True
# most used offsets are tried first
for offset in sorted(offsets, key=offsets.get, reverse=True):
# create scapy packet based on current offset
# FIXME you could try IP only and test Ether if this was successful to avoid this flag and thus generalize this function
if omit_ether:
if offset > len(m.data)-40: # at least 40 bytes of IPv6 Header
break # next message please!
packet = IPv46(m.data[offset:])
else:
if offset > len(m.data)-14: # at least 14 bytes of Ethernet Header
break # next message please!
packet = Ether(m.data[offset:])
if tryAndStore(packet):
offsets[offset]+=1
nopayload.remove(m) # remove from nopayload list
not_found = False
break # good offset found, next message please!
else:
continue # did not find a protocol, try next offset
# if known offsets did not work, try all possible offset variants instead
if not_found is True:
for offset in range(1, len(m.data)):
if offset not in offsets:
# create scapy packet based on current offset
if omit_ether:
if offset > len(m.data)-40: # at least 40 bytes of IPv6 Header
break # next message please!
packet = IPv46(m.data[offset:])
else:
if offset > len(m.data)-14: # at least 14 bytes of Ethernet Header
break # next message please!
packet = Ether(m.data[offset:])
if tryAndStore(packet):
offsets[offset] = 1
nopayload.remove(m) # remove from nopayload list
not_found = False
break # good offset found, next message please!
else:
continue # did not find a protocol, try next offset
if not_found:
notFoundCnt += 1
if notFoundCnt > msgsToTest:
break
return nopayload
def sortOffsets(offsets, clean=False):
sorted_offsets = {}
for offset in sorted(offsets, key=offsets.get, reverse=True):
if clean:
sorted_offsets[offset] = 0
else:
sorted_offsets[offset] = offsets[offset]
return sorted_offsets
# try offsets for all messages to find payload, remember messages without payload
if debug:
print("\nTesting possible offsets for payloads...")
nopayload = runTest(messages, [DNS, HTTP, TLS, DHCP])
# try messages without payload again, now for layer3 with known IPs
nopayload = runTest(nopayload, [IP, IPv6])
# try messages without payload again, now for layer2 with known MACs
if not omit_ether:
nopayload = runTest(nopayload, [Ether])
# most used offset first on next run
offsets = sortOffsets(offsets, clean=True)
# try all messages again, now for layer2 with known MACs only
if omit_ether:
nopayload = runTest(messages, [IP, IPv6])
else:
nopayload = runTest(messages, [Ether])
offsets = sortOffsets(offsets)
# announce results
if debug:
if offsets:
print("\nPayload offsets found:")
self._printOffsets(offsets)
else:
print("\nNo payload offsets found!")
if nopayload:
print("\nThere was NO payload found for {} messages.".format(len(nopayload)))
# create simple list of successful offsets instead of the current dict
goodOffsets = []
for offset,findings in offsets.items():
if findings > 0:
goodOffsets.append(int(offset))
return goodOffsets
def _parsePayloads(self, messages, offsets, omit_ether=False):
"""Uses the offsets to parse messages
"""
# TODO more explanation please!
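# Explanation (descriptive comment): each message is wrapped in a WithPayloadMessage;
# the known-good offsets are tried in order, and an offset is accepted only if the
# scapy parse at that offset involves a known MAC or IP address. On success the
# parsed packet and the raw payload bytes are stored on the new message and the
# payload is cut off of its data; messages with no matching offset keep their data unchanged.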
parsed_messages = []
for m in messages:
if isinstance(m, L2NetworkMessage):
new_m = WithPayloadMessage(m.data, m.date, m.l2Protocol)
else:
new_m = WithPayloadMessage(m.data, m.date)
new_m.metadata = m.metadata
for offset in offsets:
if omit_ether:
if offset > len(m.data)-40: # at least 40 bytes of IPv6 Header
break # next message please!
packet = IPv46(m.data[offset:])
else:
if offset > len(m.data)-14: # at least 14 bytes of Ethernet Header
break # next message please!
packet = Ether(m.data[offset:])
offset_is_fine = False
# check parsed MAC addresses to prevent false positives
if Ether in packet:
if packet[Ether].src in self.known_MACs or packet[Ether].dst in self.known_MACs:
offset_is_fine = True
# check parsed IP addresses to prevent false positives
if IP in packet:
if packet[IP].src in self.known_IPs or packet[IP].dst in self.known_IPs:
offset_is_fine = True
if IPv6 in packet:
if packet[IPv6].src in self.known_IPs or packet[IPv6].dst in self.known_IPs:
offset_is_fine = True
if offset_is_fine: # parsing seems to be fine
new_m.payload = packet # store parsed scapy packet
new_m.payload_data = m.data[offset:] # store bytes
new_m.data = m.data[:offset] # cut off payload for good
break # offset found, stop trying others
else:
continue # try another offsets
parsed_messages.append(new_m)
return parsed_messages
def _printOffsets(self, offsets, gt0=True):
# only print offsets with values > 0
if gt0:
print("\n<offset>: <findings>")
for offset, occurrences in offsets.items():
if occurrences > 0:
print(str(offset) + ': ' + str(occurrences))
# print all offsets (you do not really need | |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._phone_numbers_client_enums import *
class AcquiredPhoneNumber(msrest.serialization.Model):
"""Represents an acquired phone number.
All required parameters must be populated in order to send to Azure.
:param id: Required. The id of the phone number, e.g. 11234567890.
:type id: str
:param phone_number: Required. String of the E.164 format of the phone number, e.g.
+11234567890.
:type phone_number: str
:param country_code: Required. The ISO 3166-2 code of the phone number's country, e.g. US.
:type country_code: str
:param phone_number_type: Required. The phone number's type, e.g. Geographic, TollFree.
Possible values include: "geographic", "tollFree".
:type phone_number_type: str or ~azure.communication.phonenumbers.models.PhoneNumberType
:param capabilities: Required. Capabilities of a phone number.
:type capabilities: ~azure.communication.phonenumbers.models.PhoneNumberCapabilities
:param assignment_type: Required. The assignment type of the phone number. A phone number can
be assigned to a person, or to an application. Possible values include: "person",
"application".
:type assignment_type: str or
~azure.communication.phonenumbers.models.PhoneNumberAssignmentType
:param purchase_date: Required. The date and time that the phone number was purchased.
:type purchase_date: ~datetime.datetime
:param cost: Required. The incurred cost for a single phone number.
:type cost: ~azure.communication.phonenumbers.models.PhoneNumberCost
"""
_validation = {
'id': {'required': True},
'phone_number': {'required': True},
'country_code': {'required': True},
'phone_number_type': {'required': True},
'capabilities': {'required': True},
'assignment_type': {'required': True},
'purchase_date': {'required': True},
'cost': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'phone_number': {'key': 'phoneNumber', 'type': 'str'},
'country_code': {'key': 'countryCode', 'type': 'str'},
'phone_number_type': {'key': 'phoneNumberType', 'type': 'str'},
'capabilities': {'key': 'capabilities', 'type': 'PhoneNumberCapabilities'},
'assignment_type': {'key': 'assignmentType', 'type': 'str'},
'purchase_date': {'key': 'purchaseDate', 'type': 'iso-8601'},
'cost': {'key': 'cost', 'type': 'PhoneNumberCost'},
}
def __init__(
self,
*,
id: str,
phone_number: str,
country_code: str,
phone_number_type: Union[str, "PhoneNumberType"],
capabilities: "PhoneNumberCapabilities",
assignment_type: Union[str, "PhoneNumberAssignmentType"],
purchase_date: datetime.datetime,
cost: "PhoneNumberCost",
**kwargs
):
super(AcquiredPhoneNumber, self).__init__(**kwargs)
self.id = id
self.phone_number = phone_number
self.country_code = country_code
self.phone_number_type = phone_number_type
self.capabilities = capabilities
self.assignment_type = assignment_type
self.purchase_date = purchase_date
self.cost = cost
class AcquiredPhoneNumbers(msrest.serialization.Model):
"""The list of acquired phone numbers.
All required parameters must be populated in order to send to Azure.
:param phone_numbers: Required. Represents a list of phone numbers.
:type phone_numbers: list[~azure.communication.phonenumbers.models.AcquiredPhoneNumber]
:param next_link: Represents the URL link to the next page of phone number results.
:type next_link: str
"""
_validation = {
'phone_numbers': {'required': True},
}
_attribute_map = {
'phone_numbers': {'key': 'phoneNumbers', 'type': '[AcquiredPhoneNumber]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
phone_numbers: List["AcquiredPhoneNumber"],
next_link: Optional[str] = None,
**kwargs
):
super(AcquiredPhoneNumbers, self).__init__(**kwargs)
self.phone_numbers = phone_numbers
self.next_link = next_link
class CommunicationError(msrest.serialization.Model):
"""The Communication Services error.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param code: Required. The error code.
:type code: str
:param message: Required. The error message.
:type message: str
:ivar target: The error target.
:vartype target: str
:ivar details: Further details about specific errors that led to this error.
:vartype details: list[~azure.communication.phonenumbers.models.CommunicationError]
:ivar inner_error: The inner error if any.
:vartype inner_error: ~azure.communication.phonenumbers.models.CommunicationError
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
'target': {'readonly': True},
'details': {'readonly': True},
'inner_error': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CommunicationError]'},
'inner_error': {'key': 'innererror', 'type': 'CommunicationError'},
}
def __init__(
self,
*,
code: str,
message: str,
**kwargs
):
super(CommunicationError, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = None
self.details = None
self.inner_error = None
class CommunicationErrorResponse(msrest.serialization.Model):
"""The Communication Services error.
All required parameters must be populated in order to send to Azure.
:param error: Required. The Communication Services error.
:type error: ~azure.communication.phonenumbers.models.CommunicationError
"""
_validation = {
'error': {'required': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'CommunicationError'},
}
def __init__(
self,
*,
error: "CommunicationError",
**kwargs
):
super(CommunicationErrorResponse, self).__init__(**kwargs)
self.error = error
class PhoneNumberCapabilities(msrest.serialization.Model):
"""Capabilities of a phone number.
All required parameters must be populated in order to send to Azure.
:param calling: Required. Capability value for calling. Possible values include: "none",
"inbound", "outbound", "inbound+outbound".
:type calling: str or ~azure.communication.phonenumbers.models.PhoneNumberCapabilityType
:param sms: Required. Capability value for SMS. Possible values include: "none", "inbound",
"outbound", "inbound+outbound".
:type sms: str or ~azure.communication.phonenumbers.models.PhoneNumberCapabilityType
"""
_validation = {
'calling': {'required': True},
'sms': {'required': True},
}
_attribute_map = {
'calling': {'key': 'calling', 'type': 'str'},
'sms': {'key': 'sms', 'type': 'str'},
}
def __init__(
self,
*,
calling: Union[str, "PhoneNumberCapabilityType"],
sms: Union[str, "PhoneNumberCapabilityType"],
**kwargs
):
super(PhoneNumberCapabilities, self).__init__(**kwargs)
self.calling = calling
self.sms = sms
class PhoneNumberCapabilitiesRequest(msrest.serialization.Model):
"""Capabilities of a phone number.
:param calling: Capability value for calling. Possible values include: "none", "inbound",
"outbound", "inbound+outbound".
:type calling: str or ~azure.communication.phonenumbers.models.PhoneNumberCapabilityType
:param sms: Capability value for SMS. Possible values include: "none", "inbound", "outbound",
"inbound+outbound".
:type sms: str or ~azure.communication.phonenumbers.models.PhoneNumberCapabilityType
"""
_attribute_map = {
'calling': {'key': 'calling', 'type': 'str'},
'sms': {'key': 'sms', 'type': 'str'},
}
def __init__(
self,
*,
calling: Optional[Union[str, "PhoneNumberCapabilityType"]] = None,
sms: Optional[Union[str, "PhoneNumberCapabilityType"]] = None,
**kwargs
):
super(PhoneNumberCapabilitiesRequest, self).__init__(**kwargs)
self.calling = calling
self.sms = sms
class PhoneNumberCost(msrest.serialization.Model):
"""The incurred cost for a single phone number.
All required parameters must be populated in order to send to Azure.
:param amount: Required. The cost amount.
:type amount: float
:param currency_code: Required. The ISO 4217 currency code for the cost amount, e.g. USD.
:type currency_code: str
:param billing_frequency: Required. The frequency with which the cost gets billed. Possible
values include: "monthly".
:type billing_frequency: str or ~azure.communication.phonenumbers.models.BillingFrequency
"""
_validation = {
'amount': {'required': True},
'currency_code': {'required': True},
'billing_frequency': {'required': True},
}
_attribute_map = {
'amount': {'key': 'amount', 'type': 'float'},
'currency_code': {'key': 'currencyCode', 'type': 'str'},
'billing_frequency': {'key': 'billingFrequency', 'type': 'str'},
}
def __init__(
self,
*,
amount: float,
currency_code: str,
billing_frequency: Union[str, "BillingFrequency"],
**kwargs
):
super(PhoneNumberCost, self).__init__(**kwargs)
self.amount = amount
self.currency_code = currency_code
self.billing_frequency = billing_frequency
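# Illustrative construction of the models defined above (all values are made up
# for the example, not taken from any real Azure resource):
#
#   capabilities = PhoneNumberCapabilities(calling="inbound+outbound", sms="outbound")
#   cost = PhoneNumberCost(amount=1.0, currency_code="USD", billing_frequency="monthly")
#   acquired = AcquiredPhoneNumber(
#       id="11234567890", phone_number="+11234567890", country_code="US",
#       phone_number_type="tollFree", capabilities=capabilities,
#       assignment_type="application",
#       purchase_date=datetime.datetime(2021, 1, 1), cost=cost)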
class PhoneNumberOperation(msrest.serialization.Model):
"""Long running operation.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param status: Required. Status of operation. Possible values include: "notStarted", "running",
"succeeded", "failed".
:type status: str or ~azure.communication.phonenumbers.models.PhoneNumberOperationStatus
:param resource_location: URL for retrieving the result of the operation, if any.
:type resource_location: str
:param created_date_time: Required. The date that the operation was created.
:type created_date_time: ~datetime.datetime
:param error: The Communication Services error.
:type error: ~azure.communication.phonenumbers.models.CommunicationError
:param id: Required. Id of operation.
:type id: str
:param operation_type: Required. The type of operation, e.g. Search. Possible values include:
"purchase", "releasePhoneNumber", "search", "updatePhoneNumberCapabilities".
:type operation_type: str or ~azure.communication.phonenumbers.models.PhoneNumberOperationType
:ivar last_action_date_time: The most recent date that the operation was changed.
:vartype last_action_date_time: ~datetime.datetime
"""
_validation = {
'status': {'required': True},
'created_date_time': {'required': True},
'id': {'required': True},
'operation_type': {'required': True},
'last_action_date_time': {'readonly': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'resource_location': {'key': 'resourceLocation', 'type': 'str'},
'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
'error': {'key': 'error', 'type': 'CommunicationError'},
'id': {'key': 'id', 'type': 'str'},
'operation_type': {'key': 'operationType', 'type': 'str'},
'last_action_date_time': {'key': 'lastActionDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
*,
status: Union[str, "PhoneNumberOperationStatus"],
created_date_time: datetime.datetime,
id: str,
operation_type: Union[str, "PhoneNumberOperationType"],
resource_location: Optional[str] = None,
error: Optional["CommunicationError"] = None,
**kwargs
):
super(PhoneNumberOperation, self).__init__(**kwargs)
self.status = status
self.resource_location = resource_location
self.created_date_time = created_date_time
self.error = error
self.id = id
self.operation_type = operation_type
self.last_action_date_time = None
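        # Note: last_action_date_time is declared readonly above, so it is initialised to None here
        # and is only populated when a service response is deserialised; any value assigned by the
        # caller is ignored when sending a request (see the class docstring).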
class PhoneNumberPurchaseRequest(msrest.serialization.Model):
"""The phone number search purchase request.
:param search_id: The search id.
:type search_id: str
"""
_attribute_map = {
'search_id': {'key': 'searchId', 'type': 'str'},
}
def __init__(
self,
*,
search_id: Optional[str] = None,
**kwargs
):
super(PhoneNumberPurchaseRequest, self).__init__(**kwargs)
self.search_id = search_id
class PhoneNumberSearchRequest(msrest.serialization.Model):
"""Represents a phone number search request to find phone numbers. Found phone numbers are temporarily held for a following purchase.
All required parameters must be populated in order to send to Azure.
:param phone_number_type: Required. The type of phone numbers to search for, e.g. geographic,
     or tollFree. Possible
0.6848,
't3.large': 0.162,
't3.medium': 0.0856,
't3.micro': 0.026,
't3.nano': 0.013,
't3.small': 0.052,
't3.xlarge': 0.3424,
'x1.16xlarge': 15.949,
'x1.32xlarge': 31.898}},
'us-east-1': {'linux': {'a1.2xlarge': 0.204,
'a1.4xlarge': 0.408,
'a1.large': 0.051,
'a1.medium': 0.0255,
'a1.xlarge': 0.102,
'c1.medium': 0.13,
'c1.xlarge': 0.52,
'c3.2xlarge': 0.42,
'c3.4xlarge': 0.84,
'c3.8xlarge': 1.68,
'c3.large': 0.105,
'c3.xlarge': 0.21,
'c4.2xlarge': 0.398,
'c4.4xlarge': 0.796,
'c4.8xlarge': 1.591,
'c4.large': 0.1,
'c4.xlarge': 0.199,
'c5.18xlarge': 3.06,
'c5.2xlarge': 0.34,
'c5.4xlarge': 0.68,
'c5.9xlarge': 1.53,
'c5.large': 0.085,
'c5.xlarge': 0.17,
'c5d.18xlarge': 3.456,
'c5d.2xlarge': 0.384,
'c5d.4xlarge': 0.768,
'c5d.9xlarge': 1.728,
'c5d.large': 0.096,
'c5d.xlarge': 0.192,
'c5n.18xlarge': 3.888,
'c5n.2xlarge': 0.432,
'c5n.4xlarge': 0.864,
'c5n.9xlarge': 1.944,
'c5n.large': 0.108,
'c5n.xlarge': 0.216,
'cc2.8xlarge': 2.0,
'cr1.8xlarge': 3.5,
'd2.2xlarge': 1.38,
'd2.4xlarge': 2.76,
'd2.8xlarge': 5.52,
'd2.xlarge': 0.69,
'f1.16xlarge': 13.2,
'f1.2xlarge': 1.65,
'f1.4xlarge': 3.3,
'g2.2xlarge': 0.65,
'g2.8xlarge': 2.6,
'g3.16xlarge': 4.56,
'g3.4xlarge': 1.14,
'g3.8xlarge': 2.28,
'g3s.xlarge': 0.75,
'h1.16xlarge': 3.744,
'h1.2xlarge': 0.468,
'h1.4xlarge': 0.936,
'h1.8xlarge': 1.872,
'hs1.8xlarge': 4.6,
'i2.2xlarge': 1.705,
'i2.4xlarge': 3.41,
'i2.8xlarge': 6.82,
'i2.xlarge': 0.853,
'i3.16xlarge': 4.992,
'i3.2xlarge': 0.624,
'i3.4xlarge': 1.248,
'i3.8xlarge': 2.496,
'i3.large': 0.156,
'i3.metal': 4.992,
'i3.xlarge': 0.312,
'm1.large': 0.175,
'm1.medium': 0.087,
'm1.small': 0.044,
'm1.xlarge': 0.35,
'm2.2xlarge': 0.49,
'm2.4xlarge': 0.98,
'm2.xlarge': 0.245,
'm3.2xlarge': 0.532,
'm3.large': 0.133,
'm3.medium': 0.067,
'm3.xlarge': 0.266,
'm4.10xlarge': 2.0,
'm4.16xlarge': 3.2,
'm4.2xlarge': 0.4,
'm4.4xlarge': 0.8,
'm4.large': 0.1,
'm4.xlarge': 0.2,
'm5.12xlarge': 2.304,
'm5.24xlarge': 4.608,
'm5.2xlarge': 0.384,
'm5.4xlarge': 0.768,
'm5.large': 0.096,
'm5.xlarge': 0.192,
'm5a.12xlarge': 2.064,
'm5a.24xlarge': 4.128,
'm5a.2xlarge': 0.344,
'm5a.4xlarge': 0.688,
'm5a.large': 0.086,
'm5a.xlarge': 0.172,
'm5d.12xlarge': 2.712,
'm5d.24xlarge': 5.424,
'm5d.2xlarge': 0.452,
'm5d.4xlarge': 0.904,
'm5d.large': 0.113,
'm5d.xlarge': 0.226,
'p2.16xlarge': 14.4,
'p2.8xlarge': 7.2,
'p2.xlarge': 0.9,
'p3.16xlarge': 24.48,
'p3.2xlarge': 3.06,
'p3.8xlarge': 12.24,
'p3dn.24xlarge': 31.212,
'r3.2xlarge': 0.665,
'r3.4xlarge': 1.33,
'r3.8xlarge': 2.66,
'r3.large': 0.166,
'r3.xlarge': 0.333,
'r4.16xlarge': 4.256,
'r4.2xlarge': 0.532,
'r4.4xlarge': 1.064,
'r4.8xlarge': 2.128,
'r4.large': 0.133,
'r4.xlarge': 0.266,
'r5.12xlarge': 3.024,
'r5.24xlarge': 6.048,
'r5.2xlarge': 0.504,
'r5.4xlarge': 1.008,
'r5.large': 0.126,
'r5.xlarge': 0.252,
'r5a.12xlarge': 2.712,
'r5a.24xlarge': 5.424,
'r5a.2xlarge': 0.452,
'r5a.4xlarge': 0.904,
'r5a.large': 0.113,
'r5a.xlarge': 0.226,
'r5d.12xlarge': 3.456,
'r5d.24xlarge': 6.912,
'r5d.2xlarge': 0.576,
'r5d.4xlarge': 1.152,
'r5d.large': 0.144,
'r5d.xlarge': 0.288,
't1.micro': 0.02,
't2.2xlarge': 0.3712,
't2.large': 0.0928,
't2.medium': 0.0464,
't2.micro': 0.0116,
't2.nano': 0.0058,
't2.small': 0.023,
't2.xlarge': 0.1856,
't3.2xlarge': 0.3328,
't3.large': 0.0832,
't3.medium': 0.0416,
't3.micro': 0.0104,
't3.nano': 0.0052,
't3.small': 0.0208,
't3.xlarge': 0.1664,
'x1.16xlarge': 6.669,
'x1.32xlarge': 13.338,
'x1e.16xlarge': 13.344,
'x1e.2xlarge': 1.668,
'x1e.32xlarge': 26.688,
'x1e.4xlarge': 3.336,
'x1e.8xlarge': 6.672,
'x1e.xlarge': 0.834,
'z1d.12xlarge': 4.464,
'z1d.2xlarge': 0.744,
'z1d.3xlarge': 1.116,
'z1d.6xlarge': 2.232,
'z1d.large': 0.186,
'z1d.xlarge': 0.372},
'windows': {'c1.medium': 0.21,
'c1.xlarge': 0.84,
'c3.2xlarge': 0.752,
'c3.4xlarge': 1.504,
'c3.8xlarge': 3.008,
'c3.large': 0.188,
'c3.xlarge': 0.376,
'c4.2xlarge': 0.766,
'c4.4xlarge': 1.532,
'c4.8xlarge': 3.091,
'c4.large': 0.192,
'c4.xlarge': 0.383,
'c5.18xlarge': 6.372,
'c5.2xlarge': 0.708,
'c5.4xlarge': 1.416,
'c5.9xlarge': 3.186,
'c5.large': 0.177,
'c5.xlarge': 0.354,
'c5d.18xlarge': 6.768,
'c5d.2xlarge': 0.752,
'c5d.4xlarge': 1.504,
'c5d.9xlarge': 3.384,
'c5d.large': 0.188,
'c5d.xlarge': 0.376,
'c5n.18xlarge': 7.2,
'c5n.2xlarge': 0.8,
'c5n.4xlarge': 1.6,
'c5n.9xlarge': 3.6,
'c5n.large': 0.2,
'c5n.xlarge': 0.4,
'cc2.8xlarge': 2.57,
'cr1.8xlarge': 3.831,
'd2.2xlarge': 1.601,
'd2.4xlarge': 3.062,
'd2.8xlarge': 6.198,
'd2.xlarge': 0.821,
'g2.2xlarge': 0.767,
'g2.8xlarge': 2.878,
'g3.16xlarge': 7.504,
'g3.4xlarge': 1.876,
'g3.8xlarge': 3.752,
'g3s.xlarge': 0.934,
'h1.16xlarge': 6.688,
'h1.2xlarge': 0.836,
'h1.4xlarge': 1.672,
'h1.8xlarge': 3.344,
'hs1.8xlarge': 4.931,
'i2.2xlarge': 1.946,
'i2.4xlarge': 3.891,
'i2.8xlarge': 7.782,
'i2.xlarge': 0.973,
'i3.16xlarge': 7.936,
'i3.2xlarge': 0.992,
'i3.4xlarge': 1.984,
'i3.8xlarge': 3.968,
'i3.large': 0.248,
'i3.metal': 7.936,
'i3.xlarge': 0.496,
'm1.large': 0.299,
'm1.medium': 0.149,
'm1.small': 0.075,
'm1.xlarge': 0.598,
'm2.2xlarge': 0.69,
'm2.4xlarge': 1.38,
'm2.xlarge': 0.345,
'm3.2xlarge': 1.036,
'm3.large': 0.259,
'm3.medium': 0.13,
'm3.xlarge': 0.518,
'm4.10xlarge': 3.84,
'm4.16xlarge': 6.144,
'm4.2xlarge': 0.768,
'm4.4xlarge': 1.536,
'm4.large': 0.192,
'm4.xlarge': 0.384,
'm5.12xlarge': 4.512,
'm5.24xlarge': 9.024,
'm5.2xlarge': 0.752,
'm5.4xlarge': 1.504,
'm5.large': 0.188,
'm5.xlarge': 0.376,
'm5a.12xlarge': 4.272,
'm5a.24xlarge': 8.544,
'm5a.2xlarge': 0.712,
'm5a.4xlarge': 1.424,
'm5a.large': 0.178,
'm5a.xlarge': 0.356,
'm5d.12xlarge': 4.92,
'm5d.24xlarge': 9.84,
'm5d.2xlarge': 0.82,
'm5d.4xlarge': 1.64,
'm5d.large': 0.205,
'm5d.xlarge': 0.41,
'p2.16xlarge': 17.344,
'p2.8xlarge': 8.672,
'p2.xlarge': 1.084,
'p3.16xlarge': 27.424,
'p3.2xlarge': 3.428,
'p3.8xlarge': 13.712,
'p3dn.24xlarge': 35.628,
'r3.2xlarge': 1.045,
'r3.4xlarge': 1.944,
'r3.8xlarge': 3.5,
'r3.large': 0.291,
'r3.xlarge': 0.583,
'r4.16xlarge': 7.2,
'r4.2xlarge': 0.9,
'r4.4xlarge': 1.8,
'r4.8xlarge': 3.6,
'r4.large': 0.225,
'r4.xlarge': 0.45,
'r5.12xlarge': 5.232,
'r5.24xlarge': 10.464,
'r5.2xlarge': 0.872,
'r5.4xlarge': 1.744,
'r5.large': 0.218,
'r5.xlarge': 0.436,
'r5a.12xlarge': 4.92,
'r5a.24xlarge': 9.84,
'r5a.2xlarge': 0.82,
'r5a.4xlarge': 1.64,
'r5a.large': 0.205,
'r5a.xlarge': 0.41,
'r5d.12xlarge': 5.664,
'r5d.24xlarge': 11.328,
'r5d.2xlarge': 0.944,
'r5d.4xlarge': 1.888,
'r5d.large': 0.236,
'r5d.xlarge': 0.472,
't1.micro': 0.02,
't2.2xlarge': 0.4332,
't2.large': 0.1208,
't2.medium': 0.0644,
't2.micro': 0.0162,
't2.nano': 0.0081,
't2.small': 0.032,
't2.xlarge': 0.2266,
't3.2xlarge': 0.48,
't3.large': 0.1108,
't3.medium': 0.06,
't3.micro': 0.0196,
't3.nano': 0.0098,
't3.small': 0.0392,
't3.xlarge': 0.24,
'x1.16xlarge': 9.613,
'x1.32xlarge': 19.226,
'x1e.16xlarge': 16.288,
'x1e.2xlarge': 2.036,
'x1e.32xlarge': 32.576,
'x1e.4xlarge': 4.072,
'x1e.8xlarge': 8.144,
'x1e.xlarge': 1.018,
'z1d.12xlarge': 6.672,
'z1d.2xlarge': 1.112,
'z1d.3xlarge': 1.668,
'z1d.6xlarge': 3.336,
'z1d.large': 0.278,
'z1d.xlarge': 0.556}},
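                        # Structure note (added for readability): this literal appears to map
                        # region -> OS ('linux'/'windows') -> EC2 instance type -> on-demand hourly USD price,
                        # so a lookup such as prices['us-east-1']['linux']['m5.large'] would give 0.096,
                        # where ``prices`` stands for whatever (truncated) name this dict is assigned to.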
'us-east-2': {'linux': {'a1.2xlarge': 0.204,
'a1.4xlarge': 0.408,
'a1.large': 0.051,
'a1.medium': 0.0255,
'a1.xlarge': 0.102,
'c4.2xlarge': 0.398,
'c4.4xlarge': 0.796,
'c4.8xlarge': 1.591,
'c4.large': 0.1,
'c4.xlarge': 0.199,
'c5.18xlarge': 3.06,
'c5.2xlarge': 0.34,
'c5.4xlarge': 0.68,
'c5.9xlarge': 1.53,
'c5.large': 0.085,
'c5.xlarge': 0.17,
'c5d.18xlarge': 3.456,
'c5d.2xlarge': 0.384,
'c5d.4xlarge': 0.768,
'c5d.9xlarge': 1.728,
'c5d.large': 0.096,
'c5d.xlarge': 0.192,
'c5n.18xlarge': 3.888,
'c5n.2xlarge': 0.432,
'c5n.4xlarge': 0.864,
'c5n.9xlarge': 1.944,
'c5n.large': 0.108,
'c5n.xlarge': 0.216,
'd2.2xlarge': 1.38,
'd2.4xlarge': 2.76,
'd2.8xlarge': 5.52,
'd2.xlarge': 0.69,
'g3.16xlarge': 4.56,
'g3.4xlarge': 1.14,
'g3.8xlarge': 2.28,
'g3s.xlarge': 0.75,
'h1.16xlarge': 3.744,
'h1.2xlarge': 0.468,
'h1.4xlarge': 0.936,
'h1.8xlarge': 1.872,
'i2.2xlarge': 1.705,
'i2.4xlarge': 3.41,
'i2.8xlarge': 6.82,
'i2.xlarge': 0.853,
'i3.16xlarge': 4.992,
'i3.2xlarge': 0.624,
'i3.4xlarge': 1.248,
'i3.8xlarge': 2.496,
'i3.large': 0.156,
'i3.metal': 4.992,
'i3.xlarge': 0.312,
'm4.10xlarge': 2.0,
'm4.16xlarge': 3.2,
'm4.2xlarge': 0.4,
'm4.4xlarge': 0.8,
'm4.large': 0.1,
'm4.xlarge': 0.2,
'm5.12xlarge': 2.304,
'm5.24xlarge': 4.608,
'm5.2xlarge': 0.384,
'm5.4xlarge': 0.768,
'm5.large': 0.096,
'm5.xlarge': 0.192,
'm5a.12xlarge': 2.064,
'm5a.24xlarge': 4.128,
'm5a.2xlarge': 0.344,
'm5a.4xlarge': 0.688,
'm5a.large': 0.086,
'm5a.xlarge': 0.172,
'm5d.12xlarge': 2.712,
'm5d.24xlarge': 5.424,
'm5d.2xlarge': 0.452,
'm5d.4xlarge': 0.904,
'm5d.large': 0.113,
'm5d.xlarge': 0.226,
'p2.16xlarge': 14.4,
'p2.8xlarge': 7.2,
'p2.xlarge': 0.9,
'p3.16xlarge': 24.48,
'p3.2xlarge': 3.06,
'p3.8xlarge': 12.24,
'r3.2xlarge': 0.664,
'r3.4xlarge': 1.328,
'r3.8xlarge': 2.656,
'r3.large': 0.166,
'r3.xlarge': 0.332,
'r4.16xlarge': 4.256,
'r4.2xlarge': 0.532,
'r4.4xlarge': 1.064,
'r4.8xlarge': 2.128,
'r4.large': 0.133,
'r4.xlarge': 0.266,
'r5.12xlarge': 3.024,
'r5.24xlarge': 6.048,
'r5.2xlarge': 0.504,
'r5.4xlarge': 1.008,
'r5.large': 0.126,
'r5.xlarge': 0.252,
'r5a.12xlarge': 2.712,
'r5a.24xlarge': 5.424,
'r5a.2xlarge': 0.452,
'r5a.4xlarge': 0.904,
'r5a.large': 0.113,
'r5a.xlarge': 0.226,
'r5d.12xlarge': 3.456,
'r5d.24xlarge': 6.912,
'r5d.2xlarge': 0.576,
'r5d.4xlarge': 1.152,
'r5d.large': 0.144,
'r5d.xlarge': 0.288,
't2.2xlarge': 0.3712,
't2.large': 0.0928,
't2.medium': 0.0464,
't2.micro': 0.0116,
't2.nano': 0.0058,
't2.small': 0.023,
't2.xlarge': 0.1856,
't3.2xlarge': 0.3328,
't3.large': 0.0832,
't3.medium': 0.0416,
't3.micro': 0.0104,
't3.nano': 0.0052,
't3.small': 0.0208,
't3.xlarge': 0.1664,
'x1.16xlarge': 6.669,
'x1.32xlarge': 13.338},
'windows': {'c4.2xlarge': 0.766,
'c4.4xlarge': 1.532,
'c4.8xlarge': 3.091,
'c4.large': 0.192,
'c4.xlarge': 0.383,
'c5.18xlarge': 6.372,
'c5.2xlarge': 0.708,
'c5.4xlarge': 1.416,
'c5.9xlarge': 3.186,
'c5.large': 0.177,
'c5.xlarge': 0.354,
'c5d.18xlarge': 6.768,
'c5d.2xlarge': 0.752,
'c5d.4xlarge': 1.504,
'c5d.9xlarge': 3.384,
'c5d.large': 0.188,
'c5d.xlarge': 0.376,
'c5n.18xlarge': 7.2,
'c5n.2xlarge': 0.8,
'c5n.4xlarge': 1.6,
'c5n.9xlarge': 3.6,
'c5n.large': 0.2,
'c5n.xlarge': 0.4,
'd2.2xlarge': 1.601,
'd2.4xlarge': 3.062,
'd2.8xlarge': 6.198,
'd2.xlarge': 0.821,
'g3.16xlarge': 7.504,
'g3.4xlarge': 1.876,
'g3.8xlarge': 3.752,
'g3s.xlarge': 0.934,
'h1.16xlarge': 6.688,
'h1.2xlarge': 0.836,
'h1.4xlarge': 1.672,
'h1.8xlarge': 3.344,
'i2.2xlarge': 1.946,
'i2.4xlarge': 3.891,
'i2.8xlarge': 7.782,
'i2.xlarge': 0.973,
'i3.16xlarge': 7.936,
'i3.2xlarge': 0.992,
'i3.4xlarge': 1.984,
'i3.8xlarge': 3.968,
'i3.large': 0.248,
'i3.metal': 7.936,
'i3.xlarge': 0.496,
'm4.10xlarge': 3.84,
'm4.16xlarge': 6.144,
'm4.2xlarge': 0.768,
'm4.4xlarge': 1.536,
'm4.large': 0.192,
'm4.xlarge': 0.384,
'm5.12xlarge': 4.512,
'm5.24xlarge': 9.024,
'm5.2xlarge': 0.752,
'm5.4xlarge': 1.504,
'm5.large': 0.188,
'm5.xlarge': 0.376,
'm5a.12xlarge': 4.272,
'm5a.24xlarge': 8.544,
'm5a.2xlarge': 0.712,
'm5a.4xlarge': 1.424,
'm5a.large': 0.178,
'm5a.xlarge': 0.356,
'm5d.12xlarge': 4.92,
'm5d.24xlarge': 9.84,
'm5d.2xlarge': 0.82,
'm5d.4xlarge': 1.64,
'm5d.large': 0.205,
'm5d.xlarge': 0.41,
'p2.16xlarge': 17.344,
'p2.8xlarge': 8.672,
'p2.xlarge': 1.084,
'p3.16xlarge': 27.424,
'p3.2xlarge': 3.428,
'p3.8xlarge': 13.712,
'r3.2xlarge': 1.045,
'r3.4xlarge': 1.944,
'r3.8xlarge': 3.5,
'r3.large': 0.291,
'r3.xlarge': 0.583,
'r4.16xlarge': 7.2,
'r4.2xlarge': 0.9,
'r4.4xlarge': 1.8,
'r4.8xlarge': 3.6,
'r4.large': 0.225,
'r4.xlarge': 0.45,
'r5.12xlarge': 5.232,
'r5.24xlarge': 10.464,
'r5.2xlarge': 0.872,
'r5.4xlarge': 1.744,
'r5.large': 0.218,
'r5.xlarge': 0.436,
'r5a.12xlarge': 4.92,
'r5a.24xlarge': 9.84,
'r5a.2xlarge': 0.82,
'r5a.4xlarge': 1.64,
'r5a.large': 0.205,
'r5a.xlarge': 0.41,
'r5d.12xlarge': 5.664,
'r5d.24xlarge': 11.328,
'r5d.2xlarge': 0.944,
'r5d.4xlarge': 1.888,
'r5d.large': 0.236,
'r5d.xlarge': 0.472,
't2.2xlarge': 0.4332,
't2.large': 0.1208,
't2.medium': 0.0644,
't2.micro': 0.0162,
't2.nano': 0.0081,
't2.small': 0.032,
't2.xlarge': 0.2266,
't3.2xlarge': 0.48,
't3.large': 0.1108,
't3.medium': 0.06,
't3.micro': 0.0196,
't3.nano': 0.0098,
't3.small': 0.0392,
't3.xlarge': 0.24,
'x1.16xlarge': 9.613,
'x1.32xlarge': 19.226}},
'us-west-1': {'linux': {'c1.medium': 0.148,
'c1.xlarge': 0.592,
'c3.2xlarge': 0.478,
'c3.4xlarge': 0.956,
'c3.8xlarge': 1.912,
'c3.large': 0.12,
'c3.xlarge': 0.239,
'c4.2xlarge': 0.498,
'c4.4xlarge': 0.997,
'c4.8xlarge': 1.993,
'c4.large': 0.124,
'c4.xlarge': 0.249,
'c5.18xlarge': 3.816,
'c5.2xlarge': 0.424,
'c5.4xlarge': 0.848,
'c5.9xlarge': 1.908,
'c5.large': 0.106,
'c5.xlarge': 0.212,
'c5d.18xlarge': 4.32,
'c5d.2xlarge': 0.48,
'c5d.4xlarge': 0.96,
'c5d.9xlarge': 2.16,
'c5d.large': 0.12,
'c5d.xlarge': 0.24,
'd2.2xlarge': 1.563,
'd2.4xlarge': 3.125,
'd2.8xlarge': 6.25,
'd2.xlarge': 0.781,
'f1.16xlarge': 15.304,
'f1.2xlarge': 1.913,
'f1.4xlarge': 3.826,
'g2.2xlarge': 0.702,
'g2.8xlarge': 2.808,
'g3.16xlarge': 6.136,
'g3.4xlarge': 1.534,
'g3.8xlarge': 3.068,
'g3s.xlarge': 1.009,
'i2.2xlarge': 1.876,
'i2.4xlarge': 3.751,
'i2.8xlarge': 7.502,
'i2.xlarge': 0.938,
'i3.16xlarge': 5.504,
'i3.2xlarge': 0.688,
'i3.4xlarge': 1.376,
'i3.8xlarge': 2.752,
'i3.large': 0.172,
'i3.metal': 5.504,
'i3.xlarge': 0.344,
'm1.large': 0.19,
'm1.medium': 0.095,
'm1.small': 0.047,
'm1.xlarge': 0.379,
'm2.2xlarge': 0.55,
'm2.4xlarge': 1.1,
'm2.xlarge': 0.275,
'm3.2xlarge': 0.616,
                            'm3.large':
# dvtrung/dl-torch -- dlex/torch/backend.py
"""Train a model."""
import os
import random
import sys
import traceback
from collections import namedtuple, defaultdict
from datetime import datetime
from typing import Callable, Dict
import torch
from dlex import FrameworkBackend, TrainingProgress
from dlex.configs import Configs, Params
from dlex.datasets.torch import Dataset
from dlex.datatypes import ModelReport
from dlex.torch.models.base import BaseModel, ModelWrapper
from dlex.torch.utils.model_utils import get_model
from dlex.utils import check_interval_passed, Datasets
from dlex.utils.logging import logger, epoch_info_logger, log_result, json_dumps, \
log_outputs
from dlex.utils.model_utils import get_dataset
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
DEBUG_NUM_ITERATIONS = 5
DEBUG_BATCH_SIZE = 4
EvaluationResults = namedtuple("EvaluationResults", "results outputs")
class PytorchBackend(FrameworkBackend):
def __init__(self, params: Params, training_idx: int = 0, report_queue=None):
super().__init__(params, training_idx, report_queue)
def run_cross_validation_training(self) -> ModelReport:
report = self.report
report.results = []
train_cfg = self.params.train
for i in range(train_cfg.cross_validation):
# Reset random seed so the same order is returned after shuffling dataset
self.set_seed()
summary_writer = SummaryWriter(
os.path.join(self.configs.log_dir, "runs", str(self.training_idx), str(i + 1)))
self.params.dataset.cv_current_fold = i + 1
self.params.dataset.cv_num_folds = train_cfg.cross_validation
self.update_report()
model, datasets = self.load_model("train")
results = self.train(
model, datasets, summary_writer=summary_writer,
tqdm_desc=f"[{self.params.env_name}-{self.training_idx}] CV {i + 1}/{train_cfg.cross_validation} - ",
tqdm_position=self.training_idx)
report.results.append(results)
self.update_report()
summary_writer.close()
logger.info(f"Training finished.")
for metric in report.metrics:
logger.info(f"Results ({metric}): {report.get_result_text()}")
report.finish()
return report
def run_train(self) -> ModelReport:
logger.info(f"Training started ({self.training_idx})")
report = self.report
report.results = {name: {m: None for m in self.report.metrics} for name in self.report.test_sets}
if self.params.train.cross_validation:
return self.run_cross_validation_training()
else:
summary_writer = SummaryWriter(os.path.join(self.params.log_dir, "runs", str(self.training_idx)))
model, datasets = self.load_model("train")
res = self.train(
model, datasets, summary_writer,
tqdm_position=self.training_idx,
on_epoch_finished=self.update_report)
report.results = res
report.finish()
summary_writer.close()
return report
def run_evaluate(self):
model, datasets = self.load_model("test")
for name, dataset in datasets.test_sets.items():
logger.info(f"Evaluate model on dataset '{name}'")
logger.info(f"Log dir: {self.configs.log_dir}")
ret = self.evaluate(
model, dataset,
output_path=os.path.join(self.params.log_dir, "results"),
output_tag=f"{self.args.load}_{name}")
# for output in random.choices(outputs, k=50):
# logger.info(str(output))
def load_model(self, mode):
"""
Load model and dataset
:param mode: train, test, dev
:return:
"""
params = self.params
if not self.configs:
self.configs = Configs(mode=mode, argv=self.argv)
envs, args = self.configs.environments, self.configs.args
assert len(envs) == 1
assert len(envs[0].configs_list) == 1
params = envs[0].configs_list[0]
else:
args = self.configs.args
if mode == "train":
if args.debug:
params.train.batch_size = DEBUG_BATCH_SIZE
params.test.batch_size = DEBUG_BATCH_SIZE
# Init dataset
dataset_builder = get_dataset(params)
assert dataset_builder, "Dataset not found."
if not args.no_prepare:
dataset_builder.prepare(download=args.download, preprocess=args.preprocess)
datasets = Datasets(
"pytorch", dataset_builder,
train_set=params.train.train_set,
valid_set=params.train.valid_set,
test_sets=params.test.test_sets)
# Init model
model_cls = get_model(params)
assert model_cls, "Model not found."
model = model_cls(params, next(iter(datasets.test_sets.values())) or datasets.train_set or datasets.valid_set)
# log model summary
self.report.set_model_summary(
variable_names=[name for name, _ in model.named_parameters()],
variable_shapes=[list(p.shape) for _, p in model.named_parameters()],
variable_trainable=[p.requires_grad for _, p in model.named_parameters()]
)
if torch.cuda.is_available() and params.gpu:
logger.info("CUDA available: %s", torch.cuda.get_device_name(0))
gpus = [f"cuda:{g}" for g in params.gpu]
model = ModelWrapper(model, gpus)
logger.info("Preparing %d GPU(s): %s", len(params.gpu), str(params.gpu))
torch.cuda.set_device(torch.device(gpus[0]))
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
model = ModelWrapper(model)
logger.debug("Dataset: %s. Model: %s", str(dataset_builder), str(model_cls))
# Load checkpoint or initialize new training
if args.load:
self.configs.training_id = model.load_checkpoint(args.load)
logger.info("Loaded checkpoint: %s", args.load)
if mode == "train":
logger.info("EPOCH: %f", model.global_step / len(datasets.train_set))
return model, datasets
def record_results(
self,
select_model: str,
model,
datasets) -> bool:
"""
:param select_model:
:return: whether the results are updated
"""
report = self.report
valid_results = report.get_current_valid_results()
test_results = report.get_current_test_results()
loss = report.epoch_losses[-1]
updated = False
if select_model == "last":
report.current_test_results = test_results
updated = True
elif select_model == "best":
if not datasets.valid_set:
# there's no valid set, report test result with lowest loss
if loss <= min(report.epoch_losses):
dataset = report.test_sets[0]
report.current_test_results = test_results
logger.info("Result updated (lowest loss reached: %.4f) - %s" % (
loss,
", ".join(["%s: %.2f" % (metric, res) for metric, res in report.current_test_results[dataset].items()])
))
model.save_checkpoint("best")
updated = True
else:
for metric in report.metrics:
valid_best_result = max([r[metric] for r in report.epoch_valid_results])
if valid_best_result == valid_results[metric]:
updated = True
if datasets.test_sets:
# report test result of best model on valid set
# logger.info("Best result: %f", test_result['result'][metric])
for name in datasets.test_sets.keys():
report.current_test_results[name][metric] = test_results[name][metric]
logger.info(f"{name} results updated (better result on valid set: %.4f) - %.4f" % (
valid_results[metric],
test_results[name][metric]
))
# log_result(f"valid_test_{metric}", params, test_result, datasets.builder.is_better_result)
# log_outputs("valid_test", params, test_outputs)
else:
pass
# there's no test set, report best valid result
# log_result(f"valid_{metric}", params, valid_result, datasets.builder.is_better_result)
# report.current_results[metric] = valid_results[metric]
# log_outputs("valid", params, valid_outputs)
return updated
def train(
self,
model,
datasets: Datasets,
summary_writer: SummaryWriter,
tqdm_desc="",
tqdm_position=None,
on_epoch_finished: Callable[[], None] = None) -> Dict[str, Dict[str, float]]:
"""
:param model:
:param datasets:
:param summary_writer:
:param tqdm_desc:
:param tqdm_position:
:param on_epoch_finished:
:return:
"""
report = self.report
args = self.configs.args
train_cfg = self.params.train
params = self.params
epoch = model.global_step // len(datasets.train_set)
num_samples = model.global_step % len(datasets.train_set)
training_progress = TrainingProgress(params, num_samples=len(datasets.train_set))
report.epoch_losses = []
report.valid_results = dict()
report.test_results = dict()
report.current_test_results = {name: {} for name in datasets.test_sets.keys()}
report.training_progress = training_progress
# num_samples = 0
for current_epoch in range(epoch + 1, train_cfg.num_epochs + 1):
training_progress.new_epoch(current_epoch)
log_dict = dict(epoch=current_epoch)
log_dict['total_time'], loss = self.train_epoch(
current_epoch, model, datasets, report, num_samples,
training_progress=training_progress,
tqdm_desc=tqdm_desc + f"Epoch {current_epoch}",
tqdm_position=tqdm_position)
report.epoch_losses.append(loss)
summary_writer.add_scalar(f"loss", loss, current_epoch)
log_dict['loss'] = loss
num_samples = 0
def _evaluate(name, dataset):
# Evaluate model
ret = self.evaluate(
model, dataset,
output_path=os.path.join(params.log_dir, "results"),
output_tag="latest",
tqdm_desc=tqdm_desc + f"Epoch {current_epoch}",
tqdm_position=None if tqdm_position is None else tqdm_position)
best_result = log_result(name, params, ret.results, datasets.builder.is_better_result)
return ret.results, best_result, ret.outputs
if training_progress.should_eval() or current_epoch == train_cfg.num_epochs:
# Evaluate test sets
test_results = {}
for name, dataset in datasets.test_sets.items():
test_result, test_best_result, test_outputs = _evaluate(name, dataset)
test_results[name] = test_result['result']
log_outputs("test", params, test_outputs)
log_dict['test_result'] = test_result['result']
for metric in test_result['result']:
summary_writer.add_scalar(f"{name}_{metric}", test_result['result'][metric], current_epoch)
report.test_results[current_epoch] = test_results
# Evaluate valid set
valid_result = None
if datasets.valid_set:
valid_result, valid_best_result, valid_outputs = _evaluate("valid", datasets.valid_set)
log_outputs("valid", params, valid_outputs)
log_dict['valid_result'] = valid_result['result']
for metric in valid_result['result']:
summary_writer.add_scalar(
f"valid_{metric}",
valid_result['result'][metric], current_epoch)
valid_result = valid_result['result']
report.valid_results[current_epoch] = valid_result
# results for reporting
if self.record_results(train_cfg.select_model, model, datasets):
report.save()
if args.output_test_samples:
logger.info("Random samples")
for output in random.choices(test_outputs if datasets.test_sets else valid_outputs, k=5):
logger.info(str(output))
epoch_info_logger.info(json_dumps(log_dict))
log_msgs = [
"time: %s" % log_dict['total_time'].split('.')[0],
"loss: %.4f" % log_dict['loss']
]
for metric in report.metrics:
if datasets.valid_set:
log_msgs.append(f"dev ({metric}): %.2f" % (
log_dict['valid_result'][metric],
# valid_best_result[metric]['result'][metric]
))
if datasets.test_sets:
log_msgs.append(f"test ({metric}): %.2f" % (
log_dict['test_result'][metric],
# test_best_result[metric]['result'][metric],
))
logger.info(f"session {report.training_idx} - epoch {current_epoch}: " + " - ".join(log_msgs))
# Early stopping
if params.train.early_stop:
ne = params.train.early_stop.num_epochs
min_diff = params.train.early_stop.min_diff or 0.
if datasets.valid_set is not None:
last_results = report.epoch_valid_results
if len(last_results) > ne:
if all(
max([r[metric] for r in last_results[-ne:]]) <=
max([r[metric] for r in last_results[:-ne]])
for metric in report.metrics):
logger.info("Early stop at epoch %s", current_epoch)
break
else:
losses = report.epoch_losses
if len(losses) > ne:
diff = min(losses[:-ne]) - min(losses[-ne:])
logger.debug("Last %d epochs decrease: %.4f", ne, diff)
if diff <= min_diff:
logger.info("Early stop at epoch %s", current_epoch)
break
if on_epoch_finished:
on_epoch_finished()
return report.current_test_results
def train_epoch(
self,
current_epoch: int,
model,
datasets: Datasets,
report: ModelReport,
num_samples=0,
training_progress: TrainingProgress = None,
tqdm_desc="Epoch {current_epoch}",
tqdm_position=None):
"""Train."""
args = self.configs.args
params = self.params
if self.params.dataset.shuffle:
datasets.train_set.shuffle()
model.reset_counter()
start_time = datetime.now()
if isinstance(params.train.batch_size, int): # fixed batch size
batch_sizes = {0: params.train.batch_size}
elif isinstance(params.train.batch_size, dict):
batch_sizes = params.train.batch_size
else:
raise ValueError("Batch size is not valid.")
for key in batch_sizes:
batch_sizes[key] *= (len(self.params.gpu) if self.params.gpu else 1) or 1
assert 0 in batch_sizes
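        # Note: params.train.batch_size may be a single int or, presumably, a schedule that maps a
        # percentage of the epoch to a batch size, e.g. {0: 32, 50: 64} for batch size 32 during the
        # first half of the epoch and 64 afterwards (the example values are illustrative only);
        # each size is additionally scaled by the number of GPUs in use.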
with tqdm(
desc=tqdm_desc.format(current_epoch=current_epoch),
total=training_progress.num_samples, leave=False,
position=tqdm_position,
disable=not args.show_progress) as t:
t.update(num_samples)
batch_size_checkpoints = sorted(batch_sizes.keys())
for start, end in zip(batch_size_checkpoints, batch_size_checkpoints[1:] + [100]):
if end / 100 < num_samples / len(datasets.train_set):
continue
batch_size = batch_sizes[start]
data_train = datasets.train_set.get_iter(
batch_size,
start=max(start * len(datasets.train_set) // 100, num_samples),
end=end * len(datasets.train_set) // 100
)
for epoch_step, batch in enumerate(data_train):
                    try:
                        if batch is None or len(batch) == 0:
                            raise Exception("Batch size 0")
                        # run the training step inside the try block so that CUDA errors
                        # (e.g. out of memory) are caught, the model is saved and we exit cleanly
                        loss = model.training_step(batch)
                        metrics = model.get_metrics()
# clean
torch.cuda.empty_cache()
except RuntimeError as e:
torch.cuda.empty_cache()
logger.error(str(e))
logger.info("Saving model before exiting...")
model.save_checkpoint("latest")
sys.exit(2)
except Exception as e:
logger.error(str(e))
continue
else:
t.set_postfix(
# loss="%.4f" % loss,
loss="%.4f" % model.epoch_loss,
# lr=mean(model.learning_rates())
**{metric: "%.2f" % val for metric, val in metrics.items()},
# **(report.current_results or {})
)
# if args.debug and epoch_step > DEBUG_NUM_ITERATIONS:
# break
t.update(len(batch))
training_progress.update(len(batch))
model.current_epoch = current_epoch
model.global_step = (current_epoch - 1) * len(datasets.train_set) + num_samples
if report.summary_writer is not None:
report.summary_writer.add_scalar("loss", loss, model.global_step)
# Save model
if training_progress.should_save():
if args.save_all:
model.save_checkpoint("epoch-%02d" % current_epoch)
else:
model.save_checkpoint("latest")
# Log
if training_progress.should_log():
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script builds the domain-specific language model for the Public Transport Info domain (Czech).
The training procedure is as follows:
#. Append bootstrap text, possibly handwritten, to the text extracted from the in-domain data.
#. Build a class-based language model using the data generated in the previous step.
#. Score the general (domain-independent) data.
#. Select the 1M sentences with the lowest perplexity under the class-based language model.
#. Append the selected sentences to the training data generated in step 1.
#. Re-build the class-based language model.
"""
if __name__ == '__main__':
import autopath
import os
import xml.dom.minidom
import glob
import codecs
import random
import alex.corpustools.lm as lm
import alex.utils.various as various
from alex.corpustools.text_norm_cs import normalise_text, exclude_lm
from alex.corpustools.wavaskey import save_wavaskey
def is_srilm_available():
"""Test whether SRILM is available in PATH."""
return os.system("which ngram-count") == 0
def require_srilm():
"""Test whether SRILM is available in PATH, try to import it from env
variable and exit the program in case there are problems with it."""
if not is_srilm_available():
if 'SRILM_PATH' in os.environ:
srilm_path = os.environ['SRILM_PATH']
os.environ['PATH'] += ':%s' % srilm_path
if not is_srilm_available():
print 'SRILM_PATH you specified does not contain the ' \
'utilities needed. Please make sure you point to the ' \
'directory with the SRILM binaries.'
exit(1)
else:
print 'SRILM not found. Set SRILM_PATH environment variable to ' \
'the path with SRILM binaries.'
exit(1)
def exit_on_system_fail(cmd, msg=None):
system_res = os.system(cmd)
if not system_res == 0:
err_msg = "Command failed, exitting."
if msg:
err_msg = "%s %s" % (err_msg, msg, )
raise Exception(err_msg)
if __name__ == '__main__':
# Test if SRILM is available.
require_srilm()
train_data_size = 0.90
bootstrap_text = "bootstrap.txt"
classes = "../data/database_SRILM_classes.txt"
indomain_data_dir = "indomain_data"
gen_data = lm.download_general_LM_data('cs')
fn_pt_trn = "reference_transcription_trn.txt"
fn_pt_dev = "reference_transcription_dev.txt"
gen_data_norm = '01_gen_data_norm.txt.gz'
indomain_data_text_trn = "04_indomain_data_trn.txt"
indomain_data_text_trn_norm = "04_indomain_data_trn_norm.txt"
indomain_data_text_dev = "05_indomain_data_dev.txt"
indomain_data_text_dev_norm = "05_indomain_data_dev_norm.txt"
indomain_data_text_trn_norm_vocab = "06_indomain_data_trn_norm.txt.vocab"
indomain_data_text_trn_norm_count1 = "06_indomain_data_trn_norm.txt.count1"
indomain_data_text_trn_norm_pg_arpa = "06_indomain_data_trn_norm.txt.pg.arpa"
indomain_data_text_trn_norm_cls = "07_indomain_data_trn_norm_cls.txt"
indomain_data_text_trn_norm_cls_classes = "07_indomain_data_trn_norm_cls.classes"
indomain_data_text_trn_norm_cls_vocab = "07_indomain_data_trn_norm_cls.vocab"
indomain_data_text_trn_norm_cls_count1 = "07_indomain_data_trn_norm_cls.count1"
indomain_data_text_trn_norm_cls_pg_arpa = "07_indomain_data_trn_norm_cls.pg.arpa"
indomain_data_text_trn_norm_cls_pg_arpa_scoring = "10_indomain_data_trn_norm_cls.pg.arpa.gen_scoring.gz"
gen_data_norm_selected = '11_gen_data_norm.selected.txt'
extended_data_text_trn_norm = "20_extended_data_trn_norm.txt"
extended_data_text_trn_norm_cls = "20_extended_data_trn_norm_cls.txt"
extended_data_text_trn_norm_cls_classes = "20_extended_data_trn_norm_cls.classes"
extended_data_text_trn_norm_cls_vocab = "20_extended_data_trn_norm_cls.vocab"
extended_data_text_trn_norm_cls_count1 = "20_extended_data_trn_norm_cls.count1"
extended_data_text_trn_norm_cls_pg_arpa = "20_extended_data_trn_norm_cls.pg.arpa"
extended_data_text_trn_norm_cls_pg_arpa_filtered = "25_extended_data_trn_norm_cls.filtered.pg.arpa"
expanded_lm_vocab = "26_expanded.vocab"
expanded_lm_pg = "26_expanded.pg.arpa"
mixing_weight = "0.8"
mixed_lm_vocab = "27_mixed.vocab"
mixed_lm_pg = "27_mixed.pg.arpa"
final_lm_vocab = "final.vocab"
final_lm_pg = "final.pg.arpa"
final_lm_qg = "final.qg.arpa"
final_lm_tg = "final.tg.arpa"
final_lm_bg = "final.bg.arpa"
final_lm_dict = "final.dict"
final_lm_dict_sp_sil = "final.dict.sp_sil"
print
print "Data for the general language model:", gen_data
print "-"*120
###############################################################################################
if not os.path.exists(gen_data_norm):
print "Normalizing general data"
print "-"*120
###############################################################################################
cmd = r"zcat %s | iconv -f UTF-8 -t UTF-8//IGNORE | sed 's/\. /\n/g' | sed 's/[[:digit:]]/ /g; s/[^[:alnum:]]/ /g; s/[ˇ]/ /g; s/ \+/ /g' | sed 's/[[:lower:]]*/\U&/g' | sed s/[\%s→€…│]//g | gzip > %s" % \
(gen_data,
"'",
gen_data_norm)
print cmd
exit_on_system_fail(cmd)
if not os.path.exists(indomain_data_text_trn_norm):
print "Generating train and dev data"
print "-"*120
###############################################################################################
files = []
files.append(glob.glob(os.path.join(indomain_data_dir, 'asr_transcribed.xml')))
files.append(glob.glob(os.path.join(indomain_data_dir, '*', 'asr_transcribed.xml')))
files.append(glob.glob(os.path.join(indomain_data_dir, '*', '*', 'asr_transcribed.xml')))
files.append(glob.glob(os.path.join(indomain_data_dir, '*', '*', '*', 'asr_transcribed.xml')))
files.append(glob.glob(os.path.join(indomain_data_dir, '*', '*', '*', '*', 'asr_transcribed.xml')))
files.append(glob.glob(os.path.join(indomain_data_dir, '*', '*', '*', '*', '*', 'asr_transcribed.xml')))
files = various.flatten(files)
tt = []
pt = []
for fn in files:
# print "Processing:", fn
doc = xml.dom.minidom.parse(fn)
turns = doc.getElementsByTagName("turn")
for turn in turns:
recs_list = turn.getElementsByTagName("rec")
trans_list = turn.getElementsByTagName("asr_transcription")
if trans_list:
trans = trans_list[-1]
t = various.get_text_from_xml_node(trans)
t = normalise_text(t)
if exclude_lm(t):
continue
# The silence does not have a label in the language model.
t = t.replace('_SIL_', '')
tt.append(t)
wav_file = recs_list[0].getAttribute('fname')
wav_path = os.path.realpath(os.path.join(os.path.dirname(fn), wav_file))
pt.append((wav_path, t))
random.seed(10)
sf = [(a, b) for a, b in zip(tt, pt)]
random.shuffle(sf)
sf_train = sorted(sf[:int(train_data_size*len(sf))], key=lambda k: k[1][0])
sf_dev = sorted(sf[int(train_data_size*len(sf)):], key=lambda k: k[1][0])
t_train = [a for a, b in sf_train]
pt_train = [b for a, b in sf_train]
t_dev = [a for a, b in sf_dev]
pt_dev = [b for a, b in sf_dev]
with codecs.open(indomain_data_text_trn,"w", "UTF-8") as w:
w.write('\n'.join(t_train))
with codecs.open(indomain_data_text_dev,"w", "UTF-8") as w:
w.write('\n'.join(t_dev))
save_wavaskey(fn_pt_trn, dict(pt_train))
save_wavaskey(fn_pt_dev, dict(pt_dev))
# train data
cmd = r"cat %s %s | iconv -f UTF-8 -t UTF-8//IGNORE | sed 's/\. /\n/g' | sed 's/[[:digit:]]/ /g; s/[^[:alnum:]_]/ /g; s/[ˇ]/ /g; s/ \+/ /g' | sed 's/[[:lower:]]*/\U&/g' | sed s/[\%s→€…│]//g > %s" % \
(bootstrap_text,
indomain_data_text_trn,
"'",
indomain_data_text_trn_norm)
print cmd
exit_on_system_fail(cmd)
# dev data
cmd = r"cat %s | iconv -f UTF-8 -t UTF-8//IGNORE | sed 's/\. /\n/g' | sed 's/[[:digit:]]/ /g; s/[^[:alnum:]_]/ /g; s/[ˇ]/ /g; s/ \+/ /g' | sed 's/[[:lower:]]*/\U&/g' | sed s/[\%s→€…│]//g > %s" % \
(indomain_data_text_dev,
"'",
indomain_data_text_dev_norm)
print cmd
exit_on_system_fail(cmd)
if not os.path.exists(indomain_data_text_trn_norm_cls_pg_arpa):
print "Generating class-based 5-gram language model from trn in-domain data"
print "-"*120
###############################################################################################
# convert surface forms to classes
cmd = r"[ -e %s ] && replace-words-with-classes addone=10 normalize=1 outfile=%s classes=%s %s > %s || exit 1" % \
(classes,
indomain_data_text_trn_norm_cls_classes,
classes,
indomain_data_text_trn_norm,
indomain_data_text_trn_norm_cls)
print cmd
exit_on_system_fail(cmd, "Maybe you forgot to run "
"'../data/database.py build'?")
cmd = "ngram-count -text %s -write-vocab %s -write1 %s -order 5 -wbdiscount -memuse -lm %s" % \
(indomain_data_text_trn_norm_cls,
indomain_data_text_trn_norm_cls_vocab,
indomain_data_text_trn_norm_cls_count1,
indomain_data_text_trn_norm_cls_pg_arpa)
print cmd
exit_on_system_fail(cmd)
if not os.path.exists(indomain_data_text_trn_norm_pg_arpa):
print
print "Generating full 5-gram in-domain language model from in-domain data"
print "-"*120
cmd = "ngram-count -text %s -write-vocab %s -write1 %s -order 5 -wbdiscount -memuse -lm %s" % \
(indomain_data_text_trn_norm,
indomain_data_text_trn_norm_vocab,
indomain_data_text_trn_norm_count1,
indomain_data_text_trn_norm_pg_arpa)
print cmd
exit_on_system_fail(cmd)
if not os.path.exists(indomain_data_text_trn_norm_cls_pg_arpa_scoring):
print
print "Scoring general text data using the in-domain language model"
print "-"*120
###############################################################################################
exit_on_system_fail("ngram -lm %s -classes %s -order 5 -debug 1 -ppl %s | gzip > %s" % \
(indomain_data_text_trn_norm_cls_pg_arpa,
indomain_data_text_trn_norm_cls_classes,
gen_data_norm,
indomain_data_text_trn_norm_cls_pg_arpa_scoring))
if not os.path.exists(gen_data_norm_selected):
print
print "Selecting similar sentences to in-domain data from general text data"
print "-"*120
###############################################################################################
exit_on_system_fail("zcat %s | ../../../corpustools/srilm_ppl_filter.py > %s " % (indomain_data_text_trn_norm_cls_pg_arpa_scoring, gen_data_norm_selected))
if not os.path.exists(extended_data_text_trn_norm_cls_pg_arpa):
print
print "Training the in-domain model on the extended data"
print "-"*120
###############################################################################################
cmd = r"cat %s %s > %s" % (indomain_data_text_trn_norm, gen_data_norm_selected, extended_data_text_trn_norm)
# cmd = r"cat %s > %s" % (indomain_data_text_trn_norm, extended_data_text_trn_norm)
print cmd
exit_on_system_fail(cmd)
# convert surface forms to classes
cmd = r"[ -e %s ] && replace-words-with-classes addone=10 normalize=1 outfile=%s classes=%s %s > %s || exit 1" % \
(classes,
extended_data_text_trn_norm_cls_classes,
classes,
extended_data_text_trn_norm,
extended_data_text_trn_norm_cls)
print cmd
exit_on_system_fail(cmd, "Maybe you forgot to run "
"'../data/database.py build'?")
cmd = "ngram-count -text %s -vocab %s -limit-vocab -write-vocab %s -write1 %s -order 5 -wbdiscount -memuse -lm %s" % \
(extended_data_text_trn_norm_cls,
indomain_data_text_trn_norm_cls_vocab,
extended_data_text_trn_norm_cls_vocab,
extended_data_text_trn_norm_cls_count1,
extended_data_text_trn_norm_cls_pg_arpa)
print cmd
exit_on_system_fail(cmd)
cmd = "cat %s | grep -v 'CL_[[:alnum:]_]\+[[:alnum:] _]\+CL_'> %s" % \
(extended_data_text_trn_norm_cls_pg_arpa,
extended_data_text_trn_norm_cls_pg_arpa_filtered)
print cmd
exit_on_system_fail(cmd)
cmd = "ngram -lm %s -order 5 -write-lm %s -renorm" % \
(extended_data_text_trn_norm_cls_pg_arpa_filtered,
extended_data_text_trn_norm_cls_pg_arpa_filtered)
print cmd
exit_on_system_fail(cmd)
if not os.path.exists(expanded_lm_pg):
print
print "Expanding the language model"
print "-"*120
###############################################################################################
cmd = "ngram -lm %s -classes %s -order 5 -expand-classes 5 -write-vocab %s -write-lm %s -prune 0.0000001 -renorm" \
% (extended_data_text_trn_norm_cls_pg_arpa_filtered,
extended_data_text_trn_norm_cls_classes,
expanded_lm_vocab,
expanded_lm_pg)
print cmd
exit_on_system_fail(cmd)
if not os.path.exists(mixed_lm_pg):
print
print "Mixing the expanded class-based model and the full model"
print "-"*120
###############################################################################################
cmd = "ngram -lm %s -mix-lm %s -lambda %s -order 5 -write-vocab %s -write-lm %s -prune 0.00000001 -renorm" \
% (expanded_lm_pg,
indomain_data_text_trn_norm_pg_arpa,
mixing_weight,
mixed_lm_vocab,
mixed_lm_pg)
print cmd
exit_on_system_fail(cmd)
if not os.path.exists(final_lm_pg):
print
print "Building the final language models"
print "-"*120
###############################################################################################
cmd = "ngram -lm %s -order 5 -write-lm %s -prune-lowprobs -prune 0.0000001 -renorm" \
% (mixed_lm_pg,
final_lm_pg)
print cmd
exit_on_system_fail(cmd)
cmd = "ngram -lm %s -order 4 -write-lm %s -prune-lowprobs -prune 0.0000001 -renorm" \
% (mixed_lm_pg,
final_lm_qg)
print cmd
exit_on_system_fail(cmd)
cmd = "ngram -lm %s -order 3 -write-lm %s -prune-lowprobs -prune 0.0000001 -renorm" \
% (mixed_lm_pg,
final_lm_tg)
print cmd
exit_on_system_fail(cmd)
cmd = "ngram -lm %s -order 2 -write-lm %s -prune-lowprobs -prune 0.0000001 -renorm" \
% (mixed_lm_pg,
final_lm_bg)
print cmd
exit_on_system_fail(cmd)
cmd = "cat %s | grep -v '\-pau\-' | grep -v '<s>' | grep -v '</s>' | grep -v '<unk>' | grep -v 'CL_' | grep -v '{' | grep -v '_' > %s" % \
(mixed_lm_vocab,
final_lm_vocab)
print cmd
exit_on_system_fail(cmd)
cmd = "echo '' > {dict}".format(dict=final_lm_dict)
print cmd
exit_on_system_fail(cmd)
cmd = "perl ../../../tools/htk/bin/PhoneticTranscriptionCS.pl %s %s" | |
All required parameters must be populated in order to send to Azure.
:param directory: Target directory name. Must not contain or start with '..'. If '.' is
supplied, the volume directory will be the git repository. Otherwise, if specified, the volume
will contain the git repository in the subdirectory with the given name.
:type directory: str
:param repository: Required. Repository URL.
:type repository: str
:param revision: Commit hash for the specified revision.
:type revision: str
"""
_validation = {
'repository': {'required': True},
}
_attribute_map = {
'directory': {'key': 'directory', 'type': 'str'},
'repository': {'key': 'repository', 'type': 'str'},
'revision': {'key': 'revision', 'type': 'str'},
}
def __init__(
self,
*,
repository: str,
directory: Optional[str] = None,
revision: Optional[str] = None,
**kwargs
):
super(GitRepoVolume, self).__init__(**kwargs)
self.directory = directory
self.repository = repository
self.revision = revision
class GpuResource(msrest.serialization.Model):
"""The GPU resource.
All required parameters must be populated in order to send to Azure.
:param count: Required. The count of the GPU resource.
:type count: int
:param sku: Required. The SKU of the GPU resource. Possible values include: "K80", "P100",
"V100".
:type sku: str or ~azure.mgmt.containerinstance.models.GpuSku
"""
_validation = {
'count': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'sku': {'key': 'sku', 'type': 'str'},
}
def __init__(
self,
*,
count: int,
sku: Union[str, "GpuSku"],
**kwargs
):
super(GpuResource, self).__init__(**kwargs)
self.count = count
self.sku = sku
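# Illustrative sketch (not part of the generated code): a GPU request would be built with one
# of the SKUs listed in the docstring above, e.g.
#     gpu = GpuResource(count=1, sku="V100")
# and would then, presumably, be attached to a container's resource requests (not shown here).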
class ImageRegistryCredential(msrest.serialization.Model):
"""Image registry credential.
All required parameters must be populated in order to send to Azure.
:param server: Required. The Docker image registry server without a protocol such as "http" and
"https".
:type server: str
:param username: Required. The username for the private registry.
:type username: str
:param password: The password for the private registry.
:type password: str
"""
_validation = {
'server': {'required': True},
'username': {'required': True},
}
_attribute_map = {
'server': {'key': 'server', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
}
def __init__(
self,
*,
server: str,
username: str,
password: Optional[str] = None,
**kwargs
):
super(ImageRegistryCredential, self).__init__(**kwargs)
self.server = server
self.username = username
self.password = password
class InitContainerDefinition(msrest.serialization.Model):
"""The init container definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name for the init container.
:type name: str
:param image: The image of the init container.
:type image: str
:param command: The command to execute within the init container in exec form.
:type command: list[str]
:param environment_variables: The environment variables to set in the init container.
:type environment_variables: list[~azure.mgmt.containerinstance.models.EnvironmentVariable]
:ivar instance_view: The instance view of the init container. Only valid in response.
:vartype instance_view:
~azure.mgmt.containerinstance.models.InitContainerPropertiesDefinitionInstanceView
:param volume_mounts: The volume mounts available to the init container.
:type volume_mounts: list[~azure.mgmt.containerinstance.models.VolumeMount]
"""
_validation = {
'name': {'required': True},
'instance_view': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'image': {'key': 'properties.image', 'type': 'str'},
'command': {'key': 'properties.command', 'type': '[str]'},
'environment_variables': {'key': 'properties.environmentVariables', 'type': '[EnvironmentVariable]'},
'instance_view': {'key': 'properties.instanceView', 'type': 'InitContainerPropertiesDefinitionInstanceView'},
'volume_mounts': {'key': 'properties.volumeMounts', 'type': '[VolumeMount]'},
}
def __init__(
self,
*,
name: str,
image: Optional[str] = None,
command: Optional[List[str]] = None,
environment_variables: Optional[List["EnvironmentVariable"]] = None,
volume_mounts: Optional[List["VolumeMount"]] = None,
**kwargs
):
super(InitContainerDefinition, self).__init__(**kwargs)
self.name = name
self.image = image
self.command = command
self.environment_variables = environment_variables
self.instance_view = None
self.volume_mounts = volume_mounts
class InitContainerPropertiesDefinitionInstanceView(msrest.serialization.Model):
"""The instance view of the init container. Only valid in response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar restart_count: The number of times that the init container has been restarted.
:vartype restart_count: int
:ivar current_state: The current state of the init container.
:vartype current_state: ~azure.mgmt.containerinstance.models.ContainerState
:ivar previous_state: The previous state of the init container.
:vartype previous_state: ~azure.mgmt.containerinstance.models.ContainerState
:ivar events: The events of the init container.
:vartype events: list[~azure.mgmt.containerinstance.models.Event]
"""
_validation = {
'restart_count': {'readonly': True},
'current_state': {'readonly': True},
'previous_state': {'readonly': True},
'events': {'readonly': True},
}
_attribute_map = {
'restart_count': {'key': 'restartCount', 'type': 'int'},
'current_state': {'key': 'currentState', 'type': 'ContainerState'},
'previous_state': {'key': 'previousState', 'type': 'ContainerState'},
'events': {'key': 'events', 'type': '[Event]'},
}
def __init__(
self,
**kwargs
):
super(InitContainerPropertiesDefinitionInstanceView, self).__init__(**kwargs)
self.restart_count = None
self.current_state = None
self.previous_state = None
self.events = None
class IpAddress(msrest.serialization.Model):
"""IP address for the container group.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param ports: Required. The list of ports exposed on the container group.
:type ports: list[~azure.mgmt.containerinstance.models.Port]
:param type: Required. Specifies if the IP is exposed to the public internet or private VNET.
Possible values include: "Public", "Private".
:type type: str or ~azure.mgmt.containerinstance.models.ContainerGroupIpAddressType
:param ip: The IP exposed to the public internet.
:type ip: str
:param dns_name_label: The Dns name label for the IP.
:type dns_name_label: str
:ivar fqdn: The FQDN for the IP.
:vartype fqdn: str
"""
_validation = {
'ports': {'required': True},
'type': {'required': True},
'fqdn': {'readonly': True},
}
_attribute_map = {
'ports': {'key': 'ports', 'type': '[Port]'},
'type': {'key': 'type', 'type': 'str'},
'ip': {'key': 'ip', 'type': 'str'},
'dns_name_label': {'key': 'dnsNameLabel', 'type': 'str'},
'fqdn': {'key': 'fqdn', 'type': 'str'},
}
def __init__(
self,
*,
ports: List["Port"],
type: Union[str, "ContainerGroupIpAddressType"],
ip: Optional[str] = None,
dns_name_label: Optional[str] = None,
**kwargs
):
super(IpAddress, self).__init__(**kwargs)
self.ports = ports
self.type = type
self.ip = ip
self.dns_name_label = dns_name_label
self.fqdn = None
class LogAnalytics(msrest.serialization.Model):
"""Container group log analytics information.
All required parameters must be populated in order to send to Azure.
:param workspace_id: Required. The workspace id for log analytics.
:type workspace_id: str
:param workspace_key: Required. The workspace key for log analytics.
:type workspace_key: str
:param log_type: The log type to be used. Possible values include: "ContainerInsights",
"ContainerInstanceLogs".
:type log_type: str or ~azure.mgmt.containerinstance.models.LogAnalyticsLogType
:param metadata: Metadata for log analytics.
:type metadata: dict[str, str]
"""
_validation = {
'workspace_id': {'required': True},
'workspace_key': {'required': True},
}
_attribute_map = {
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
'workspace_key': {'key': 'workspaceKey', 'type': 'str'},
'log_type': {'key': 'logType', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': '{str}'},
}
def __init__(
self,
*,
workspace_id: str,
workspace_key: str,
log_type: Optional[Union[str, "LogAnalyticsLogType"]] = None,
metadata: Optional[Dict[str, str]] = None,
**kwargs
):
super(LogAnalytics, self).__init__(**kwargs)
self.workspace_id = workspace_id
self.workspace_key = workspace_key
self.log_type = log_type
self.metadata = metadata
class Logs(msrest.serialization.Model):
"""The logs.
:param content: The content of the log.
:type content: str
"""
_attribute_map = {
'content': {'key': 'content', 'type': 'str'},
}
def __init__(
self,
*,
content: Optional[str] = None,
**kwargs
):
super(Logs, self).__init__(**kwargs)
self.content = content
class Operation(msrest.serialization.Model):
"""An operation for Azure Container Instance service.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the operation.
:type name: str
:param display: Required. The display information of the operation.
:type display: ~azure.mgmt.containerinstance.models.OperationDisplay
:param properties: The additional properties.
:type properties: object
:param origin: The intended executor of the operation. Possible values include: "User",
"System".
:type origin: str or ~azure.mgmt.containerinstance.models.ContainerInstanceOperationsOrigin
"""
_validation = {
'name': {'required': True},
'display': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'properties': {'key': 'properties', 'type': 'object'},
'origin': {'key': 'origin', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
display: "OperationDisplay",
properties: Optional[object] = None,
origin: Optional[Union[str, "ContainerInstanceOperationsOrigin"]] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = name
self.display = display
self.properties = properties
self.origin = origin
class OperationDisplay(msrest.serialization.Model):
"""The display information of the operation.
:param provider: The name of the provider of the operation.
:type provider: str
:param resource: The name of the resource type of the operation.
:type resource: str
:param operation: The friendly name of the operation.
:type operation: str
:param description: The description of the operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
        super(OperationDisplay,
if self.TrailerAccessible is not None:
namespaceprefix_ = self.TrailerAccessible_nsprefix_ + ':' if (UseCapturedNS_ and self.TrailerAccessible_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sTrailerAccessible>%s</%sTrailerAccessible>%s' % (namespaceprefix_ , self.gds_format_boolean(self.TrailerAccessible, input_name='TrailerAccessible'), namespaceprefix_ , eol_))
if self.LoadingDockAvailable is not None:
namespaceprefix_ = self.LoadingDockAvailable_nsprefix_ + ':' if (UseCapturedNS_ and self.LoadingDockAvailable_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sLoadingDockAvailable>%s</%sLoadingDockAvailable>%s' % (namespaceprefix_ , self.gds_format_boolean(self.LoadingDockAvailable, input_name='LoadingDockAvailable'), namespaceprefix_ , eol_))
if self.ShipmentOnSkids is not None:
namespaceprefix_ = self.ShipmentOnSkids_nsprefix_ + ':' if (UseCapturedNS_ and self.ShipmentOnSkids_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sShipmentOnSkids>%s</%sShipmentOnSkids>%s' % (namespaceprefix_ , self.gds_format_boolean(self.ShipmentOnSkids, input_name='ShipmentOnSkids'), namespaceprefix_ , eol_))
if self.NumberOfSkids is not None:
namespaceprefix_ = self.NumberOfSkids_nsprefix_ + ':' if (UseCapturedNS_ and self.NumberOfSkids_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sNumberOfSkids>%s</%sNumberOfSkids>%s' % (namespaceprefix_ , self.gds_format_integer(self.NumberOfSkids, input_name='NumberOfSkids'), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'UntilTime':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'UntilTime')
value_ = self.gds_validate_string(value_, node, 'UntilTime')
self.UntilTime = value_
self.UntilTime_nsprefix_ = child_.prefix
elif nodeName_ == 'PickUpLocation':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'PickUpLocation')
value_ = self.gds_validate_string(value_, node, 'PickUpLocation')
self.PickUpLocation = value_
self.PickUpLocation_nsprefix_ = child_.prefix
elif nodeName_ == 'SupplyRequestCodes':
obj_ = SupplyRequestCodes.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.SupplyRequestCodes = obj_
obj_.original_tagname_ = 'SupplyRequestCodes'
elif nodeName_ == 'TrailerAccessible':
sval_ = child_.text
ival_ = self.gds_parse_boolean(sval_, node, 'TrailerAccessible')
ival_ = self.gds_validate_boolean(ival_, node, 'TrailerAccessible')
self.TrailerAccessible = ival_
self.TrailerAccessible_nsprefix_ = child_.prefix
elif nodeName_ == 'LoadingDockAvailable':
sval_ = child_.text
ival_ = self.gds_parse_boolean(sval_, node, 'LoadingDockAvailable')
ival_ = self.gds_validate_boolean(ival_, node, 'LoadingDockAvailable')
self.LoadingDockAvailable = ival_
self.LoadingDockAvailable_nsprefix_ = child_.prefix
elif nodeName_ == 'ShipmentOnSkids':
sval_ = child_.text
ival_ = self.gds_parse_boolean(sval_, node, 'ShipmentOnSkids')
ival_ = self.gds_validate_boolean(ival_, node, 'ShipmentOnSkids')
self.ShipmentOnSkids = ival_
self.ShipmentOnSkids_nsprefix_ = child_.prefix
elif nodeName_ == 'NumberOfSkids' and child_.text:
sval_ = child_.text
ival_ = self.gds_parse_integer(sval_, node, 'NumberOfSkids')
ival_ = self.gds_validate_integer(ival_, node, 'NumberOfSkids')
self.NumberOfSkids = ival_
self.NumberOfSkids_nsprefix_ = child_.prefix
# end class ModifyPickupInstruction
class ModifyPickUpResponse(Response):
"""ModifyPickUpResponse"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = Response
def __init__(self, PickUpConfirmationNumber=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
super(ModifyPickUpResponse, self).__init__( **kwargs_)
self.PickUpConfirmationNumber = PickUpConfirmationNumber
self.PickUpConfirmationNumber_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ModifyPickUpResponse)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ModifyPickUpResponse.subclass:
return ModifyPickUpResponse.subclass(*args_, **kwargs_)
else:
return ModifyPickUpResponse(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_PickUpConfirmationNumber(self):
return self.PickUpConfirmationNumber
def set_PickUpConfirmationNumber(self, PickUpConfirmationNumber):
self.PickUpConfirmationNumber = PickUpConfirmationNumber
def hasContent_(self):
if (
self.PickUpConfirmationNumber is not None or
super(ModifyPickUpResponse, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ModifyPickUpResponse', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ModifyPickUpResponse')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'ModifyPickUpResponse':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ModifyPickUpResponse')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ModifyPickUpResponse', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ModifyPickUpResponse'):
super(ModifyPickUpResponse, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ModifyPickUpResponse')
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ModifyPickUpResponse', fromsubclass_=False, pretty_print=True):
super(ModifyPickUpResponse, self).exportChildren(outfile, level, namespaceprefix_, namespacedef_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.PickUpConfirmationNumber is not None:
namespaceprefix_ = self.PickUpConfirmationNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.PickUpConfirmationNumber_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sPickUpConfirmationNumber>%s</%sPickUpConfirmationNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.PickUpConfirmationNumber), input_name='PickUpConfirmationNumber')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(ModifyPickUpResponse, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'PickUpConfirmationNumber':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'PickUpConfirmationNumber')
value_ = self.gds_validate_string(value_, node, 'PickUpConfirmationNumber')
self.PickUpConfirmationNumber = value_
self.PickUpConfirmationNumber_nsprefix_ = child_.prefix
super(ModifyPickUpResponse, self).buildChildren(child_, node, nodeName_, True)
# end class ModifyPickUpResponse
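# Minimal parsing sketch for the generated class above (assumes lxml is
# available, as generateDS modules normally use it; the XML string below is an
# illustrative payload, not taken from any carrier documentation).
def _example_parse_modify_pickup_response():
    from lxml import etree as etree_
    payload = (b'<ModifyPickUpResponse>'
               b'<PickUpConfirmationNumber>ABC123</PickUpConfirmationNumber>'
               b'</ModifyPickUpResponse>')
    obj = ModifyPickUpResponse.factory()
    obj.build(etree_.fromstring(payload))
    return obj.get_PickUpConfirmationNumber()  # 'ABC123'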
class SchedulePickUpResponse(Response):
"""SchedulePickUpRespone"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = Response
def __init__(self, PickUpConfirmationNumber=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
super(SchedulePickUpResponse, self).__init__( **kwargs_)
self.PickUpConfirmationNumber = PickUpConfirmationNumber
self.PickUpConfirmationNumber_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SchedulePickUpResponse)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SchedulePickUpResponse.subclass:
return SchedulePickUpResponse.subclass(*args_, **kwargs_)
else:
return SchedulePickUpResponse(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_PickUpConfirmationNumber(self):
return self.PickUpConfirmationNumber
def set_PickUpConfirmationNumber(self, PickUpConfirmationNumber):
self.PickUpConfirmationNumber = PickUpConfirmationNumber
def hasContent_(self):
if (
self.PickUpConfirmationNumber is not None or
super(SchedulePickUpResponse, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='SchedulePickUpResponse', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SchedulePickUpResponse')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'SchedulePickUpResponse':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SchedulePickUpResponse')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='SchedulePickUpResponse', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='SchedulePickUpResponse'):
super(SchedulePickUpResponse, self).exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='SchedulePickUpResponse')
def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='SchedulePickUpResponse', fromsubclass_=False, pretty_print=True):
super(SchedulePickUpResponse, self).exportChildren(outfile, level, namespaceprefix_, namespacedef_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.PickUpConfirmationNumber is not None:
namespaceprefix_ = self.PickUpConfirmationNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.PickUpConfirmationNumber_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sPickUpConfirmationNumber>%s</%sPickUpConfirmationNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.PickUpConfirmationNumber), input_name='PickUpConfirmationNumber')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def buildAttributes(self, node, attrs, already_processed):
super(SchedulePickUpResponse, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'PickUpConfirmationNumber':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'PickUpConfirmationNumber')
value_ = self.gds_validate_string(value_, node, 'PickUpConfirmationNumber')
self.PickUpConfirmationNumber = value_
self.PickUpConfirmationNumber_nsprefix_ = child_.prefix
super(SchedulePickUpResponse, self).buildChildren(child_, node, nodeName_, True)
# end class SchedulePickUpResponse
class SchedulePickUpRequest(Request):
"""SchedulePickUpRequest"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = Request
def __init__(self, BillingAccountNumber=None, PartnerID=None, PickupInstruction=None, Address=None, ShipmentSummary=None, NotificationEmails=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
super(SchedulePickUpRequest, self).__init__( **kwargs_)
self.BillingAccountNumber = BillingAccountNumber
self.BillingAccountNumber_nsprefix_ = None
self.PartnerID = PartnerID
self.PartnerID_nsprefix_ = None
self.PickupInstruction = PickupInstruction
self.PickupInstruction_nsprefix_ = None
self.Address = Address
self.Address_nsprefix_ = None
self.ShipmentSummary = ShipmentSummary
self.ShipmentSummary_nsprefix_ = None
self.NotificationEmails = NotificationEmails
self.NotificationEmails_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SchedulePickUpRequest)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SchedulePickUpRequest.subclass:
return SchedulePickUpRequest.subclass(*args_, **kwargs_)
else:
return SchedulePickUpRequest(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_BillingAccountNumber(self):
return self.BillingAccountNumber
def set_BillingAccountNumber(self, BillingAccountNumber):
self.BillingAccountNumber = BillingAccountNumber
def get_PartnerID(self):
return self.PartnerID
def set_PartnerID(self, PartnerID):
self.PartnerID = PartnerID
def get_PickupInstruction(self):
return self.PickupInstruction
def set_PickupInstruction(self, PickupInstruction):
self.PickupInstruction = PickupInstruction
def get_Address(self):
return self.Address
def set_Address(self, Address):
self.Address = Address
def get_ShipmentSummary(self):
return self.ShipmentSummary
def set_ShipmentSummary(self, ShipmentSummary):
self.ShipmentSummary = ShipmentSummary
def get_NotificationEmails(self):
return self.NotificationEmails
def set_NotificationEmails(self, NotificationEmails):
self.NotificationEmails = NotificationEmails
def hasContent_(self):
if (
self.BillingAccountNumber is not None or
self.PartnerID is not None or
self.PickupInstruction is not None or
self.Address is not None or
self.ShipmentSummary is not None or
self.NotificationEmails is not None or
super(SchedulePickUpRequest, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='SchedulePickUpRequest', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SchedulePickUpRequest')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
            eol_ = ''
import asyncio
import sys
import numpy as np
import pandas as pd
import serial
import serial.tools.list_ports
import bokeh.plotting
import bokeh.io
import bokeh.layouts
import bokeh.driving
from bokeh.server.server import Server
from bokeh.application import Application
from bokeh.application.handlers.function import FunctionHandler
from . import boards
from . import callbacks
from . import comms
from . import parsers
# Allowed values of selector parameters
allowed_baudrates = (
300,
1200,
2400,
4800,
9600,
19200,
38400,
57600,
74880,
115200,
230400,
250000,
500000,
1000000,
2000000,
)
allowed_time_columns = ("none", 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
allowed_delimiters = (
"comma",
"space",
"tab",
"whitespace",
"vertical line",
"semicolon",
"asterisk",
"slash",
)
allowed_timeunits = ("none", "µs", "ms", "s", "min", "hr")
allowed_glyphs = ("lines", "dots", "both")
allowed_rollover = (100, 200, 400, 800, 1600, 3200)
max_max_cols = 10
def _check_baudrate(baudrate):
if baudrate not in allowed_baudrates:
err_str = "Inputted baudrate {baudrate} is not allowed. Allowed baudrates (in units of baud = bits per second) are: \n"
for br in allowed_baudrates:
err_str += f" {br}\n"
raise RuntimeError(err_str)
def _check_maxcols(maxcols):
if maxcols < 1 or maxcols > max_max_cols:
raise RuntimeError(
f"Inputted maxcols {maxcols} is invalid. maxcols must be between 1 and {max_max_cols}."
)
def _check_delimiter(delimiter):
if delimiter not in allowed_delimiters:
err_str = f'Inputted delimiter "{delimiter}" is not allowed. Allowed delimiters are: \n'
for dl in allowed_delimiters:
err_str += f" {dl}\n"
raise RuntimeError(err_str)
def _check_timecolumn(timecolumn, maxcols):
if timecolumn == "none":
return None
try:
timecolumn = int(timecolumn)
except:
raise RuntimeError(
"Inputted timecolumn {timecolumn} is invalid. timecolumn must be an integer."
)
if timecolumn < 0 or timecolumn >= maxcols:
raise RuntimeError(
f"Inputted timecolumn {timecolumn} is invalid. Must have 0 ≤ timecolumn < maxcols. You have selected maxcols = {maxcols}."
)
def _check_timeunits(timeunits):
if timeunits not in allowed_timeunits:
err_str = f'Inputted timeunits "{timeunits}" is not allowed. Allowed time units are: \n'
for tu in allowed_timeunits:
err_str += f" {tu}\n"
raise RuntimeError(err_str)
def _check_rollover(rollover):
if rollover not in allowed_rollover:
err_str = f'Inputted rollover "{rollover}" is not allowed. Allowed rollover values are: \n'
for ro in allowed_rollover:
err_str += f" {ro}\n"
raise RuntimeError(err_str)
def _check_inputtype(inputtype):
if inputtype not in ["ascii", "bytes"]:
raise RuntimeError(
            f'Inputted input type "{inputtype}" is not allowed. Must be either "ascii" or "bytes".'
)
def _check_glyph(glyph):
if glyph not in allowed_glyphs:
err_str = (
            f'Inputted glyph "{glyph}" is not allowed. Allowed glyph choices are: \n'
)
for g in allowed_glyphs:
err_str += f" {g}\n"
raise RuntimeError(err_str)
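# Minimal sketch of how the validators above fit together (the values passed
# here are arbitrary examples, not dashboard defaults).
def _example_validate_settings():
    _check_baudrate(115200)
    _check_maxcols(6)
    _check_delimiter("comma")
    _check_timecolumn("none", 6)
    _check_timeunits("ms")
    _check_rollover(400)
    _check_inputtype("ascii")
    _check_glyph("lines")
    # Any invalid value raises RuntimeError listing the allowed choices, e.g.
    # _check_baudrate(123).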
class SerialConnection(object):
"""Class containing details about a serial connection.
Attributes
----------
ser : serial.Serial instance
Serial connection to a device.
port : str
Name of the port of the connection. This is not the device name,
but a descriptive name.
baudrate : int
Baud rate of the connection
bytesize : int
Number of data bits. Possible values: serial.FIVEBITS,
serial.SIXBITS, serial.SEVENBITS, serial.EIGHTBITS
parity : int
Enable parity checking. Possible values: serial.PARITY_NONE,
serial.PARITY_EVEN, serial.PARITY_ODD, serial.PARITY_MARK,
serial.PARITY_SPACE.
stopbits : int
Number of stop bits. Possible values: serial.STOPBITS_ONE,
serial.STOPBITS_ONE_POINT_FIVE, serial.STOPBITS_TWO
ports : list
List of ports that are available. Each entry is a
serial.tools.list_ports_common.ListPortInfo instance.
available_ports : dict
A dictionary with the descriptive port names as keys and strings
with the name of the ports such that they can be opened with
`serial.Serial()` as values.
reverse_available_ports : dict
A dictionary with the descriptive port names as values and
strings with the name of the ports such that they can be opened
with `serial.Serial()` as keys.
port_status : str
The status of the port. Either "disconnected", "establishing",
"connected", or "failed".
daq_task : async task
Task for data acquisition.
daq_delay : float
Approximate time, in milliseconds, between data acquisitions.
port_search_task : async task
Task for checking for available ports.
port_search_delay : float
Approximate time, in milliseconds, between checks of available
ports.
kill_app : bool
If True, kill the connect/app.
"""
def __init__(
self,
port=None,
baudrate=115200,
daq_delay=20,
port_search_delay=1000,
bytesize=8,
parity="N",
stopbits=1,
):
"""Create an instance storing information about a serial
connection.
Parameters
----------
port : str, default None
If given, name of the port to connect to. If None, no device
is connected.
baudrate : int
Baud rate of the connection
daq_delay : float
Approximate time, in milliseconds, between data acquisitions.
bytesize : int
Number of data bits. Possible values: serial.FIVEBITS,
serial.SIXBITS, serial.SEVENBITS, serial.EIGHTBITS
parity : int
Enable parity checking. Possible values: serial.PARITY_NONE,
serial.PARITY_EVEN, serial.PARITY_ODD, serial.PARITY_MARK,
serial.PARITY_SPACE.
stopbits : int
Number of stop bits. Possible values: serial.STOPBITS_ONE,
serial.STOPBITS_ONE_POINT_FIVE, serial.STOPBITS_TWO
"""
self.ser = None
self.baudrate = baudrate
self.bytesize = bytesize
self.parity = parity
self.stopbits = stopbits
self.ports = []
self.available_ports = dict()
self.reverse_available_ports = dict()
self.port_status = "disconnected"
self.daq_task = None
self.daq_delay = daq_delay
self.port_search_task = None
self.port_search_delay = port_search_delay
self.kill_app = False
# Attempt to connect to a port if provided
if port is None:
self.port = port
else:
self.connect(port)
def portsearch(self, on_change=True):
"""Search for ports and update port information.
Parameters
----------
on_change : bool, default True
If True, only update `ports`, `available_ports`, and
`reverse_available_ports` attributes if there was a change
in the available ports.
"""
ports = serial.tools.list_ports.comports()
if not on_change or ports != self.ports:
self.ports = [port for port in ports]
options = [comms.device_name(port_name) for port_name in ports]
# Dictionary of port names and name in port selector
self.available_ports = {
port_name.device: option_name
for port_name, option_name in zip(ports, options)
}
# Reverse lookup for value in port selector to port name
self.reverse_available_ports = {
option_name: port_name.device
for port_name, option_name in zip(ports, options)
}
def connect(self, port, allow_disconnect=False, handshake=True):
"""Connect to a port.
Parameters
----------
port : str, int, or serial.tools.list_ports_common.ListPortInfo instance
Port to which to connect. If an int, connect to port given
by self.ports[port].
        allow_disconnect : bool, default False
If already connected to a port, allow disconnection. If
False, raise an exception if already connected.
handshake : bool, default True
If True, "handshake" with the connected device by closing,
reopening connection waiting a second, and then clearing
the input buffer.
"""
# Disconnect, if necessary
if self.ser is not None and self.ser.is_open:
if allow_disconnect:
try:
self.ser.close()
self.ser = None
except:
pass
self.port_status = "disconnected"
            else:
raise RuntimeError(f"Already connected to port {self.port}.")
# Match requested port with known port
if port in self.ports:
port = port.device
elif type(port) == int and port < len(self.ports):
port = self.ports[port].device
elif port in self.reverse_available_ports:
port = self.reverse_available_ports[port]
elif port not in self.available_ports:
# A port search hasn't been done that includes port being asked for
            self.portsearch()
            if port in self.reverse_available_ports:
                port = self.reverse_available_ports[port]
        # Identify the port we're trying to connect to
self.port = port
# Make the connection
try:
self.ser = serial.Serial(
port=self.port,
baudrate=self.baudrate,
bytesize=self.bytesize,
parity=self.parity,
stopbits=self.stopbits,
)
self.port_status = "connected"
except:
self.ser = None
self.port_status = "failed"
raise RuntimeError(f"Connection to port {port} failed.")
# Handshake
if handshake:
comms.handshake_board(self.ser)
def disconnect(self):
"""Disconnect port."""
try:
self.ser.close()
except:
pass
self.ser = None
self.port_status = "disconnected"
class Controls(object):
def __init__(
self,
baudrate=115200,
max_cols=max_max_cols,
delimiter="comma",
columnlabels="",
timecolumn="none",
timeunits="ms",
rollover=400,
glyph="lines",
inputtype="ascii",
fileprefix="_tmp",
):
"""Create all of the controls for the serial dashboard."""
self.plot_stream = bokeh.models.Toggle(
label="stream", button_type="success", width=100
)
self.plot_clear = bokeh.models.Button(
label="clear", button_type="warning", width=100
)
self.monitor_stream = bokeh.models.Toggle(
label="stream", button_type="success", width=100
)
self.monitor_clear = bokeh.models.Button(
label="clear", button_type="warning", width=100
)
self.plot_save = bokeh.models.Button(
label="save", button_type="primary", width=100
)
self.plot_file_input = bokeh.models.TextAreaInput(
title="file name", value=f"{fileprefix}.csv", width=150, visible=False
)
self.plot_write = bokeh.models.Button(
label="save", button_type="primary", width=50, visible=False
)
self.plot_save_notice = bokeh.models.Div(
text='<p style="font-size: 8pt;">No data saved.</p>', width=100
)
self.glyph = bokeh.models.RadioGroup(
labels=list(allowed_glyphs), active=allowed_glyphs.index(glyph), width=50
)
self.monitor_save = bokeh.models.Button(
label="save", button_type="primary", width=100
)
self.monitor_file_input = bokeh.models.TextAreaInput(
title="file name", value=f"{fileprefix}.txt", width=150, visible=False
)
self.monitor_write = bokeh.models.Button(
label="save", button_type="primary", width=50, visible=False
)
self.monitor_save_notice = bokeh.models.Div(
text='<p style="font-size: 8pt;">No data saved.</p>', width=100
)
self.delimiter = bokeh.models.Select(
title="delimiter",
value=delimiter,
options=list(allowed_delimiters),
width=100,
)
self.rollover = bokeh.models.Select(
title="plot rollover",
value=str(rollover),
options=[str(ro) for ro in allowed_rollover],
width=100,
)
self.max_cols = bokeh.models.Spinner(
title="maximum number of columns",
value=max_cols,
low=1,
high=max_max_cols,
step=1,
width=100,
)
self.col_labels = bokeh.models.TextInput(
title="column labels", value=columnlabels, width=200
)
# Set up port selector
self.port = bokeh.models.Select(title="port", options=[], value="", width=200)
# Set up baud rate with Arduino defaults
self.baudrate = bokeh.models.Select(
title="baud rate",
options=[str(br) for br in allowed_baudrates],
value=str(baudrate),
width=100,
)
self.port_connect = bokeh.models.Button(
label="connect", button_type="success", width=100
)
self.port_disconnect = bokeh.models.Button(
label="disconnect", button_type="danger", | |
import pymc3 as pm
import numpy as np
import scipy as sp
import theano.tensor as tt
import patsy as p
import utilities
def cross_validate_rwfmm(rwfmm_args,rwfmm_kwargs,param_for_tuning,tuning_set,criterion='LOO'):
model_dict = {}
trace_list = []
for param_val in tuning_set:
modified_kwargs = rwfmm_kwargs.copy()
modified_kwargs[param_for_tuning] = param_val
trace,model = rwfmm(*rwfmm_args,**modified_kwargs)
model_dict[model] = trace
rankings = pm.stats.compare(model_dict,ic = criterion)
return rankings, model_dict
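# Minimal sketch of tuning a single rwfmm keyword over a grid with
# cross_validate_rwfmm (the array shapes, the tuned parameter and its grid are
# illustrative assumptions only).
def _example_cross_validate_rwfmm():
    S, V, T, F, C = 10, 2, 24, 1, 3
    functional_data = np.random.randn(S, V, T, F)
    static_data = np.random.randn(S, V, C)
    Y = np.random.randn(S, V, 1)            # assumed response shape
    rankings, models = cross_validate_rwfmm(
        (functional_data, static_data, Y),
        {'sampler_kwargs': {'init': 'adapt_diag', 'chains': 1, 'tune': 100, 'draws': 100}},
        'func_coef_sd_hypersd',
        [0.05, 0.1, 0.5])
    return rankings, models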
def rwfmm(functional_data,static_data,Y,
func_coef_sd = 'prior', method='nuts',
robust=False, func_coef_sd_hypersd = 0.1,
coefficient_prior='flat', include_random_effect = True,
variable_func_scale = True, time_rescale_func = False,
sampler_kwargs = {'init':'adapt_diag','chains':1,'tune':500,'draws':500},
return_model_only = False, n_spline_knots = 20,
func_coef_type = 'random_walk', spline_degree = 3,spline_coef_sd = 'prior',
spline_coef_hyper_sd = 2.,
spline_coef_prior = 'random_walk',spline_rw_sd = 1.,average_every_n = 1,
spline_rw_hyper_sd = 1.,poly_order=4):
'''
Fits a functional mixed model with a random-walk model of
the functional coefficient. A range of different priors is available for
the model coefficients.
Parameters
----------
functional_data : 4D Numpy array
Data inputs for functional covariates with expected shape (S,V,T,F)
where S denotes the number of subjects, V denotes the number of
visits or repeated observations for each subject, T denotes the
dimension of the functional data (i.e. number of timesteps)
and F denotes the number of functional coefficients.
static_data: 3D Numpy array
Data inputs for static (i.e. non-functional) covariates which are
constant for each subject/visits combination.
This array is expected to have the shape (S,V,C) where
C denotes the number of static covariates.
Y: 3D Numpy array
Responses for the functional regression. This array is expected to
        have the same dimensions as static_data.
    tune, draws, chains: int
        Number of MCMC tuning steps, post-tuning draws, and chains. These are
        supplied through the sampler_kwargs dictionary rather than as separate
        arguments to this function.
func_coef_sd: float or string
The standard deviation of the Gaussian random walk for all
functional coefficients. If set to "prior", then this quantity
will also be treated as a parameter that needs to be estimated.
method: string
        Designates the method to be used to fit the model.
This must be one of "nuts", "mh" or one of the approximate inference
methods at https://docs.pymc.io/api/inference.html#variational.
n_iter_approx: int
Number of optimization iterations to be used if the model fitting
method is an approximate inference method.
robust: bool
Determines whether a normal error model or a robust Student-T error
model is assumed for the residuals.
func_coef_sd_hypersd: float
If func_coef_sd is set to "prior", then this parameter sets the
standard deviation of the half-normal distribution over the
functional coefficient standard deviation (func_coef_sd). Note that
in this case, each functional coefficient gets its own standard
deviation drawn from the same prior defined by this parameter.
coefficient_prior: string
Determines the prior placed on the static covariate coefficients as
well as the mean (a.k.a. the level) of the functional coefficient.
The options are "flat","normal","horseshoe","finnish_horseshoe".
include_random_effect: bool
Determines whether or not a per-subject random intercept is included.
variable_func_scale : bool
        Determines whether or not to allow the functional coefficients to be
multiplied by a positive number. This can lead to identifiability issues
if a weak prior is specified on the functional coefficient evolution
variance.
time_rescale_func : bool
If true, divides the functional coefficient by T. This can help make
the coefficient more interpretable.
sampler_kwargs: dict
Any additional arguments to be passed to pm.sample().
return_model_only: bool
If true, returns only the model object without sampling. This can be
helpful for debugging.
func_coef_type : string
One of 'constant','random_walk', 'bspline_recursive', 'natural_spline',
'linear','bspline_design' or 'polynomial'.
This determines how the functional coefficient will be parameterized. If it
is 'random_walk', then the coefficient will be computed as the cumulative
sum of many small normally-distributed jumps whose standard deviation
is controlled by 'func_coef_sd'. Alternatively, if one of the bspline
options is used, then the functional coefficient will be a bspline. The option
'bspline_recursive' builds the coefficient using the de Boor algorithm
while the options 'bspline_design' and 'natural_spline' build a design
matrix using patsy's functionality and then estimates the coefficients
linking that matrix to the functional coefficients. Using 'polynomial'
specifies the functional coefficient as a polynomial of order 'poly_order'.
'linear' makes the functional coefficient a linear function of the function
domain.
poly_order : int
The degree of the polynomial used if the functional coefficient type is
set to 'polynomial'.
n_spline_knots : int
In the event that the functional coefficient is one of the bspline choices,
then this controls how many knots or breakpoints the spline has. In general,
higher numbers for this value are required for higher spline orders.
spline_degree : int
The order of the spline if the functional coefficient is parameterized as a
bspline. This is also the order of the polynomial for each spline section
plus 1. Set this equal to 4 for cubic polynomial approximations in the spline.
spline_coef_sd : float
The standard deviation of the normal prior on the spline coefficients.
spline_coef_prior : string
One of 'normal', 'flat', or 'random_walk'. This controls how the
bspline coefficients are smoothed.
spline_rw_sd : string or float
Either 'prior' or a float. This controls how much the spline coefficients
are allowed to jump when using a random walk for the spline coefficient
prior.
spline_rw_hyper_sd : float
If 'spline_rw_sd' is set to 'prior', this is the standard deviation
of the half-Normal prior on the spline random walk jump standard
deviation.
average_every_n : int
This is used to average every n measurements of the functional data
together. For example, if the functional data corresponds to 96 hourly
timesteps' worth of data, setting this to 4 would take the 24 hour average
and reduce the size of T from 96 to 24. The default setting of 1 leaves
the data unchanged.
Returns
-------
trace: pymc3 Trace
Samples produced either via MCMC or approximate inference during
fitting.
model: pymc3 Model
The model object describing the RWFMM.
'''
with pm.Model() as model:
S,V,T,F = functional_data.shape
_,_,C = static_data.shape
#functional_data = np.mean(functional_data.reshape(-1, average_every_n), axis=1)
# We want to make sure the two data arrays agree in the number of
# subjects (S) and visits (V).
assert static_data.shape[0:2] == functional_data.shape[0:2]
# Total number of functional and static coefficients.
# This does not include the random-walk jumps.
n_covariates = F + C
if include_random_effect:
random_effect_mean = pm.Flat('random_effect_mean')
random_effect_sd = pm.HalfCauchy('random_effect_sd',beta = 1.)
random_effect_unscaled = pm.Normal('random_effect_unscaled',shape = [S,1])
random_effect = pm.Deterministic('random_effect',random_effect_unscaled * random_effect_sd + random_effect_mean)
else:
random_effect = 0.
if coefficient_prior == 'flat':
coef = pm.Flat('coef',shape = n_covariates)
elif coefficient_prior == 'normal':
coef_sd = pm.HalfCauchy('coef_sd',beta = 1.)
coef = pm.Normal('coef',sd = coef_sd,shape = [n_covariates] )
elif coefficient_prior == 'cauchy':
coef_sd = pm.HalfCauchy('coef_sd',beta = 1.0)
coef = pm.Cauchy('coef',alpha = 0., beta = coef_sd,shape = [n_covariates] )
elif coefficient_prior == 'horseshoe':
loc_shrink = pm.HalfCauchy('loc_shrink',beta = 1,shape = [n_covariates])
glob_shrink= pm.HalfCauchy('glob_shrink',beta = 1)
coef = pm.Normal('coef',sd = (loc_shrink * glob_shrink),shape = [n_covariates])
# Implemented per Piironnen and Vehtari '18
elif coefficient_prior == 'finnish_horseshoe':
loc_shrink = pm.HalfCauchy('loc_shrink',beta = 1,shape = [n_covariates])
glob_shrink = pm.HalfCauchy('glob_shrink',beta = 1)
# In order to get some of the values within the prior calculations,
# we need to know the variance of the predictors.
static_var = np.var(static_data,axis = (0,1))
func_var = np.var(functional_data,axis = (0,1,2))
variances = np.concatenate([static_var,func_var])
nu_c = pm.Gamma('nu_c',alpha = 2.0, beta = 0.1)
c = pm.InverseGamma('c',alpha = nu_c/2, beta = nu_c * variances / 2,shape = [n_covariates])
regularized_loc_shrink = c * loc_shrink**2 / (c + glob_shrink**2 * loc_shrink**2)
coef = pm.Normal('coef',sd = (regularized_loc_shrink * glob_shrink**2)**0.5,shape = [n_covariates])
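            # The two lines above implement the regularized ("Finnish") horseshoe of
            # Piironen & Vehtari (2017): lambda_tilde^2 = c * lambda^2 / (c + tau^2 * lambda^2),
            # so coefficients whose unregularized scale tau*lambda would blow up are
            # softly truncated at scale ~sqrt(c) instead of escaping shrinkage entirely.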
if func_coef_type == 'constant':
func_coef = pm.Deterministic('func_coef',tt.zeros([T,F]) + coef[C:])
elif func_coef_type == 'random_walk':
if func_coef_sd == 'prior':
func_coef_sd = pm.HalfNormal('func_coef_sd',sd = func_coef_sd_hypersd,shape=F)
# The 'jumps' are the small deviations about the mean of the functional
# coefficient.
if variable_func_scale:
log_scale = pm.Normal('log_scale',shape = F)
else:
log_scale = 0.0
jumps = pm.Normal('jumps',sd = func_coef_sd,shape=(T,F))
random_walks = tt.cumsum(jumps,axis=0) * tt.exp(log_scale) + coef[C:]
func_coef = pm.Deterministic('func_coef',random_walks)
elif (func_coef_type == 'natural_spline' or func_coef_type == 'bspline_design'):
x = np.arange(T)
# The -1 in the design matrix creation is to make sure that there
# is no constant term which would be made superfluous by 'coef'
        # ef loss: keep per-class feature std below ts; total std > ts better
ts = config.ef_ts
for i in range(config.num_label):
mask = (lab_labels == i).nonzero()
mask_num = mask.nelement()
if mask_num <= 1:
continue
part_lab_feat = lab_feat[mask[:,0]]
plf_std = torch.std(part_lab_feat, 0)
ef_loss += torch.mean(torch.max(plf_std - ts,
Variable(torch.zeros(plf_std.size())).cuda()))
ef_loss += ts
# total std
ef_std = torch.std(unl_feat, 0)
ef_loss += torch.mean(torch.max(ts - ef_std, Variable(torch.zeros(ef_std.size())).cuda()))
ef_loss *= config.ef_weight
if config.gf_weight > 0 or config.gn_weight > 0 or config.gl_weight > 0:
gen_feat = self.gen[0](gen_images, encode=True)
# gen lab feat loss: mean(En(xl)) - mean(En(De(En(xl))))
if config.gl_weight > 0:
for i in range(config.num_label):
gen_unl_feat = torch.mean(gen_feat[range(i*img_per_gen, (i+1)*img_per_gen)], 0)
gl_loss += nn.KLDivLoss()(gen_unl_feat, self.lab_feat_cen[i])
gl_loss *= config.gl_weight
#
# diff_ul = torch.abs(torch.mean(lab_feat, 0) - torch.mean(unl_feat, 0))
# gl_ts = torch.mean(diff_ul) * 2
# for i in range(config.num_label):
# mask = (lab_labels == i).nonzero()
# mask_num = mask.nelement()
# if mask_num < 1:
# continue
# # part_lab_images = lab_images[mask[:,0]]
# # gen_lab_feat = self.gen[0](self.gen[i].decode(
# # self.gen[0](part_lab_images, skip_encode=True)), encode=True)
# mean_mask_feat = lab_feat[mask[:,0]]
# if mask_num != 1:
# mean_mask_feat = torch.mean(mean_mask_feat, 0)
# # gen_lab_feat = torch.mean(gen_lab_feat, 0)
# # gen_unl_feat = self.gen[i].decode(self.gen[0](unl_images, skip_encode=True))
# gen_unl_feat = torch.mean(gen_feat[range(i*img_per_gen, (i+1)*img_per_gen)], 0)
# diff = torch.abs(mean_mask_feat - gen_unl_feat)
# gl_loss += mask_num * \
# torch.mean(torch.max(diff - gl_ts,
# Variable(torch.zeros(diff.size())).cuda()))
# gl_loss /= lab_feat.size(0)
# gl_loss *= config.gl_weight
# Feature matching loss: En(xu) - En(De(En(xu)))
if config.gf_weight > 0:
fm_loss += nn.KLDivLoss()(torch.mean(gen_feat, 0), torch.mean(unl_feat, 0)) + \
torch.mean(torch.abs(torch.std(gen_feat, 0) - torch.std(unl_feat, 0)))
# fm_loss = torch.mean(torch.abs(gen_feat - unl_feat[:gen_feat.size(0)]))
fm_loss *= config.gf_weight
if config.gc_weight > 0:
key_ = "layer_{}".format(model.UNetWithResnetEncoder.DEPTH - 1)
feat_size = self.gen[0](unl_images, skip_encode=True)[key_][:img_per_gen*config.num_label].size()
rand_codes = Variable(torch.rand(feat_size).cuda()) # .unsqueeze(-1).unsqueeze(-1)
gen_rand_feat = self.gen[0](
self.get_gens_img(unl_images, codes=rand_codes), encode=True)
rand_codes = rand_codes.mean(3, True).mean(2, True) # .repeat(config.num_label, 1)
gc_loss = nn.MSELoss()(gen_rand_feat, rand_codes)
gc_loss *= config.gc_weight
# reconstruction loss
if config.gr_weight > 0:
unl_tmp = unl_images[:img_per_gen].repeat(config.num_label, 1, 1, 1)
# blur
# get nn.L1Loss;F.MSELoss;nn.KLDivLoss
if self.config.rssim:
gr_loss = -losses.SSIM()(gen_images, unl_tmp)
else:
gr_loss = nn.MSELoss()(gen_images, unl_tmp)
gr_loss *= config.gr_weight
# could impact the gr
# gen neighbor loss: same => closer; diff => farther
if config.gn_weight > 0:
pos, neg = 0, 0
diff = None
for j in range(config.num_label-1):
gen_feat_j = gen_feat[range(j*img_per_gen, (j+1)*img_per_gen)]
for i in range(j+1, config.num_label):
# if i <= j:
# continue
diff_ = gen_feat_j - \
gen_feat[range(i*img_per_gen, (i+1)*img_per_gen)]
diff_ = torch.mean(torch.abs(diff_), 0, True)
if diff is None:
diff = diff_
else:
diff = torch.cat((diff, diff_), dim=0)
mean_gen_feat_j = torch.mean(gen_feat_j, 0, True).repeat(img_per_gen, 1).detach()
pos += nn.KLDivLoss()(gen_feat_j, mean_gen_feat_j)
gen_feat_j = gen_feat[range((config.num_label-1)*img_per_gen, (config.num_label)*img_per_gen)]
mean_gen_feat_j = torch.mean(gen_feat_j, 0, True).repeat(img_per_gen, 1).detach()
pos += nn.KLDivLoss()(gen_feat_j, mean_gen_feat_j)
# pos /= config.num_label
# diff = torch.mean(diff, 0, True)
neg = torch.mean(torch.max(config.nei_margin - diff, Variable(torch.zeros(diff.size()).cuda())))
# neg /= (config.num_label - 1) * gen_feat.size(1) # * config.num_label
gn_loss = pos + neg # (torch.mean(torch.cat((pos, neg), 0)))
gn_loss *= self.consistency_weight * config.gn_weight
# neighbor loss
if config.nei_coef > 0:
tot_feat = torch.cat((lab_feat, unl_feat), dim=0)
inds = torch.randperm(tot_feat.size(0)).cuda()
# pdb.set_trace()
# topk do
if config.nei_top>1:
_, ema_lbl = torch.topk(ema_unl_logits,config.nei_top,dim=1)
ema_lbl = torch.zeros(ema_unl_logits.size()).cuda().scatter_(1,ema_lbl.data.long(),1)
lab_labels_tmp = torch.zeros(lab_logits.size()).cuda().scatter_(1,lab_labels.data.long().unsqueeze(1),1)
ema_lbl = Variable(torch.cat((lab_labels_tmp, ema_lbl), dim=0))
ema_lbl = ema_lbl[inds]
nei_mask = ema_lbl[:config.train_batch_size] * ema_lbl[config.train_batch_size:]
nei_mask = torch.sum(nei_mask, 1).float() / config.nei_top
else: # top1 do
_, ema_lbl = torch.max(ema_unl_logits, 1)
ema_lbl = torch.cat((lab_labels, ema_lbl), dim=0)
ema_lbl = ema_lbl[inds]
nei_mask = torch.eq(ema_lbl[:config.train_batch_size], ema_lbl[config.train_batch_size:]).float() # nei or not
tot_feat = tot_feat[inds]
diff = tot_feat[:config.train_batch_size] - tot_feat[config.train_batch_size:]
diff = torch.sqrt(torch.mean(diff ** 2, 1))
pos = nei_mask * diff
neg = (1 - nei_mask) * (torch.max(config.nei_margin - diff, Variable(torch.zeros(diff.size())).cuda()) ** 2)
nei_loss = self.consistency_weight * config.nei_coef * \
(torch.mean(pos + neg))
# tv losss
if config.tv_weight > 0:
(_, c_x, h_x, w_x) = gen_images.size()
count_h = c_x * (h_x - 1) * w_x
count_w = c_x * h_x * (w_x - 1)
h_tv = torch.pow((gen_images[:, :, 1:, :] - gen_images[:, :, :-1, :]), 2).sum()
w_tv = torch.pow((gen_images[:, :, :, 1:] - gen_images[:, :, :, :-1]), 2).sum()
tv_loss = config.tv_weight * (h_tv / count_h + w_tv / count_w) / config.train_batch_size
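            # The penalty above is an anisotropic (squared-difference) total variation
            # on the generated images: summed squared differences between vertically and
            # horizontally adjacent pixels, normalized per element and per batch, which
            # encourages spatially smooth generator outputs.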
if config.st_weight > 0:
# key = "layer_{}".format(model.UNetWithResnet50Encoder.DEPTH - 1)
# gen_gram = self.gen[0](gen_images, skip_encode=True)
# gen_gram = gen_gram[key]
# gen_gram = self.gram_matrix(gen_gram)
# unl_gram = self.gen[0](unl_images, skip_encode=True)
# unl_gram = unl_gram[key].detach()
# unl_gram = self.gram_matrix(unl_gram)
gen_gram = self.gram_matrix(gen_images)
unl_gram = self.gram_matrix(unl_images)
st_loss += config.st_weight * nn.KLDivLoss()(gen_gram, unl_gram)
# Generator loss
g_loss = Variable(torch.zeros((1,1)), requires_grad=True).cuda() + \
fm_loss + nei_loss + \
ef_loss + el_loss + \
tv_loss + st_loss + \
gl_loss + gn_loss + gr_loss + gc_loss
self.gen_optimizer.zero_grad()
g_loss.backward()
self.gen_optimizer.step()
monitor_dict = OrderedDict()
monitor_dict['unl acc'] = unl_acc.data[0]
if gen_acc is not None: monitor_dict['gen acc'] = gen_acc.data[0] * config.dg_ratio
else: monitor_dict['gen acc'] = 0
monitor_dict['max unl acc'] = max_unl_acc.data[0]
if gen_acc is not None: monitor_dict['max gen acc'] = max_gen_acc.data[0] * config.dg_ratio
else: monitor_dict['max gen acc'] = 0
monitor_dict['lab loss'] = lab_loss.data[0]
monitor_dict['unl loss'] = unl_loss.data[0]
if config.dgl_weight > 0: monitor_dict['dgl loss'] = dgl_loss.data[0]
if config.con_coef > 0: monitor_dict['con loss'] = cons_loss.data[0]
if config.dis_double:
monitor_dict['la2 loss'] = lab_loss2.data[0]
if config.dt_weight > 0: monitor_dict['tri loss'] = tri_loss.data[0]
if config.ut_weight > 0: monitor_dict['ult loss'] = ult_loss.data[0]
if hasattr(self, 'gen') and iter % config.dg_ratio == 0:
if config.gf_weight > 0: monitor_dict['fm loss'] = fm_loss.data[0] * config.dg_ratio
if config.ef_weight > 0: monitor_dict['ef loss'] = ef_loss.data[0] * config.dg_ratio
if config.el_weight > 0: monitor_dict['el loss'] = el_loss.data[0] * config.dg_ratio
if config.tv_weight > 0: monitor_dict['tv loss'] = tv_loss.data[0] * config.dg_ratio
if config.st_weight > 0: monitor_dict['st loss'] = st_loss.data[0] * config.dg_ratio
if config.nei_coef > 0: monitor_dict['nei loss'] = nei_loss.data[0] * config.dg_ratio
if config.gl_weight > 0: monitor_dict['gl loss'] = gl_loss.data[0] * config.dg_ratio
if config.gn_weight > 0: monitor_dict['gn loss'] = gn_loss.data[0] * config.dg_ratio
if config.gr_weight > 0: monitor_dict['gr loss'] = gr_loss.data[0] * config.dg_ratio
if config.gc_weight > 0: monitor_dict['gc loss'] = gc_loss.data[0] * config.dg_ratio
# if config.gl_weight > 0: monitor_dict['gl ts'] = gl_ts.data[0] * config.dg_ratio
elif iter % config.dg_ratio != 0:
if config.gf_weight > 0: monitor_dict['fm loss'] = 0
if config.ef_weight > 0: monitor_dict['ef loss'] = 0
if config.el_weight > 0: monitor_dict['el loss'] = 0
if config.tv_weight > 0: monitor_dict['tv loss'] = 0
if config.st_weight > 0: monitor_dict['st loss'] = 0
if config.nei_coef > 0: monitor_dict['nei loss'] = 0
if config.gl_weight > 0: monitor_dict['gl loss'] = 0
if config.gn_weight > 0: monitor_dict['gn loss'] = 0
if config.gr_weight > 0: monitor_dict['gr loss'] = 0
if config.gc_weight > 0: monitor_dict['gc loss'] = 0
# if config.gl_weight > 0: monitor_dict['gl ts'] = 0
return monitor_dict
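    # The evaluation helpers below monitor discriminator quality in two ways:
    # in eval_true_fake, sigmoid(logsumexp(class logits)) > 0.5 is used as a
    # real-vs-generated score, while max(logit) > 0 checks the top class logit
    # directly (the "Assumption (2) and (3)" referenced in its body).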
def eval_true_fake(self, data_loader, max_batch=None):
self.gen.eval()
self.dis.eval()
cnt = 0
unl_acc, gen_acc, max_unl_acc, max_gen_acc = 0., 0., 0., 0.
for i, (images, _) in enumerate(data_loader.get_iter()):
images = Variable(images.cuda(), volatile=True)
unl_feat = self.gen[0](images, encode=True)
gen_feat = self.gen[0](self.get_gens_img(images), encode=True)
unl_logits = self.dis(unl_feat)
gen_logits = self.dis(gen_feat)
unl_logsumexp = log_sum_exp(unl_logits)
gen_logsumexp = log_sum_exp(gen_logits)
##### Monitoring (eval mode)
# true-fake accuracy
unl_acc += torch.mean(nn.functional.sigmoid(unl_logsumexp).gt(0.5).float()).data[0]
gen_acc += torch.mean(nn.functional.sigmoid(gen_logsumexp).gt(0.5).float()).data[0]
# top-1 logit compared to 0: to verify Assumption (2) and (3)
max_unl_acc += torch.mean(unl_logits.max(1)[0].gt(0.0).float()).data[0]
max_gen_acc += torch.mean(gen_logits.max(1)[0].gt(0.0).float()).data[0]
cnt += 1
if max_batch is not None and i >= max_batch - 1: break
return unl_acc / cnt, gen_acc / cnt, max_unl_acc / cnt, max_gen_acc / cnt
def eval(self, data_loader, max_batch=None, ema=False, tri=0):
if ema:
# if self.consistency_weight == 0.:
# return 0.
dis = self.ema_dis
else:
dis = self.dis
if tri == 0:
dis_out = dis
elif tri == 2:
dis_out = self.dis_dou.out_net3
else: # 1
dis_out = self.dis_dou.out_net2
# self.gen.eval()
dis.eval()
loss, incorrect, cnt = 0, 0, 0
for i, (images, labels) in enumerate(data_loader.get_iter()):
images = Variable(images.cuda(), volatile=True)
labels = Variable(labels.cuda(), volatile=True)
feat = self.gen[0](images, encode=True)
pred_prob = dis_out(feat)
loss += self.d_criterion(pred_prob, labels).data[0]
cnt += 1
incorrect += torch.ne(torch.max(pred_prob, 1)[1], labels).data.sum()
if max_batch is not None and i >= max_batch - 1: break
return loss / cnt, incorrect
def get_gens_img(self, images, spbatch=False, partcode=False, lbls=None, codes=None):
# images: Variable(Tensor)
gen_images = []
if lbls is not None:
new_lbls = []
img_per_gen = images.size(0) // self.config.num_label
num_part = []
for j in range(self.config.num_label):
if spbatch:
num_part.append(range(img_per_gen))
elif lbls is not None:
                mask = (lbls == j).nonzero()  # assumed: select images whose label is j (mirrors the labeled-feature masks above)
# --------------------------------------------------------
# Who_where
# Copyright (c) 2016 University of Virginia
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> @ U.Va (2016)
# --------------------------------------------------------
import os, cv2
import os.path as osp
import numpy as np
from config import cfg
################################################################################
# Path
################################################################################
def maybe_create(dir_path):
if not osp.exists(dir_path):
os.makedirs(dir_path)
################################################################################
# BBox representations
################################################################################
def clip_boxes(boxes, width, height):
"""clip the boxes to make them valid."""
boxes[:, 0] = np.maximum(boxes[:, 0], 0)
boxes[:, 1] = np.maximum(boxes[:, 1], 0)
boxes[:, 2] = np.minimum(boxes[:, 2], width - 1)
boxes[:, 3] = np.minimum(boxes[:, 3], height - 1)
return boxes.astype(np.int32)
def xywh_to_xyxy(boxes, width, height):
"""Convert [x y w h] box format to [x1 y1 x2 y2] format."""
w = boxes[:, 2]
h = boxes[:, 3] #np.divide(w, boxes[:, 3])
xmin = boxes[:, 0] - 0.5 * w + 1.0
xmax = boxes[:, 0] + 0.5 * w
ymin = boxes[:, 1] - h + 1.0 #boxes[:, 1] - 0.5 * h + 1.0
ymax = boxes[:, 1] #boxes[:, 1] + 0.5 * h
xyxy = np.vstack((xmin, ymin, xmax, ymax)).transpose()
return clip_boxes(xyxy, width, height)
def xyxy_to_xywh(boxes):
"""Convert [x1 y1 x2 y2] box format to [x y w h] format."""
x = 0.5 * (boxes[:, 0] + boxes[:, 2])
y = boxes[:, 3] #0.5 * (boxes[:, 1] + boxes[:, 3])
w = boxes[:, 2] - boxes[:, 0] + 1.0
h = boxes[:, 3] - boxes[:, 1] + 1.0
# r = np.divide(w, h)
return np.vstack((x, y, w, h)).transpose()
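# Note on the box convention used in this module: "xywh" is
# (center_x, bottom_y, width, height); the y component is the box's bottom
# edge rather than its vertical center, so xywh_to_xyxy and xyxy_to_xywh
# above invert each other (up to the +1 offsets and clipping).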
def xyxy_areas(boxes):
areas = np.multiply(boxes[:, 2] - boxes[:, 0] + 1, \
boxes[:, 3] - boxes[:, 1] + 1)
return areas
def expand_xyxy(boxes, width, height, ratio=0.2):
bw = boxes[:, 2] - boxes[:, 0] + 1.0
bh = boxes[:, 3] - boxes[:, 1] + 1.0
ox = (bw * ratio).astype(np.int)
oy = (bh * ratio).astype(np.int)
xyxy = boxes.copy()
xyxy[:,0] -= ox; xyxy[:,1] -= oy
xyxy[:,2] += ox; xyxy[:,3] += oy
return clip_boxes(xyxy, width, height)
def normalize_xywh(boxes, width, height):
# max_logr = np.log(cfg.ASPECT_RATIO_RANGE[1])
# min_logr = np.log(cfg.ASPECT_RATIO_RANGE[0])
x = np.divide(boxes[:, 0], float(width))
y = np.divide(boxes[:, 1], float(height))
w = np.divide(boxes[:, 2], float(width))
h = np.divide(boxes[:, 3], float(height))
# r = np.maximum(boxes[:, 3], cfg.ASPECT_RATIO_RANGE[0])
# r = np.minimum(r, cfg.ASPECT_RATIO_RANGE[1])
# r = np.divide(np.log(r) - min_logr, max_logr - min_logr)
return np.vstack((x, y, w, h)).transpose()
def denormalize_xywh(boxes, width, height):
# max_logr = np.log(cfg.ASPECT_RATIO_RANGE[1])
# min_logr = np.log(cfg.ASPECT_RATIO_RANGE[0])
x = np.multiply(boxes[:, 0], float(width))
y = np.multiply(boxes[:, 1], float(height))
w = np.multiply(boxes[:, 2], float(width))
h = np.multiply(boxes[:, 3], float(height))
# r = np.multiply(boxes[:, 3], max_logr - min_logr) + min_logr
# r = np.exp(r)
return np.vstack((x, y, w, h)).transpose()
def ind2sub(indices, resolution):
# indices: linear indices
# resolution: [x_dim, y_dim]
# output: normalized xy
x = (indices % resolution[0] + 0.5) / resolution[0]
    y = (indices // resolution[0] + 0.5) / resolution[1]
return np.vstack((x, y)).transpose()
def sub2ind(subscripts, resolution):
# subscripts: normalized subscript
# resolution: [x_dim, y_dim]
# output: linear indices
scaled_xy = np.multiply(subscripts, np.array(resolution).reshape((1, 2)))
scaled_xy = np.ceil(scaled_xy)
scaled_xy = np.maximum(0, scaled_xy-1)
indices = scaled_xy[:,0] + scaled_xy[:,1] * resolution[0]
return indices.astype(np.int32)
def normH2ind(normH, bin_size):
ind = np.floor(normH * bin_size + 0.5)
return np.maximum(0, ind - 1)
def ind2normH(ind, bin_size):
return ind/float(bin_size)
def indices_to_boxes(indices, resolution):
# indices: [np_samples, 2], linear indices of boxes
# resolution: resolution of the subscripts
# output: normalized boxes
sub_1 = ind2sub(indices[:, 0], resolution[:2])
sub_2 = ind2sub(indices[:, 1], resolution[2:])
return np.hstack((sub_1, sub_2)).astype(np.float)
def boxes_to_indices(boxes, resolution):
# boxes: normalized boxes
ind_1 = sub2ind(boxes[:,:2], resolution[:2])
ind_2 = sub2ind(boxes[:,2:], resolution[2:])
return np.vstack((ind_1, ind_2)).transpose()
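# Minimal round-trip sketch for the grid index helpers above (the 16x16
# resolution is an arbitrary example value).
def _example_grid_round_trip():
    resolution = [16, 16]               # [x_dim, y_dim]
    indices = np.array([0, 17, 255])
    nxy = ind2sub(indices, resolution)  # normalized (x, y) cell centers
    back = sub2ind(nxy, resolution)     # recovers the original linear indices
    return nxy, back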
def centers_to_rois(centers, encode_dims, decode_dims, radius=cfg.PEEPHOLE_RADIUS):
nxy = ind2sub(centers, encode_dims)
xy = np.multiply(nxy, np.array(decode_dims).reshape((1,2))).astype(np.int)
rois = np.zeros((centers.shape[0], decode_dims[0], decode_dims[1]), dtype=np.float)
for i in range(centers.shape[0]):
rois[i, (xy[i,1]-2*radius):(xy[i,1]+1), \
(xy[i,0]-radius):(xy[i,0]+radius+1)] = 1.0
return rois
def scene_volume_from_entry(entry, cur_box=None, n_cls=80):
width = entry['width']
height = entry['height']
vol = np.zeros((height, width, n_cls), dtype=np.float)
boxes = np.array(entry['all_boxes']).reshape((-1,4))
clses = np.array(entry['all_clses']).flatten()
for i in range(boxes.shape[0]):
bb = boxes[i].astype(np.int)
if not (cur_box is None):
diff = np.sum(np.absolute(bb - cur_box))
if diff < 4:
continue
bb = boxes[i].astype(np.int)
cls = clses[i] - 1
vol[bb[1]:(bb[3]+1), bb[0]:(bb[2]+1), cls] = 255
return vol
def scene_layout(width, height, boxes, clses, color_palette):
colors = np.zeros((height, width, 3), dtype=np.float)
counts = np.zeros((height, width), dtype=np.float)
for i in range(boxes.shape[0]):
cur_color = np.zeros((height, width, 3))
cur_count = np.zeros((height, width))
bb = boxes[i].astype(np.int)
cls = clses[i]
rgb = np.array(color_palette[cls])
cur_color[bb[1]:(bb[3]+1), bb[0]:(bb[2]+1), :] = rgb.reshape((1,1,3))
cur_count[bb[1]:(bb[3]+1), bb[0]:(bb[2]+1)] = 1
colors = colors + cur_color
counts = counts + cur_count
counts = counts + 1e-3
average = np.divide(colors, np.expand_dims(counts, axis=-1))
output = average.copy().astype(np.int)
output[average > 255] = 255
output[average < 0] = 0
sum_output = np.sum(output, axis=-1)
sum_output = sum_output.flatten()
indices = np.where(sum_output < 0.5)[0]
output = output.reshape((-1, 3))
output[indices, :] = np.array(color_palette[0]).flatten()
output = output.reshape((height, width, 3))
return output
def scene_volume(width, height, boxes, clses, ex_box=None, n_cls=80):
vol = np.zeros((height, width, n_cls), dtype=np.float)
for i in range(boxes.shape[0]):
bb = boxes[i].astype(np.int)
if not (ex_box is None):
diff = np.sum(np.absolute(bb - ex_box))
if diff < 4:
continue
bb = boxes[i].astype(np.int)
cls = clses[i] - 1
vol[bb[1]:(bb[3]+1), bb[0]:(bb[2]+1), cls] = 255
return vol
################################################################################
# Box transformation for offset regression
################################################################################
def bbox_transform(grid_xywh, gt_xywh):
# Assume the input xywh have been normalized.
dxdy = np.divide(gt_xywh[:,:2] - grid_xywh[:,:2], grid_xywh[:,2:])
dxdy = dxdy.transpose()
dw = np.log(gt_xywh[:, 2] / grid_xywh[:, 2])
dh = np.log(gt_xywh[:, 3] / grid_xywh[:, 3])
deltas = np.vstack((dxdy, dw, dh)).transpose()
return deltas
def bbox_transform_inv(grid_xywh, deltas):
# Assume the input xywh have been normalized.
new_xywh = grid_xywh.copy()
new_xywh[:,:2] = new_xywh[:,:2] + np.multiply(deltas[:,:2], grid_xywh[:,2:])
new_xywh[:,2:] = np.multiply(np.exp(deltas[:,2:]), grid_xywh[:, 2:])
return new_xywh
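# Minimal sketch of the box-regression encoding above: bbox_transform produces
# R-CNN style (dx, dy, dw, dh) offsets and bbox_transform_inv decodes them back
# (the normalized boxes below are arbitrary example values).
def _example_bbox_deltas():
    grid_xywh = np.array([[0.50, 0.50, 0.20, 0.20]])
    gt_xywh = np.array([[0.55, 0.48, 0.25, 0.30]])
    deltas = bbox_transform(grid_xywh, gt_xywh)
    decoded = bbox_transform_inv(grid_xywh, deltas)  # matches gt_xywh up to float error
    return deltas, decoded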
def unique_boxes(boxes, scale=1.0):
# Return indices of unique boxes.
v = np.array([1, 1e3, 1e6, 1e9])
hashes = np.round(boxes * scale).dot(v)
_, index = np.unique(hashes, return_index=True)
return np.sort(index)
def validate_boxes(boxes, width=0, height=0):
# Check that a set of boxes are valid.
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
assert (x1 >= 0).all()
assert (y1 >= 0).all()
assert (x2 >= x1).all()
assert (y2 >= y1).all()
assert (x2 < width).all()
assert (y2 < height).all()
def filter_small_boxes(boxes, min_size):
w = boxes[:, 2] - boxes[:, 0]
h = boxes[:, 3] - boxes[:, 1]
keep = np.where((w >= min_size) & (h > min_size))[0]
return keep
################################################################################
# Box and Images
################################################################################
def create_squared_image(img, pad_value):
width = img.shape[1]
height = img.shape[0]
max_dim = np.maximum(width, height)
offset_x = int(0.5 * (max_dim - width))
offset_y = int(0.5 * (max_dim - height))
output_img = pad_value.reshape(1, 1, img.shape[-1]) * \
np.ones((max_dim, max_dim, img.shape[-1]))
output_img[offset_y : offset_y + height, \
offset_x : offset_x + width, :] = img
return output_img, offset_x, offset_y
def crop_image(img, xyxy, pad_value, dilation_ratio=0.0):
xywh = xyxy_to_xywh(xyxy.reshape((1,4))).flatten()
factor = 1.0 + dilation_ratio
img_width = img.shape[1]
img_height = img.shape[0]
out_width = int(xywh[2] * factor)
out_height = int(xywh[3] * factor)
out_img = np.ones((out_height, out_width, 3), dtype=np.float) * \
pad_value.reshape((1,1,3))
box_cenx = int(xywh[0])
box_ceny = int(xywh[1] - 0.5 * xywh[3])
out_cenx = int(0.5 * out_width)
out_ceny = int(0.5 * out_height)
left_radius = min(box_cenx, out_cenx)
right_radius = min(img_width - box_cenx, out_cenx)
top_radius = min(box_ceny, out_ceny)
bottom_radius = min(img_height - box_ceny, out_ceny)
out_img[(out_ceny-top_radius):(out_ceny+bottom_radius), \
(out_cenx-left_radius):(out_cenx+right_radius),:] \
= img[(box_ceny-top_radius):(box_ceny+bottom_radius), \
(box_cenx-left_radius):(box_cenx+right_radius),:]
return out_img
def crop_and_resize(img, xyxy, full_resolution, crop_resolution):
# xyxy: scaled xyxy
# full_resolution: output resolution
# crop_resolution: mask resolution
width = xyxy[2] - xyxy[0] + 1
height = xyxy[3] - xyxy[1] + 1
cenx = 0.5 * (xyxy[2] + xyxy[0])
ceny = 0.5 * (xyxy[3] + xyxy[1])
xywh = np.array([cenx, ceny, width, height])
img_width = img.shape[1]
img_height = img.shape[0]
out_width = int(xywh[2] * full_resolution[1]/crop_resolution[1])
out_height = int(xywh[3] * full_resolution[0]/crop_resolution[0])
out_img = np.ones((out_height, out_width, 3), dtype=np.float) * \
cfg.PIXEL_MEANS.reshape((1,1,3))
box_cenx = int(xywh[0])
box_ceny = int(xywh[1])
out_cenx = int(0.5 * out_width)
out_ceny = int(0.5 * out_height)
left_radius = min(box_cenx, out_cenx)
right_radius = min(img_width - box_cenx, out_cenx)
top_radius = min(box_ceny, out_ceny)
bottom_radius = min(img_height - box_ceny, out_ceny)
out_img[(out_ceny-top_radius):(out_ceny+bottom_radius), \
(out_cenx-left_radius):(out_cenx+right_radius),:] \
= img[(box_ceny-top_radius):(box_ceny+bottom_radius), \
(box_cenx-left_radius):(box_cenx+right_radius),:]
return cv2.resize(out_img, (full_resolution[1], full_resolution[0])).astype(np.int32)
def dilate_mask(mask, radius):
inv_mask = 255 - mask
    dist_type = getattr(cv2, 'DIST_L2', None) or cv2.cv.CV_DIST_L2  # OpenCV 3+ / 2.x compatibility
    dm = cv2.distanceTransform(inv_mask, dist_type, 5)
new_mask = mask.copy()
new_mask[dm < radius] = 255
return new_mask
def random_box(full_box, min_dim=50):
w = full_box[2] - full_box[0] + 1
h = full_box[3] - full_box[1] + 1
max_dim = min(w, h)
if max_dim < min_dim:
return None
dim = np.random.randint(min_dim,
from collections import OrderedDict, namedtuple
from datetime import datetime
from decimal import Decimal
import functools
import io
from pathlib import Path
from typing import Any, Callable, Mapping, Sequence, Union
import aiohttp
import aiohttp.web
from dateutil.tz import tzutc
from multidict import CIMultiDict
import json as modjson
from .auth import generate_signature
from .exceptions import BackendClientError, BackendAPIError
from .session import BaseSession, Session as SyncSession, AsyncSession
__all__ = [
'Request',
'Response',
]
RequestContent = Union[
bytes, bytearray, str,
aiohttp.StreamReader,
io.IOBase,
None,
]
'''
The type alias for the set of allowed types for request content.
'''
AttachedFile = namedtuple('AttachedFile', 'filename stream content_type')
'''
A struct that represents an attached file to the API request.
:param str filename: The name of file to store. It may include paths
and the server will create parent directories
if required.
:param Any stream: A file-like object that allows stream-reading bytes.
:param str content_type: The content type for the stream. For arbitrary
binary data, use "application/octet-stream".
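A minimal usage sketch (the file path and the ``rqst`` Request object below are
illustrative assumptions, not part of this module):
.. code-block:: python3
   with open('weights.bin', 'rb') as f:
       rqst.attach_files([
           AttachedFile('uploads/weights.bin', f, 'application/octet-stream'),
       ])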
'''
_default_request_timeout = aiohttp.ClientTimeout(
total=None, connect=None,
sock_connect=30.0, sock_read=None,
)
class ExtendedJSONEncoder(modjson.JSONEncoder):
def default(self, obj):
if isinstance(obj, Path):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
return super().default(obj)
class Request:
'''
The API request object.
'''
__slots__ = (
'config', 'session', 'method', 'path',
'date', 'headers', 'params', 'content_type',
'_content', '_attached_files',
'reporthook',
)
_allowed_methods = frozenset([
'GET', 'HEAD', 'POST',
'PUT', 'PATCH', 'DELETE',
'OPTIONS'])
def __init__(self, session: BaseSession,
method: str = 'GET',
path: str = None,
content: RequestContent = None, *,
content_type: str = None,
params: Mapping[str, str] = None,
reporthook: Callable = None) -> None:
'''
Initialize an API request.
:param BaseSession session: The session where this request is executed on.
:param str path: The query path. When performing requests, the version number
prefix will be automatically prepended if required.
:param RequestContent content: The API query body which will be encoded as
JSON.
:param str content_type: Explicitly set the content type. See also
:func:`Request.set_content`.
'''
self.session = session
self.config = session.config
self.method = method
if path.startswith('/'):
path = path[1:]
self.path = path
self.params = params
self.date = None
self.headers = CIMultiDict([
('User-Agent', self.config.user_agent),
('X-BackendAI-Version', self.config.version),
])
self._attached_files = None
self.set_content(content, content_type=content_type)
self.reporthook = reporthook
@property
def content(self) -> RequestContent:
'''
Retrieves the content in the original form.
Internal code should NOT use this, as it incurs duplicate
encoding/decoding.
'''
return self._content
def set_content(self, value: RequestContent, *,
content_type: str = None):
'''
Sets the content of the request.
'''
assert self._attached_files is None, \
'cannot set content because you already attached files.'
guessed_content_type = 'application/octet-stream'
if value is None:
guessed_content_type = 'text/plain'
self._content = b''
elif isinstance(value, str):
guessed_content_type = 'text/plain'
self._content = value.encode('utf-8')
else:
guessed_content_type = 'application/octet-stream'
self._content = value
self.content_type = (content_type if content_type is not None
else guessed_content_type)
def set_json(self, value: object):
'''
A shortcut for set_content() with JSON objects.
'''
self.set_content(modjson.dumps(value, cls=ExtendedJSONEncoder),
content_type='application/json')
def attach_files(self, files: Sequence[AttachedFile]):
'''
Attach a list of files represented as AttachedFile.
'''
assert not self._content, 'content must be empty to attach files.'
self.content_type = 'multipart/form-data'
self._attached_files = files
def _sign(self, rel_url, access_key=None, secret_key=None, hash_type=None):
'''
Calculates the signature of the given request and adds the
Authorization HTTP header.
It should be called at the very end of request preparation and before
sending the request to the server.
'''
if access_key is None:
access_key = self.config.access_key
if secret_key is None:
secret_key = self.config.secret_key
if hash_type is None:
hash_type = self.config.hash_type
hdrs, _ = generate_signature(
self.method, self.config.version, self.config.endpoint,
self.date, str(rel_url), self.content_type, self._content,
access_key, secret_key, hash_type)
self.headers.update(hdrs)
def _pack_content(self):
if self._attached_files is not None:
data = aiohttp.FormData()
for f in self._attached_files:
data.add_field('src',
f.stream,
filename=f.filename,
content_type=f.content_type)
assert data.is_multipart, 'Failed to pack files as multipart.'
# Let aiohttp fill up the content-type header including
# multipart boundaries.
self.headers.pop('Content-Type', None)
return data
else:
return self._content
def _build_url(self):
base_url = self.config.endpoint.path.rstrip('/')
query_path = self.path.lstrip('/') if len(self.path) > 0 else ''
path = '{0}/{1}'.format(base_url, query_path)
url = self.config.endpoint.with_path(path)
if self.params:
url = url.with_query(self.params)
return url
# TODO: attach rate-limit information
def fetch(self, **kwargs) -> 'FetchContextManager':
'''
Sends the request to the server and reads the response.
You may use this method with either a plain synchronous Session or an
AsyncSession. Both of the following patterns are valid:
.. code-block:: python3
from ai.backend.client.request import Request
from ai.backend.client.session import Session
with Session() as sess:
rqst = Request(sess, 'GET', ...)
with rqst.fetch() as resp:
print(resp.text())
.. code-block:: python3
from ai.backend.client.request import Request
from ai.backend.client.session import AsyncSession
async with AsyncSession() as sess:
rqst = Request(sess, 'GET', ...)
async with rqst.fetch() as resp:
print(await resp.text())
'''
assert self.method in self._allowed_methods, \
'Disallowed HTTP method: {}'.format(self.method)
self.date = datetime.now(tzutc())
self.headers['Date'] = self.date.isoformat()
if self.content_type is not None:
self.headers['Content-Type'] = self.content_type
full_url = self._build_url()
self._sign(full_url.relative())
rqst_ctx = self.session.aiohttp_session.request(
self.method,
str(full_url),
data=self._pack_content(),
timeout=_default_request_timeout,
headers=self.headers)
return FetchContextManager(self.session, rqst_ctx, **kwargs)
def connect_websocket(self, **kwargs) -> 'WebSocketContextManager':
'''
Creates a WebSocket connection.
.. warning::
This method only works with
:class:`~ai.backend.client.session.AsyncSession`.
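A minimal usage sketch, mirroring the ``fetch()`` examples above; the path
``/stream/events`` is a placeholder and the example assumes the returned context
manager yields a :class:`WebSocketResponse`:
.. code-block:: python3
   async with AsyncSession() as sess:
       rqst = Request(sess, 'GET', '/stream/events')
       async with rqst.connect_websocket() as ws:
           async for msg in ws:
               print(msg)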
'''
assert isinstance(self.session, AsyncSession), \
'Cannot use websockets with sessions in the synchronous mode'
assert self.method == 'GET', 'Invalid websocket method'
self.date = datetime.now(tzutc())
self.headers['Date'] = self.date.isoformat()
# websocket is always a "binary" stream.
self.content_type = 'application/octet-stream'
full_url = self._build_url()
self._sign(full_url.relative())
ws_ctx = self.session.aiohttp_session.ws_connect(
str(full_url),
headers=self.headers)
return WebSocketContextManager(self.session, ws_ctx, **kwargs)
class Response:
'''
Represents the Backend.AI API response.
Also serves as a high-level wrapper of :class:`aiohttp.ClientResponse`.
The response objects are meant to be created by the SDK, not the callers.
:func:`text`, :func:`json` methods return the resolved content directly with
plain synchronous Session while they return the coroutines with AsyncSession.
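For example (illustrative sketch; assumes ``rqst`` is a prepared Request):
.. code-block:: python3
   # plain synchronous Session: resolved values are returned directly
   with rqst.fetch() as resp:
       data = resp.json()
   # AsyncSession: the same calls return awaitables
   async with rqst.fetch() as resp:
       data = await resp.json()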
'''
__slots__ = (
'_session', '_raw_response', '_async_mode',
)
def __init__(self, session: BaseSession,
underlying_response: aiohttp.ClientResponse, *,
async_mode: bool = False):
self._session = session
self._raw_response = underlying_response
self._async_mode = async_mode
@property
def session(self) -> BaseSession:
return self._session
@property
def status(self) -> int:
return self._raw_response.status
@property
def reason(self) -> str:
return self._raw_response.reason
@property
def headers(self) -> Mapping[str, str]:
return self._raw_response.headers
@property
def raw_response(self) -> aiohttp.ClientResponse:
return self._raw_response
@property
def content_type(self) -> str:
return self._raw_response.content_type
@property
def content_length(self) -> int:
return self._raw_response.content_length
@property
def content(self) -> aiohttp.StreamReader:
return self._raw_response.content
def text(self) -> str:
if self._async_mode:
return self._raw_response.text()
else:
return self._session.worker_thread.execute(self._raw_response.text())
def json(self, *, loads=modjson.loads) -> Any:
loads = functools.partial(loads, object_pairs_hook=OrderedDict)
if self._async_mode:
return self._raw_response.json(loads=loads)
else:
return self._session.worker_thread.execute(
self._raw_response.json(loads=loads))
def read(self, n=-1) -> bytes:
return self._session.worker_thread.execute(self.aread(n))
async def aread(self, n=-1) -> bytes:
return await self._raw_response.content.read(n)
def readall(self) -> bytes:
return self._session.worker_thread.execute(self.areadall())
async def areadall(self) -> bytes:
return await self._raw_response.content.read(-1)
class FetchContextManager:
'''
The context manager returned by :func:`Request.fetch`.
It provides both synchronous and asynchronous context manager interfaces.
'''
__slots__ = (
'session', 'rqst_ctx', 'response_cls',
'check_status',
'_async_mode',
)
def __init__(self, session, rqst_ctx, *,
response_cls: Response = Response,
check_status: bool = True):
self.session = session
self.rqst_ctx = rqst_ctx
self.response_cls = response_cls
self.check_status = check_status
self._async_mode = True
def __enter__(self):
assert isinstance(self.session, SyncSession)
self._async_mode = False
return self.session.worker_thread.execute(self.__aenter__())
async def __aenter__(self):
try:
raw_resp = await self.rqst_ctx.__aenter__()
if self.check_status and raw_resp.status // 100 != 2:
msg = await raw_resp.text()
raise BackendAPIError(raw_resp.status, raw_resp.reason, msg)
return self.response_cls(self.session, raw_resp,
async_mode=self._async_mode)
except aiohttp.ClientError as e:
msg = 'Request to the API endpoint has failed.\n' \
'Check your network connection and/or the server status.\n' \
'\u279c {!r}'.format(e)
raise BackendClientError(msg) from e
def __exit__(self, *args):
return self.session.worker_thread.execute(self.__aexit__(*args))
async def __aexit__(self, *args):
return await self.rqst_ctx.__aexit__(*args)
class WebSocketResponse:
'''
A high-level wrapper of :class:`aiohttp.ClientWebSocketResponse`.
'''
__slots__ = ('_session', '_raw_ws', )
def __init__(self, session: BaseSession,
underlying_ws: aiohttp.ClientWebSocketResponse):
self._session = session
self._raw_ws = underlying_ws
@property
def session(self) -> BaseSession:
return self._session
@property
def raw_websocket(self) -> aiohttp.ClientWebSocketResponse:
return self._raw_ws
@property
def closed(self):
return self._raw_ws.closed
async def close(self):
await self._raw_ws.close()
def __aiter__(self):
return self._raw_ws.__aiter__()
async def __anext__(self):
return await self._raw_ws.__anext__()
def exception(self):
return self._raw_ws.exception()
async def send_str(self, raw_str: str):
if self._raw_ws.closed:
raise aiohttp.ServerDisconnectedError('server disconnected')
await self._raw_ws.send_str(raw_str)
async def send_json(self, obj: Any):
if self._raw_ws.closed:
raise aiohttp.ServerDisconnectedError('server disconnected')
await self._raw_ws.send_json(obj)
async def send_bytes(self, data: bytes):
if self._raw_ws.closed:
raise aiohttp.ServerDisconnectedError('server disconnected')
await self._raw_ws.send_bytes(data)
async def receive_str(self) -> str:
if self._raw_ws.closed:
raise aiohttp.ServerDisconnectedError('server disconnected')
return await self._raw_ws.receive_str()
async def receive_json(self) -> Any:
if self._raw_ws.closed:
raise aiohttp.ServerDisconnectedError('server disconnected')
return await self._raw_ws.receive_json()
async def receive_bytes(self) -> bytes:
if self._raw_ws.closed:
raise aiohttp.ServerDisconnectedError('server disconnected')
return await self._raw_ws.receive_bytes()
class WebSocketContextManager:
'''
The context manager returned by :func:`Request.connect_websocket`.
'''
__slots__
pdb]}
atomList1.append(atomEntry1)#atomic count
else:
if(residue2 in interfaceDictionary[uniprotPair2]):
if(atomEntry2 not in atomList2): #atomic count
interfaceDictionary[uniprotPair2][residue2][1] += 1
atomList2.append(atomEntry2) #atomic count
if(chainPair2 not in interfaceDictionary[uniprotPair2][residue2][0]):
interfaceDictionary[uniprotPair2][residue2][0] += '$' + chainPair2
if(pdb not in interfaceDictionary[uniprotPair2][residue2][2]):
interfaceDictionary[uniprotPair2][residue2][2] += '$' + pdb
else:
interfaceDictionary[uniprotPair2][residue2] = [chainPair2, 1, pdb] #format of the innermost dict
atomList2.append(atomEntry2) #atomic count
elif(uniprotPair1 not in interfaceDictionary):
if(uniprotPair2 in interfaceDictionary):
if(residue2 in interfaceDictionary[uniprotPair2]):
if(atomEntry2 not in atomList2): #atomic count
interfaceDictionary[uniprotPair2][residue2][1] += 1
atomList2.append(atomEntry2) #atomic count
if(chainPair2 not in interfaceDictionary[uniprotPair2][residue2][0]):
interfaceDictionary[uniprotPair2][residue2][0] += '$' + chainPair2
if(pdb not in interfaceDictionary[uniprotPair2][residue2][2]):
interfaceDictionary[uniprotPair2][residue2][2] += '$' + pdb
else:
interfaceDictionary[uniprotPair2][residue2] = [chainPair2, 1, pdb] #format of the innermost dict
atomList2.append(atomEntry2) #atomic count
else:
interfaceDictionary[uniprotPair2] = {residue2 : [chainPair2, 1, pdb]}
atomList2.append(atomEntry2) #atomic count
else:
if(residue1 in interfaceDictionary[uniprotPair1]):
if(atomEntry1 not in atomList1): #atomic count
interfaceDictionary[uniprotPair1][residue1][1] += 1
atomList1.append(atomEntry1) #atomic count
if(chainPair1 not in interfaceDictionary[uniprotPair1][residue1][0]):
interfaceDictionary[uniprotPair1][residue1][0] += '$' + chainPair1
if(pdb not in interfaceDictionary[uniprotPair1][residue1][2]):
interfaceDictionary[uniprotPair1][residue1][2] += '$' + pdb
else:
interfaceDictionary[uniprotPair1][residue1] = [chainPair1, 1, pdb] #format of the innermost dict
atomList1.append(atomEntry1) #atomic count
#CHECK FOR REDUNDANCY!!!
# if(uniprotPair1 in interfaceDictionary):
# if(residue1 in interfaceDictionary[uniprotPair1]):
# interfaceDictionary[uniprotPair1][residue1][1] += 1
# if(chainPair1 not in interfaceDictionary[uniprotPair1][residue1][0]):
# interfaceDictionary[uniprotPair1][residue1][0] += '$' + chainPair1
# if(pdb not in interfaceDictionary[uniprotPair1][residue1][2]):
# interfaceDictionary[uniprotPair1][residue1][2] += '$' + pdb
# else:
# interfaceDictionary[uniprotPair1][residue1] = [chainPair1, 1, pdb] #format of the innermost dict
# elif(uniprotPair2 in interfaceDictionary):
# if(residue2 in interfaceDictionary[uniprotPair2]):
# interfaceDictionary[uniprotPair2][residue2][1] += 1
# if(chainPair2 not in interfaceDictionary[uniprotPair2][residue2][0]):
# interfaceDictionary[uniprotPair2][residue2][0] += '$' + chainPair2
# if(pdb not in interfaceDictionary[uniprotPair2][residue2][2]):
# interfaceDictionary[uniprotPair2][residue2][2] += '$' + pdb
# else:
# interfaceDictionary[uniprotPair2][residue2] = [chainPair2, 1, pdb]
# else:
# interfaceDictionary[uniprotPair1] = {residue1 : [chainPair1, 1, pdb]}
except (IOError, KeyError) as e:
#print("Error: " + interfaceFile + " does not appear to exist.")
pass
# In[41]:
def normalize_count(interfaceDictionary):
for pair in interfaceDictionary:
for residue in interfaceDictionary[pair]:
pdbCount = len(interfaceDictionary[pair][residue][2].split('$'))
interfaceDictionary[pair][residue].append(interfaceDictionary[pair][residue][1] / pdbCount) #[3] is normalized by uniprot pair
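# Illustrative sketch (made-up identifiers, not real data) of one entry after
# normalize_count() has run:
#
#     interfaceDictionary['UNIPROT_A@UNIPROT_B']['ALA-10'] ==
#         ['chainPairX$chainPairY',   # [0] '$'-joined chain pairs in which the contact was seen
#          6,                         # [1] raw atomic contact count
#          '1abc$2xyz',               # [2] '$'-joined PDB IDs contributing the contacts
#          3.0]                       # [3] count normalized by the number of PDB IDs (6 / 2)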
# In[46]:
def average_histones(interfaceDictionary):
avgDict = {}
avgDict['type'] = {}
for pair in interfaceDictionary:
for residue in interfaceDictionary[pair]:
targetFields = interfaceDictionary[pair][residue][0].split('@')[0].split('|')
sourceFields = interfaceDictionary[pair][residue][0].split('@')[1].split('|')
if(targetFields[3] != 'other'):
histoneType = targetFields[3]
normalizedCount = interfaceDictionary[pair][residue][3]
if(histoneType in avgDict):
if(residue in avgDict[histoneType]):
avgDict[histoneType][residue][0] += normalizedCount
avgDict[histoneType][residue][1] += 1 #for normalization
else:
avgDict[histoneType][residue] = [normalizedCount, 1]
else:
avgDict[histoneType] = {residue : [normalizedCount, 1]}
elif(sourceFields[3] != 'other'):
histoneType = sourceFields[3]
normalizedCount = interfaceDictionary[pair][residue][3]
if(histoneType in avgDict):
if(residue in avgDict[histoneType]):
avgDict[histoneType][residue][0] += normalizedCount
avgDict[histoneType][residue][1] += 1 #for normalization
else:
avgDict[histoneType][residue] = [normalizedCount, 1]
else:
avgDict[histoneType] = {residue : [normalizedCount, 1]}
avgDict2 = {}
avgDict2['type'] = {}
for histoneType in avgDict:
for residue in avgDict[histoneType]:
if(histoneType in avgDict2):
avgDict2[histoneType][residue] = avgDict[histoneType][residue][0] / avgDict[histoneType][residue][1]
else:
avgDict2[histoneType] = {residue : avgDict[histoneType][residue][0] / avgDict[histoneType][residue][1]}
return avgDict2
# In[48]:
def sum_contacts(interfaceDictionary):
sumDict = {}
histoneDict = {}
for pair in interfaceDictionary:
if(pair != 'uniprotPair'):
targetFields = []
sourceFields = []
nucleosomeFlag = 0
for residue in interfaceDictionary[pair]:
pairs = interfaceDictionary[pair][residue][0].split('$')
for instance in pairs:
targetFields = instance.split('@')[0].split('|')
sourceFields = instance.split('@')[1].split('|')
if(targetFields[7].split(':')[1] == '1' or sourceFields[7].split(':')[1] == '1'):
nucleosomeFlag = 1
break
if(nucleosomeFlag):
break
#randomResidue = list(interfaceDictionary[pair].keys())[0]
#randomPair = interfaceDictionary[pair][randomResidue][0].split('$')[0]
#targetFields = randomPair.split('@')[0].split('|')
#sourceFields = randomPair.split('@')[1].split('|')
pdbList = []
for residue in interfaceDictionary[pair]:
pdbIDs = interfaceDictionary[pair][residue][2].split('$')
for pdb in pdbIDs:
if(pdb not in pdbList):
pdbList.append(pdb)
if(targetFields[3] != 'other'):
histoneType = targetFields[3]
newPair = histoneType + '@'
if(sourceFields[3] != 'other'):
histoneType2 = sourceFields[3]
newPair += histoneType2
else:
for field in sourceFields:
if(field == sourceFields[len(sourceFields) - 1]):
newPair += field
else:
newPair += field + '|'
totalCount = 0
for residue in interfaceDictionary[pair]:
normalizedCount = interfaceDictionary[pair][residue][3]
totalCount += normalizedCount
if(newPair in sumDict):
sumDict[newPair][0] += totalCount
else:
sumDict[newPair] = [totalCount, pdbList]
if(newPair in histoneDict):
histoneDict[newPair] += 1
else:
histoneDict[newPair] = 1
elif(sourceFields[3] != 'other'):
histoneType = sourceFields[3]
newPair = histoneType + '@'
for field in targetFields:
if(field == targetFields[len(targetFields) - 1]):
newPair += field
else:
newPair += field + '|'
totalCount = 0
for residue in interfaceDictionary[pair]:
normalizedCount = interfaceDictionary[pair][residue][3]
totalCount += normalizedCount
if(newPair in sumDict):
sumDict[newPair][0] += totalCount
else:
sumDict[newPair] = [totalCount, pdbList]
if(newPair in histoneDict):
histoneDict[newPair] += 1
else:
histoneDict[newPair] = 1
else:
newPair = ''
for field in targetFields:
if(field == targetFields[len(targetFields) - 1]):
newPair += field
else:
newPair += field + '|'
newPair += '@'
for field in sourceFields:
if(field == sourceFields[len(sourceFields) - 1]):
newPair += field
else:
newPair += field + '|'
totalCount = 0
for residue in interfaceDictionary[pair]:
normalizedCount = interfaceDictionary[pair][residue][3]
totalCount += normalizedCount
if(newPair in sumDict):
sumDict[newPair][0] += totalCount
else:
sumDict[newPair] = [totalCount, pdbList]
if(newPair in histoneDict):
histoneDict[newPair] += 1
else:
histoneDict[newPair] = 1
for pair in histoneDict:
sumDict[pair][0] = sumDict[pair][0] / histoneDict[pair]
#####Have to account for the case when an interface between two non-histone chains is already in the dictionary, but is stored in a reverse order!!!!
return sumDict
# In[49]:
def main():
chainDictionary = {}
chainDictionary['chain'] = {}
get_chain_dictionaries(CHAIN_FILE, chainDictionary)
# for pdb in chainDictionary:
# for chain in chainDictionary[pdb]:
# print(pdb + '\t' + chain + '\t' + str(chainDictionary[pdb][chain]))
interfaceFiles = []
get_files(PDB_LIST, interfaceFiles, 'interface')
interfaceDictionary = {}
interfaceDictionary['uniprotPair'] = {}
residue_count(interfaceFiles, chainDictionary, interfaceDictionary)
# for pair in interfaceDictionary:
# for a in interfaceDictionary[pair]:
# print(pair + '\t' + a + '\t' + str(interfaceDictionary[pair][a]))
normalize_count(interfaceDictionary)
# for pair in interfaceDictionary:
# for residue in interfaceDictionary[pair]:
# uniprots = pair.split('@')
# chains = str(interfaceDictionary[pair][residue][0]).split('@')
# name1 = chains[0].split('|')[2]
# name2 = chains[-1].split('|')[2]
# count = str(interfaceDictionary[pair][residue][3])
# print(uniprots[0] + '@' + name1 + '@' + uniprots[1] + '@' + name2 + '@' + residue + '@' + count)
# avgDict = average_histones(interfaceDictionary)
# for entry in avgDict:
# print(entry + '\t' + str(avgDict[entry]))
sumDict = sum_contacts(interfaceDictionary)
for pair in sumDict:
chains = pair.split('@')
target = chains[0]
source = chains[1]
pdbIDs = ''
for pdb in sumDict[pair][1]:
if(pdb == sumDict[pair][1][len(sumDict[pair][1]) - 1]):
pdbIDs += pdb
else:
pdbIDs += pdb + '|'
targetFields = target.split('|')
sourceFields = source.split('|')
contacts = sumDict[pair][0]
######
if(len(targetFields) > 1 and targetFields[6].split(':')[1] == '1'):
if(len(sourceFields) > 1):
print(sourceFields[2] + ';' + sourceFields[0] + ';' + sourceFields[1] + ';' + sourceFields[4] + ';' + sourceFields[5] + ';' + sourceFields[6].split('nucleosome')[0] + ';' + targetFields[2] + ';' + targetFields[0] + ';' + targetFields[1] + ';' + targetFields[4] + ';' + targetFields[5] + ';' + targetFields[6].split('nucleosome')[0] + ';' + pdbIDs + ';' + 'nucleosome' + ';' + str(contacts))
else:
print(source + ';' + ';' + ';' + ';' + ';' + ';' + targetFields[2] + ';' + targetFields[0] + ';' + targetFields[1] + ';' + targetFields[4] + ';' + targetFields[5] + ';' + targetFields[6].split('nucleosome')[0] + ';' + pdbIDs + ';' + 'nucleosome' + ';' + str(contacts))
elif(len(sourceFields) > 1 and sourceFields[6].split(':')[1] == '1'):
if(len(targetFields) > 1):
print(sourceFields[2] + ';' + sourceFields[0] + ';' + sourceFields[1] + ';' + sourceFields[4] + ';' + sourceFields[5] + ';' + sourceFields[6].split('nucleosome')[0] + ';' + targetFields[2] + ';' + targetFields[0] + ';' + targetFields[1] + ';' + targetFields[4] + ';' + targetFields[5] + ';' + targetFields[6].split('nucleosome')[0] + ';' + pdbIDs + ';' + 'nucleosome' + ';' + str(contacts))
else:
print(target + ';' + ';' + ';' + ';' + ';' + ';' + sourceFields[2] + ';' + sourceFields[0] + ';' + sourceFields[1] + ';' + sourceFields[4] + ';' + sourceFields[5] + ';' + sourceFields[6].split('nucleosome')[0] + ';' + pdbIDs + ';' + 'nucleosome' + ';' + str(contacts))
elif(len(sourceFields) == 1 and len(targetFields) == 1):
print(target + ';' + ';' + ';' + ';' + ';' +
"""
SR²: Super-Resolution With Structure-Aware Reconstruction
sr2/src/utils
@author: <NAME>
"""
import os
import random
import h5py
import numpy as np
import scipy.io as sio
import tensorflow as tf
from skimage.filters import gaussian
from skimage.transform import rescale
from random import sample
## This is the dataset class which handles the data storing, loading and preprocessing
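# A minimal configuration sketch (the values are illustrative assumptions, not shipped
# defaults) showing the keys that __init__ below expects in the `data` dict:
#
#     data = {
#         "in_shape": (8, 8), "out_shape": (32, 32),
#         "n_channels": 1, "n_classes": 10, "scale": 4,
#         "dataset": "mnist",
#         "noise": True, "noiseType": "gaussian",
#         "noise_low": 0.0, "noise_high": 0.1,
#         "local_dir": "./data",
#     }
#     dataset = Dataset(data)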
class Dataset:
def __init__(self,data):
# Parameter for the dataset
# Input size of the image to the network and output size of the SR network
self.in_shape = data["in_shape"]
self.out_shape = data["out_shape"]
# Number of color channels (1 = grayscale, 3 = RGB)
self.n_channels = data["n_channels"]
# Number of classes for the classification task
self.n_classes = data["n_classes"]
# Magnification factor
self.scale = data["scale"]
# Dataset name (mnist or svhn)
self.dataset = data["dataset"]
# Bool whether noise is present or not
self.noise = data["noise"]
# Type of noise
self.noiseType = data["noiseType"]
# Amount of noise: std in [n_low, n_high]
self.n_low = data["noise_low"]
self.n_high = data["noise_high"]
# Name of the directory where the model is stored
self.local_dir = data["local_dir"]
# Name of the directory where the data is stored
self.data_dir = os.path.join(self.local_dir, self.dataset)
# Storage format of the data
self.ending = "hdf5"
# Specify noise function
if self.noise:
if self.noiseType == "gaussian":
self.noiser = self.gaussian
elif self.noiseType == "sp":
self.noiser = self.sp
elif self.noiseType == "speckle":
self.noiser = self.speckle
else:
self.noiser = self.empty
if not os.path.isfile(os.path.join(self.data_dir, 'train/idx_list.txt')):
print("Please store the data using dataset.store_data()")
# Function to load the HDF5 files and open them
def load_files(self):
# In IDs, the indexes of the images are stored. The IDs are shuffled after each epoch
self.IDs = {}
f = open(os.path.join(self.data_dir, 'train/idx_list.txt'), 'r')
self.IDs["train"] = (f.read().splitlines())
f.close()
f = open(os.path.join(self.data_dir, 'val/idx_list.txt'), 'r')
self.IDs["val"] = (f.read().splitlines())
f.close()
f =open(os.path.join(self.data_dir, 'test/idx_list.txt'), 'r')
self.IDs["test"] = (f.read().splitlines())
f.close()
# Open HDF5 files
self.files = {}
self.files["train"] = h5py.File(os.path.join(self.data_dir,"train/train.{0}".format(self.ending)), "r")
self.files["val"] = h5py.File(os.path.join(self.data_dir, "val/val.{0}".format(self.ending)), "r")
self.files["test"] = h5py.File(os.path.join(self.data_dir, "test/test.{0}".format(self.ending)), "r")
# Functions to load the data if model.train_generator is used
# Load batch for classification only
def load_data_generator_cl(self,IDs_batch,batchsize):
####
# IDs_batch: IDs of the images which are in the current batch
# batchsize: Size of the current batch
####
# Create empty tensor for low-resolution images and the labels
X_lr_batch = np.empty((batchsize, self.in_shape[0], self.in_shape[1], self.n_channels))
l_batch = np.empty((batchsize, self.n_classes), dtype=int)
for i,ID in enumerate(IDs_batch):
X_lr_batch[i,] = self.noiser((self.files['train']['images_lr{0}'.format(self.scale)])[int(ID)])
l_batch[i,] = (self.files['train']['labels'])[int(ID)]
return X_lr_batch,l_batch
# Load batch for super-resolution only
def load_data_generator_sr(self,IDs_batch,batchsize):
####
# IDs_batch: IDs of the images which are in the current batch
# batchsize: Size of the current batch
####
# Create empty tensor for low-resolution images and the high-resolution images
X_hr_batch = np.empty((batchsize, self.out_shape[0], self.out_shape[1], self.n_channels))
X_lr_batch = np.empty((batchsize, self.in_shape[0], self.in_shape[1], self.n_channels))
for i,ID in enumerate(IDs_batch):
X_hr_batch[i,] = (self.files['train']['images_hr'])[int(ID)]
X_lr_batch[i,] = self.noiser((self.files['train']['images_lr{0}'.format(self.scale)])[int(ID)])
return X_lr_batch,X_hr_batch
# Load batch for sr2
def load_data_generator_sr2(self,IDs_batch,batchsize):
####
# IDs_batch: IDs of the images which are in the current batch
# batchsize: Size of the current batch
####
# Create empty tensor for low-resolution images, high-resolution images and the labels
X_hr_batch = np.empty((batchsize, self.out_shape[0], self.out_shape[1], self.n_channels))
X_lr_batch = np.empty((batchsize, self.in_shape[0], self.in_shape[1], self.n_channels))
l_batch = np.empty((batchsize, self.n_classes), dtype=int)
for i,ID in enumerate(IDs_batch):
X_hr_batch[i,] = (self.files['train']['images_hr'])[int(ID)]
X_lr_batch[i,] = self.noiser((self.files['train']['images_lr{0}'.format(self.scale)])[int(ID)])
l_batch[i,] = (self.files['train']['labels'])[int(ID)]
return X_lr_batch,[X_hr_batch,l_batch]
# Function to load the data if model.train is used
def load_data(self,mode,num,sampling=False):
####
# possible modes are: train, val, test
# num: specifies the number of samples that are used; if num is <= 0 or exceeds the number of available images, all available images are used
# sampling: if True, randomly draw num images from the dataset
####
# check whether the file exists
if not os.path.isfile(os.path.join(self.data_dir,"{0}/{0}.{1}".format(mode,self.ending))):
# MNIST can be stored directly, but SVHN needs a manual download
if self.dataset == "MNIST":
self.store_data()
else:
raise Exception("Data not found! Please download {0}".format(self.dataset))
#load file IDs
if num <= len(self.IDs[mode]) and num > 0:
if sampling:
self.IDs[mode] = sample(self.IDs[mode],num)
else:
self.IDs[mode] = self.IDs[mode][0:num]
else:
num = len(self.IDs[mode])
# load data
fileData = self.files[mode]
X_hr = np.empty((num, self.out_shape[0], self.out_shape[1], self.n_channels))
X_lr = np.empty((num, self.in_shape[0], self.in_shape[1], self.n_channels))
l = np.empty((num, self.n_classes), dtype=int)
for i,ID in enumerate(self.IDs[mode]):
X_hr[i, ] = (fileData['images_hr'])[int(ID)]
X_lr[i, ] = self.noiser((fileData['images_lr{0}'.format(self.scale)])[int(ID)])
l[i,] = (fileData['labels'])[int(ID)]
return X_lr,X_hr,l
# Functions to add Gaussian noise to the low-resolution images
def gaussian(self,X_lr):
# Noise component, std is drawn from a uniform distribution [n_low, n_high]
gaussian = np.random.normal(0, random.uniform(self.n_low, self.n_high),
(self.in_shape[0], self.in_shape[1], self.n_channels))
X_lr = X_lr + gaussian
X_lr[X_lr > 1] = 1
X_lr[X_lr < 0] = 0
return X_lr
# Functions to add speckle noise to the low-resolution images
def speckle(self,X_lr):
# Noise component, std is drawn from a uniform distribution [n_low, n_high]
gaussian = np.random.normal(0, random.uniform(self.n_low, self.n_high),
(self.in_shape[0], self.in_shape[1], self.n_channels))
X_lr = X_lr + X_lr * gaussian
X_lr[X_lr > 1] = 1
X_lr[X_lr < 0] = 0
return X_lr
# Functions to add salt&pepper noise to the low-resolution images
def sp(self,X_lr):
p = random.uniform(self.n_low, self.n_high)
flipped= np.random.choice([True, False], size=(self.in_shape[0], self.in_shape[1], self.n_channels),
p=[p, 1 - p])
salted = np.random.choice([True, False], size=( self.in_shape[0], self.in_shape[1], self.n_channels),
p=[0.5, 0.5])
peppered = ~salted
X_lr[flipped & salted] = 1.0
X_lr[flipped & peppered] = 0.0
return X_lr
# Empty function if no noise is added
def empty(self,X_lr):
return X_lr
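# Dispatch sketch (illustrative values): with noiseType="sp", noise_low=0.01 and
# noise_high=0.05, self.noiser is bound to self.sp in __init__, so the generators above
# call self.noiser(X_lr), which flips a random fraction p ~ U(0.01, 0.05) of the pixels
# to 0 (pepper) or 1 (salt) before the batch is returned.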
# Function to create the low-resolution images
def create_down(self,input, scale):
####
# input: low-resolution input image
# scale: Downsampling factor
####
# first blur the data
# multi channel is always True, because grayscale images are stored as (32,32,1)
input = gaussian(input.astype(np.float64), sigma=scale / 2, multichannel=True)
# Rescale the image
data_down = rescale(input, 1 / scale, multichannel=True)
return data_down.astype(np.float16)
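# Worked example (illustrative numbers): for a 32x32x1 high-resolution image and scale=4,
# create_down() blurs with sigma = 4 / 2 = 2.0 and then rescales by 1/4, giving an
# 8x8x1 low-resolution image stored as float16.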
# Function to store the data in HDF5 files
def store_data(self):
# First get the data
train_data_hr, train_label, val_data_hr, val_label, test_data_hr, test_label = self.get_data()
test_data_hr = test_data_hr.astype(np.float16)
val_data_hr = val_data_hr.astype(np.float16)
train_data_hr = train_data_hr.astype(np.float16)
# Make sure the images are in range [0,1]
if np.max(test_data_hr[0,]) > 1:
test_data_hr = test_data_hr / 255
val_data_hr = val_data_hr / 255
train_data_hr = train_data_hr / 255
# store the test data
if not os.path.exists(os.path.join(self.data_dir, 'test')):
os.makedirs(os.path.join(self.data_dir, 'test'), exist_ok=True)
fileTest = open(os.path.join(os.path.join(self.data_dir, 'test'), 'idx_list.txt'), 'w')
sTest = test_data_hr.shape
down2 = np.zeros((sTest[0], int(sTest[1] / 2), int(sTest[2] / 2), sTest[3]))
down4 = np.zeros((sTest[0], int(sTest[1] / 4), int(sTest[2] / 4), sTest[3]))
for idx_test in range(test_data_hr.shape[0]):
fileTest.write('{0:06d}\n'.format(idx_test))
down2[idx_test,] = self.create_down(test_data_hr[idx_test,], 2)
down4[idx_test,] = self.create_down(test_data_hr[idx_test,], 4)
fileTest.close()
with h5py.File(os.path.join(os.path.join(self.data_dir, 'test'), 'test.hdf5'), "w") as g:
g.create_dataset('images_hr', data=test_data_hr)
g.create_dataset('images_lr2', data=down2)
g.create_dataset('images_lr4', data=down4)
g.create_dataset('labels', data=test_label)
del test_data_hr
del test_label
del down2
del down4
# store the val data
if not os.path.exists(os.path.join(self.data_dir, 'val')):
os.makedirs(os.path.join(self.data_dir, 'val'), exist_ok=True)
fileVal = open(os.path.join(os.path.join(self.data_dir, 'val'), 'idx_list.txt'), 'w')
sVal = val_data_hr.shape
down2 = np.zeros((sVal[0], int(sVal[1] / 2), int(sVal[2] / 2), sVal[3]))
down4 = np.zeros((sVal[0], int(sVal[1] / 4), int(sVal[2] / 4), sVal[3]))
for idx_val in range(val_data_hr.shape[0]):
fileVal.write('{0:06d}\n'.format(idx_val))
down2[idx_val,] = self.create_down(val_data_hr[idx_val,], 2)
down4[idx_val,] = self.create_down(val_data_hr[idx_val,], 4)
fileVal.close()
with h5py.File(os.path.join(os.path.join(self.data_dir, 'val'), 'val.hdf5'), "w") as g:
g.create_dataset('images_hr', data=val_data_hr)
g.create_dataset('images_lr2', data=down2)
g.create_dataset('images_lr4', data=down4)
g.create_dataset('labels', data=val_label)
del val_data_hr
del val_label
del down2
del down4
# store the train data
if not os.path.exists(os.path.join(self.data_dir, 'train')):
os.makedirs(os.path.join(self.data_dir, 'train'), exist_ok=True)
fileTrain = open(os.path.join(os.path.join(self.data_dir, 'train'), 'idx_list.txt'), 'w')
sTrain = train_data_hr.shape
down2 = np.zeros((sTrain[0], int(sTrain[1] / 2), int(sTrain[2] / 2), sTrain[3]))
down4 = np.zeros((sTrain[0], int(sTrain[1] / 4), int(sTrain[2] / 4), sTrain[3]))
for idx_train in range(train_data_hr.shape[0]):
fileTrain.write('{0:06d}\n'.format(idx_train))
down2[idx_train,] = self.create_down(train_data_hr[idx_train,], 2)
down4[idx_train,] = self.create_down(train_data_hr[idx_train,], 4)
fileTrain.close()
with h5py.File(os.path.join(os.path.join(self.data_dir, 'train'), 'train.hdf5'), "w") as g:
g.create_dataset('images_hr', data=train_data_hr)
g.create_dataset('images_lr2', data=down2)
g.create_dataset('images_lr4', data=down4)
g.create_dataset('labels', data=train_label)
del train_data_hr
del train_label
del down2
del down4
# Function to get the MNIST and SVHN data
def get_data(self):
if self.dataset == "mnist":
# download MNIST
(train_data_hr, train_label), (test_data_hr, test_label) = tf.keras.datasets.mnist.load_data()
# get size of the data
num_train_all,w,h = train_data_hr.shape[0],train_data_hr.shape[1], train_data_hr.shape[2]
num_test = test_data_hr.shape[0]
# add additional dimension for the network
train_data_hr = train_data_hr.reshape((num_train_all,w,h, 1))
test_data_hr = test_data_hr.reshape((num_test,w,h, 1))
# Split into training and validation data
num_val = int(0.2*num_train_all)
val_data_hr = train_data_hr[0:num_val,]
val_label = train_label[0:num_val,]
train_data_hr = train_data_hr[num_val:num_train_all,]
train_label = train_label[num_val:num_train_all,]
return train_data_hr, train_label, val_data_hr, val_label, test_data_hr, test_label
elif self.dataset == "svhn":
# Load the data from the mat files
train = sio.loadmat(self.data_dir + '/train_32x32.mat')
train_data_hr_tmp, l_train = train['X'], train['y']
trainextra
import datetime
import urllib
import urllib.parse
import urllib.request
import pprint as pp
from collections import defaultdict
from .utils import createSign, http_get_request, http_post_request
from ..exchange import Exchange
class Huobi(Exchange):
def __init__(self, key, secret):
self.api = HuobiAPI(key, secret)
self.all_pairs = self.get_all_trading_pairs()
super().__init__('huobi')
self.connect_success()
def get_pair(self, coin, base):
return (coin + base).lower()
def get_all_trading_pairs(self):
# ***** this needs to change to the updated form ***** #
all_pairs = set()
res = self.api.get_symbols()['data']
for info in res:
coin = info['base-currency']
base = info['quote-currency']
all_pairs.add(self.get_pair(coin, base))
return all_pairs
def get_all_trading_coins(self):
# ***** this needs to change to the updated form ***** #
all_coins = set()
res = self.api.get_symbols()['data']
for info in res:
coin = info['base-currency']
all_coins.add(coin)
return all_coins
def get_BTC_price(self):
return self.get_price('BTC', base='USDT')
def get_price(self, coin, base='BTC', _type=0):
TYPES = {0: 'bid', 1: 'ask'}
pair = self.get_pair(coin, base)
if pair in self.all_pairs:
return float(self.api.get_ticker(symbol=pair)['tick'][TYPES[_type]][0])
else:
return 0
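# Illustrative usage sketch (API_KEY / API_SECRET are placeholders; valid Huobi
# credentials and network access are assumed):
#
#     ex = Huobi(API_KEY, API_SECRET)
#     btc_usd = ex.get_BTC_price()               # best bid of BTC/USDT
#     eth_ask = ex.get_price('ETH', 'BTC', 1)    # best ask of 'ethbtc', or 0 if the pair is unknown
#     balances = ex.get_full_balance()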
def get_full_balance(self, allow_zero=False):
BTC_price = self.get_BTC_price()
coins = {
'total': {'BTC': 0, 'USD': 0, 'num': 0},
'USD': {'BTC': 0, 'USD': 0, 'num': 0}
}
for coinName, num in self.coins.items():
coinName = coinName.upper()
if allow_zero or num != 0:
if coinName == 'USDT':
coinName = 'USD'
BTC_value = num / BTC_price
elif coinName == 'BTC':
BTC_value = num
else:
BTC_value = self.get_price(coinName) * num
USD_value = BTC_value * BTC_price
# update info
coins[coinName] = {
'num': num,
'BTC': BTC_value,
'USD': USD_value
}
coins['total']['BTC'] += BTC_value
coins['total']['USD'] += USD_value
return coins
def get_all_coin_balance(self, allow_zero=False):
balances = self.api.get_balance()
coins = defaultdict(float)
for coin in balances:
coinName = coin['currency']
num = float(coin['balance'])
if coinName == 'USDT':
coinName = 'USD'
if allow_zero or num != 0:
coins[coinName] += num
return dict(coins)
# def market_buy(self, coin, base='BTC', quantity=0):
# pair = self.get_pair(coin, base)
# response = self.api.order_market_buy(symbol=pair, quantity=quantity)
# return {
# 'exchange': self.name,
# 'side': 'sell',
# 'pair': self.get_my_pair(coin, base),
# 'price': response['price'],
# 'quantity': response['executedQty'],
# 'total': None,
# 'fee': None,
# 'id': response['orderId'],
# 'id2': response['clientOrderId']
# }
# def market_sell(self, coin, base='BTC', quantity=0):
# pair = self.get_pair(coin, base)
# response = self.api.order_market_sell(symbol=pair, quantity=quantity)
# return {
# 'exchange': self.name,
# 'side': 'sell',
# 'pair': self.get_my_pair(coin, base),
# 'price': response['price'],
# 'quantity': response['executedQty'],
# 'total': None,
# 'fee': None,
# 'id': response['orderId'],
# 'id2': response['clientOrderId']
# }
# def market_sell_all(self, coin, base='BTC'):
# quantity = self.get_coin_balance(coin)
# if quantity <= 0:
# print('%s does not have enough balance to sell')
# return None
# else:
# step = self.get_step_size(self.get_pair(coin, base))
# if step > 0 and quantity >= step:
# quantity = quantity - (quantity % float(step))
# return self.market_sell(coin, base=base, quantity=quantity)
# ------------------------------------------------------------------ #
# --------------------------- API Wrapper -------------------------- #
# ------------------------------------------------------------------ #
class HuobiAPI:
# API base URLs
MARKET_URL = "https://api.huobi.pro"
TRADE_URL = "https://api.huobi.pro"
def __init__(self, api_key, api_secret):
self.api_key = api_key
self.api_secret = api_secret
self.margin_accs = set()
all_accounts = self.get_accounts()['data']
for acc in all_accounts:
acc_id = acc['id']
if acc['type'] == 'spot':
self.account_id = acc_id
elif acc['type'] == 'margin':
self.margin_accs.add(acc_id)
def api_key_get(self, params, request_path):
method = 'GET'
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
params.update({'AccessKeyId': self.api_key,
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'Timestamp': timestamp})
host_url = self.TRADE_URL
host_name = urllib.parse.urlparse(host_url).hostname
host_name = host_name.lower()
params['Signature'] = createSign(params, method, host_name, request_path, self.api_secret)
url = host_url + request_path
return http_get_request(url, params)
def api_key_post(self, params, request_path):
method = 'POST'
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
params_to_sign = {'AccessKeyId': self.api_key,
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'Timestamp': timestamp}
host_url = self.TRADE_URL
host_name = urllib.parse.urlparse(host_url).hostname
host_name = host_name.lower()
params_to_sign['Signature'] = createSign(params_to_sign, method, host_name, request_path, self.api_secret)
url = host_url + request_path + '?' + urllib.parse.urlencode(params_to_sign)
return http_post_request(url, params)
def get_kline(self, symbol, period, size=150):
"""
:param symbol
:param period: allowed values: {1min, 5min, 15min, 30min, 60min, 1day, 1mon, 1week, 1year}
:param size: allowed values: [1, 2000]
:return:
"""
params = {'symbol': symbol,
'period': period,
'size': size}
url = self.MARKET_URL + '/market/history/kline'
return http_get_request(url, params)
# Get market depth
def get_depth(self, symbol, type):
"""
:param symbol
:param type: allowed values: { percent10, step0, step1, step2, step3, step4, step5 }
:return:
"""
params = {'symbol': symbol,
'type': type}
url = self.MARKET_URL + '/market/depth'
return http_get_request(url, params)
# Get trade detail
def get_trade(self, symbol):
"""
:param symbol
:return:
"""
params = {'symbol': symbol}
url = self.MARKET_URL + '/market/trade'
return http_get_request(url, params)
# Get the merged ticker
def get_ticker(self, symbol):
"""
:param symbol:
:return:
"""
params = {'symbol': symbol}
url = self.MARKET_URL + '/market/detail/merged'
return http_get_request(url, params)
# Get Market Detail (24-hour trading volume data)
def get_detail(self, symbol):
"""
:param symbol
:return:
"""
params = {'symbol': symbol}
url = self.MARKET_URL + '/market/detail'
return http_get_request(url, params)
# Get the supported trading pairs
def get_symbols(self, long_polling=None):
"""
"""
params = {}
if long_polling:
params['long-polling'] = long_polling
path = '/v1/common/symbols'
return self.api_key_get(params, path)
'''
Trade/Account API
'''
def get_accounts(self):
"""
:return:
"""
path = "/v1/account/accounts"
params = {}
return self.api_key_get(params, path)
# Get the current account balances
def _get_spot_balance(self):
"""
:param acct_id
:return:
"""
url = "/v1/account/accounts/{0}/balance".format(self.account_id)
params = {"account-id": self.account_id}
return self.api_key_get(params, url)['data']['list']
def _get_margin_balance(self):
"""
:param acct_id
:return:
"""
balances = []
for acc_id in self.margin_accs:
url = "/v1/account/accounts/{0}/balance".format(acc_id)
params = {"account-id": acc_id}
balances.extend(self.api_key_get(params, url)['data']['list'])
return balances
def get_balance(self):
balance = self._get_spot_balance()
margin_bal = self._get_margin_balance()
# pp.pprint(margin_bal)
balance.extend(margin_bal)
return balance
# Place an order
# Create and execute an order
def send_order(self, amount, source, symbol, _type, price=0):
"""
:param amount:
:param source: if trading with borrowed (margin) assets, set the request parameter 'source' to 'margin-api' when placing the order
:param symbol:
:param _type: allowed values {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell}
:param price:
:return:
"""
try:
accounts = self.get_accounts()
acct_id = accounts['data'][0]['id']
except BaseException as e:
print ('get acct_id error.%s' % e)
acct_id = ACCOUNT_ID
params = {"account-id": acct_id,
"amount": amount,
"symbol": symbol,
"type": _type,
"source": source}
if price:
params["price"] = price
url = '/v1/order/orders/place'
return self.api_key_post(params, url)
# Cancel an order
def cancel_order(self, order_id):
"""
:param order_id:
:return:
"""
params = {}
url = "/v1/order/orders/{0}/submitcancel".format(order_id)
return self.api_key_post(params, url)
# Query a single order
def order_info(self, order_id):
"""
:param order_id:
:return:
"""
params = {}
url = "/v1/order/orders/{0}".format(order_id)
return self.api_key_get(params, url)
# Query the match (trade) details of an order
def order_matchresults(self, order_id):
"""
:param order_id:
:return:
"""
params = {}
url = "/v1/order/orders/{0}/matchresults".format(order_id)
return self.api_key_get(params, url)
# Query open and historical orders
def orders_list(self, symbol, states, types=None, start_date=None, end_date=None, _from=None, direct=None, size=None):
"""
:param symbol:
:param states: allowed values {pre-submitted: queued for submission, submitted, partial-filled: partially filled, partial-canceled: partially filled then canceled, filled: fully filled, canceled}
:param types: allowed values {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell}
:param start_date:
:param end_date:
:param _from:
:param direct: allowed values {prev, next}
:param size:
:return:
"""
params = {'symbol': symbol,
'states': states}
if types:
params[types] = types
if start_date:
params['start-date'] = start_date
if end_date:
params['end-date'] = end_date
if _from:
params['from'] = _from
if direct:
params['direct'] = direct
if size:
params['size'] = size
url = '/v1/order/orders'
return self.api_key_get(params, url)
# Query current and historical trades (match results)
def orders_matchresults(self, symbol, types=None, start_date=None, end_date=None, _from=None, direct=None, size=None):
"""
:param symbol:
:param types: allowed values {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell}
:param start_date:
:param end_date:
:param _from:
:param direct: allowed values {prev, next}
:param size:
:return:
"""
params = {'symbol': symbol}
if types:
params[types] = types
if start_date:
params['start-date'] = start_date
if end_date:
params['end-date'] = end_date
if _from:
params['from'] = _from
if direct:
params['direct'] = direct
if size:
params['size'] = size
url = '/v1/order/matchresults'
return self.api_key_get(params, url)
# Request a cryptocurrency withdrawal
def withdraw(self, address, amount, currency, fee=0, addr_tag=""):
"""
:param address_id:
:param amount:
:param currency: btc, ltc, bcc, eth, etc. (currencies supported by Huobi Pro)
:param fee:
:param addr-tag:
:return: {
"status": "ok",
"data": 700
}
"""
params = {'address': address,
'amount': amount,
"currency": currency,
"fee": fee,
"addr-tag": addr_tag}
url = '/v1/dw/withdraw/api/create'
return self.api_key_post(params, url)
# Request cancellation of a cryptocurrency withdrawal
def cancel_withdraw(self, address_id):
"""
:param address_id:
:return: {
"status": "ok",
"data": 700
}
"""
params = {}
url = '/v1/dw/withdraw-virtual/{0}/cancel'.format(address_id)
return self.api_key_post(params, url)
'''
Margin (borrowing) API
'''
# Create and execute a margin (borrowed-asset) order
def send_margin_order(self, amount, source, symbol, _type, price=0):
"""
:param amount:
:param source: 'margin-api'
:param symbol:
:param _type: allowed values {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell}
:param price:
:return:
"""
try:
accounts = self.get_accounts()
acct_id = accounts['data'][0]['id']
except BaseException as e:
print ('get acct_id error.%s' % e)
acct_id = ACCOUNT_ID
params = {"account-id": acct_id,
"amount": amount,
"symbol": symbol,
"type": _type,
"source": 'margin-api'}
if price:
dfs_sel[col]
# slice TMDs out of dfs_sel, and save them in the new df_TMD
df_TMD['%s_SW_query_seq' % TMD] = dfs_sel.apply(utils.slice_SW_query_TMD_seq, args=(TMD,), axis=1)
df_TMD['%s_SW_markup_seq' % TMD] = dfs_sel.apply(utils.slice_SW_markup_TMD, args=(TMD,), axis=1)
df_TMD['%s_SW_match_seq' % TMD] = dfs_sel.apply(utils.slice_SW_match_TMD_seq, args=(TMD,), axis=1)
########################################################################################
# #
# FASTA: slice TMD plus surrounding sequence #
# #
# NOTE: probably not used in AAIMON, but if it is not here, #
# the full match_alignment_seq needs to be saved for the fasta algorithms somewhere. #
# Putting it here allows these large, memory-chewing columns to be dropped. #
# #
########################################################################################
# redefine the number of amino acids before and after the TMD to be inserted into the FastA files
n_aa_before_tmd = s["n_aa_before_tmd"]
n_aa_after_tmd = s["n_aa_after_tmd"]
# define the start of the TMD + surrounding sequence
dfs['%s_start_in_SW_alignment_plus_surr' % TMD] = dfs['%s_start_in_SW_alignment' % TMD] - n_aa_before_tmd
# replace negative values with zero
dfs.loc[dfs['%s_start_in_SW_alignment_plus_surr' % TMD] < 0, '%s_start_in_SW_alignment_plus_surr' % TMD] = 0
# define the end of the TMD + surrounding sequence. In python slicing, this end can be longer than the sequence.
dfs['%s_end_in_SW_alignment_plus_surr' % TMD] = dfs['%s_end_in_SW_alignment' % TMD] + n_aa_after_tmd
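# Worked example (illustrative numbers): with n_aa_before_tmd = 10 and n_aa_after_tmd = 10,
# a TMD aligned at positions 100-123 yields a surrounding slice of 90-133, while a TMD
# starting at position 4 would give -6, which the replacement step above resets to 0 so
# the slice simply begins at the start of the alignment.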
# select sequences that seem to have a start
dfs = dfs.loc[dfs['%s_start_in_SW_alignment_plus_surr' % TMD].notnull()]
# slice out the match seq + the surrounding sequence
df_TMD['%s_SW_match_seq_plus_surr' % TMD] = dfs.apply(utils.slice_SW_match_TMD_seq_plus_surr, args=(TMD,),axis=1)
# slice the query sequence. Could be useful for fasta_gap analysis.
df_TMD['%s_SW_query_seq_plus_surr'%TMD] = dfs.apply(utils.slice_SW_query_TMD_seq_plus_surr, args=(TMD,), axis=1)
#and the same for the TMD + surrounding sequence, useful to examine the TMD interface
# NOT DEEMED NECESSARY. WHY WOULD YOU NEED TO SLICE QUERY OR MARKUP + SURROUNDING?
# df_TMD['%s_SW_markup_seq_plus_surr'%TMD] = dfs.apply(utils.slice_SW_markup_TMD_plus_surr, args=(TMD,), axis=1)
########################################################################################
# #
# count number of gaps in the query and match TMD aligned sequence #
# #
########################################################################################
"""This is used as a filter in filter_and_save_fasta, therefore is conducted earlier in the slicing function. """
# count the number of gaps in the query and match sequences
df_TMD['%s_SW_query_num_gaps' % TMD] = df_TMD['%s_SW_query_seq' % TMD].str.count("-")
df_TMD['%s_SW_match_num_gaps' % TMD] = df_TMD['%s_SW_match_seq' % TMD].str.count("-")
########################################################################################
# #
# calculate the average number of gaps per residue in the TMD alignment #
# (number of gaps)/(length of sequence excluding gaps) #
# #
########################################################################################
df_TMD['%s_SW_q_gaps_per_q_residue' % TMD] = df_TMD['%s_SW_query_num_gaps' % TMD].dropna() / len(query_TMD_sequence)
# calculate hydrophobicity
df_TMD['%s_SW_match_lipo' % TMD] = df_TMD['%s_SW_match_seq'%TMD].dropna().apply(lambda x: utils.calc_lipophilicity(x))
else:
logging.info('{} does not have any valid homologues for {}. Re-downloading simap homologue XML may be necessary.'.format(acc, TMD))
df_TMD = pd.DataFrame()
return df_TMD
def slice_nonTMD_seqs(dfs, df_nonTMD_sliced, list_of_TMDs):
########################################################################################
# #
# nonTMD calculations [AAIMON] #
# #
########################################################################################
if dfs.shape[0] > 0:
# check if all tmds are in SW alignment
# create a list of columns to reindex the DataFrame
list_columns_TMD_in_SW_alignment = []
for TMD in list_of_TMDs:
# TMD found by regex
list_columns_TMD_in_SW_alignment.append('%s_in_SW_alignment' % TMD)
# TMD matching useful sequence
#list_columns_TMD_in_SW_alignment.append('%s_in_SW_align_match' % TMD)
# create a slice of the filtered dataframe that only contains the relevant columns (N.B. copy=False, this will provide a view, not a copy)
df2 = df_nonTMD_sliced.reindex(index=df_nonTMD_sliced.index, columns=list_columns_TMD_in_SW_alignment, copy=False)
# create a new column in the original dataframe that shows that ALL TMDs have been found in the SW alignment
df_nonTMD_sliced['all_tmds_in_SW_alignment'] = df2.dropna().all(axis=1)
# filter to contain only hits with all tmds
########################################################################################
# #
# start processing nonTMD region [AAIMON] #
# #
########################################################################################
# create a copy of the original df_cr dataframe containing only hits where all tmds are found in the match
#df_cr_nonTMD = df_cr.loc[df_cr['all_tmds_in_SW_alignment'].notnull()].query('all_tmds_in_SW_alignment == True')
# drop any homologues where not all TMDs were found in the match
df_nonTMD_sliced.query('all_tmds_in_SW_alignment == True', inplace=True)
if df_nonTMD_sliced.empty:
# there are no homologues with all TMDs in the Smith Waterman alignment
# return an empty dataframe
return pd.DataFrame()
# filter to contain only hits where the index for the TMD is present
first_TMD_start_index = '%s_start_in_SW_alignment' % list_of_TMDs[0]
# FILTER REMOVED! HOPEFULLY STILL WORKS :)
#df_cr_nonTMD = df_cr_nonTMD.loc[df_cr_nonTMD[first_TMD_start_index].notnull()]
df_nonTMD_sliced['nonTMD_index_tuple_first'] = df_nonTMD_sliced[first_TMD_start_index].apply(lambda x: (0, int(x)))
# create start and stop indices for all sections between tmds
# the start of the last nonTMD section will be the end of the last TMD
df_nonTMD_sliced['nonTMD_index_tuple_last0'] = df_nonTMD_sliced['%s_end_in_SW_alignment' % list_of_TMDs[-1]].dropna().astype('int32')
# the end of the last nonTMD section will be the end of the full alignment sequence
df_nonTMD_sliced['nonTMD_index_tuple_last1'] = df_nonTMD_sliced['len_query_align_seq'].dropna().astype('int32')
# join to make a tuple
# df_cr_nonTMD['nonTMD_index_tuple_last'] = df_cr_nonTMD[['nonTMD_index_tuple_last0', 'nonTMD_index_tuple_last1']].apply(tuple, axis=1)
# create the index tuple
df_nonTMD_sliced['nonTMD_index_tuple_last'] = df_nonTMD_sliced.apply(utils.create_indextuple_nonTMD_last, axis=1)
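# Illustrative sketch (hypothetical numbers) of the index tuples built here for a
# homologue with two TMDs (e.g. TM01 aligned at 50-72 and TM02 at 120-142 in a
# 300-residue alignment), assuming create_indextuple_nonTMD_last simply pairs the
# two values computed above:
#   nonTMD_index_tuple_first -> (0, 50)
#   nonTMD_index_TM01        -> (72, 120)   (created in the loop below)
#   nonTMD_index_tuple_last  -> (142, 300)
# These tuples are later nested together and used to slice out the nonTMD sequence.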
########################################################################################
# #
# create the indices for the nonTMD region [AAIMON] #
# #
########################################################################################
# for each TMD EXCEPT the last, which ends at the sequence end, create the indices for the nonTMD region (after the TMD)
for TM_Nr in range(len(list_of_TMDs) - 1):
# the TMD is the equivalent item in the list
TMD = list_of_TMDs[TM_Nr]
# the next TMD, which contains the end index, is the next item in the list
next_TMD = list_of_TMDs[TM_Nr + 1]
# select only the columns in the dataframe that are of interest, and change the data type to integer
index_columns = ['%s_end_in_SW_alignment' % TMD, '%s_start_in_SW_alignment' % next_TMD]
df_nonTMD_sliced[index_columns] = df_nonTMD_sliced[index_columns].astype('int64')
# create a tuple containing the indices for the nonTMD sequence regions in between each TMD (middle indices)
df_nonTMD_sliced['nonTMD_index_%s' % TMD] = tuple(zip(df_nonTMD_sliced['%s_end_in_SW_alignment' % TMD],df_nonTMD_sliced['%s_start_in_SW_alignment' % next_TMD]))
# now join all the indices together to make one tuple of tuples for the non-TMD region
# df_cr_nonTMD = df_cr.query('all_tmds_in_SW_alignment == True')
df_nonTMD_sliced['nested_tuple_indices_all_nonTMD_regions'] = df_nonTMD_sliced[['nonTMD_index_tuple_last']].apply(tuple, axis=1)
# create a view of the dataframe that contains only the desired columns
# first create a list of the desired columns
# start with the first tuple, from 0 to the start of the first TMD
list_of_nonTMD_index_columns = ['nonTMD_index_tuple_first']
# create a nonTMD region for each of the TMDs (except the last one)
list_from_TMs = ['nonTMD_index_%s' % TMD2 for TMD2 in list_of_TMDs[:-1]]
# join lists
list_of_nonTMD_index_columns = list_of_nonTMD_index_columns + list_from_TMs
list_of_nonTMD_index_columns += ['nonTMD_index_tuple_last']
# create the new view by reindexing the dataframe with the list of desired columns
df_cr_tuples = df_nonTMD_sliced.reindex(index=df_nonTMD_sliced.index, columns=list_of_nonTMD_index_columns, copy=False)
# now for convenience, these tuples can be combined together to form one column, with a tuple of tuples
# first convert all values in each row to a list, excluding the index column
list_tuple_indices_all_nonTMD_regions = list(df_cr_tuples.itertuples(index=False))
# convert to a series, and reindex with the original index from the dataframe
tuples_series = pd.Series(list_tuple_indices_all_nonTMD_regions, index=df_nonTMD_sliced.index)
# for some reason, the tuples are a pandas object "Pandas(nonTMD_index_tuple_first=(0, 592), nonTMD_index_tuple_last=(615, 618))"
# convert to simple tuples
df_nonTMD_sliced['nested_tuple_indices_all_nonTMD_regions'] = tuples_series.apply(lambda x: tuple(x))
# change to a string, in case this solves the weird effect with only the last tuple shown
df_nonTMD_sliced['nested_tuple_indices_all_nonTMD_regions'] = df_nonTMD_sliced['nested_tuple_indices_all_nonTMD_regions'].astype(str)
# you can test that the original index is maintained as follows:
# filter dfs to only contain the rows of interest as contained by df_TMD_indice (homologues that contain all TMDs)
dfs = dfs.loc[df_nonTMD_sliced.index,:]
# add the series as a new column in the original dataframe. Missing data (when not all TMDs found) will be filled using np.nan
dfs['nested_tuple_indices_all_nonTMD_regions'] = df_nonTMD_sliced['nested_tuple_indices_all_nonTMD_regions']
# # filter to remove incomplete sequences
# df_cr_nonTMD = df_cr_nonTMD.query('all_tmds_in_SW_alignment == True')
# define the string for slicing as a numpy array
# use the numpy vectorize function, which effectively applies the function in a for loop (not optimized for speed)
# df_cr_nonTMD['nonTMD_seq_query'] = np.vectorize(utils.slice_with_nested_tuple)(np.array(df_cr_nonTMD['query_align_seq']),np.array(df_cr_nonTMD['nested_tuple_indices_all_nonTMD_regions']))
# df_cr_nonTMD['nonTMD_markup'] = np.vectorize(utils.slice_with_nested_tuple)(np.array(df_cr_nonTMD['align_markup_seq']),np.array(df_cr_nonTMD['nested_tuple_indices_all_nonTMD_regions']))
# df_cr_nonTMD['nonTMD_seq_match'] = np.vectorize(utils.slice_with_nested_tuple)(np.array(df_cr_nonTMD['match_align_seq']),np.array(df_cr_nonTMD['nested_tuple_indices_all_nonTMD_regions']))
########################################################################################
# #
# slice out the nonTMD region for each homologue [AAIMON] #
# #
########################################################################################
# due to problems with np.vectorize and the pandas methods, slice the sequences one at a time with a simple 'for loop'
# for each hit, perform the slice
for hit in dfs.index:
df_nonTMD_sliced.loc[hit, 'nonTMD_seq_query'] = utils.slice_with_nested_tuple(dfs.loc[hit, 'query_align_seq'],dfs.loc[hit, 'nested_tuple_indices_all_nonTMD_regions'])
df_nonTMD_sliced.loc[hit, 'nonTMD_markup'] = utils.slice_with_nested_tuple(dfs.loc[hit, 'align_markup_seq'],dfs.loc[hit, 'nested_tuple_indices_all_nonTMD_regions'])
df_nonTMD_sliced.loc[hit, 'nonTMD_seq_match'] = utils.slice_with_nested_tuple(dfs.loc[hit, 'match_align_seq'],dfs.loc[hit, 'nested_tuple_indices_all_nonTMD_regions'])
# # transfer to original dataframe (index should still match the original, partial seqs will be filled with np.nan)
# df_cr['nonTMD_seq_query'] = df_cr_nonTMD['nonTMD_seq_query']
# df_cr['nonTMD_markup'] = df_cr_nonTMD['nonTMD_markup']
# df_cr['nonTMD_seq_match'] = df_cr_nonTMD['nonTMD_seq_match']
return df_nonTMD_sliced
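# Illustrative sketch (an assumption, not the project's own utils code): the
# utils.slice_with_nested_tuple helper used above is taken to accept a sequence
# plus the string-encoded nested tuple of (start, end) pairs built earlier, and
# to return the concatenated slices, roughly like this:
#
#     import ast
#     def slice_with_nested_tuple(seq, nested_tuple_str):
#         nested = ast.literal_eval(nested_tuple_str)
#         return ''.join(seq[start:end] for start, end in nested)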
def getExpansionSegDbList(self):
    """Returns a list of all Segment objects that make up the new segments
    of an expansion"""
    dbs = []
    for segPair in self.expansionSegmentPairs:
        dbs.extend(segPair.get_dbs())
    return dbs
# --------------------------------------------------------------------
def getExpansionSegPairList(self):
"""Returns a list of all SegmentPair objects that make up the new segments
of an expansion"""
return self.expansionSegmentPairs
# --------------------------------------------------------------------
def getSegmentContainingDb(self, db):
for segPair in self.segmentPairs:
for segDb in segPair.get_dbs():
if db.getSegmentDbId() == segDb.getSegmentDbId():
return segPair
return None
# --------------------------------------------------------------------
def getExpansionSegmentContainingDb(self, db):
for segPair in self.expansionSegmentPairs:
for segDb in segPair.get_dbs():
if db.getSegmentDbId() == segDb.getSegmentDbId():
return segPair
return None
# --------------------------------------------------------------------
def get_invalid_segdbs(self):
dbs=[]
for segPair in self.segmentPairs:
if not segPair.primaryDB.valid:
dbs.append(segPair.primaryDB)
if segPair.mirrorDB and not segPair.mirrorDB.valid:
dbs.append(segPair.mirrorDB)
return dbs
# --------------------------------------------------------------------
def get_synchronized_segdbs(self):
dbs=[]
for segPair in self.segmentPairs:
if segPair.primaryDB.mode == MODE_SYNCHRONIZED:
dbs.append(segPair.primaryDB)
if segPair.mirrorDB and segPair.mirrorDB.mode == MODE_SYNCHRONIZED:
dbs.append(segPair.mirrorDB)
return dbs
# --------------------------------------------------------------------
def get_unbalanced_segdbs(self):
dbs=[]
for segPair in self.segmentPairs:
for segdb in segPair.get_dbs():
if segdb.preferred_role != segdb.role:
dbs.append(segdb)
return dbs
# --------------------------------------------------------------------
def get_unbalanced_primary_segdbs(self):
dbs = [seg for seg in self.get_unbalanced_segdbs() if seg.role == ROLE_PRIMARY]
return dbs
# --------------------------------------------------------------------
def get_valid_segdbs(self):
dbs=[]
for segPair in self.segmentPairs:
if segPair.primaryDB.valid:
dbs.append(segPair.primaryDB)
if segPair.mirrorDB and segPair.mirrorDB.valid:
dbs.append(segPair.mirrorDB)
return dbs
# --------------------------------------------------------------------
def get_hostlist(self, includeMaster=True):
hosts=[]
if includeMaster:
hosts.append(self.master.hostname)
if self.standbyMaster is not None:
hosts.append(self.standbyMaster.hostname)
for segPair in self.segmentPairs:
hosts.extend(segPair.get_hosts())
# dedupe? segPair.get_hosts() doesn't promise to dedupe itself, and there might be more deduping to do
return hosts
# --------------------------------------------------------------------
def get_master_host_names(self):
if self.hasStandbyMaster():
return [self.master.hostname, self.standbyMaster.hostname]
else:
return [self.master.hostname]
# --------------------------------------------------------------------
def get_max_dbid(self,includeExpansionSegs=False):
"""Returns the maximum dbid in the array. If includeExpansionSegs
is True, this includes the expansion segment array in the search"""
dbid = 0
for db in self.getDbList(includeExpansionSegs):
if db.getSegmentDbId() > dbid:
dbid = db.getSegmentDbId()
return dbid
# --------------------------------------------------------------------
def get_max_contentid(self, includeExpansionSegs=False):
"""Returns the maximum contentid in the array. If includeExpansionSegs
is True, this includes the expansion segment array in the search"""
content = 0
for db in self.getDbList(includeExpansionSegs):
if db.content > content:
content = db.content
return content
# --------------------------------------------------------------------
def get_segment_count(self):
return len(self.segmentPairs)
# --------------------------------------------------------------------
def get_min_primary_port(self):
"""Returns the minimum primary segment db port"""
min_primary_port = self.segmentPairs[0].primaryDB.port
for segPair in self.segmentPairs:
if segPair.primaryDB.port < min_primary_port:
min_primary_port = segPair.primaryDB.port
return min_primary_port
# --------------------------------------------------------------------
def get_max_primary_port(self):
"""Returns the maximum primary segment db port"""
max_primary_port = self.segmentPairs[0].primaryDB.port
for segPair in self.segmentPairs:
if segPair.primaryDB.port > max_primary_port:
max_primary_port = segPair.primaryDB.port
return max_primary_port
# --------------------------------------------------------------------
def get_min_mirror_port(self):
"""Returns the minimum mirror segment db port"""
if self.get_mirroring_enabled() is False:
raise Exception('Mirroring is not enabled')
min_mirror_port = self.segmentPairs[0].mirrorDB.port
for segPair in self.segmentPairs:
mirror = segPair.mirrorDB
if mirror and mirror.port < min_mirror_port:
min_mirror_port = mirror.port
return min_mirror_port
# --------------------------------------------------------------------
def get_max_mirror_port(self):
"""Returns the maximum mirror segment db port"""
if self.get_mirroring_enabled() is False:
raise Exception('Mirroring is not enabled')
max_mirror_port = self.segmentPairs[0].mirrorDB.port
for segPair in self.segmentPairs:
mirror = segPair.mirrorDB
if mirror and mirror.port > max_mirror_port:
max_mirror_port = mirror.port
return max_mirror_port
# --------------------------------------------------------------------
def get_interface_numbers(self):
"""Returns interface numbers in the array. Assumes that addresses are named
<hostname>-<int_num>. If the nodes just have <hostname> then an empty
array is returned."""
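# Illustrative example (hypothetical addresses): if the primary host is 'sdw1'
# and its segment addresses are 'sdw1-1' and 'sdw1-2', this returns ['1', '2'];
# if the segments use the bare host name 'sdw1', an empty list is returned.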
interface_nums = []
primary_hostname = self.segmentPairs[0].primaryDB.hostname
primary_address_list = []
dbList = self.getDbList()
for db in dbList:
if db.isSegmentQD() == True:
continue
if db.getSegmentHostName() == primary_hostname:
if db.getSegmentAddress() not in primary_address_list:
primary_address_list.append(db.getSegmentAddress())
for address in primary_address_list:
if address.startswith(primary_hostname) == False or len(primary_hostname) + 2 > len(address):
return []
suffix = address[len(primary_hostname):]
if len(suffix) < 2 or suffix[0] != '-' or suffix[1:].isdigit() == False:
return []
interface_nums.append(suffix[1:])
return interface_nums
# --------------------------------------------------------------------
def get_primary_count(self):
return self.numPrimarySegments
# --------------------------------------------------------------------
def get_mirroring_enabled(self):
"""
Returns True if content ID 0 has a mirror
"""
return self.segmentPairs[0].mirrorDB is not None
# --------------------------------------------------------------------
def get_list_of_primary_segments_on_host(self, hostname):
retValue = []
for db in self.getDbList():
if db.isSegmentPrimary(False) == True and db.getSegmentHostName() == hostname:
retValue.append(db)
return retValue
# --------------------------------------------------------------------
def get_list_of_mirror_segments_on_host(self, hostname):
retValue = []
for db in self.getDbList():
if db.isSegmentMirror(False) == True and db.getSegmentHostName() == hostname:
retValue.append(db)
return retValue
# --------------------------------------------------------------------
def get_primary_root_datadirs(self):
"""
Returns a list of primary data directories minus the <prefix><contentid>
NOTE 1:
This currently assumes that all segments are configured the same
and gets the results only from the host of segment 0
NOTE 2:
The determination of hostname is based on faulty logic
"""
primary_datadirs = []
seg0_hostname = self.segmentPairs[0].primaryDB.getSegmentAddress()
(seg0_hostname, inf_num) = get_host_interface(seg0_hostname)
for db in self.getDbList():
if db.isSegmentPrimary(False) and db.getSegmentAddress().startswith(seg0_hostname):
primary_datadirs.append(db.datadir[:db.datadir.rfind('/')])
return primary_datadirs
# --------------------------------------------------------------------
def get_mirror_root_datadirs(self):
"""
Returns a list of mirror data directories minus the <prefix><contentid>
"""
mirror_datadirs = []
seg0_hostname = self.segmentPairs[0].primaryDB.getSegmentAddress()
(seg0_hostname, inf_num) = get_host_interface(seg0_hostname)
for db in self.getDbList():
if db.isSegmentMirror(False) and db.getSegmentAddress().startswith(seg0_hostname):
mirror_datadirs.append(db.datadir[:db.datadir.rfind('/')])
return mirror_datadirs
# --------------------------------------------------------------------
def get_datadir_prefix(self):
"""
Returns the prefix portion of <prefix><contentid>
"""
start_last_dir = self.master.datadir.rfind('/') + 1
start_dir_content = self.master.datadir.rfind('-')
prefix = self.master.datadir[start_last_dir:start_dir_content]
return prefix
# --------------------------------------------------------------------
# If we've got recovered segments, and we have a matched-pair, we
# can update the catalog to "rebalance" back to our original primary.
def updateRoleForRecoveredSegs(self, dbURL):
"""
Marks the segment role to match the configured preferred_role.
"""
# walk our list of segments, checking to make sure that
# both members of the peer-group are in our recovered-list,
# save their content-id.
recovered_contents = []
for segPair in self.segmentPairs:
if segPair.primaryDB:
if segPair.primaryDB.dbid in self.recoveredSegmentDbids:
if segPair.mirrorDB and segPair.mirrorDB.dbid in self.recoveredSegmentDbids:
recovered_contents.append((segPair.primaryDB.content, segPair.primaryDB.dbid, segPair.mirrorDB.dbid))
conn = dbconn.connect(dbURL, True, allowSystemTableMods = True)
for (content_id, primary_dbid, mirror_dbid) in recovered_contents:
sql = "UPDATE gp_segment_configuration SET role=preferred_role where content = %d" % content_id
dbconn.executeUpdateOrInsert(conn, sql, 2)
# NOTE: primary-dbid (right now) is the mirror.
sql = "INSERT INTO gp_configuration_history VALUES (now(), %d, 'Reassigned role for content %d to MIRROR')" % (primary_dbid, content_id)
dbconn.executeUpdateOrInsert(conn, sql, 1)
# NOTE: mirror-dbid (right now) is the primary.
sql = "INSERT INTO gp_configuration_history VALUES (now(), %d, 'Reassigned role for content %d to PRIMARY')" % (mirror_dbid, content_id)
dbconn.executeUpdateOrInsert(conn, sql, 1)
# We could attempt to update the segments-array.
# But the caller will re-read the configuration from the catalog.
dbconn.execSQL(conn, "COMMIT")
conn.close()
# --------------------------------------------------------------------
def addExpansionSeg(self, content, preferred_role, dbid, role,
hostname, address, port, datadir):
"""
Adds a segment to the gparray as an expansion segment.
Note: may work better to construct the new Segment in gpexpand and
simply pass it in.
"""
if (content <= self.segmentPairs[-1].get_dbs()[0].content):
raise Exception('Invalid content ID for expansion segment')
segdb = Segment(content = content,
preferred_role = preferred_role,
dbid = dbid,
role = role,
mode = MODE_SYNCHRONIZED,
status = STATUS_UP,
hostname = hostname,
address = address,
port = port,
datadir = datadir)
seglen = len(self.segmentPairs)
expseglen = len(self.expansionSegmentPairs)
expseg_index = content - seglen
logger.debug('New segment index is %d' % expseg_index)
if expseglen < expseg_index + 1:
extendByNum = expseg_index - expseglen + 1
logger.debug('Extending expansion array by %d' % (extendByNum))
self.expansionSegmentPairs.extend([None] * (extendByNum))
if self.expansionSegmentPairs[expseg_index] is None:
self.expansionSegmentPairs[expseg_index] = SegmentPair()
seg = self.expansionSegmentPairs[expseg_index]
if preferred_role == ROLE_PRIMARY:
if seg.primaryDB:
raise Exception('Duplicate content id for primary segment')
seg.addPrimary(segdb)
else:
seg.addMirror(segdb)
# --------------------------------------------------------------------
def reOrderExpansionSegs(self):
"""
The expansion segments content ID may have changed during the expansion.
This method will re-order the segments into their proper positions.
Since there can be no gaps in the content id (see validateExpansionSegs),
the reordered list has the same length as self.expansionSegmentPairs.
"""
seglen = len(self.segmentPairs)
expseglen = len(self.expansionSegmentPairs)
newExpansionSegments = []
newExpansionSegments.extend([None] * expseglen)
for segPair in self.expansionSegmentPairs:
contentId = segPair.primaryDB.getSegmentContentId()
index = contentId - seglen
newExpansionSegments[index] = segPair
self.expansionSegmentPairs = newExpansionSegments
# --------------------------------------------------------------------
def validateExpansionSegs(self):
""" Checks the segments added for various inconsistencies and errors.
"""
dbids = []
content = []
expansion_seg_count = 0
# make sure we have added at least one segment
if len(self.expansionSegmentPairs) == 0:
raise Exception('No expansion segments defined')
expect_all_segments_to_have_mirror = self.segmentPairs[0].mirrorDB is not None
for segPair in self.expansionSegmentPairs:
# If a segment is 'None' that means we have a gap in the content ids
if segPair is None:
raise Exception('Expansion segments do not have contiguous content ids.')
expansion_seg_count += 1
for segdb in segPair.get_dbs():
dbids.append(segdb.getSegmentDbId())
if segdb.getSegmentRole() == ROLE_PRIMARY:
isprimary = True
else:
isprimary = False
content.append((segdb.getSegmentContentId(), isprimary))
# mirror count correct for this content id?
if segPair.mirrorDB is None | |
not match")
band = dataset.GetRasterBand(1)
if DEBUG:
print('Overviews:', band.GetOverviewCount())
self.assertEqual(band.GetOverviewCount(), 7, "Overview count does not match")
# Convert and compare MRF
img = gdal.Open(self.output_img_d)
if DEBUG:
print('Size: ', img.RasterXSize, 'x', img.RasterYSize, 'x', img.RasterCount)
print("Comparing: " + self.output_img_d + " to " + self.compare_img_d)
self.assertTrue(filecmp.cmp(self.output_img_d, self.compare_img_d), "Output granule image does not match")
img = None
# Test ZDB
if DEBUG:
print("Checking " + self.output_zdb)
con = sqlite3.connect(self.output_zdb)
cur = con.cursor()
# Check for existing key
cur.execute("SELECT COUNT(*) FROM ZINDEX;")
lid = int(cur.fetchone()[0])
if DEBUG:
print("Number of records: " + str(lid))
self.assertEqual(lid, 3, "Number of records not matching in ZDB")
# Check for matching keys
cur.execute("SELECT key_str FROM ZINDEX where z=0;")
key_str = cur.fetchone()[0]
if DEBUG:
print(key_str)
self.assertEqual(key_str, '20151202', "Time for Z=0 does not match in ZDB")
cur.execute("SELECT key_str FROM ZINDEX where z=1;")
key_str = cur.fetchone()[0]
if DEBUG:
print(key_str)
self.assertEqual(key_str, '20151202100000', "Time for Z=1 does not match in ZDB")
if con:
con.close()
def tearDown(self):
if not SAVE_RESULTS:
[os.remove(os.path.join(self.input_dir, file)) for file in os.listdir(self.input_dir) if not file.endswith('.tiff')]
shutil.rmtree(self.staging_area)
else:
print("Leaving test results in : " + self.staging_area)
class TestMRFGeneration_granule_webmerc(unittest.TestCase):
def setUp(self):
testdata_path = os.path.join(os.getcwd(), 'mrfgen_files')
self.input_dir = os.path.join(testdata_path, 'obpg')
self.staging_area = os.path.join(os.getcwd(), 'mrfgen_test_data')
test_config = os.path.join(testdata_path, "mrfgen_test_config5.xml")
# Make source image dir
make_dir_tree(self.input_dir, ignore_existing=True)
# Make empty dirs for mrfgen output
mrfgen_dirs = ('output_dir', 'working_dir', 'logfile_dir')
[make_dir_tree(os.path.join(self.staging_area, path)) for path in mrfgen_dirs]
# Copy empty output tile
shutil.copytree(os.path.join(testdata_path, 'empty_tiles'), os.path.join(self.staging_area, 'empty_tiles'))
self.output_mrf = os.path.join(self.staging_area, "output_dir/OBPG_webmerc2015336_.mrf")
self.output_ppg = os.path.join(self.staging_area, "output_dir/OBPG_webmerc2015336_.ppg")
self.output_idx = os.path.join(self.staging_area, "output_dir/OBPG_webmerc2015336_.idx")
self.output_zdb = os.path.join(self.staging_area, "output_dir/OBPG_webmerc2015336_.zdb")
self.output_img = os.path.join(self.staging_area, "output_dir/OBPG_webmerc2015336_.png")
self.compare_img = os.path.join(testdata_path, "test_comp5.png")
# create copy of colormap
shutil.copy2(os.path.join(testdata_path, "colormaps/MODIS_Aqua_Chlorophyll_A.xml"), os.path.join(self.staging_area, 'working_dir'))
if DEBUG:
print("Generating global Web Mercator image with granules ")
#pdb.set_trace()
run_command("mrfgen -c " + test_config, show_output=DEBUG)
run_command('gdal_translate -of PNG -outsize 1024 1024 ' + self.output_mrf+':MRF:Z0 ' + self.output_img, show_output=DEBUG)
def test_generate_mrf(self):
'''
This covers the following test cases:
Test auto creation of empty MRF
Test using existing MRF with reprojection with z-level
Test non-merging of input images with transparency
'''
# Check MRF generation succeeded
self.assertTrue(os.path.isfile(self.output_mrf), "MRF generation failed")
# Read MRF
dataset = gdal.Open(self.output_mrf)
driver = dataset.GetDriver()
if DEBUG:
print('Driver:', str(driver.LongName))
self.assertEqual(str(driver.LongName), "Meta Raster Format", "Driver is not Meta Raster Format")
# This part of the test previously looked for a triplet of files in dataset.GetFileList().
if DEBUG:
print('Files: {0}, {1}'.format(self.output_ppg, self.output_idx))
self.assertTrue(os.path.isfile(self.output_ppg), "MRF PPG generation failed")
self.assertTrue(os.path.isfile(self.output_idx), "MRF IDX generation failed")
self.assertTrue(os.path.isfile(self.output_zdb), "MRF ZDB generation failed")
if DEBUG:
print('Projection:', str(dataset.GetProjection()))
self.assertEqual(str(dataset.GetProjection().replace(' ',' ')),'PROJCS["WGS 84 / Pseudo-Mercator",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Mercator_1SP"],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"],AUTHORITY["EPSG","3857"]]')
if DEBUG:
print('Size: ',dataset.RasterXSize,'x',dataset.RasterYSize, 'x',dataset.RasterCount)
self.assertEqual(dataset.RasterXSize, 32768, "Size does not match")
self.assertEqual(dataset.RasterYSize, 32768, "Size does not match")
self.assertEqual(dataset.RasterCount, 1, "Number of bands do not match")
geotransform = dataset.GetGeoTransform()
if DEBUG:
print('Origin: (',geotransform[0], ',',geotransform[3],')')
self.assertEqual(geotransform[0], -20037508.34, "Origin does not match")
self.assertEqual(geotransform[3], 20037508.34, "Origin does not match")
if DEBUG:
print('Pixel Size: (',geotransform[1], ',',geotransform[5],')')
self.assertEqual(float(geotransform[1]), 1222.9924523925781, "Pixel size does not match")
self.assertEqual(float(geotransform[5]), -1222.9924523925781, "Pixel size does not match")
band = dataset.GetRasterBand(1)
if DEBUG:
print('Overviews:', band.GetOverviewCount())
self.assertEqual(band.GetOverviewCount(), 7, "Overview count does not match")
# Compare MRF
img = gdal.Open(self.output_img)
if DEBUG:
print('Size: ',img.RasterXSize,'x',img.RasterYSize, 'x',img.RasterCount)
print("Comparing: " + self.output_img + " to " + self.compare_img)
self.assertTrue(filecmp.cmp(self.output_img, self.compare_img), "Output composite image does not match")
img = None
# Test ZDB
if DEBUG:
print("Checking " + self.output_zdb)
con = sqlite3.connect(self.output_zdb)
cur = con.cursor()
# Check for existing key
cur.execute("SELECT COUNT(*) FROM ZINDEX;")
lid = int(cur.fetchone()[0])
if DEBUG:
print("Number of records: " + str(lid))
self.assertEqual(lid, 1, "Number of records not matching in ZDB")
# Check for matching keys
cur.execute("SELECT key_str FROM ZINDEX where z=0;")
key_str = cur.fetchone()[0]
if DEBUG:
print(key_str)
self.assertEqual(key_str, '20151202', "Time for Z=0 does not match in ZDB")
if con:
con.close()
def tearDown(self):
if not SAVE_RESULTS:
[os.remove(os.path.join(self.input_dir, file)) for file in os.listdir(self.input_dir) if not file.endswith('.tiff')]
shutil.rmtree(self.staging_area)
else:
print("Leaving test results in : " + self.staging_area)
class TestMRFGeneration_tiled_z(unittest.TestCase):
'''
This covers the following test cases:
Test using tiled images with Z-level
Test MRF generation with date and time
'''
def setUp(self):
testdata_path = os.path.join(os.getcwd(), 'mrfgen_files')
self.staging_area = os.path.join(os.getcwd(), 'mrfgen_test_data')
test_config = os.path.join(testdata_path, "mrfgen_test_config6.xml")
# Make source image dir
make_dir_tree(os.path.join(testdata_path, 'MORCR143LLDY'), ignore_existing=True)
# Make empty dirs for mrfgen output
mrfgen_dirs = ('output_dir', 'working_dir', 'logfile_dir')
[make_dir_tree(os.path.join(self.staging_area, path)) for path in mrfgen_dirs]
# Copy empty output tile
shutil.copytree(os.path.join(testdata_path, 'empty_tiles'), os.path.join(self.staging_area, 'empty_tiles'))
self.output_mrf = os.path.join(self.staging_area, "output_dir/MORCR143LLDY2016024000000_.mrf")
self.output_pjg = os.path.join(self.staging_area, "output_dir/MORCR143LLDY2016024000000_.pjg")
self.output_idx = os.path.join(self.staging_area, "output_dir/MORCR143LLDY2016024000000_.idx")
self.output_zdb = os.path.join(self.staging_area, "output_dir/MORCR143LLDY2016024000000_.zdb")
self.output_img = os.path.join(self.staging_area, "output_dir/MORCR143LLDY2016024000000_.jpg")
self.compare_img = os.path.join(testdata_path, "test_comp6.jpg")
#pdb.set_trace()
#generate MRF
run_command("mrfgen -c " + test_config, show_output=DEBUG)
run_command('gdal_translate -of JPEG -outsize 1024 512 -projwin -10018754.1713946 5621521.48619207 -8015003.3371157 4300621.37204427 ' + self.output_mrf+':MRF:Z0 ' + self.output_img, show_output=DEBUG)
def test_generate_mrf_tiled_z(self):
# Check MRF generation succeeded
self.assertTrue(os.path.isfile(self.output_mrf), "MRF generation failed")
# Read MRF
dataset = gdal.Open(self.output_mrf+":MRF:Z0")
driver = dataset.GetDriver()
if DEBUG:
print('Driver:', str(driver.LongName))
self.assertEqual(str(driver.LongName), "Meta Raster Format", "Driver is not Meta Raster Format")
# This part of the test previously looked for a triplet of files in dataset.GetFileList().
if DEBUG:
print('Files: {0}, {1}'.format(self.output_pjg, self.output_idx))
self.assertTrue(os.path.isfile(self.output_pjg), "MRF PJG generation failed")
self.assertTrue(os.path.isfile(self.output_idx), "MRF IDX generation failed")
self.assertTrue(os.path.isfile(self.output_zdb), "MRF ZDB generation failed")
if DEBUG:
print('Projection:', str(dataset.GetProjection()))
self.assertEqual(str(dataset.GetProjection().replace(' ',' ')),'PROJCS["WGS 84 / Pseudo-Mercator",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Mercator_1SP"],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"],AUTHORITY["EPSG","3857"]]')
if DEBUG:
print('Size: ',dataset.RasterXSize,'x',dataset.RasterYSize, 'x',dataset.RasterCount)
self.assertEqual(dataset.RasterXSize, 163840, "Size does not match")
self.assertEqual(dataset.RasterYSize, 163840, "Size does not match")
self.assertEqual(dataset.RasterCount, 3, "Size does not match")
geotransform = dataset.GetGeoTransform()
if DEBUG:
print('Origin: (',geotransform[0], ',',geotransform[3],')')
self.assertEqual(geotransform[0], -20037508.34, "Origin does not match")
self.assertEqual(geotransform[3], 20037508.34, "Origin does not match")
if DEBUG:
print('Pixel Size: (',geotransform[1], ',',geotransform[5],')')
self.assertEqual(str(geotransform[1]), '244.59849047851563', "Pixel size does not match")
self.assertEqual(str(geotransform[5]), '-244.59849047851563', "Pixel size does not match")
band = dataset.GetRasterBand(1)
if DEBUG:
print('Overviews:', band.GetOverviewCount())
self.assertEqual(band.GetOverviewCount(), 10, "Overview count does not match")
# Compare MRF
img = gdal.Open(self.output_img)
if DEBUG:
print('Size: ',img.RasterXSize,'x',img.RasterYSize, 'x',img.RasterCount)
print("Comparing: " + self.output_img + " to " + self.compare_img)
self.assertTrue(filecmp.cmp(self.output_img, self.compare_img), "Output composite image does not match")
img = None
# Test ZDB
if DEBUG:
print("Checking " + self.output_zdb)
con = sqlite3.connect(self.output_zdb)
cur = con.cursor()
# Check for existing key
cur.execute("SELECT COUNT(*) FROM ZINDEX;")
lid = int(cur.fetchone()[0])
if DEBUG:
print("Number of records: " + str(lid))
self.assertEqual(lid, 1, "Number of records not matching in ZDB")
# Check for matching keys
cur.execute("SELECT key_str FROM ZINDEX where z=0;")
key_str = cur.fetchone()[0]
if DEBUG:
print(key_str)
self.assertEqual(key_str, 'test', "Time for Z=0 does not match in ZDB")
if con:
con.close()
def tearDown(self):
if not SAVE_RESULTS:
shutil.rmtree(self.staging_area)
else:
print("Leaving test results in : " + self.staging_area)
class TestMRFGeneration_nonpaletted_colormap(unittest.TestCase):
def setUp(self):
testdata_path = os.path.join(os.getcwd(), 'mrfgen_files')
self.staging_area = os.path.join(os.getcwd(), 'mrfgen_test_data')
test_config7a = os.path.join(testdata_path, "mrfgen_test_config7a.xml")
# Make empty dirs for mrfgen output
mrfgen_dirs = ('output_dir', 'working_dir', 'logfile_dir')
[make_dir_tree(os.path.join(self.staging_area, path)) for path in mrfgen_dirs]
# create copy of colormap
shutil.copy2(os.path.join(testdata_path, "colormaps/MODIS_Combined_Flood.xml"), os.path.join(self.staging_area, 'working_dir'))
# Copy empty output tile and input imagery
shutil.copytree(os.path.join(testdata_path, 'empty_tiles'), os.path.join(self.staging_area, 'empty_tiles'))
shutil.copytree(os.path.join(testdata_path, 'flood'), os.path.join(self.staging_area, 'flood'))
self.output_mrf = os.path.join(self.staging_area, "output_dir/Flood_webmerc2019268_.mrf")
self.output_ppg = os.path.join(self.staging_area, "output_dir/Flood_webmerc2019268_.ppg")
self.output_idx = os.path.join(self.staging_area, "output_dir/Flood_webmerc2019268_.idx")
self.output_img = os.path.join(self.staging_area, "output_dir/Flood_webmerc2019268_.png")
self.compare_img = os.path.join(testdata_path, "test_comp7a.png")
# generate MRF
#pdb.set_trace()
cmd = "mrfgen -c " + test_config7a + " -s --email_logging_level WARN"
run_command(cmd, show_output=DEBUG)
run_command('gdal_translate -of PNG -outsize 2048 2048 ' + self.output_mrf + ' ' + self.output_img, show_output=DEBUG)
def test_generate_mrf(self):
# Check MRF generation succeeded
self.assertTrue(os.path.isfile(self.output_mrf), "MRF generation failed")
# Read MRF
dataset = gdal.Open(self.output_mrf)
driver = dataset.GetDriver()
if DEBUG:
print('Driver:', str(driver.LongName))
self.assertEqual(str(driver.LongName), "Meta Raster Format", "Driver is not Meta Raster Format")
# This part of the test previously looked for a triplet of files in dataset.GetFileList().
if DEBUG:
print('Files: {0}, {1}'.format(self.output_ppg, self.output_idx))
self.assertTrue(os.path.isfile(self.output_ppg), "MRF PPG generation failed")
self.assertTrue(os.path.isfile(self.output_idx), "MRF IDX generation failed")
if DEBUG:
print('Projection:', str(dataset.GetProjection()))
self.assertEqual(str(dataset.GetProjection().replace('  ', ' ')),
'PROJCS["WGS 84 / Pseudo-Mercator",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Mercator_1SP"],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs"],AUTHORITY["EPSG","3857"]]')
if DEBUG:
print('Size: ', dataset.RasterXSize, 'x', dataset.RasterYSize, 'x', dataset.RasterCount)
self.assertEqual(dataset.RasterXSize, 32768, "Size does not | |
"""
:mod:`hashdist.core.execute_job` --- Job execution
====================================================
Executes a set of commands in a controlled environment. This
should usually be used to launch a real script interpreter, but
basic support for modifying the environment and running multiple
commands are provided through the JSON job specification.
Job specification
-----------------
The job spec is a document that contains what's needed to set up a
controlled environment and run the commands. The idea is to be able
to reproduce a job run, and hash the job spec. Example:
.. code-block:: python
{
"import" : [
{"ref": "bash", "id": "virtual:bash"},
{"ref": "make", "id": "virtual:gnu-make/3+"},
{"ref": "zlib", "id": "zlib/2d4kh7hw4uvml67q7npltyaau5xmn4pc"},
{"ref": "unix", "id": "virtual:unix"},
{"ref": "gcc", "before": ["virtual:unix"], "id": "gcc/jonykztnjeqm7bxurpjuttsprphbooqt"}
],
"env" : {
"FOO" : "bar"
},
"env_nohash" : {
"NCORES": "4"
},
"script" : [
[
["CFLAGS=$(pkgcfg", "--cflags", "foo", ")"],
["./configure", "--prefix=$ARTIFACT", "--foo-setting=$FOO"]
],
["make", "-j$NCORES"],
["make", "install"]
],
}
.. warning::
The job spec may not completely specify the job
environment because it is usually a building block of other specs
which may imply certain additional environment variables. E.g.,
during a build, ``$ARTIFACT`` and ``$BUILD`` are defined even if
they are never mentioned here.
**import**:
The artifacts needed in the environment for the run. After the
job has run they have no effect (i.e., they do not
affect garbage collection or run-time dependencies of a build,
for instance). The list specifies an unordered set; `before` can be used to
specify order.
* **id**: The artifact ID. If the value is prepended with
``"virtual:"``, the ID is a virtual ID, used so that the real
one does not contribute to the hash. See section on virtual
imports below.
* **ref**: A name to use to inject information of this dependency
into the environment. Above, ``$zlib`` will be the
absolute path to the ``zlib`` artifact, and ``$zlib_id`` will be
the full artifact ID. This can be set to `None` in order to not
set any environment variables for the artifact.
* **before**: List of artifact IDs. Adds a constraint that this
dependency is listed before the dependencies listed in all paths.
* **in_env**: Whether to add the environment variables of the
artifact (typically ``$PATH`` if there is a ``bin`` sub-directory
and so on). Otherwise the artifact can only be used through the
variables ``ref`` sets up. Defaults to `True`.
**script**:
Executed to perform the build. See below.
**env**:
Environment variables. The advantage to using this over just defining
them in the `script` section is that they are automatically unordered
w.r.t. hashing.
**env_nohash**:
Same as `env` but entries here do not contribute to the hash. Should
only be used when one is willing to trust that the value does not
affect the build result in any way. E.g., parallelization flags,
paths to manually downloaded binary installers, etc.
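For intuition, the hashed part of the spec can be thought of as a canonical
serialization with ``env_nohash`` stripped out. A minimal sketch of that idea
(not the actual hashing routine used by hashdist; ``sketch_job_spec_hash`` is a
made-up name):

.. code-block:: python

    import hashlib
    import json

    def sketch_job_spec_hash(job_spec):
        # drop the non-hashed environment before serializing
        hashed_part = {k: v for k, v in job_spec.items() if k != 'env_nohash'}
        canonical = json.dumps(hashed_part, sort_keys=True, separators=(',', ':'))
        return hashlib.sha256(canonical.encode('utf-8')).hexdigest()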
The execution environment
-------------------------
Standard output (except with "<=", see below) and error of all
commands are both re-directed to a Logger instance passed in
by the user. There is no stdin (it's set to a closed pipe).
The build environment variables are wiped out and the variables in `env`
and `env_nohash` set. Then, each of the `import`-ed artifacts are
visited and (if `in_env` is not set to `False`) the following variables
are affected:
**PATH**:
Set to point to the ``bin``-sub-directories of imports.
**HDIST_CFLAGS**:
Set to point to the ``include``-sub-directories of imports.
**HDIST_LDFLAGS**:
Set to point to the ``lib*``-sub-directories of imports.
Note that it is almost impossible to inject a relative RPATH; even
if one manages to escape $ORIGIN properly for the build system,
any auto-detection will tend to prepend absolute RPATHs
anyway. See experiences in mess.rst. If one wishes '$ORIGIN' in the
RPATH then ``patchelf`` should be used.
**HDIST_VIRTUALS**:
The mapping of virtual artifacts to concrete artifact IDs that has
been used. Format by example:
``virtual:unix=unix/r0/KALiap2<...>;virtual:hdist=hdist/r0/sLt4Zc<...>``
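A consumer of this variable could split it back into a mapping along these
lines (an illustrative sketch only; ``parse_virtuals`` is not part of the API):

.. code-block:: python

    def parse_virtuals(value):
        # "virtual:unix=unix/r0/KALiap2...;virtual:hdist=hdist/r0/sLt4Zc..."
        return dict(entry.split('=', 1) for entry in value.split(';') if entry)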
Mini script language
--------------------
It may seem insane to invent another script language, so here's some
rationalization: First off, *something* must do the initial launch from
the Python process. That couldn't be a shell (because no shell is cross-platform)
and it couldn't be Python (because of all the different Python versions,
and we would like job specifications to not be Python-specific).
Then, there were a few features (notably getting log output from
hdist-jail reliably) that were simply easier to implement this way.
Ultimately, the "advanced" features like redirection are justified by
how little extra code was needed.
The scripting language should only be used for setting up an
environment and launching the job, and intentionally does not contain
any control flow. While it is modelled after Bash to make it familiar
to read, it is *not* in any way Bash, the implementation is entirely
in this Python module.
Parsing is at a minimum, since most of the structure is already
present in the JSON structure. There's no quoting, one string from the
input document is always passed as a single argument to ``Popen``.
Example script::
"script" : [
[
["LIB=foo"],
["CFLAGS=$(pkgcfg", "--cflags", "$LIB", ")"],
["./configure", "--prefix=$ARTIFACT", "--foo-setting=$FOO"]
],
["make", "-j$NCORES"],
["make", "install"]
]
Rules:
* Lists of strings are a command to execute, lists of lists is a scope
(i.e., above, ``$CFLAGS`` is only available to the ``./configure``
command).
* Variable substitution is performed on all strings (except, currently,
assignment left-hand-sides) using the ``$CFLAGS`` and ``${CFLAGS}``
syntax. ``\$`` is an escape for ``$`` (but ``\`` not followed by ``$``
is not currently an escape). A short sketch of this substitution appears after the list.
* The ``["executable", "arg1", ...]``: First string is command to execute (either
absolute or looked up in ``$PATH``). Both `stdout` and `stderr` are
redirected to the application logger.
* The ``["executable>filename", "arg1", ...]``: Like the above, but `stdout` is
redirected to the file. **Note** the unusual location of the filename (this was
done so that one does not have to mess with escaping the ``>`` character for
arguments).
* The ``["VAR=str"]`` command sets an environment variable
* The ``["VAR=$(command", "arg1", ..., ")"]``: Assigns the stdout of the command
to the variable. The result has leading and trailing whitespace stripped
but is otherwise untouched. The trailing ``")"`` must stand by itself (and does not
really mean anything except as to balance the opening visually).
* All forms above can be prepended with ``@`` on the command-string to silence
logging the running environment (this may silence even more in the future).
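As referenced above, the substitution behaviour can be sketched in a few lines of
Python (a simplified illustration of the rules, not the actual implementation in
this module):

.. code-block:: python

    import re

    def substitute(s, env):
        # handle the \$ escape first, then $VAR and ${VAR}
        def repl(m):
            if m.group(0) == r'\$':
                return '$'
            return env[m.group(1) or m.group(2)]
        return re.sub(r'\\\$|\$(\w+)|\$\{(\w+)\}', repl, s)

    # substitute('--prefix=$ARTIFACT', {'ARTIFACT': '/opt/pkg'}) -> '--prefix=/opt/pkg'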
The ``hdist`` command is given special treatment and is executed in the
same process, with logging set up to the logger of the job runner.
In addition to what is listed in ``hdist --help``, the following special
command is available for interacting with the job runner:
* ``hdist logpipe HEADING LEVEL``: Creates a new Unix FIFO and prints
its name to standard output (it will be removed once the job
terminates). The job runner will poll the pipe and print
anything written to it nicely formatted to the log with the given
heading and log level (the latter is one of ``DEBUG``, ``INFO``,
``WARNING``, ``ERROR``).
.. note::
``hdist`` is not automatically available in the environment in general
(in launched scripts etc.), for that, see :mod:`hashdist.core.hdist_recipe`.
``hdist logpipe`` is currently not supported outside of the job script
at all (this could be supported through RPC with the job runner, but the
gain seems very slight).
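For example, a configure step could route its output through a log pipe with its
own heading (a hypothetical snippet following the script rules above)::

    "script" : [
        ["PIPE=$(hdist", "logpipe", "configure", "INFO", ")"],
        ["./configure>$PIPE", "--prefix=$ARTIFACT"]
    ]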
Virtual imports
---------------
Sometimes one does not wish certain imports to become part of the hash.
For instance, if the ``cp`` tool is used in the job, one is normally
ready to trust that the result wouldn't have been different if a newer
version of the ``cp`` tool was used instead.
Virtual imports, such as ``virtual:unix`` in the example above, are
used so that the hash depends on a user-defined string rather than the
artifact contents. If a bug in ``cp`` is indeed discovered, one can
change the user-defined string (e.g, ``virtual:unix/r2``) in order to
change the hash of the job desc.
.. note::
One should think about virtual dependencies merely as a tool that gives
the user control (and responsibility) over when the hash should change.
They are *not* the primary mechanism for providing software
from the host; though software from the host will sometimes be
specified as virtual dependencies.
Reference
---------
"""
import sys
import | |
# Source: UttaranB127/GeneratingEmotiveGaits -- utils/processor_legacy_1.py
import math
import os
import torch.optim as optim
import torch.nn as nn
from net import quater_emonet
from torchlight.torchlight.io import IO
from utils.mocap_dataset import MocapDataset
from utils.visualizations import display_animations
from utils import losses
from utils.Quaternions_torch import *
from utils.spline import Spline
torch.manual_seed(1234)
rec_loss = losses.quat_angle_loss
def find_all_substr(a_str, sub):
start = 0
while True:
start = a_str.find(sub, start)
if start == -1:
return
yield start
start += len(sub) # use start += 1 to find overlapping matches
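# Illustrative usage (hypothetical input): list(find_all_substr('ep_12_loss', '_'))
# yields [2, 5], i.e. the start index of each non-overlapping occurrence of sub.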
def get_best_epoch_and_loss(path_to_model_files):
all_models = os.listdir(path_to_model_files)
if len(all_models) < 2:
return '', 0, np.inf, 0.  # same (model name, epoch, loss, acc) shape as the normal return
loss_list = -1. * np.ones(len(all_models))
acc_list = -1. * np.ones(len(all_models))
for i, model in enumerate(all_models):
loss_acc_val = str.split(model, '_')
if len(loss_acc_val) > 1:
loss_list[i] = float(loss_acc_val[3])
acc_list[i] = float(loss_acc_val[5])
if len(loss_list) < 3:
best_model = all_models[np.argwhere(loss_list == min([n for n in loss_list if n > 0]))[0, 0]]
else:
loss_idx = np.argpartition(loss_list, 2)
best_model = all_models[loss_idx[1]]
all_underscores = list(find_all_substr(best_model, '_'))
# return model name, best epoch, best loss, best acc
return best_model, int(best_model[all_underscores[0] + 1:all_underscores[1]]),\
float(best_model[all_underscores[2] + 1:all_underscores[3]]),\
float(best_model[all_underscores[4] + 1:all_underscores[5]])
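# Illustrative assumption about the checkpoint naming this parser expects
# (hypothetical file name): 'epoch_120_loss_0.0123_acc_0.8500_model.pth.tar'
# would be parsed as epoch 120, loss 0.0123 and acc 0.8500.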
class Processor(object):
"""
Processor for gait generation
"""
def __init__(self, args, dataset, data_loader, T, V, C, D, A, S,
joint_parents, num_labels, prefix_length, target_length,
min_train_epochs=-1, generate_while_train=False,
save_path=None, device='cuda:0'):
self.args = args
self.dataset = dataset
self.mocap = MocapDataset(V, C, joint_parents)
self.device = device
self.data_loader = data_loader
self.num_labels = num_labels
self.result = dict()
self.iter_info = dict()
self.epoch_info = dict()
self.meta_info = dict(epoch=0, iter=0)
self.io = IO(
self.args.work_dir,
save_log=self.args.save_log,
print_log=self.args.print_log)
# model
self.T = T
self.V = V
self.C = C
self.D = D
self.A = A
self.S = S
self.O = 1
self.PRS = 2
self.prefix_length = prefix_length
self.target_length = target_length
self.joint_parents = joint_parents
self.model = quater_emonet.QuaterEmoNet(V, D, S, A, self.O, num_labels[0], self.PRS)
self.model.cuda(device)
self.quat_h = None
self.p_rs_loss_func = nn.L1Loss()
self.affs_loss_func = nn.L1Loss()
self.best_loss = math.inf
self.best_mean_ap = 0.
self.loss_updated = False
self.mean_ap_updated = False
self.step_epochs = [math.ceil(float(self.args.num_epoch * x)) for x in self.args.step]
self.best_loss_epoch = None
self.best_acc_epoch = None
self.min_train_epochs = min_train_epochs
# generate
self.generate_while_train = generate_while_train
self.save_path = save_path
# optimizer
if self.args.optimizer == 'SGD':
self.optimizer = optim.SGD(
self.model.parameters(),
lr=self.args.base_lr,
momentum=0.9,
nesterov=self.args.nesterov,
weight_decay=self.args.weight_decay)
elif self.args.optimizer == 'Adam':
self.optimizer = optim.Adam(
self.model.parameters(),
lr=self.args.base_lr)
# weight_decay=self.args.weight_decay)
else:
raise ValueError()
self.lr = self.args.base_lr
self.tf = self.args.base_tr
def process_data(self, data, poses, quat, trans, affs):
data = data.float().to(self.device)
poses = poses.float().to(self.device)
quat = quat.float().to(self.device)
trans = trans.float().to(self.device)
affs = affs.float().to(self.device)
return data, poses, quat, trans, affs
def load_best_model(self, ):
if self.best_loss_epoch is None:
model_name, self.best_loss_epoch, self.best_loss, self.best_mean_ap =\
get_best_epoch_and_loss(self.args.work_dir)
# load model
# if self.best_loss_epoch > 0:
loaded_vars = torch.load(os.path.join(self.args.work_dir, model_name))
self.model.load_state_dict(loaded_vars['model_dict'])
self.quat_h = loaded_vars['quat_h']
def adjust_lr(self):
self.lr = self.lr * self.args.lr_decay
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.lr
def adjust_tf(self):
if self.meta_info['epoch'] > 20:
self.tf = self.tf * self.args.tf_decay
def show_epoch_info(self):
print_epochs = [self.best_loss_epoch if self.best_loss_epoch is not None else 0,
self.best_acc_epoch if self.best_acc_epoch is not None else 0,
self.best_acc_epoch if self.best_acc_epoch is not None else 0]
best_metrics = [self.best_loss, 0, self.best_mean_ap]
i = 0
for k, v in self.epoch_info.items():
self.io.print_log('\t{}: {}. Best so far: {} (epoch: {:d}).'.
format(k, v, best_metrics[i], print_epochs[i]))
i += 1
if self.args.pavi_log:
self.io.log('train', self.meta_info['iter'], self.epoch_info)
def show_iter_info(self):
if self.meta_info['iter'] % self.args.log_interval == 0:
info = '\tIter {} Done.'.format(self.meta_info['iter'])
for k, v in self.iter_info.items():
if isinstance(v, float):
info = info + ' | {}: {:.4f}'.format(k, v)
else:
info = info + ' | {}: {}'.format(k, v)
self.io.print_log(info)
if self.args.pavi_log:
self.io.log('train', self.meta_info['iter'], self.iter_info)
def yield_batch(self, batch_size, dataset):
batch_pos = np.zeros((batch_size, self.T, self.V, self.C), dtype='float32')
batch_quat = np.zeros((batch_size, self.T, (self.V - 1) * self.D), dtype='float32')
batch_orient = np.zeros((batch_size, self.T, self.O), dtype='float32')
batch_affs = np.zeros((batch_size, self.T, self.A), dtype='float32')
batch_spline = np.zeros((batch_size, self.T, self.S), dtype='float32')
batch_phase_and_root_speed = np.zeros((batch_size, self.T, self.PRS), dtype='float32')
batch_labels = np.zeros((batch_size, 1, self.num_labels[0]), dtype='float32')
pseudo_passes = (len(dataset) + batch_size - 1) // batch_size
probs = []
for k in dataset.keys():
if 'spline' not in dataset[k]:
raise KeyError('No splines found. Perhaps you forgot to compute them?')
probs.append(dataset[k]['spline'].size())
probs = np.array(probs) / np.sum(probs)
for p in range(pseudo_passes):
rand_keys = np.random.choice(len(dataset), size=batch_size, replace=True, p=probs)
for i, k in enumerate(rand_keys):
pos = dataset[str(k)]['positions_world']
quat = dataset[str(k)]['rotations']
orient = dataset[str(k)]['orientations']
affs = dataset[str(k)]['affective_features']
spline, phase = Spline.extract_spline_features(dataset[str(k)]['spline'])
root_speed = dataset[str(k)]['trans_and_controls'][:, -1].reshape(-1, 1)
labels = dataset[str(k)]['labels'][:self.num_labels[0]]
batch_pos[i] = pos
batch_quat[i] = quat.reshape(self.T, -1)
batch_orient[i] = orient.reshape(self.T, -1)
batch_affs[i] = affs
batch_spline[i] = spline
batch_phase_and_root_speed[i] = np.concatenate((phase, root_speed), axis=-1)
batch_labels[i] = np.expand_dims(labels, axis=0)
yield batch_pos, batch_quat, batch_orient, batch_affs, batch_spline,\
batch_phase_and_root_speed / np.pi, batch_labels
def return_batch(self, batch_size, dataset):
if len(batch_size) > 1:
rand_keys = np.copy(batch_size)
batch_size = len(batch_size)
else:
batch_size = batch_size[0]
probs = []
for k in dataset.keys():
if 'spline' not in dataset[k]:
raise KeyError('No splines found. Perhaps you forgot to compute them?')
probs.append(dataset[k]['spline'].size())
probs = np.array(probs) / np.sum(probs)
rand_keys = np.random.choice(len(dataset), size=batch_size, replace=False, p=probs)
batch_pos = np.zeros((batch_size, self.T, self.V, self.C), dtype='float32')
batch_traj = np.zeros((batch_size, self.T, self.C), dtype='float32')
batch_quat = np.zeros((batch_size, self.T, (self.V - 1) * self.D), dtype='float32')
batch_orient = np.zeros((batch_size, self.T, self.O), dtype='float32')
batch_affs = np.zeros((batch_size, self.T, self.A), dtype='float32')
batch_spline = np.zeros((batch_size, self.T, self.S), dtype='float32')
batch_phase_and_root_speed = np.zeros((batch_size, self.T, self.PRS), dtype='float32')
batch_labels = np.zeros((batch_size, 1, self.num_labels[0]), dtype='float32')
for i, k in enumerate(rand_keys):
pos = dataset[str(k)]['positions_world']
traj = dataset[str(k)]['trajectory']
quat = dataset[str(k)]['rotations']
orient = dataset[str(k)]['orientations']
affs = dataset[str(k)]['affective_features']
spline, phase = Spline.extract_spline_features(dataset[str(k)]['spline'])
root_speed = dataset[str(k)]['trans_and_controls'][:, -1].reshape(-1, 1)
labels = dataset[str(k)]['labels'][:self.num_labels[0]]
batch_pos[i] = pos
batch_traj[i] = traj
batch_quat[i] = quat.reshape(self.T, -1)
batch_orient[i] = orient.reshape(self.T, -1)
batch_affs[i] = affs
batch_spline[i] = spline
batch_phase_and_root_speed[i] = np.concatenate((phase, root_speed), axis=-1)
batch_labels[i] = np.expand_dims(labels, axis=0)
return batch_pos, batch_traj, batch_quat, batch_orient, batch_affs, batch_spline,\
batch_phase_and_root_speed, batch_labels
def per_train(self):
self.model.train()
train_loader = self.data_loader['train']
batch_loss = 0.
N = 0.
for pos, quat, orient, affs, spline, p_rs, labels in self.yield_batch(self.args.batch_size, train_loader):
pos = torch.from_numpy(pos).cuda()
quat = torch.from_numpy(quat).cuda()
orient = torch.from_numpy(orient).cuda()
affs = torch.from_numpy(affs).cuda()
spline = torch.from_numpy(spline).cuda()
p_rs = torch.from_numpy(p_rs).cuda()
labels = torch.from_numpy(labels).cuda()
pos_pred = pos.clone()
quat_pred = quat.clone()
p_rs_pred = p_rs.clone()
affs_pred = affs.clone()
pos_pred_all = pos.clone()
quat_pred_all = quat.clone()
p_rs_pred_all = p_rs.clone()
affs_pred_all = affs.clone()
prenorm_terms = torch.zeros_like(quat_pred)
# forward
self.optimizer.zero_grad()
for t in range(self.target_length):
quat_pred_all[:, self.prefix_length + t:self.prefix_length + t + 1], \
p_rs_pred_all[:, self.prefix_length + t:self.prefix_length + t + 1], \
self.quat_h, prenorm_terms[:, self.prefix_length + t: self.prefix_length + t + 1] = \
self.model(
quat_pred[:, t:self.prefix_length + t],
p_rs_pred[:, t:self.prefix_length + t],
affs_pred[:, t:self.prefix_length + t],
spline[:, t:self.prefix_length + t],
orient[:, t:self.prefix_length + t],
labels,
quat_h=None if t == 0 else self.quat_h, return_prenorm=True)
pos_pred_all[:, self.prefix_length + t:self.prefix_length + t + 1],\
affs_pred_all[:, self.prefix_length + t:self.prefix_length + t + 1] = \
self.mocap.get_predicted_features(
pos_pred[:, self.prefix_length + t:self.prefix_length + t + 1, 0],
quat_pred_all[:, self.prefix_length + t:self.prefix_length + t + 1],
orient[:, self.prefix_length + t:self.prefix_length + t + 1])
if np.random.uniform(size=1)[0] > self.tf:
pos_pred[:, self.prefix_length + t:self.prefix_length + t + 1] = \
pos_pred_all[:, self.prefix_length + t:self.prefix_length + t + 1]
quat_pred[:, self.prefix_length + t:self.prefix_length + t + 1] = \
quat_pred_all[:, self.prefix_length + t:self.prefix_length + t + 1]
p_rs_pred[:, self.prefix_length + t:self.prefix_length + t + 1] = \
p_rs_pred_all[:, self.prefix_length + t:self.prefix_length + t + 1]
affs_pred[:, self.prefix_length + t:self.prefix_length + t + 1] = \
affs_pred_all[:, self.prefix_length + t:self.prefix_length + t + 1]
prenorm_terms = prenorm_terms.view(prenorm_terms.shape[0], prenorm_terms.shape[1], -1, self.D)
quat_norm_loss = self.args.quat_norm_reg * torch.mean((torch.sum(prenorm_terms ** 2, dim=-1) - 1) ** 2)
quat_loss, quat_derv_loss = losses.quat_angle_loss(quat_pred_all[:, self.prefix_length - 1:],
quat[:, self.prefix_length - 1:], self.V, self.D)
quat_loss *= self.args.quat_reg
p_rs_loss = self.p_rs_loss_func(p_rs_pred_all[:, self.prefix_length:],
p_rs[:, self.prefix_length:])
affs_loss = self.affs_loss_func(affs_pred_all[:, self.prefix_length:],
affs[:, self.prefix_length:])
# recons_loss = self.args.recons_reg *\
# (pos_pred_all[:, self.prefix_length:] - pos_pred_all[:, self.prefix_length:, 0:1] -
# pos[:, self.prefix_length:] + pos[:, self.prefix_length:, 0:1]).norm()
loss_total = quat_norm_loss + quat_loss + quat_derv_loss + p_rs_loss + affs_loss # + recons_loss
loss_total.backward()
# nn.utils.clip_grad_norm_(self.model.parameters(), self.args.gradient_clip)
self.optimizer.step()
# Compute statistics
batch_loss += loss_total.item()
N += quat.shape[0]
# statistics
self.iter_info['loss'] = loss_total.data.item()
self.iter_info['lr'] = '{:.6f}'.format(self.lr)
self.iter_info['tf'] = '{:.6f}'.format(self.tf)
self.show_iter_info()
self.meta_info['iter'] += 1
batch_loss = batch_loss / N
self.epoch_info['mean_loss'] = batch_loss
self.show_epoch_info()
self.io.print_timer()
self.adjust_lr()
self.adjust_tf()
def per_test(self):
self.model.eval()
test_loader = self.data_loader['test']
valid_loss = 0.
N = 0.
for pos, quat, orient, affs, spline, p_rs, labels in self.yield_batch(self.args.batch_size, test_loader):
pos = torch.from_numpy(pos).cuda()
quat = torch.from_numpy(quat).cuda()
orient = torch.from_numpy(orient).cuda()
| |
def MPxNode_setInternalValueInContext(*args, **kwargs):
pass
def MPx3dModelView_getObjectsToView(*args, **kwargs):
pass
def MPxManipContainer_newManipulator(*args, **kwargs):
pass
def MPxSurfaceShape_nodeBoundingBoxSizeX_get(*args, **kwargs):
pass
def MPxObjectSet_dagSetMembers_set(*args, **kwargs):
pass
def MPxAssembly_getActive(*args, **kwargs):
pass
def MPxTransformationMatrix_asScaleMatrixInverse(*args, **kwargs):
pass
def MPxIkSolverNode_hasJointLimitSupport(*args, **kwargs):
pass
def MPxSurfaceShape_nodeBoundingBox_set(*args, **kwargs):
pass
def MPxAnimCurveInterpolator_swiginit(*args, **kwargs):
pass
def MPxConstraintCommand_doCreate(*args, **kwargs):
pass
def MPxSurfaceShape_nodeBoundingBoxMaxX_set(*args, **kwargs):
pass
def new_MExternalContentLocationTable(*args, **kwargs):
pass
def MPxMidiInputDevice_getMessage(*args, **kwargs):
pass
def MPxTransform_transMinusRotatePivotZ_get(*args, **kwargs):
pass
def MPxTransform_minScaleZLimit_get(*args, **kwargs):
pass
def MPxNode_getInternalValueInContext(*args, **kwargs):
pass
def MPxLocatorNode_boundingBoxCenterZ_get(*args, **kwargs):
pass
def MPxManipContainer_initialize(*args, **kwargs):
pass
def MPxTransform_rotateBy(*args, **kwargs):
pass
def MPxTransform__dirtyRotatePivotTranslation(*args, **kwargs):
pass
def MPxGeometryIterator_isDone(*args, **kwargs):
pass
def MPxTransform_nodeBoundingBoxMax_get(*args, **kwargs):
pass
def MPxMotionPathNode_swigregister(*args, **kwargs):
pass
def MPx3dModelView_getCurrentCameraSetCamera(*args, **kwargs):
pass
def MPxRenderPassImpl_swigregister(*args, **kwargs):
pass
def MPxHwShaderNode_outGlowColorB_get(*args, **kwargs):
pass
def disown_MPxManipContainer(*args, **kwargs):
pass
def MPxCacheFormat_open(*args, **kwargs):
pass
def MPxCommand_isCurrentResultArray(*args, **kwargs):
pass
def delete_MExternalContentLocationTable(*args, **kwargs):
pass
def MPxFluidEmitterNode_fluidColor(*args, **kwargs):
pass
def MPxManipContainer_finishAddingManips(*args, **kwargs):
pass
def MPxNode_typeId(*args, **kwargs):
pass
def MPxTransform_scaleX_get(*args, **kwargs):
pass
def MPxEmitterNode_mRandStateX_set(*args, **kwargs):
pass
def MPxTransform_maxTransZLimit_get(*args, **kwargs):
pass
def MPxContext__setCursor(*args, **kwargs):
pass
def MPxLocatorNode_draw(*args, **kwargs):
pass
def MPxFluidEmitterNode_mFluidFuelEmission_get(*args, **kwargs):
pass
def MPxControlCommand_doEditFlags(*args, **kwargs):
pass
def MPxTransformationMatrix_rotateOrientation(*args, **kwargs):
pass
def MPxBakeEngine_getUVRange(*args, **kwargs):
pass
def MPxFileTranslator_allowMultipleFileOptimization(*args, **kwargs):
pass
def MPxAssembly_activating(*args, **kwargs):
pass
def MPxFluidEmitterNode_mEmissionFunction_set(*args, **kwargs):
pass
def MPxConstraint_getOutputAttributes(*args, **kwargs):
pass
def MPxConstraintCommand_constraintRestAttribute(*args, **kwargs):
pass
def MPxEmitterNode_mOwnerCentroidZ_set(*args, **kwargs):
pass
def MPxMotionPathNode_pathGeometry_get(*args, **kwargs):
pass
def MPxRenderPassImpl_typesSupported(*args, **kwargs):
pass
def getLockCaptureCount(*args, **kwargs):
pass
def MPxUIControl_swigregister(*args, **kwargs):
pass
def MPxTransform_minScaleZLimitEnable_set(*args, **kwargs):
pass
def MPxTransform_selectHandleY_set(*args, **kwargs):
pass
def MPxPolyTweakUVInteractiveCommand_swiginit(*args, **kwargs):
pass
def MPxLocatorNode_instObjGroups_get(*args, **kwargs):
pass
def new_MPxPolyTweakUVInteractiveCommand(*args, **kwargs):
pass
def MPxParticleAttributeMapperNode_computeNode_get(*args, **kwargs):
pass
def MPxTransform_checkAndSetRotateOrientation(*args, **kwargs):
pass
def MPxModelEditorCommand__syntax(*args, **kwargs):
pass
def MPxSpringNode_mDeltaTime_set(*args, **kwargs):
pass
def MPx3dModelView_isShadeActiveOnly(*args, **kwargs):
pass
def MPxImagePlane_maxShadingSamples_set(*args, **kwargs):
pass
def MPx3dModelView_isXrayEnabled(*args, **kwargs):
pass
def MPxSurfaceShape_objectGroups_get(*args, **kwargs):
pass
def MPxImagePlane_visibleInReflections_get(*args, **kwargs):
pass
def MPxGeometryIterator_next(*args, **kwargs):
pass
def MPxCommand_setUndoable(*args, **kwargs):
pass
def MPxMayaAsciiFilter_haveWriteMethod(*args, **kwargs):
pass
def MPxGeometryFilter_getDeformationDetails(*args, **kwargs):
pass
def MPxSpringNode_mEnd2Weight_set(*args, **kwargs):
pass
def MPxFieldNode_mUseMaxDistance_get(*args, **kwargs):
pass
def MPxCameraSet_order_get(*args, **kwargs):
pass
def MPxBlendShape_inputPointsTarget_get(*args, **kwargs):
pass
def MPxSurfaceShape_mControlValueZ_get(*args, **kwargs):
pass
def MPxNode_setExternalContent(*args, **kwargs):
pass
def MPxTransform_maxTransYLimitEnable_set(*args, **kwargs):
pass
def MPxTransform_scalePivot_get(*args, **kwargs):
pass
def MPxDragAndDropBehavior_connectAttrToAttr(*args, **kwargs):
pass
def MPxTransformationMatrix_setRotatePivot(*args, **kwargs):
pass
def MPxLocatorNode_nodeBoundingBox_set(*args, **kwargs):
pass
def MPxRepresentation_canApplyEdits(*args, **kwargs):
pass
def MPxTransform_postConstructor(*args, **kwargs):
pass
def MPxTransform_rotateY_get(*args, **kwargs):
pass
def MPxMotionPathNode_frontAxis_set(*args, **kwargs):
pass
def MPxBakeEngine_bake(*args, **kwargs):
pass
def MPxMotionPathNode_xCoordinate_set(*args, **kwargs):
pass
def MPxGeometryIterator_swigregister(*args, **kwargs):
pass
def MPxTransform_specifiedManipLocation_get(*args, **kwargs):
pass
def MPxHardwareShader_transparencyOptions(*args, **kwargs):
pass
def MPxTransform_minTransZLimit_set(*args, **kwargs):
pass
def MPxFieldNode_mInputMass_get(*args, **kwargs):
pass
def new_MPxSurfaceShapeUI(*args, **kwargs):
pass
def MPxSurfaceShape_swigregister(*args, **kwargs):
pass
def MPxTransform_objectColor_set(*args, **kwargs):
pass
def MPxTransform_nodeBoundingBoxSize_set(*args, **kwargs):
pass
def MPx3dModelView_fogEnd(*args, **kwargs):
pass
def MPxImagePlane_depthFile_set(*args, **kwargs):
pass
def MPx3dModelView_setDrawInterrupt(*args, **kwargs):
pass
def MPxMayaAsciiFilter_writePostRequires(*args, **kwargs):
pass
def MPxContext__ignoreEntry(*args, **kwargs):
pass
def MPxObjectSet_partition_get(*args, **kwargs):
pass
def MPxSkinCluster_swiginit(*args, **kwargs):
pass
def MPxFluidEmitterNode_mFluidColorB_get(*args, **kwargs):
pass
def MPxEditData_performIsLessThan(*args, **kwargs):
pass
def MPxIkSolverNode_setRotatePlane(*args, **kwargs):
pass
def new_MPxModelEditorCommand(*args, **kwargs):
pass
def MPxManipulatorNode_mouseDown(*args, **kwargs):
pass
def MPxIkSolverNode_className(*args, **kwargs):
pass
def MPxNode_copyInternalData(*args, **kwargs):
pass
def new_MPxBakeEngine(*args, **kwargs):
pass
def MPxTransform_maxRotYLimit_set(*args, **kwargs):
pass
def MPxEmitterNode_mDirectionX_set(*args, **kwargs):
pass
def MPxTransform_rotateAxis_set(*args, **kwargs):
pass
def MPxMidiInputDevice_doButtonEvents(*args, **kwargs):
pass
def MPxTransform_maxRotXLimitEnable_get(*args, **kwargs):
pass
def MFnPlugin_registerNode(*args, **kwargs):
pass
def MPxSurfaceShape_objectColor_get(*args, **kwargs):
pass
def MPxLocatorNode_nodeBoundingBoxSize_get(*args, **kwargs):
pass
def MPxPolyTrg_swigregister(*args, **kwargs):
pass
def MPxLocatorNode_objectGrpCompList_get(*args, **kwargs):
pass
def MPxTransform_mustCallValidateAndSet(*args, **kwargs):
pass
def MPxFieldNode_mOwnerCentroid_set(*args, **kwargs):
pass
def MPxMotionPathNode_bank_get(*args, **kwargs):
pass
def delete_MPxModelEditorCommand(*args, **kwargs):
pass
def MPxGeometryIterator_setPoint(*args, **kwargs):
pass
def MPxMotionPathNode_rotateZ_get(*args, **kwargs):
pass
def MPxImagePlane_refreshImage(*args, **kwargs):
pass
def MPx3dModelView_refresh(*args, **kwargs):
pass
def MPxImagePlane_fit_set(*args, **kwargs):
pass
def MPxFluidEmitterNode_mTurbulence_get(*args, **kwargs):
pass
def MPxFieldNode_mApplyPerVertex_get(*args, **kwargs):
pass
def MPxContext_completeAction(*args, **kwargs):
pass
def MPxCacheFormat_writeIntArray(*args, **kwargs):
pass
def MPxParticleAttributeMapperNode_outMaxValue_get(*args, **kwargs):
pass
def MPxTransform_overrideVisibility_set(*args, **kwargs):
pass
def MPxUITableControl_getCellColor(*args, **kwargs):
pass
def MFnPlugin_isNodeRegistered(*args, **kwargs):
pass
def MPxTransform_translateZ_set(*args, **kwargs):
pass
def MPxNode_existWithoutOutConnections(*args, **kwargs):
pass
def MPxTransform_minTransXLimit_get(*args, **kwargs):
pass
def MPxContext_doDrag(*args, **kwargs):
pass
def MPxObjectSet_memberWireframeColor_set(*args, **kwargs):
pass
def MPxContextCommand_doQueryFlags(*args, **kwargs):
pass
def MPxAssembly_getRepLabel(*args, **kwargs):
pass
def MPxManipContainer_addManipToPlugConversion(*args, **kwargs):
pass
def MPxTransformationMatrix_asInterpolationMatrix(*args, **kwargs):
pass
def MPxIkSolverNode_setFuncValueTolerance(*args, **kwargs):
pass
def MPxSurfaceShape_deleteComponents(*args, **kwargs):
pass
def delete_MPxConstraint(*args, **kwargs):
pass
def MExternalContentInfoTable_swiginit(*args, **kwargs):
pass
def MPxRenderPassImpl_getDefaultType(*args, **kwargs):
pass
def new_MPxPolyTweakUVCommand(*args, **kwargs):
pass
def MFnPlugin_deregisterIkSolver(*args, **kwargs):
pass
def MPxTransform_maxScaleXLimit_get(*args, **kwargs):
pass
def MPx3dModelView_numUserDefinedColors(*args, **kwargs):
pass
def MPxLocatorNode_inverseMatrix_get(*args, **kwargs):
pass
def MPxLocatorNode_objectGroupColor_get(*args, **kwargs):
pass
def MPxTransform_scaleBy(*args, **kwargs):
pass
def MPxTransform_applyRotationLimits(*args, **kwargs):
pass
def MPxSelectionContext__isSelecting(*args, **kwargs):
pass
def MPx3dModelView_name(*args, **kwargs):
pass
def MPxIkSolverNode__setToRestAngles(*args, **kwargs):
pass
def MPxImagePlane_coverageX_set(*args, **kwargs):
pass
def MPx3dModelView_drawHUDNow(*args, **kwargs):
pass
def MPxConstraintCommand_supportsOffset(*args, **kwargs):
pass
def delete_MPxData(*args, **kwargs):
pass
def MPxImagePlane_offsetX_set(*args, **kwargs):
pass
def MPxCacheFormat_rewind(*args, **kwargs):
pass
def MPxHwShaderNode_outColorG_get(*args, **kwargs):
pass
def MPxContextCommand__parser(*args, **kwargs):
pass
def MPxMidiInputDevice_sendMessage(*args, **kwargs):
pass
def MPxTexContext_viewRect(*args, **kwargs):
pass
def MPxSurfaceShape_closestPoint(*args, **kwargs):
pass
def MPx3dModelView_setUserDefinedColor(*args, **kwargs):
pass
def MPxEditData__dataStringValue_get(*args, **kwargs):
pass
def MPxEmitterNode_mIsFull_get(*args, **kwargs):
pass
def MPxControlCommand_skipFlagForCreate(*args, **kwargs):
pass
def MPxManipulatorNode_shouldDrawHandleAsSelected(*args, **kwargs):
pass
def MPxAssembly_memberRemoved(*args, **kwargs):
pass
def MPxManipContainer_addPlugToInViewEditor(*args, **kwargs):
pass
def MPxFluidEmitterNode_mFluidFuelEmission_set(*args, **kwargs):
pass
def MPxAssembly_addDisconnectAttrEdit(*args, **kwargs):
pass
def new_MPxConstraintCommand(*args, **kwargs):
pass
def MPxConstraintCommand_constraintTargetWeightAttribute(*args, **kwargs):
pass
def MPxFileTranslator_filter(*args, **kwargs):
pass
def MPxEmitterNode_getStartTime(*args, **kwargs):
pass
def MPxTransform_maxScaleXLimitEnable_set(*args, **kwargs):
pass
def MPxTransform_inheritsTransform_get(*args, **kwargs):
pass
def MPxLocatorNode_swigregister(*args, **kwargs):
pass
def MPxMotionPathNode_fractionMode_get(*args, **kwargs):
pass
def MPxTransform_checkAndSetScalePivotTranslation(*args, **kwargs):
pass
def MPxSelectionContext__newToolCommand(*args, **kwargs):
pass
def MPxTransform_objectGroups_set(*args, **kwargs):
pass
def MPx3dModelView_beginXorDrawing(*args, **kwargs):
pass
def MFnPlugin_deregisterDisplayFilter(*args, **kwargs):
pass
def MPxImagePlane_depth_get(*args, **kwargs):
pass
def MPx3dModelView_isTextureDisplayEnabled(*args, **kwargs):
pass
def MPxEmitterNode_mCurrentTime_set(*args, **kwargs):
pass
def MPxImagePlane_visibleInRefractions_set(*args, **kwargs):
pass
def MPxSurfaceShape_intermediateObject_get(*args, **kwargs):
pass
def MPxHwShaderNode_outTransparencyG_set(*args, **kwargs):
pass
def MPxEmitterNode_mSweptGeometry_set(*args, **kwargs):
pass
def MPxNode_isHistoricallyInteresting_get(*args, **kwargs):
pass
def MPxGeometryFilter_input_set(*args, **kwargs):
pass
def delete_MPxRepresentation(*args, **kwargs):
pass
def MPxBlendShape_className(*args, **kwargs):
pass
def MPxSurfaceShape_nodeBoundingBoxMin_get(*args, **kwargs):
pass
def MFnPlugin_registerBakeEngine(*args, **kwargs):
pass
def new_MPxSurfaceShape(*args, **kwargs):
pass
def MPxTransform_minRotLimit_set(*args, **kwargs):
pass
def MPxLocatorNode_nodeBoundingBoxMaxZ_set(*args, **kwargs):
pass
def MPxEmitterNode_mSpeed_get(*args, **kwargs):
pass
def MPxTransform_scalePivotY_get(*args, **kwargs):
pass
def MPxTransform_maxRotZLimit_set(*args, **kwargs):
pass
def MPxLocatorNode_isTransparent(*args, **kwargs):
pass
def MPxImagePlane_lockedToCamera_set(*args, **kwargs):
pass
def MPxManipulatorNode_mainColor(*args, **kwargs):
pass
def MPxLocatorNode_nodeBoundingBoxMinX_get(*args, **kwargs):
pass
def MPxRepresentation__getAssembly(*args, **kwargs):
pass
def MPxManipContainer_addCurveSegmentManip(*args, **kwargs):
pass
def MPxImagePlane_centerX_get(*args, **kwargs):
pass
def MPxTransform_boundingBox(*args, **kwargs):
pass
def MPxFileResolver_swigregister(*args, **kwargs):
pass
def MPxMotionPathNode_worldUpType_get(*args, **kwargs):
pass
def MPxGeometryData_swigregister(*args, **kwargs):
pass
def MPxGeometryData_deleteComponent(*args, **kwargs):
pass
def delete_MPxImageFile(*args, **kwargs):
pass
def MFnPlugin_registerData(*args, **kwargs):
pass
def disown_MPxComponentShape(*args, **kwargs):
pass
def MPxHwShaderNode_texCoordsPerVertex(*args, **kwargs):
pass
def MaterialInputData_emission_set(*args, **kwargs):
pass
def MPxSelectionContext_doRelease(*args, **kwargs):
pass
def MFnPlugin_setName(*args, **kwargs):
pass
def disown_MPxUIControl(*args, **kwargs):
pass
def MPxUITableControl_setSelection(*args, **kwargs):
pass
def MPxTransform_renderLayerId_set(*args, **kwargs):
pass
def MPxConstraintCommand_undoIt(*args, **kwargs):
pass
def MPx3dModelView_includeInvisible(*args, **kwargs):
pass
def MPxMayaAsciiFilter_writePostConnectAttrsBlock(*args, **kwargs):
pass
def MPxObjectSet_usedByNodes_get(*args, **kwargs):
pass
def MPxBlendShape_type(*args, **kwargs):
pass
def MPxFieldNode_mOwnerCentroidZ_set(*args, **kwargs):
pass
def MPxSkinCluster_bindPreMatrix_set(*args, **kwargs):
pass
def MPxSurfaceShape_nodeBoundingBoxMinX_get(*args, **kwargs):
pass
def MPxIkSolverNode_swiginit(*args, **kwargs):
pass
def MPxFileResolver_getURIResolversByScheme(*args, **kwargs):
pass
def MPxTransform_rotateAxisY_set(*args, **kwargs):
pass
def MFnPlugin_registerURIFileResolver(*args, **kwargs):
pass
def MPxManipulatorNode_setDoubleValue(*args, **kwargs):
pass
def MPxLocatorNode_nodeBoundingBoxSizeY_get(*args, **kwargs):
pass
def delete_MPxTransformationMatrix(*args, **kwargs):
pass
def MPxLocatorNode_objectGroupId_set(*args, **kwargs):
pass
def MPxTransform_getTranslation(*args, **kwargs):
pass
def MPxTransform_copyInternalData(*args, **kwargs):
pass
def MPxLocatorNode_localPositionZ_set(*args, **kwargs):
pass
def MPxUITableControl_allowEdit(*args, **kwargs):
pass
def MPxImagePlane_imageType_set(*args, **kwargs):
pass
def MPx3dModelView_drawText(*args, **kwargs):
pass
def MPxParticleAttributeMapperNode_outColorPP_set(*args, **kwargs):
pass
def MPxFluidEmitterNode_mFluidJitter_set(*args, **kwargs):
pass
def MPxSurfaceShape_objectGrpCompList_set(*args, **kwargs):
pass
def MPxTransform_displayLocalAxis_get(*args, **kwargs):
pass
def MPxEmitterNode_volumePrimitiveBoundingBox(*args, **kwargs):
pass
def MPxCameraSet_active_get(*args, **kwargs):
pass
def MPxTransform_lodVisibility_set(*args, **kwargs):
pass
def MFnPlugin_deregisterAnimCurveInterpolator(*args, **kwargs):
pass
def MPxTransform_rotateX_set(*args, **kwargs):
pass
def MPxTransform_minTransZLimit_get(*args, **kwargs):
pass
def MPxContext_helpStateHasChanged(*args, **kwargs):
pass
def MPxContextCommand_setResult(*args, **kwargs):
pass
def MPxAssembly_deleteAllRepresentations(*args, **kwargs):
pass
def MPxDragAndDropBehavior_swiginit(*args, **kwargs):
pass
def MPxTransformationMatrix_translateTo(*args, **kwargs):
pass
def MPxHardwareShader_outColorB_set(*args, **kwargs):
pass
def MPxFieldNode_mInputForce_set(*args, **kwargs):
pass
def MPxTransform_getShear(*args, **kwargs):
pass
def MPxConstraint_constraintRotateOrderAttribute(*args, **kwargs):
pass
def MPxBakeEngine_fInstance_get(*args, **kwargs):
pass
def MPxConstraintCommand_aimVectorAttribute(*args, **kwargs):
pass
def MPxEmitterNode_swiginit(*args, **kwargs):
pass
def MExternalContentLocationTable_swigregister(*args, **kwargs):
pass
def MPxFileTranslator_identifyFile(*args, **kwargs):
pass
def MPxTransform_maxScaleZLimit_get(*args, **kwargs):
pass
def MPxSurfaceShape_parentInverseMatrix_set(*args, **kwargs):
pass
def MPxHardwareShader_profile(*args, **kwargs):
pass
def MPxTransform_maxScaleZLimitEnable_get(*args, **kwargs):
pass
def MPxTransform_nodeBoundingBoxSizeX_set(*args, **kwargs):
pass
def MPxLocatorNode_intermediateObject_get(*args, **kwargs):
pass
def MPxLocatorNode_localScaleX_get(*args, **kwargs):
pass
def MPxTransform_shearBy(*args, **kwargs):
pass
def MPxSurfaceShape_parentInverseMatrix_get(*args, **kwargs):
pass
def MPxSelectionContext_deleteManipulators(*args, **kwargs):
pass
def MPx3dModelView_postMultipleDraw(*args, **kwargs):
pass
def MPxContextCommand_className(*args, **kwargs):
pass
def MPxEmitterNode_getCurrentTime(*args, **kwargs):
pass
def MPxImagePlane_colorOffsetB_get(*args, **kwargs):
pass
def MPx3dModelView_setObjectDisplay(*args, **kwargs):
pass
def MPxImagePlane_center_set(*args, **kwargs):
pass
def MPxBlendShape_inputTarget_get(*args, **kwargs):
pass
def MPx3dModelView_setViewSelectedPrefix(*args, **kwargs):
pass
def MPxCacheFormat_writeHeader(*args, **kwargs):
pass
def MPxCacheFormat_swiginit(*args, **kwargs):
pass
def MPxFluidEmitterNode_swiginit(*args, **kwargs):
pass
def MPxDeformerNode_weightValue(*args, **kwargs):
pass
def MPxCameraSet_camera_get(*args, **kwargs):
pass
def MPxSurfaceShape_undeleteComponents(*args, **kwargs):
pass
def delete_MPxEmitterNode(*args, **kwargs):
pass
def MFnPlugin_className(*args, **kwargs):
pass
def MPxTransform_minTransYLimitEnable_set(*args, **kwargs):
pass
def MPxEmitterNode_mDirection_set(*args, **kwargs):
pass
def MPxTransform_rotatePivotZ_get(*args, **kwargs):
pass
def MPxManipulatorNode_addVectorValue(*args, **kwargs):
pass
def MPxAssembly_inactivateRep(*args, **kwargs):
pass
def MPxManipContainer_getConverterManipValue(*args, **kwargs):
pass
def MPxLocatorNode_worldPositionZ_get(*args, **kwargs):
pass
def MFnPlugin_deregisterDragAndDropBehavior(*args, **kwargs):
pass
def MPxAssembly_addParentEdit(*args, **kwargs):
pass
def MPxTransform_nodeBoundingBoxMaxX_set(*args, **kwargs):
pass
def MPxLocatorNode_type(*args, **kwargs):
pass
def MPxCommand_undoIt(*args, **kwargs):
pass
def MPxFileResolver_performAfterSaveURI(*args, **kwargs):
pass
def MPxMotionPathNode_follow_get(*args, **kwargs):
pass
def new_MFnPlugin(*args, **kwargs):
pass
def MPxModelEditorCommand_setResult(*args, **kwargs):
pass
def MPxGeometryIterator_setCurrentPoint(*args, **kwargs):
pass
def MPxLocatorNode_worldPositionZ_set(*args, **kwargs):
pass
def MPxTransform_displayScalePivot_get(*args, **kwargs):
pass
def MPxPolyTweakUVCommand_getTweakedUVs(*args, **kwargs):
pass
def MPxTransform_nodeBoundingBoxMin_get(*args, **kwargs):
pass
def MPxSelectionContext_setImage(*args, **kwargs):
pass
def MPxTransform_objectGrpCompList_set(*args, **kwargs):
pass
def MPx3dModelView_numActiveColors(*args, **kwargs):
pass
def MFnPlugin_registerRenderer(*args, **kwargs):
pass
def MPxImagePlane_squeezeCorrection_set(*args, **kwargs):
pass
def MPx3dModelView_lightingMode(*args, **kwargs):
pass
def MPxConstraintCommand_getObjectAttributesArray(*args, **kwargs):
pass
def MPxImagePlane_alreadyPremult_set(*args, **kwargs):
pass
def MPx3dModelView_wantStereoGLBuffer(*args, **kwargs):
pass
def MPxFieldNode_isFalloffCurveConstantOne(*args, **kwargs):
pass
def MPxSkinCluster_weightList_set(*args, **kwargs):
pass
def MPxBlendShape_swiginit(*args, **kwargs):
pass
def MPxSurfaceShape_mControlPoints_set(*args, **kwargs):
pass
def MPxEmitterNode_mDirectionX_get(*args, **kwargs):
pass
def MPxSurfaceShape_type(*args, **kwargs):
pass
def MPxTransform_scalePivotZ_set(*args, **kwargs):
pass
def MPxParticleAttributeMapperNode_swiginit(*args, **kwargs):
pass
def MPxUITableControl_setNumberOfRows(*args, **kwargs):
pass
def MPxHwShaderNode_outTransparencyB_set(*args, **kwargs):
pass
def MPxToolCommand_className(*args, **kwargs):
pass
def MPxFieldNode_mOwnerPPData_set(*args, **kwargs):
pass
def MPxSurfaceShape_nodeBoundingBoxSizeX_set(*args, **kwargs):
pass
def MPxPolyTweakUVInteractiveCommand_setUVs(*args, **kwargs):
pass
def MPxTransform_resetTransformation(*args, **kwargs):
pass
def delete_MPxAssembly(*args, **kwargs):
pass
def delete_MPxDeformerNode(*args, **kwargs):
pass
def MPxUITableControl_suspendUpdates(*args, **kwargs):
pass
def disown_MPxControlCommand(*args, **kwargs):
pass
def MPxParticleAttributeMapperNode_computeNodeColorR_set(*args, **kwargs):
pass
def MPxLocatorNode_worldPositionX_set(*args, **kwargs):
pass
def MPxSurfaceShape_className(*args, **kwargs):
pass
def MaterialInputData_specular_get(*args, **kwargs):
pass
def MPxParticleAttributeMapperNode_type(*args, **kwargs):
pass
def MPxSurfaceShapeUI_draw(*args, **kwargs):
pass
def MPxTransform_overrideShading_get(*args, **kwargs):
pass
def MPxTransform_renderLayerColor_set(*args, **kwargs):
pass
def MPxEmitterNode_mInheritFactor_get(*args, **kwargs):
pass
def MPx3dModelView_swiginit(*args, **kwargs):
pass
def MPxObjectSet_swigregister(*args, **kwargs):
pass
def MPxFieldNode_mOutputForce_get(*args, **kwargs):
pass
def MPxSurfaceShape_nodeBoundingBoxMinZ_get(*args, **kwargs):
pass
def MPxNode_preEvaluation(*args, **kwargs):
pass
def MPxFieldNode_mUseMaxDistance_set(*args, **kwargs):
pass
def MPxCacheFormat_beginReadChunk(*args, **kwargs):
pass
def MPxTransform_transMinusRotatePivot_get(*args, **kwargs):
pass
def MPxHardwareShader_outColorG_get(*args, **kwargs):
pass
def MPxTransform_minScaleLimit_get(*args, **kwargs):
pass
def MPxFluidEmitterNode_mFluidColorR_get(*args, **kwargs):
pass
def MPxTransformationMatrix_assign(*args, **kwargs):
pass
def new_MPxManipContainer(*args, **kwargs):
pass
def MPxTransform_translateTo(*args, **kwargs):
pass
def MPxTransform__dirtyScale(*args, **kwargs):
pass
def MPxMotionPathNode_fractionMode_set(*args, **kwargs):
pass
def MPxMotionPathNode_banking(*args, **kwargs):
pass
def disown_MPxSurfaceShapeUI(*args, **kwargs):
pass
def MPx3dModelView_setDrawCameraOverride(*args, **kwargs):
pass
def MPxTransform_nodeBoundingBoxMaxX_get(*args, **kwargs):
pass
def MPxParticleAttributeMapperNode_computeNodeColorR_get(*args, **kwargs):
pass
def MPxImagePlane_size_set(*args, **kwargs):
pass
def delete_MPxFieldNode(*args, **kwargs):
pass
def MPxLocatorNode_localPositionX_get(*args, **kwargs):
pass
def MPxCacheFormat_readDoubleVectorArray(*args, **kwargs):
pass
def MFnPlugin_registerDragAndDropBehavior(*args, **kwargs):
pass
def MPxMidiInputDevice_swigregister(*args, **kwargs):
pass
def MPxLocatorNode_worldPositionY_get(*args, **kwargs):
pass
def MPxTransform_renderInfo_get(*args, **kwargs):
pass
def MPxTransform_rotateZ_get(*args, **kwargs):
pass
def MPxManipulatorNode_setInstancePtr(*args, **kwargs):
pass
def MFnPlugin_deregisterImageFile(*args, **kwargs):
pass
def MPxTransform_maxTransLimit_set(*args, **kwargs):
pass
def MPxSurfaceShape_boundingBoxCenterY_get(*args, **kwargs):
pass
def MPxObjectSet_verticesOnlySet_set(*args, **kwargs):
pass
def MPxGlBuffer_openFbo(*args, **kwargs):
pass
def MPxTransformationMatrix_eulerRotation(*args, **kwargs):
pass
def MPxIkSolverNode_toWorldSpace(*args, **kwargs):
pass
def MPxGlBuffer_swiginit(*args, **kwargs):
pass
def MPxConstraintCommand_worldUpTypeAttribute(*args, **kwargs):
pass
def MPxEmitterNode_mSweptGeometry_get(*args, **kwargs):
pass
def MPxContext_stringClassName(*args, **kwargs):
pass
def delete_MPx3dModelView(*args, **kwargs):
pass
def MPxTransform_minScaleXLimitEnable_get(*args, **kwargs):
pass
def MPxFluidEmitterNode_fluidDensityEmission(*args, **kwargs):
pass
def MPxTransform_xformMatrix_get(*args, **kwargs):
pass
def MPxLocatorNode_worldMatrix_set(*args, **kwargs):
pass
def MPxGeometryIterator_maxPoints(*args, **kwargs):
pass
def MPxTransform_getRotatePivotTranslation(*args, **kwargs):
pass
def MPxContext_addManipulator(*args, **kwargs):
pass
def MPxSelectionContext_newToolCommand(*args, **kwargs):
pass
def MPxUITableControl_numberOfRows(*args, **kwargs):
pass
def MPxSurfaceShape_center_get(*args, **kwargs):
pass
def MPx3dModelView_setBackfaceCulling(*args, **kwargs):
pass
def MPxSurfaceShape_isTemplated_set(*args, **kwargs):
pass
def MPxImagePlane_centerY_set(*args, **kwargs):
pass
def MPxNode_setExistWithoutOutConnections(*args, **kwargs):
pass
def MPxMayaAsciiFilterOutput___lshift__(*args, **kwargs):
pass
def MPxGeometryFilter_type(*args, **kwargs):
    pass
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
import os
import sys
from contextlib import suppress
from typing import Any, List, Type, cast
import numpy as np
import torch
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.distributed.algorithms.join import _Join, _JoinHook
from torch.distributed.optim import ZeroRedundancyOptimizer
from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import SGD
from torch.testing._internal import common_distributed, common_utils
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def check_same_model_params(model_a: torch.nn.Module, model_b: torch.nn.Module, message: str = "") -> None:
for p_a, p_b in zip(model_a.parameters(), model_b.parameters()):
assert torch.allclose(p_a, p_b, atol=1e-3), f"Model parameters differ\n{p_a} {p_b}\n" + message
for b_a, b_b in zip(model_a.buffers(), model_b.buffers()):
assert torch.allclose(b_a, b_b), f"Model buffers differ {b_a} - {b_b}\n" + message
class TestZeroRedundancyOptimizer(common_distributed.MultiProcessTestCase):
def setUp(self):
super(TestZeroRedundancyOptimizer, self).setUp()
os.environ["WORLD_SIZE"] = str(self.world_size)
self._spawn_processes()
@property
def device(self):
return torch.device(self.rank) if BACKEND == dist.Backend.NCCL else torch.device("cpu")
@property
def world_size(self):
return 1
def tearDown(self):
try:
torch.distributed.destroy_process_group()
except AssertionError:
pass
try:
os.remove(self.file_name)
except OSError:
pass
def dist_init(self, rank, world_size=-1, backend=BACKEND):
if (world_size < 1):
world_size = self.world_size
store = dist.FileStore(self.file_name, world_size)
return dist.init_process_group(backend=backend, store=store, rank=rank, world_size=world_size)
class TestZeroRedundancyOptimizerSingleRank(TestZeroRedundancyOptimizer):
def test_state_dict(self):
"""Check that the ZeroRedundancyOptimizer exposes the expected state dict interface,
irrespective of the sharding.
"""
self.dist_init(self.rank)
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGD, lr=0.1, momentum=0.9)
x.backward()
o.step()
self.assertEqual(x, torch.tensor([0.9], device=DEVICE))
self.assertEqual(o.optim.state[x]["momentum_buffer"], torch.tensor([1.0], device=DEVICE))
o.zero_grad()
o.consolidate_state_dict() # Sync state dict in between replicas - even if there are none
state_dict = o.state_dict()
# Check that the state dict is pytorch-compliant key wise
self.assertIn("param_groups", state_dict.keys())
self.assertIn("state", state_dict.keys())
# Check that the pulled state is what we expect, and that we have all the expected keys
self.assertEqual(state_dict["param_groups"][0]["lr"], 0.1)
self.assertEqual(state_dict["param_groups"][0]["momentum"], 0.9)
self.assertFalse(state_dict["param_groups"][0]["nesterov"])
self.assertEqual(state_dict["param_groups"][0]["weight_decay"], 0.0)
self.assertEqual(state_dict["param_groups"][0]["dampening"], 0.0)
# Check that the pulled state and the .param_groups attribute are in sync
for k in state_dict["param_groups"][0].keys():
if k != "params":
self.assertEqual(state_dict["param_groups"][0][k], o.param_groups[0][k])
# Check that it's correctly loaded
o = ZeroRedundancyOptimizer([x], optimizer_class=SGD, lr=0.01)
o.load_state_dict(state_dict)
# Check that state is correct and on proper device
self.assertEqual(o.optim.state[x]["momentum_buffer"], torch.tensor([1.0], device=DEVICE))
# We should now be using a lr of 0.1, both within the optimizer
# and as exposed by the .param_groups attribute
assert o.param_groups[0]["lr"] == 0.1
x.backward()
o.step()
self.assertEqual(x, torch.tensor([0.71], device=DEVICE))
self.assertEqual(o.optim.state[x]["momentum_buffer"], torch.tensor([1.9], device=DEVICE))
# Check that the exposed param_groups are on the proper device
self.assertEqual(o.param_groups[0]["params"][0].device, x.device)
def test_lr_scheduler(self):
""" Check that a normal torch lr_scheduler is usable with ZeroRedundancyOptimizer"""
self.dist_init(self.rank)
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
x2 = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGD, lr=0.01)
o2 = torch.optim.SGD([x2], lr=0.01)
s = torch.optim.lr_scheduler.StepLR(o, 1)
s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
for _ in range(5):
x.backward()
o.zero_grad()
o.step()
s.step()
x2.backward()
o2.zero_grad()
o2.step()
s2.step()
self.assertEqual(x, x2)
def test_step_with_kwargs(self):
""" Check that the `step(**kwargs)` interface is properly exposed"""
self.dist_init(self.rank)
class SGDWithStepKWArg(torch.optim.SGD):
def step(self, closure=None, kwarg=None):
super().step()
kwarg.append(5)
kwarg: List[Any] = []
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGDWithStepKWArg, lr=0.1)
x.backward()
o.step(0, kwarg=kwarg)
self.assertEqual(kwarg, [5])
self.assertEqual(x, torch.tensor([0.9], device=DEVICE))
def test_step_with_extra_inner_key(self):
"""Check that an optimizer adding extra keys to the param_groups
is properly handled, in that the new key is exposed to the user
"""
self.dist_init(self.rank)
class SGDWithNewKey(torch.optim.SGD):
# Dummy optimizer which adds a new key to the param groups
def step(self, closure=None):
super().step()
self.param_groups[0]["new_key"] = 0.1
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGDWithNewKey, lr=0.1)
x.backward()
o.step()
self.assertEqual(o.param_groups[0]["new_key"], 0.1)
self.assertEqual(x, torch.tensor([0.9], device=DEVICE))
def test_step_without_closure(self):
"""Check that the step() method (without closure) is handlded as expected"""
self.dist_init(self.rank)
class SGDWithoutClosure(torch.optim.SGD):
def step(self):
return super().step()
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGDWithoutClosure, lr=0.1)
x.backward()
o.step()
self.assertEqual(x, torch.tensor([0.9], device=DEVICE))
def test_zero_grad(self):
"""Check that the zero_grad attribute is properly handled"""
self.dist_init(self.rank)
x = torch.rand(1)
m = torch.nn.Linear(1, 1)
o = ZeroRedundancyOptimizer(m.parameters(), optimizer_class=SGD, lr=0.1)
y = m(x)
y.backward(x)
self.assertNotEqual(m.weight.grad, torch.zeros_like(m.weight))
        self.assertNotEqual(m.bias.grad, torch.zeros_like(m.bias))
o.zero_grad()
self.assertFalse(m.weight.grad)
self.assertFalse(m.bias.grad)
def test_constructor(self):
"""Check the robustness of the ZeroRedundancyOptimizer constructor by
passing different values for `params`"""
self.dist_init(self.rank)
m = torch.nn.Linear(1, 1)
# (input, expected error)
inputs = [
([], ValueError), # empty parameter list
(torch.randn(1), TypeError), # non-iterable: `torch.Tensor`
(1.2, TypeError), # non-iterable: `float`
([{"params": m.parameters()}], TypeError), # iterable of dict
(list(m.parameters()) + [42], TypeError), # iterable containing non-`torch.Tensor`
(m.parameters(), None), # `params` as a generator
(list(m.parameters()), None) # `params` as a list
]
for input, error in inputs:
if (error):
with self.assertRaises(error):
ZeroRedundancyOptimizer(input, optimizer_class=SGD, lr=0.1)
else:
ZeroRedundancyOptimizer(input, optimizer_class=SGD, lr=0.1)
def test_same_dense_param_type(self):
"""Check that ZeroRedundancyOptimizer raises an exception if the input
parameters include sparse tensors or different dense types.
NOTE: This test should be removed once support for sparse parameters
and varying parameter types is added.
"""
self.dist_init(self.rank)
inputs = [
[torch.sparse_coo_tensor(size=(2, 3))],
[torch.FloatTensor(1), torch.DoubleTensor(1)],
[torch.FloatTensor(1), torch.FloatTensor(1),
torch.sparse_coo_tensor(size=(2, 3))]
]
for input in inputs:
with self.assertRaises(ValueError):
ZeroRedundancyOptimizer(input, optimizer_class=SGD, lr=0.1)
class TestZeroRedundancyOptimizerDistributed(TestZeroRedundancyOptimizer):
@property
def world_size(self):
return min(4, max(2, torch.cuda.device_count()))
@common_distributed.skip_if_rocm
def test_step(self):
""" Check that the ZeroRedundancyOptimizer wrapper properly exposes the `.step()` interface"""
if self.rank >= self.world_size or (BACKEND == dist.Backend.NCCL and torch.cuda.device_count() < 2):
return
self.dist_init(self.rank, world_size=self.world_size)
context = suppress() if not torch.cuda.is_available() else torch.cuda.device(self.rank)
with context:
x = torch.tensor([float(self.rank + 1)], device=self.device)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[1.0]])
m.bias.data = torch.tensor([2.0])
m_zero = copy.deepcopy(m)
m.to(self.device)
m_zero.to(self.device)
lr = 0.1
o = SGD(m.parameters(), lr=lr)
o_zero = ZeroRedundancyOptimizer(m_zero.parameters(), optimizer_class=SGD, lr=lr)
y = m(x)
y.backward(x)
y_zero = m_zero(x)
y_zero.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= self.world_size
o.step()
for p in m_zero.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= self.world_size
o_zero.step()
self.assertEqual(m.weight, m_zero.weight)
self.assertEqual(m.bias, m_zero.bias)
@common_distributed.skip_if_rocm
def test_step_with_closure(self):
""" Check that the ZeroRedundancyOptimizer wrapper properly exposes the `.step(closure)` interface"""
if self.rank >= self.world_size or (BACKEND == dist.Backend.NCCL and torch.cuda.device_count() < 2):
return
self.dist_init(self.rank, world_size=self.world_size)
context = suppress() if not torch.cuda.is_available() else torch.cuda.device(self.rank)
with context:
for bucket_view in [False, True]:
x_val = self.rank + 1
weight = 1.0
bias = 2.0
error = 1.0
target = torch.tensor([x_val * weight + bias + error], device=self.device)
loss_fn = torch.nn.L1Loss()
x = torch.tensor([float(x_val)], device=self.device)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[weight]])
m.bias.data = torch.tensor([bias])
m.to(self.device)
o = ZeroRedundancyOptimizer(
m.parameters(),
optimizer_class=SGD,
parameters_as_bucket_view=bucket_view,
lr=0.1,
)
y = m(x)
y.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= self.world_size
def closure():
o.zero_grad()
output = m(x)
loss = loss_fn(output, target)
loss.backward()
return loss
loss = o.step(closure=closure)
self.assertEqual(loss, torch.tensor(error))
self.assertEqual(m.weight, torch.tensor([[1.1]]))
self.assertEqual(m.bias, torch.tensor([2.1]))
def test_sharding(self):
""" Check the sharding at construction time
NOTE: The correctness of this test depends on the ZeRO implementation
using the sorted-greedy partitioning algorithm. For details, see
`ZeroRedundancyOptimizer._partition_parameters()` in
`zero_redundancy_optimizer.py`.
"""
self.dist_init(self.rank)
sizes = [9, 7, 5, 3]
params = []
for size in sizes * self.world_size:
params.append(torch.rand(size, 1))
o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=0.1)
self.assertEqual(sum([x.numel() for x in o.optim.param_groups[0]["params"]]), sum(sizes))
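    # A minimal sketch, added for illustration (an assumption, not the actual
    # ZeRO implementation), of the sorted-greedy partitioning the docstring
    # above relies on: parameters are handed out largest-first to whichever
    # rank currently holds the fewest elements, which is why every rank ends
    # up with exactly sum(sizes) elements in test_sharding.
    @staticmethod
    def _greedy_partition_sketch(params, world_size):
        buckets = [[] for _ in range(world_size)]
        totals = [0] * world_size
        for p in sorted(params, key=lambda t: t.numel(), reverse=True):
            rank = totals.index(min(totals))  # least-loaded rank so far
            buckets[rank].append(p)
            totals[rank] += p.numel()
        return buckets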
def test_add_param_group(self):
"""Check that ZeroRedundancyOptimizer properly handles adding a new param_group a posteriori,
and that all ranks get a shard
NOTE: The correctness of this test depends on the ZeRO implementation
using the sorted-greedy partitioning algorithm. For details, see
`ZeroRedundancyOptimizer._partition_parameters()` in
`zero_redundancy_optimizer.py`.
"""
self.dist_init(self.rank)
# Test with all parameters trainable to begin with
def all_trainable():
params = []
sizes = [9, 7, 5, 3]
sizes_world = sizes * self.world_size
for size in sizes_world[:-1]:
params.append(torch.rand(size, 1))
# Make sure that the params are trainable, enforces size-based partitioning
for p in params:
p.requires_grad = True
o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=0.1)
assert len(o.param_groups) == 1
o.add_param_group({"params": [torch.rand(3, 1)]})
assert len(o.param_groups) == 2
            # Verify that the added group landed in the correct partition, so that every rank ends up holding the same number of elements.
assert sum([x.numel() for g in o.optim.param_groups for x in g["params"]]) == sum(sizes)
assert len(o.optim.param_groups) == 2
# Test a pathological config with a first big non-trainable param
def some_trainable():
params = []
for size in [100, 3, 5, 2, 6, 4]:
params.append(torch.rand(size, 1))
# Make sure that the params are trainable, enforces size-based partitioning
for p in params[1:]:
p.requires_grad = True
o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=0.1)
assert len(o.param_groups) == 1
o.add_param_group({"params": [torch.rand(3, 1)]})
assert len(o.param_groups) == 2
assert len(o.optim.param_groups) == 2
all_trainable()
some_trainable()
@common_distributed.skip_if_lt_x_gpu(2)
# Repository: humblef00ls/PokerData
# Project Euler Library - Written in Python
# This library contains all the functions needed to solve
# the problems from the website
#!/usr/bin/python
import math, time, itertools
# number of digits
def num_digits(n):
return int(math.log10(n)) + 1
# Check if it's a prime number
def isPrime(n):
i = 2
limit = int(math.sqrt(n))
while i <= limit:
if n % i == 0:
return 0
i = i + 1
return 1
# Check whether n is twice a perfect square (i.e. n == 2*k*k for some integer k)
def isDoubleSquare(n):
x = math.sqrt(n/2)
if x - int(x) == 0:
return True
else:
return False
# Faster primality check using trial division with the 6k +/- 1 optimization
def checkPrime(n):
if n == 2 or n == 3:
return True
if n % 2 == 0 or n % 3 == 0:
return False
i = 5
w = 2
# 6n+1 and 6n-1 check
while i * i <= n:
if n % i == 0:
return False
i += w
w = 6 - w
return True
# Check if number is pandigital
def checkPandigital(n):
pandigital = ""
#sorted the number
s = ''.join(sorted(str(n)))
# generate pandigital numbers according the number of digits
for i in range(1, len(s)+1):
pandigital += str(i)
if s == pandigital:
return True
else:
return False
# Check if the number is both prime and pandigital
def isPrime_n_isPandigital(n):
x = checkPrime(n)
y = checkPandigital(n)
    if x and y:
return True
else:
return False
# Generate all permutations
def genAllPandigitals():
s = "0123456789"
arr = [''.join(i) for i in itertools.permutations(s)]
return arr
# Generate all proper divisors of a number (excludes n itself)
def factors(n):
list = []
# for i in range(1, n + 1):
for i in range(1, n):
if n % i == 0:
list.append(i)
return list
# Generate all prime factors
def primeFactors(n):
list = []
factor = 2
while n > 1:
while n % factor == 0:
list.append(factor)
            n = n // factor
factor = factor + 1
return list
def primeFactorsTwo(n):
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
            n //= i
factors.append(i)
if n > 1:
factors.append(n)
return set(factors)
def num_of_prime_factors(n):
return len(primeFactorsTwo(n))
# Check if the number is a palindrome
def checkPalindrome(n):
num_str = str(n)
rev_str = num_str[::-1]
if num_str == rev_str:
return True
else:
return False
# Return the largest palindrome product within a given range
def largestPalindromeProduct(start, end):
max_prod = 0
for i in range(start, end+1):
for j in range(start, end+1):
temp_prod = i * j
# Check if it's a palindrome
if checkPalindrome(temp_prod) == True:
if temp_prod > max_prod:
max_prod = temp_prod
return max_prod
# Greatest Common Divisor
def gcd(a,b):
if a == b or b == 0:
return a
else:
return gcd(b, a % b)
# Least Common Multiple
def lcm(a,b):
    return (a * b) // gcd(a, b)
# Smallest Multiple
def smallestMultiple(n):
lcm_count = 1
for i in range (1, n+1):
lcm_count = lcm(i, lcm_count)
return lcm_count
# Sum of numbers
def sumOfNumbers(n):
return (n*(n+1))/2
# Sum of Squares Formula
def sumOfSquares(n):
return ((n*(n+1))*((2*n)+1))/6
# Square of Sum Formula
def squareOfSum(n):
total = (n*(n+1))/2
return pow(total, 2)
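# Illustrative usage (example added here, not part of the original library):
# for n = 3, squareOfSum(3) == 36 and sumOfSquares(3) == 14, so their
# difference is 22; Project Euler problem 6 asks for the same difference
# with n = 100, i.e. squareOfSum(100) - sumOfSquares(100).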
# Largest Product in a series
def largestProdInSeries(str, limit):
maxProd = 0
for i in range(0, len(str)):
if i + (limit-1) < len(str):
count = 0
prodSeries = ""
tempProd = 1
while count != limit:
prodSeries += str[i]
i += 1
count += 1
for j in range(0, len(prodSeries)):
tempProd *= int(prodSeries[j])
if tempProd > maxProd:
maxProd = tempProd
return maxProd
# Generate a list of primes
def genPrimesOne(n):
list = []
i = 2
count = 0
while count != n:
if isPrime(i) == 1:
list.append(i)
count += 1
i += 1
return list
# Generate a list of primes - Sieve of Eratosthenes
def genPrimesTwo(n):
p = 2
list = []
primes = [True for i in range(n)]
while p * p <= n:
if primes[p] == True:
# Update any number that is a multiple of p to False
for i in range(p * 2, n, p):
primes[i] = False
p += 1
    for i in range(2, n):
if(primes[i] == True):
list.append(i)
return list
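# Usage sketch (hypothetical example): note the different meaning of `n`
# in the two generators above.
#
#   genPrimesOne(5)    # the first five primes  -> [2, 3, 5, 7, 11]
#   genPrimesTwo(13)   # the primes below 13    -> [2, 3, 5, 7, 11]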
# Sum of a list of primes
def sumOfPrimesList(list):
return sum(list)
# Product in a grid - diagonal downright
def prGrid_diagDR(r,c,grid):
maxProd = 0
for i in range(0,r-3):
prod = 0
for j in range(0,c-3):
prod = grid[i][j] * grid[i+1][j+1] * grid[i+2][j+2] * grid[i+3][j+3]
# str = grid[i][j], grid[i+1][j+1], grid[i+2][j+2], grid[i+3][j+3], prod
# print str
if prod > maxProd:
maxProd = prod
return maxProd
# Product in a grid - diagonal downleft
def prGrid_diagDL(r,c,grid):
maxProd = 0
for i in range(0,r-3):
for j in range(3,c):
prod = grid[i][j] * grid[i+1][j-1] * grid[i+2][j-2] * grid[i+3][j-3]
# str = grid[i][j], grid[i+1][j+1], grid[i+2][j+2], grid[i+3][j+3], prod
# print str
if prod > maxProd:
maxProd = prod
return maxProd
# Product in a grid - rows
def prGrid_rows(r,c,grid):
maxProd = 0
for i in range(0,r):
for j in range(0,c-3):
prod = grid[i][j] * grid[i][j+1] * grid[i][j+2] * grid[i][j+3]
# str = grid[i][j], grid[i+1][j+1], grid[i+2][j+2], grid[i+3][j+3], prod
# print str
if prod > maxProd:
maxProd = prod
return maxProd
# Product in a grid - columns
def prGrid_cols(r,c,grid):
maxProd = 0
for i in range(0,r-3):
for j in range(0,c):
prod = grid[i][j] * grid[i+1][j] * grid[i+2][j] * grid[i+3][j]
# str = grid[i][j], grid[i+1][j+1], grid[i+2][j+2], grid[i+3][j+3], prod
# print str
if prod > maxProd:
maxProd = prod
return maxProd
# Generate Triangular Numbers
def triangularSeries(n):
list = []
for i in range(1,n+1):
list.append(sumOfNumbers(i))
return list
# Trial Division
def trialDivision(n):
count = 0
size = int(math.sqrt(n))
for i in range(2,size+1):
if n % i == 0:
count += 2
if size * size == n:
count -= 1
return count
# Factorial
def factorial(n):
if n <= 1:
return 1
else:
return n * factorial(n-1)
# Collatz Sequence
def collatzSequence(n):
count = 0
while n != 1:
if n > 0:
if n % 2 == 0:
n = n/2
elif n % 2 != 0:
n = (3*n)+1
count += 1
return count+1
# Largest Collatz Sequence
def largestCollatzSequence(n):
largeTerms = 0
largeNumber = 0
for i in range(2,n+1):
terms = collatzSequence(i)
if terms > largeTerms:
largeNumber = i
largeTerms = terms
return largeNumber
# Combinations - Refer to the equation from:
# https://en.wikipedia.org/wiki/Combination
# n = number of moves which is going to be 2n
# k = number of elements
def combinations(n):
k = n
return (factorial(2*n))/(factorial(k) * factorial((2*n)-k))
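# Worked example (added for illustration): for a 2x2 grid,
# combinations(2) evaluates 4! / (2! * 2!) = 6 lattice paths;
# Project Euler problem 15 is combinations(20) for a 20x20 grid.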
# Number to Words
def num_to_words():
# a dictionary of number words
number_words = {
1 : 'one', 2 : 'two', 3 : 'three',
4 : 'four', 5 : 'five', 6 : 'six',
7 : 'seven', 8 : 'eight', 9 : 'nine',
10 : 'ten', 11 : 'eleven', 12 : 'twelve',
13 : 'thirteen', 14 : 'fourteen', 15 : 'fifteen',
16 : 'sixteen', 17 : 'seventeen', 18 : 'eighteen',
19 : 'nineteen', 20 : 'twenty', 30 : 'thirty',
40 : 'forty', 50 : 'fifty', 60 : 'sixty',
70 : 'seventy', 80 : 'eighty', 90 : 'ninety',
100 : 'hundred', 1000 : 'thousand'
}
num_str = ""
for i in range(1, 1001):
x = str(i)
# numbers from 1 to 20
if i >= 1 and i <= 20:
num_str += number_words[i]
# numbers from 21 to 99
elif i >= 21 and i <= 99:
# if the second digit is not a zero
if x[1] != '0':
num_str += number_words[int(x[0]) * 10] + number_words[int(x[1])]
else:
num_str += number_words[int(x[0]) * 10]
# numbers from 100 to 999
elif i >= 100 and i <= 999:
# if the second digit is zero and third digit is zero
if x[1] == '0' and x[2] == '0':
num_str += number_words[int(x[0])] + number_words[100]
# if the second digit is zero and third digit is not zero
elif x[1] == '0' and x[2] != '0':
num_str += number_words[int(x[0])] + number_words[100] + 'and' + number_words[int(x[2])]
# if the second digit is not zero and third digit is zero
elif x[1] != '0' and x[2] == '0':
num_str += number_words[int(x[0])] + number_words[100] + 'and' + number_words[int(x[1]) * 10]
# if the second digit is not zero and third digit is not zero
elif x[1] != '0' and x[2] != '0':
# if the second digit is one and third digit is less than or equal to 9
if int(x[1]) == 1 and int(x[2]) <= 9:
num_str += number_words[int(x[0])] + number_words[100] + 'and' + number_words[int(x[1]+x[2])]
else:
num_str += number_words[int(x[0])] + number_words[100] + 'and' + number_words[int(x[1]) * 10] + number_words[int(x[2])]
# if the number is 1000
elif i == 1000:
num_str += number_words[1] + number_words[1000]
return num_str
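# Usage sketch (assumption about the intended use): Project Euler problem 17
# is then just the letter count of the concatenated words, i.e.
# len(num_to_words()), since the string built above contains no spaces
# or hyphens.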
# Return the maximum path sum in a triangle
# Bottom Up Approach
def maxPathSum(list):
# the last number of the list
last = len(list)
# number of rows in the triangle
nrow = 1
# count the number of rows in the triangle
# use the sum of numbers method to count the number of rows
while sumOfNumbers(nrow) < last:
# print (sumOfNumbers(nrow))
nrow += 1
last -= 1
for i in range(nrow, 0, -1):
# print list[last - i]
# iterate through each number in each row
for j in range(2, i+1):
# pick a number from the row above the current row
# and pick the 2 numbers from the current row
# Find the max between the two numbers and add it
list[last - i] = list[last - i] + max(list[last - 1], list[last])
# shift to the next number in the row above
last -= 1
# shift to the next number in the row above
last -= 1
# return the max sum
return list[0]
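# Minimal usage sketch (hypothetical triangle, not from the original file):
#
#   triangle = [3,
#               7, 4,
#               2, 4, 6]          # flattened row by row
#   maxPathSum(triangle)          # -> 14, via the path 3 -> 7 -> 4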
# Check if it's a leap year
def isLeapYear(n):
if (n % 4 == 0 and n % 100 != 0) or (n % 400 == 0):
return True
return False
# Counting Sundays
def countingSundays(start_year, end_year):
counter = 0
x = 1
years = [
[31, 29, 31, 30, 31, 30, 31, | |
Estado Nº 355" -33.4391488 -70.6495695 "Estado 355, Santiago, Chile")
'("Ahumada - Moneda N° 975, local 3" -33.44183489 -70.650444 "Moneda N° 975, local 3")
'("Hospital Clínico Universidad de Chile - Santos Dumont 999" -33.420406 -70.65295 "Santos Dumont 999")
'("Bci Home Santiago - Bandera 341, Piso 9" -33.439186 -70.6524145 "Bandera 341, Piso 9")
'("Centro Empresarios Bandera 1 Oficina Central - Bandera 250 piso 1" -33.4402281 -70.6523019 "Bandera 250, Santiago, Chile")
'("Oficina Central - Huérfanos Nº 1134, Santiago" -33.4398343 -70.6527831 "Huérfanos Nº 1134, Santiago")
'("Edificio Prat Premier - Av. Prat 847, Piso 2" -33.4558707 -70.6479542 "Av. Prat 847, Piso 2")
'("Paseo Huérfanos - Huérfanos 1263 local 1" -33.43991464 -70.65472001 "Huérfanos 1263 local 1")
'("Teatinos - Teatinos 235, Santiago" -33.4408985 -70.6549664 "Teatinos 235, Santiago")
'("Centro de Negocios - Huérfanos 1112" -33.4400547 -70.6553456 "Huérfanos 1112")
'("POWER CENTER - <NAME> 10581 LOCAL 4 Y 6" -33.3555863 -70.5351105 "<NAME> 10581, Lo Barnechea, Santiago")
'("Plaza Bulnes - Nataniel Cox Nº 27, Santiago Centro" -33.4453623 -70.6539966 "Nataniel Cox Nº 27, Santiago Centro")
'("Santa Lucía - Huérfanos Nº 669, Local 1-9, Santiago" -33.4691199 -70.641997 "Huérfanos Nº 669, Local 1-9, Santiago")
'("Plaza Almagro - Av. Diez de Julio Nº 1140, Santiago" -33.454261 -70.651449 "Av. Diez de Julio Nº 1140, Santiago")
'("Agustinas - Amunategui 154" -33.4421761 -70.6561234 "Amunategui 154")
'("<NAME> - Av. Matta Nº 1132" -33.45988604 -70.64964062 "Av. Matta Nº 1132")
'("Plaza San Lucas - Avda. La Dehesa N° 457, local 2" -33.36229297 -70.51486194 "Avda. La Dehesa N° 457, local 2")
'("La Dehesa - Avda. La Dehesa 1788" -33.356575 -70.5175 "Avda. La Dehesa 1788")
'("San Diego - San Diego Nº 2043" -33.472448 -70.647983 "San Diego Nº 2043")
'("Los Trapenses - Av. Camino Los Trapenses N°3515 Loc 202" -33.34391075 -70.54472116 "Av. Camino Los Trapenses N°3515 Loc 202")
'("El Rodeo - Avda. La Dehesa N°2035, locales 15 y 16" -33.351946 -70.518131 "Avda. La Dehesa N°2035, locales 15 y 16")
'("<NAME>atorre - Av. B. O'Higgins Nº 2102" -33.44764174 -70.66499946 "Av. B. O'Higgins Nº 2102")
'("TBANC - Av. Libertador Bernardo O' Higgins 2432" -33.4486565 -70.669942 "Av. Libertador Bernardo O' Higgins 2432")
'("Beaucheff - Av. Beaucheff Nº 1453" -33.46662774 -70.66388576 "Av. Beaucheff Nº 1453")
'("El Llano - El Llano Subercaseaux 3397" -33.48549183 -70.65068534 "El Llano Subercaseaux 3397")
'("Estación Central - Unión Latino Americana Nº 40" -33.45088394 -70.67385248 "Unión Latino Americana Nº 40")
'("Centro Empresarios Estación Central - Unión Latinoamericana 40" -33.4504407 -70.6740677 "Unión Latinoamericana 40, Estación Central, Santiago, Chile")
'("Isla de Maipo - Santelices N° 615" -33.3804567 -70.6647241 "Santelices N° 615")
'("Matucana - Chacabuco Nº 848, Santiago" -33.4351067 -70.6792414 "Chacabuco Nº 848, Santiago")
'("Renca - Av. Ptde. E. <NAME> Nº 1792, Renca" -33.40690297 -70.68056066 "Av. Ptde. E. Frei Montalva Nº 1792, Renca")
'("La Florida - Av. Vicuña Mackenna Oriente 7385, La Florida" -33.5212524 -70.5989039 "Av. Vicuña Mackenna Oriente 7385, La Florida")
'("San Miguel - Gran Av. J<NAME>. Carrera Nº 4780.San Miguel" -33.49770813 -70.6530588 "Gran Av. José M. Carrera Nº 4780.San Miguel")
'("<NAME> - Av. Pedro Fontova 6251, Local 3" -33.36631815 -70.67017355 "Av. <NAME> 6251, Local 3")
'("Club Hípico de Santiago 4676, <NAME>, Santiago" -33.4919827 -70.668105 "Club Hípico de Santiago 4676")
'("Panamericana Norte - Av. A. Vespucio Sur Nº 2982, Conchalí" -33.368545 -70.67783 "Av. A. Vespucio Sur Nº 2982, Conchalí")
'("<NAME> - Rojas Magallanes N°3638" -33.5357656 -70.5745186 "Rojas Magallanes N°3638")
'("Huechuraba - Av. Américo Vespucio 1737 Local 2154, Mall Plaza Norte" -33.36707572 -70.67805813 "Av. Américo Vespucio 1737 Local 2154, Mall Plaza Norte")
'("Altos de la Florida - La Florida Nº 9343" -33.538481 -70.5725788 "La Florida Nº 9343, Región Metropolitana, Santiago, Chile")
'("Base Aerea Cerrillos - Av. <NAME>erda Nº 5.500" -33.48945949 -70.70049577 "Av. <NAME> Cerda Nº 5.500")
'("La Cisterna - Gran Av.J<NAME>. Carrera 8445, Las Cisterna" -33.53227995 -70.6632211 "Gran Av.José M. Carrera 8445, Las Cisterna")
'("Cerrillos - Av. <NAME> Cerda Nº 6049" -33.49377846 -70.70519015 "Av. <NAME> Cerda Nº 6049")
'("Arica Norte - Diego Portales 749" -33.5627295 -70.5786623 "Diego Portales 749")
'("Mall Plaza Tobalaba - Avda. C<NAME>íquez N° 3296, local BS 108 - 110" -33.577385 -70.553114 "Avda. Camilo Henríquez N° 3296, local BS 108 - 110")
'("Quilicura - <NAME> Vergara N°491" -33.3672309 -70.7340495 "J<NAME> N°491, Quilicura, Santiago, Chile")
'("Maipú - Av. Pajaritos Nº 5100, Local 12, Maipú" -33.47468796 -70.74097349 "Av. Pajaritos Nº 5100, Local 12, Maipú")
'("Mall Plaza Oeste - Avda. Américo Vespucio 1501, local C-278 - C-282 - C-286 - BS-124" -33.51686812 -70.71712 "Avda. Américo Vespucio 1501, local C-278 - C-282 - C-286 - BS-124")
'("<NAME> - Av. Paseo Colina Sur 14500 local 101, Colina" -33.27744128 -70.62713055 "Av. Paseo Colina Sur 14500 local 101, Colina")
'("El Bosque - Av. <NAME> paradero 32 1/2" -33.557267 -70.677817 "Av. <NAME> Carrera paradero 32 1/2")
'("Maipú Pajaritos - Avda. Los Pajaritos 2664, Maipú" -33.4839444 -70.7465111 "Avda. Los Pajaritos 2664, Maipú")
'("Mall Arauco Maipú - Avda Americo Vespucio 399 Local B 02" -33.4823731 -70.75198636 "Avda Americo Vespucio 399 Local B 02")
'("Moneda - <NAME> Tocornal 920, Santiago" -33.5935111 -70.5793159 "<NAME> Tocornal 920, Santiago")
'("Chicureo - <NAME>icureo Km 2,2" -33.285698 -70.678001 "Camino Chicureo Km 2,2")
'("Camino Lo Echevers 550" -33.3735149 -70.7590944 "Camino Lo Echevers 550")
'("<NAME> - Avda Concha y Toro N°1036" -33.60167007 -70.57928105 "Avda Concha y Toro N°1036")
'("<NAME> - Irarrázabal Nº 0178, Puente Alto" -33.60751014 -70.57394295 "Irarrázabal Nº 0178, Puente Alto")
'("5 de Abril - 5 de Abril N°180" -33.510366 -70.759725 "5 de Abril N°180")
'("San And<NAME> - <NAME> 3177, piso 7" -33.564646 -70.710865 "J<NAME> 3177, piso 7")
'("Base Aérea Pudahuel - Aeropuerto sector Norte lote 16" -33.396403 -70.793753 "Aeropuerto sector Norte lote 16")
'("<NAME> - Puente Nº 779, Santiago" -33.578651 -70.7071405 "Puente Nº 779, Santiago")
'("San Bernardo - Covadonga Nº 664, San Bernardo" -33.5942529 -70.7065787 "Covadonga Nº 664, San Bernardo")
'("Mall Plaza San Bernardo - Presidente J<NAME> N° 20,040, locales BS 122, 126 y 130" -33.6316089 -70.71281424 "Presidente J<NAME> N° 20,040, locales BS 122, 126 y 130")
'("Colina - Font Nº 146" -33.202846 -70.675298 "Font Nº 146")
'("<NAME> - Camino San Alberto Hurtado 3295" -33.5735883 -70.8134319 "Camino San Alberto Hurtado 3295, Región Metropolitana, Chile")
'("Calera de Tango - Av. Calera Tango 345 Municipalidad Calera Tango" -33.629103 -70.768697 "Av. Calera Tango 345 Municipalidad Calera Tango")
'("San José de Maipo - Uno Sur Nº 225, San José de Maipo" -33.6408406 -70.3523686 "Uno Sur Nº 225, San José de Maipo")
'("Buin - JJ Pérez Nº 302" -33.7327659 -70.74141194 "JJ Pérez Nº 302")
'("Saladillo - Av. Santa Teresa 679, Los Andes" -32.8359327 -70.6039869 "Av. Santa Teresa 679, Los Andes")
'("Los Andes - Esmeralda Nº 347, Los Andes" -32.8332936 -70.5978586 "Esmeralda Nº 347, Los Andes")
'("Cordillera Preferencial - Argentina 17 piso 4, oficina 402" -32.8309974 -70.5924856 "Avenida Argentina 17, Los Andes")
'("Melipilla - Serrano Nº 210, Melipilla" -33.6828262 -71.2137351 "Serrano Nº 210, Melipilla")
'("San Felipe - Arturo Prat Nº 161, San Felipe" -32.7502436 -70.7246347 "Arturo Prat Nº 161, San Felipe")
'("Lampa - Baquedano 739" -34.1655788 -70.7670241 "Baquedano 739")
'("Bci Home Rancagua - Bueras Nº 470, Rancagua" -34.170928 -70.744937 "Bueras Nº 470, Rancagua")
'("Av. San Juan Nº 133 C Piso 1, Machalí " -34.1827652 -70.6486342 "San Juan Nº 133, Machalí")
'("Rancagua II - Millán Nº 886, Rancagua" -34.1737718 -70.7481936 "Millán Nº 886, Rancagua")
'("El Cobre - Carretera El Cobre Nº 1002" -34.1865831 -70.72105338 "Carretera El Cobre Nº 1002")
'("Villa Alemana - Avda. Valparaíso N° 896, Villa Alemana" -33.0439541 -71.3692845 "Avda. Valparaíso N° 896, Villa Alemana")
'("Hotel Prat Iquique - Anibal Pinto 601" -32.8839139 -71.2497172 "Anibal Pinto 601")
'("Quillota - Av. L. B. O´Higgins Nº102, Quillota" -32.87840882 -71.24689088 "Av. L. B. O´Higgins Nº102, Quillota")
'("Quilpué - Claudio Vicuña Nº 898, Quilpué" -33.0469042 -71.4419194 "Claudio Vicuña Nº 898, Quilpué")
'("La Calera - <NAME> Nº 244, La Calera" -32.78757 -71.189657 "<NAME> Nº 244, La Calera")
'("San Antonio - Av. Centenario Nº 145, San Antonio" -33.5791403 -71.6077543 "Av. Centenario 145, San Antonio, Región de Valparaiso, Chile")
'("Multiservicio Centro Comercial del lago - CURAUMA Ruta 68 local 1, Valparaiso." -33.118122 -71.561238 "Ruta 68 | |
# File: tests/hwsim/test_wpas_wmm_ac.py
# Test cases for wpa_supplicant WMM-AC operations
# Copyright (c) 2014, Intel Corporation
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
from remotehost import remote_compatible
import logging
logger = logging.getLogger()
import struct
import sys
import hwsim_utils
import hostapd
from utils import fail_test
def add_wmm_ap(apdev, acm_list):
params = { "ssid": "wmm_ac",
"hw_mode": "g",
"channel": "11",
"wmm_enabled" : "1"}
for ac in acm_list:
params["wmm_ac_%s_acm" % (ac.lower())] = "1"
return hostapd.add_ap(apdev, params)
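# Illustrative note (added here, not part of the original test): for example,
# add_wmm_ap(apdev[0], ["VO", "VI"]) builds a hostapd configuration containing
# "wmm_ac_vo_acm=1" and "wmm_ac_vi_acm=1", i.e. an AP that requires admission
# control for the voice and video access categories.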
def test_tspec(dev, apdev):
"""Basic addts/delts tests"""
# configure ap with VO and VI requiring admission-control
hapd = add_wmm_ap(apdev[0], ["VO", "VI"])
dev[0].connect("wmm_ac", key_mgmt="NONE", scan_freq="2462")
hwsim_utils.test_connectivity(dev[0], hapd)
status = dev[0].request("WMM_AC_STATUS")
if "WMM AC is Enabled" not in status:
raise Exception("WMM-AC not enabled")
if "TSID" in status:
raise Exception("Unexpected TSID info")
if "BK: acm=0 uapsd=0" not in status:
raise Exception("Unexpected BK info" + status)
if "BE: acm=0 uapsd=0" not in status:
raise Exception("Unexpected BE info" + status)
if "VI: acm=1 uapsd=0" not in status:
raise Exception("Unexpected VI info" + status)
if "VO: acm=1 uapsd=0" not in status:
raise Exception("Unexpected VO info" + status)
# no tsid --> tsid out of range
if "FAIL" not in dev[0].request("WMM_AC_ADDTS downlink"):
raise Exception("Invalid WMM_AC_ADDTS accepted")
# no direction
if "FAIL" not in dev[0].request("WMM_AC_ADDTS tsid=5"):
raise Exception("Invalid WMM_AC_ADDTS accepted")
# param out of range
if "FAIL" not in dev[0].request("WMM_AC_ADDTS tsid=5 downlink"):
raise Exception("Invalid WMM_AC_ADDTS accepted")
tsid = 5
# make sure we fail when the ac is not configured for acm
try:
dev[0].add_ts(tsid, 3)
raise Exception("ADDTS succeeded although it should have failed")
except Exception as e:
if not str(e).startswith("ADDTS failed"):
raise
status = dev[0].request("WMM_AC_STATUS")
if "TSID" in status:
raise Exception("Unexpected TSID info")
# add tspec for UP=6
dev[0].add_ts(tsid, 6)
status = dev[0].request("WMM_AC_STATUS")
if "TSID" not in status:
raise Exception("Missing TSID info")
# using the same tsid for a different ac is invalid
try:
dev[0].add_ts(tsid, 5)
raise Exception("ADDTS succeeded although it should have failed")
except Exception as e:
if not str(e).startswith("ADDTS failed"):
raise
# update the tspec for a different UP of the same ac
dev[0].add_ts(tsid, 7, extra="fixed_nominal_msdu")
dev[0].del_ts(tsid)
status = dev[0].request("WMM_AC_STATUS")
if "TSID" in status:
raise Exception("Unexpected TSID info")
# verify failure on uplink/bidi without driver support
tsid = 6
try:
dev[0].add_ts(tsid, 7, direction="uplink")
raise Exception("ADDTS succeeded although it should have failed")
except Exception as e:
if not str(e).startswith("ADDTS failed"):
raise
try:
dev[0].add_ts(tsid, 7, direction="bidi")
raise Exception("ADDTS succeeded although it should have failed")
except Exception as e:
if not str(e).startswith("ADDTS failed"):
raise
# attempt to delete non-existing tsid
try:
dev[0].del_ts(tsid)
raise Exception("DELTS succeeded although it should have failed")
except Exception as e:
if not str(e).startswith("DELTS failed"):
raise
# "CTRL: Invalid WMM_AC_ADDTS parameter: 'foo'
if "FAIL" not in dev[0].request("WMM_AC_ADDTS foo"):
raise Exception("Invalid WMM_AC_ADDTS command accepted")
def test_tspec_protocol(dev, apdev):
"""Protocol tests for addts/delts"""
# configure ap with VO and VI requiring admission-control
hapd = add_wmm_ap(apdev[0], ["VO", "VI"])
dev[0].connect("wmm_ac", key_mgmt="NONE", scan_freq="2462")
dev[0].dump_monitor()
hapd.set("ext_mgmt_frame_handling", "1")
tsid = 6
# timeout on ADDTS response
dev[0].add_ts(tsid, 7, expect_failure=True)
hapd.dump_monitor()
req = "WMM_AC_ADDTS downlink tsid=6 up=7 nominal_msdu_size=1500 sba=9000 mean_data_rate=1500 min_phy_rate=6000000"
if "OK" not in dev[0].request(req):
raise Exception("WMM_AC_ADDTS failed")
# a new request while previous is still pending
if "FAIL" not in dev[0].request(req):
raise Exception("WMM_AC_ADDTS accepted while oen was still pending")
msg = hapd.mgmt_rx()
payload = msg['payload']
(categ, action, dialog, status) = struct.unpack('BBBB', payload[0:4])
if action != 0:
raise Exception("Unexpected Action code: %d" % action)
msg['da'] = msg['sa']
msg['sa'] = apdev[0]['bssid']
# unexpected dialog token
msg['payload'] = struct.pack('BBBB', 17, 1, (dialog + 1) & 0xff, 0) + payload[4:]
hapd.mgmt_tx(msg)
# valid response
msg['payload'] = struct.pack('BBBB', 17, 1, dialog, 0) + payload[4:]
hapd.mgmt_tx(msg)
ev = dev[0].wait_event(["TSPEC-ADDED"], timeout=10)
if ev is None:
raise Exception("Timeout on TSPEC-ADDED")
if "tsid=%d" % tsid not in ev:
raise Exception("Unexpected TSPEC-ADDED contents: " + ev)
# duplicated response
msg['payload'] = struct.pack('BBBB', 17, 1, dialog, 0) + payload[4:]
hapd.mgmt_tx(msg)
# too short ADDTS
msg['payload'] = struct.pack('BBBB', 17, 1, dialog, 0)
hapd.mgmt_tx(msg)
# invalid IE
msg['payload'] = struct.pack('BBBB', 17, 1, dialog, 0) + payload[4:] + struct.pack('BB', 0xdd, 100)
hapd.mgmt_tx(msg)
# too short WMM element
msg['payload'] = struct.pack('BBBB', 17, 1, dialog, 0) + payload[4:] + b'\xdd\x06\x00\x50\xf2\x02\x02\x01'
hapd.mgmt_tx(msg)
# DELTS
dev[0].dump_monitor()
msg['payload'] = struct.pack('BBBB', 17, 2, 0, 0) + payload[4:]
hapd.mgmt_tx(msg)
ev = dev[0].wait_event(['TSPEC-REMOVED'], timeout=6)
if ev is None:
raise Exception("Timeout on TSPEC-REMOVED event")
if "tsid=%d" % tsid not in ev:
raise Exception("Unexpected TSPEC-REMOVED contents: " + ev)
# DELTS duplicated
msg['payload'] = struct.pack('BBBB', 17, 2, 0, 0) + payload[4:]
hapd.mgmt_tx(msg)
# start a new request
hapd.dump_monitor()
if "OK" not in dev[0].request(req):
raise Exception("WMM_AC_ADDTS failed")
msg = hapd.mgmt_rx()
payload = msg['payload']
(categ, action, dialog, status) = struct.unpack('BBBB', payload[0:4])
if action != 0:
raise Exception("Unexpected Action code: %d" % action)
msg['da'] = msg['sa']
msg['sa'] = apdev[0]['bssid']
# modified parameters
p12int = payload[12] if sys.version_info[0] > 2 else ord(payload[12])
msg['payload'] = struct.pack('BBBB', 17, 1, dialog, 1) + payload[4:12] + struct.pack('B', p12int & ~0x60) + payload[13:]
hapd.mgmt_tx(msg)
# reject request
msg['payload'] = struct.pack('BBBB', 17, 1, dialog, 1) + payload[4:]
hapd.mgmt_tx(msg)
ev = dev[0].wait_event(["TSPEC-REQ-FAILED"], timeout=10)
if ev is None:
raise Exception("Timeout on TSPEC-REQ-FAILED")
if "tsid=%d" % tsid not in ev:
raise Exception("Unexpected TSPEC-REQ-FAILED contents: " + ev)
hapd.set("ext_mgmt_frame_handling", "0")
@remote_compatible
def test_tspec_not_enabled(dev, apdev):
"""addts failing if AP does not support WMM"""
params = { "ssid": "wmm_no_ac",
"hw_mode": "g",
"channel": "11",
"wmm_enabled" : "0" }
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("wmm_no_ac", key_mgmt="NONE", scan_freq="2462")
status = dev[0].request("WMM_AC_STATUS")
if "Not associated to a WMM AP, WMM AC is Disabled" not in status:
raise Exception("Unexpected WMM_AC_STATUS: " + status)
try:
dev[0].add_ts(5, 6)
raise Exception("ADDTS succeeded although it should have failed")
except Exception as e:
if not str(e).startswith("ADDTS failed"):
raise
# attempt to delete non-existing tsid
try:
dev[0].del_ts(5)
raise Exception("DELTS succeeded although it should have failed")
except Exception as e:
if not str(e).startswith("DELTS failed"):
raise
# unexpected Action frame when WMM is disabled
MGMT_SUBTYPE_ACTION = 13
msg = {}
msg['fc'] = MGMT_SUBTYPE_ACTION << 4
msg['da'] = dev[0].p2p_interface_addr()
msg['sa'] = apdev[0]['bssid']
msg['bssid'] = apdev[0]['bssid']
msg['payload'] = struct.pack('BBBB', 17, 2, 0, 0)
hapd.mgmt_tx(msg)
@remote_compatible
def test_tspec_ap_roam_open(dev, apdev):
"""Roam between two open APs while having tspecs"""
hapd0 = add_wmm_ap(apdev[0], ["VO", "VI"])
dev[0].connect("wmm_ac", key_mgmt="NONE")
hwsim_utils.test_connectivity(dev[0], hapd0)
dev[0].add_ts(5, 6)
hapd1 = add_wmm_ap(apdev[1], ["VO", "VI"])
dev[0].scan_for_bss(apdev[1]['bssid'], freq=2462)
dev[0].roam(apdev[1]['bssid'])
hwsim_utils.test_connectivity(dev[0], hapd1)
if dev[0].tspecs():
raise Exception("TSPECs weren't deleted on roaming")
dev[0].scan_for_bss(apdev[0]['bssid'], freq=2462)
dev[0].roam(apdev[0]['bssid'])
hwsim_utils.test_connectivity(dev[0], hapd0)
@remote_compatible
def test_tspec_reassoc(dev, apdev):
"""Reassociation to same BSS while having tspecs"""
hapd0 = add_wmm_ap(apdev[0], ["VO", "VI"])
dev[0].connect("wmm_ac", key_mgmt="NONE")
hwsim_utils.test_connectivity(dev[0], hapd0)
dev[0].add_ts(5, 6)
last_tspecs = dev[0].tspecs()
dev[0].request("REASSOCIATE")
dev[0].wait_connected()
hwsim_utils.test_connectivity(dev[0], hapd0)
if dev[0].tspecs() != last_tspecs:
raise Exception("TSPECs weren't saved on reassociation")
def test_wmm_element(dev, apdev):
"""hostapd FTM range request timeout"""
try:
run_wmm_element(dev, apdev)
finally:
dev[0].request("VENDOR_ELEM_REMOVE 13 *")
def run_wmm_element(dev, apdev):
params = { "ssid": "wmm" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
bssid = hapd.own_addr()
# Too short WMM IE
dev[0].request("VENDOR_ELEM_ADD 13 dd060050f2020001")
dev[0].scan_for_bss(bssid, freq=2412)
dev[0].connect("wmm", key_mgmt="NONE", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=10)
if ev is None:
raise Exception("Association not rejected")
dev[0].request("REMOVE_NETWORK all")
# Unsupported WMM IE Subtype/Version
dev[0].request("VENDOR_ELEM_ADD 13 dd070050f202000000")
dev[0].connect("wmm", key_mgmt="NONE", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=10)
if ev is None:
raise Exception("Association not rejected")
dev[0].request("REMOVE_NETWORK all")
# Unsupported WMM IE Subtype/Version
dev[0].request("VENDOR_ELEM_ADD 13 dd070050f202010100")
dev[0].connect("wmm", key_mgmt="NONE", scan_freq="2412", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-ASSOC-REJECT"], timeout=10)
if ev is None:
raise Exception("Association not rejected")
dev[0].request("REMOVE_NETWORK all")
def test_tspec_ap_fail(dev, apdev):
"""AP failing to send tspec response"""
# configure ap with VO and VI requiring admission-control
hapd = add_wmm_ap(apdev[0], ["VO", "VI"])
dev[0].connect("wmm_ac", key_mgmt="NONE", scan_freq="2462")
tsid = 5
with fail_test(hapd, 1, "wmm_send_action"):
try:
# add tspec for UP=6
dev[0].add_ts(tsid, 6)
except:
pass
def test_tspec_ap_parsing(dev, apdev):
"""TSPEC AP parsing tests"""
# configure ap with VO and VI requiring admission-control
hapd = add_wmm_ap(apdev[0], ["VO", "VI"])
bssid = hapd.own_addr()
dev[0].connect("wmm_ac", key_mgmt="NONE", scan_freq="2462")
addr = dev[0].own_addr()
tests = [ "WMM_AC_ADDTS downlink tsid=5 up=6 nominal_msdu_size=1500 sba=9000 mean_data_rate=1500 min_phy_rate=600000",
"WMM_AC_ADDTS downlink tsid=5 up=6 nominal_msdu_size=1500 sba=8192 mean_data_rate=1500 min_phy_rate=6000000",
"WMM_AC_ADDTS downlink tsid=5 up=6 nominal_msdu_size=32767 sba=65535 mean_data_rate=1500 min_phy_rate=1000000",
"WMM_AC_ADDTS downlink tsid=5 up=6 nominal_msdu_size=10000 sba=65535 mean_data_rate=2147483647 min_phy_rate=1000000" ]
for t in tests:
if "OK" not in dev[0].request(t):
raise Exception("WMM_AC_ADDTS failed")
ev = dev[0].wait_event(["TSPEC-REQ-FAILED"], timeout=1)
if ev is None:
raise Exception("No response")
tests = | |
# Source file: Code/Python/Apps/Whiteboard/App/Whiteboard.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
import os
import sys
import Axon
import pygame
import cjson
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdownMicroprocess
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Chassis.ConnectedServer import SimpleServer
from Kamaelia.Internet.TCPClient import TCPClient
from Kamaelia.Util.Console import ConsoleEchoer
from Kamaelia.Visualisation.PhysicsGraph.chunks_to_lines import chunks_to_lines
from Kamaelia.Util.NullSink import nullSinkComponent
from Kamaelia.Util.Backplane import Backplane, PublishTo, SubscribeTo
from Kamaelia.Util.Detuple import SimpleDetupler
from Kamaelia.Util.Console import ConsoleEchoer
from Kamaelia.Util.PureTransformer import PureTransformer
# Ticker
from Kamaelia.UI.Pygame.Ticker import Ticker
from Kamaelia.UI.Pygame.Display import PygameDisplay
from Kamaelia.Protocol.Framing import DataChunker, DataDeChunker
#
# The following application specific components will probably be rolled
# back into the repository.
#
from Kamaelia.Apps.Whiteboard.TagFiltering import TagAndFilterWrapper, FilterAndTagWrapper
from Kamaelia.Apps.Whiteboard.TagFiltering import TagAndFilterWrapperKeepingTag
from Kamaelia.Apps.Whiteboard.Tokenisation import tokenlists_to_lines, lines_to_tokenlists
from Kamaelia.Apps.Whiteboard.Canvas import Canvas
from Kamaelia.Apps.Whiteboard.Painter import Painter
from Kamaelia.Apps.Whiteboard.SingleShot import OneShot
from Kamaelia.Apps.Whiteboard.CheckpointSequencer import CheckpointSequencer
from Kamaelia.Apps.Whiteboard.Entuple import Entuple
from Kamaelia.Apps.Whiteboard.Routers import Router, TwoWaySplitter, ConditionalSplitter
from Kamaelia.Apps.Whiteboard.Palette import buildPalette, colours
from Kamaelia.Apps.Whiteboard.Options import parseOptions
from Kamaelia.Apps.Whiteboard.UI import PagingControls, Eraser, ClearPage, SaveDeck, LoadDeck, ClearScribbles, Delete, Quit
from Kamaelia.Apps.Whiteboard.CommandConsole import CommandConsole
#from Kamaelia.Apps.Whiteboard.SmartBoard import SmartBoard
from Kamaelia.Apps.Whiteboard.Webcam import VideoCaptureSource, WebcamManager
from Kamaelia.Apps.Whiteboard.Email import Email
from Kamaelia.Apps.Whiteboard.Decks import Decks
from Kamaelia.Apps.Whiteboard.ProperSurfaceDisplayer import ProperSurfaceDisplayer
from Kamaelia.Apps.Whiteboard.Play import AlsaPlayer
from Kamaelia.Apps.Whiteboard.Record import AlsaRecorder
try:
from Kamaelia.Codec.Speex import SpeexEncode,SpeexDecode
except Exception, e:
print "Speex not available, using null components instead"
SpeexEncode = nullSinkComponent
SpeexDecode = nullSinkComponent
#try:
# from Kamaelia.Apps.Whiteboard.Audio import SoundInput
#except ImportError:
# print "SoundInput not available, using NullSink instead"
# SoundInput = nullSinkComponent
#try:
# from Kamaelia.Apps.Whiteboard.Audio import SoundOutput
#except ImportError:
# print "SoundOutput not available, using NullSink instead"
# SoundOutput = nullSinkComponent
#try:
# from Kamaelia.Apps.Whiteboard.Audio import RawAudioMixer
#except ImportError:
# print "RawAudioMixer not available, using NullSink instead"
# RawAudioMixer = nullSinkComponent
defaults = {"email" : {"server" : "","port" : "","user" : "","pass": "","from" : ""},\
"directories" : {"scribbles" : os.path.expanduser("~") + "/.kamaelia/Kamaelia.Apps.Whiteboard/Scribbles",\
"decks" : os.path.expanduser("~") + "/Whiteboard/Decks"},\
"webcam" : {"device" : "/dev/video0"}}
config = defaults
emailavailable = False
# Load Config
try:
wbdirs = ["/etc/kamaelia/Kamaelia.Apps.Whiteboard","/usr/local/etc/kamaelia/Kamaelia.Apps.Whiteboard",os.path.expanduser("~") + "/.kamaelia/Kamaelia.Apps.Whiteboard"]
raw_config = False
for directory in wbdirs:
if os.path.isfile(directory + "/whiteboard.conf"):
file = open(directory + "/whiteboard.conf")
raw_config = file.read()
file.close()
if raw_config:
try:
temp_config = cjson.decode(raw_config)
entries = ["email","directories", "webcam"]
for entry in entries:
if temp_config.has_key(entry):
for key in temp_config[entry].keys():
config[entry][key] = temp_config[entry][key]
emailavailable = True
except cjson.DecodeError, e:
print("Could not decode config file in " + dir)
except IOError, e:
print ("Failed to load config file")
if defaults['directories']['scribbles'] != config['directories']['scribbles']:
# Remove trailing '/' if exists:
if config['directories']['scribbles'][-1:] == "/":
config['directories']['scribbles'] = config['directories']['scribbles'][0:-1]
# Check directories exist
if os.path.exists(config['directories']['scribbles']):
if not os.path.isdir(config['directories']['scribbles']):
print("You have a user configured Scribbles directory that can't be found. Please create it.")
sys.exit(0)
else:
print("You have a user configured Scribbles directory that can't be found. Please create it.")
sys.exit(0)
elif not os.path.exists(config['directories']['scribbles']):
os.makedirs(config['directories']['scribbles'])
if defaults['directories']['decks'] != config['directories']['decks']:
# Remove trailing '/' if exists:
if config['directories']['decks'][-1:] == "/":
config['directories']['decks'] = config['directories']['decks'][0:-1]
# Check directories exist
if os.path.exists(config['directories']['decks']):
if not os.path.isdir(config['directories']['decks']):
print("You have a user configured Decks directory that can't be found. Please create it.")
sys.exit(0)
else:
print("You have a user configured Decks directory that can't be found. Please create it.")
sys.exit(0)
elif not os.path.exists(config['directories']['decks']):
os.makedirs(config['directories']['decks'])
#
# Misplaced encapsulation --> Kamaelia.Apps.Whiteboard.Palette
#
colours_order = [ "black", "red", "orange", "yellow", "green", "turquoise", "blue", "purple", "darkgrey", "lightgrey" ]
num_pages = 0
for x in os.listdir(config['directories']['scribbles']):
if (os.path.splitext(x)[1] == ".png"):
num_pages += 1
if (num_pages < 1):
num_pages = 1
def FilteringPubsubBackplane(backplaneID,**FilterTagWrapperOptions):
"""Sends tagged events to a backplane. Emits events not tagged by this pubsub."""
return FilterAndTagWrapper(
Pipeline(
PublishTo(backplaneID),
# well, should be to separate pipelines, this is lazier!
SubscribeTo(backplaneID),
),
**FilterTagWrapperOptions
)
def clientconnector(whiteboardBackplane="WHITEBOARD", audioBackplane="AUDIO", port=1500):
return Pipeline(
chunks_to_lines(),
lines_to_tokenlists(),
Graphline(
ROUTER = Router( ((lambda T : T[0]=="SOUND"), "audio"),
((lambda T : T[0]!="SOUND"), "whiteboard"),
),
WHITEBOARD = FilteringPubsubBackplane(whiteboardBackplane),
AUDIO = Pipeline(
SimpleDetupler(1), # remove 'SOUND' tag
SpeexDecode(3),
FilteringPubsubBackplane(audioBackplane, dontRemoveTag=True),
PureTransformer(lambda x : x[1]),
#RawAudioMixer(),
SpeexEncode(3),
Entuple(prefix=["SOUND"],postfix=[]),
),
linkages = {
# incoming messages go to a router
("self", "inbox") : ("ROUTER", "inbox"),
# distribute messages to appropriate destinations
("ROUTER", "audio") : ("AUDIO", "inbox"),
("ROUTER", "whiteboard") : ("WHITEBOARD", "inbox"),
# aggregate all output
("AUDIO", "outbox") : ("self", "outbox"),
("WHITEBOARD", "outbox") : ("self", "outbox"),
# shutdown routing, not sure if this will actually work, but hey!
("self", "control") : ("ROUTER", "control"),
("ROUTER", "signal") : ("AUDIO", "control"),
("AUDIO", "signal") : ("WHITEBOARD", "control"),
("WHITEBOARD", "signal") : ("self", "signal")
},
),
tokenlists_to_lines(),
)
class StringToSurface(component):
# This component converts strings to pygame surfaces
    Inboxes = {
        "inbox" : "Receives raw RGB image strings for conversion to pygame surfaces",
        "control" : "Shutdown signalling (producerFinished / shutdownMicroprocess)",
    }
    Outboxes = {
        "outbox" : "Outputs pygame surfaces",
        "signal" : "Passes shutdown signalling on",
    }
def __init__(self,width=190,height=140):
super(StringToSurface, self).__init__()
self.width = width
self.height = height
def finished(self):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
return True
return False
def main(self):
while not self.finished():
while self.dataReady("inbox"):
data = self.recv("inbox")
# Convert string to Pygame image using a particular size
try: # Prevent crashing with malformed received images
image = pygame.image.fromstring(data,(self.width,self.height),"RGB")
self.send(image, "outbox")
except Exception, e:
sys.stderr.write("Error converting string to PyGame surface in StringToSurface")
self.pause()
yield 1
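# Hedged usage sketch (function name and sizes are illustrative, not part of the
# original app): StringToSurface expects the raw byte string produced by
# pygame.image.tostring() for an RGB surface matching the width/height it was
# constructed with; anything else makes pygame.image.fromstring() raise, which the
# component catches and reports on stderr.
def _string_to_surface_roundtrip_example(width=190, height=140):
    surface = pygame.Surface((width, height))
    raw = pygame.image.tostring(surface, "RGB")
    return pygame.image.fromstring(raw, (width, height), "RGB")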
def clientconnectorwc(webcamBackplane="WEBCAM", port=1501):
# Connects webcams to the network
return Pipeline(
Graphline(
WEBCAM = FilteringPubsubBackplane(webcamBackplane),
STRINGCONVERTER = PureTransformer(lambda x: pygame.image.tostring(x,"RGB")),
SURFACECONVERTER = StringToSurface(190,140),
FRAMER = DataChunker(),
CONSOLE = ConsoleEchoer(),
DEFRAMER = DataDeChunker(),
SIZER = PureTransformer(lambda x: pygame.transform.scale(x,(190,140))), # This is a temporary fix - we should really be sending full resolution images
# The issue is that to do this we need to send the original size as metadata and this needs more work to include
linkages = {
# Receive data from the network - deframe and convert to image for display
("self", "inbox") : ("DEFRAMER", "inbox"),
("DEFRAMER", "outbox") : ("SURFACECONVERTER", "inbox"),
# Send to display
("SURFACECONVERTER", "outbox") : ("WEBCAM", "inbox"),
# Forward local images to the network - convert to strings and frame
("WEBCAM", "outbox") : ("SIZER", "inbox"),
("SIZER", "outbox") : ("STRINGCONVERTER", "inbox"),
("STRINGCONVERTER", "outbox") : ("FRAMER", "inbox"),
# Send to network
("FRAMER", "outbox") : ("self", "outbox"),
},
),
)
#/-------------------------------------------------------------------------
# Server side of the system
#
def LocalEventServer(whiteboardBackplane="WHITEBOARD", audioBackplane="AUDIO", port=1500):
def configuredClientConnector():
return clientconnector(whiteboardBackplane=whiteboardBackplane,
audioBackplane=audioBackplane,
port=port)
    return SimpleServer(protocol=configuredClientConnector, port=port)
def LocalWebcamEventServer(webcamBackplane="WEBCAM", port=1501):
# Sets up the webcam server in a similar way to the one used for images and audio
def configuredClientConnector():
return clientconnectorwc(webcamBackplane=webcamBackplane,
port=port)
    return SimpleServer(protocol=configuredClientConnector, port=port)
#/-------------------------------------------------------------------------
# Client side of the system
#
def EventServerClients(rhost, rport,
whiteboardBackplane="WHITEBOARD",
audioBackplane="AUDIO"):
# plug a TCPClient into the backplane
loadingmsg = "Fetching sketch from server..."
return Graphline(
# initial messages sent to the server, and the local whiteboard
GETIMG = Pipeline(
OneShot(msg=[["GETIMG"]]),
tokenlists_to_lines()
),
BLACKOUT = OneShot(msg="CLEAR 0 0 0\r\n"
"WRITE 100 100 24 255 255 255 "+loadingmsg+"\r\n"),
NETWORK = TCPClient(host=rhost,port=rport),
APPCOMMS = clientconnector(whiteboardBackplane=whiteboardBackplane,
audioBackplane=audioBackplane),
linkages = {
("GETIMG", "outbox") : ("NETWORK", "inbox"), # Single shot out
("APPCOMMS", "outbox") : ("NETWORK", "inbox"), # Continuous out
("BLACKOUT", "outbox") : ("APPCOMMS", "inbox"), # Single shot in
("NETWORK", "outbox") : ("APPCOMMS", "inbox"), # Continuous in
}
)
def WebcamEventServerClients(rhost, rport,
webcamBackplane="WEBCAM"):
# Allows retrieval of remote cam images from the network
# plug a TCPClient into the backplane
return Graphline(
NETWORK = TCPClient(host=rhost,port=rport),
APPCOMMS = clientconnectorwc(webcamBackplane=webcamBackplane),
linkages = {
("APPCOMMS", "outbox") : ("NETWORK", "inbox"), # Continuous out
("NETWORK", "outbox") : ("APPCOMMS", "inbox"), # Continuous in
}
)
#/-------------------------------------------------------------------------
class LocalPageEventsFilter(ConditionalSplitter): # This is a data tap/siphon/demuxer
def condition(self, data):
return (data == [["prev"]]) or (data == [["next"]])
def true(self,data):
self.send((data[0][0], "local"), "true")
SLIDESPEC = config['directories']['scribbles'] +"/slide.%d.png"
def makeBasicSketcher(left=0,top=0,width=1024,height=768,is_client=False):
if is_client:
# This is a temporary addition to prevent slide synchronisation issues between server and client
# This could be removed should full synchronisation of files between clients and servers be achieved
CLEAR = nullSinkComponent()
SAVEDECK = nullSinkComponent()
LOADDECK = nullSinkComponent()
DELETE = nullSinkComponent()
| |
import os
import numpy as np
import h5py
import sqlite3
import sys
from datetime import datetime as dt
from multiprocessing import Process, Lock, BoundedSemaphore
from GCR import GCRQuery
import GCRCatalogs
import lsst.sims.photUtils as sims_photUtils
from lsst.sims.catUtils.dust import EBVbase
from . import sqlite_utils
MAX_PARALLEL = 10   # to be tuned; may depend on chunk size
KNL_FACTOR = 8      # KNL is about 8 times slower than Cori; adjust as needed
_global_cosmoDC2_data = {}
def _logit(log_lock, log_path, to_write):
if log_lock is not None:
log_lock.acquire() # timeout?
with open(log_path, 'a') as out_file:
out_file.write(to_write)
if log_lock is not None:
log_lock.release()
def _good_indices(galaxies, bad_gs):
'''
Return a list of indices for the good galaxies
Parameters:
galaxies: list of galaxy ids, monotone increasing
bad_gs: list of galaxy ids of galaxies deemed bad. Monotone increasing
Return: list of indices referring to those elements in galaxies
which are not in bad_gs
'''
    if len(bad_gs) == 0:
        # no galaxies are flagged bad, so every index is good
        return list(range(len(galaxies)))
    bad_ix = 0
good_ixes = []
for gal_ix in range(0, len(galaxies) ):
while (bad_gs[bad_ix] < galaxies[gal_ix]):
bad_ix += 1
if bad_ix == len(bad_gs) : # no more bad galaxies
good_ixes += [g for g in range(gal_ix, len(galaxies))]
return good_ixes
if galaxies[gal_ix] < bad_gs[bad_ix] : # a good one
good_ixes.append(gal_ix)
return good_ixes
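# Hedged illustration (ids invented; not used by the pipeline): with both lists
# sorted ascending, _good_indices keeps the positions of entries in `galaxies`
# that do not appear in `bad_gs`.
def _good_indices_example():
    # ids 12 and 20 are flagged bad, so only positions 0 and 2 survive
    assert _good_indices([10, 12, 15, 20], [12, 20]) == [0, 2]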
def _write_sqlite(dbfile, galaxy_ids, ra, dec, redshift, flux_by_band_MW,
flux_by_band_noMW, good_ixes):
with sqlite3.connect(dbfile) as conn:
sqlite_utils.write_column_descriptions(conn)
cursor = conn.cursor()
cmd = '''CREATE TABLE IF NOT EXISTS truth_summary
(id BIGINT, host_galaxy BIGINT, ra DOUBLE, dec DOUBLE,
redshift FLOAT, is_variable INT, is_pointsource INT,
flux_u FLOAT, flux_g FLOAT, flux_r FLOAT,
flux_i FLOAT, flux_z FLOAT, flux_y FLOAT,
flux_u_noMW FLOAT, flux_g_noMW FLOAT, flux_r_noMW FLOAT,
flux_i_noMW FLOAT, flux_z_noMW FLOAT, flux_y_noMW FLOAT)'''
cursor.execute(cmd)
conn.commit()
#print("Created table if not exists truth_summary")
values = ((int(galaxy_ids[i_obj]),int(-1),
ra[i_obj],dec[i_obj],
redshift[i_obj], 0, 0,
flux_by_band_MW['u'][i_obj], flux_by_band_MW['g'][i_obj],
flux_by_band_MW['r'][i_obj], flux_by_band_MW['i'][i_obj],
flux_by_band_MW['z'][i_obj], flux_by_band_MW['y'][i_obj],
flux_by_band_noMW['u'][i_obj], flux_by_band_noMW['g'][i_obj],
flux_by_band_noMW['r'][i_obj], flux_by_band_noMW['i'][i_obj],
flux_by_band_noMW['z'][i_obj], flux_by_band_noMW['y'][i_obj])
for i_obj in good_ixes)
cursor.executemany('''INSERT INTO truth_summary
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''',
values)
conn.commit()
#sys.stdout.flush()
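# Hedged sketch (not part of the pipeline): once truth_summary has been written, a
# few rows can be pulled back for a quick sanity check; `dbfile` is whatever path
# was handed to _write_sqlite.
def _read_back_example(dbfile, limit=5):
    with sqlite3.connect(dbfile) as conn:
        cursor = conn.cursor()
        cursor.execute("SELECT id, ra, dec, redshift, flux_r, flux_r_noMW "
                       "FROM truth_summary LIMIT ?", (limit,))
        return cursor.fetchall()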
def _process_chunk(db_lock, log_lock, sema, sed_fit_name, cosmoDC2_data,
first_gal, self_dict, bad_gals):
"""
Do all chunk-specific work: compute table contents for a
collection of galaxies and write to db
Parameters
----------
db_lock Used to avoid conflicts writing to sqlite output
log_lock Used to avoid conflicts writing to per-healpixel log
sema A semaphore. Release when done
sed_fit_name File where sed fits for this healpixel are
cosmoDC2_data Values from cosmoDC2 for this healpixel, keyed by
column name
first_gal index of first galaxy in our chunk (in sed fit list)
self_dict Random useful values stored in GalaxyTruthWriter
bad_gals List of galaxy ids, monotone increasing, to be
skipped
"""
dry = self_dict['dry']
chunk_size = self_dict['chunk_size']
dbfile = self_dict['dbfile']
logfile = self_dict['logfile']
if dry:
_logit(log_lock, logfile,
'_process_chunk invoke for first_gal {}, chunk size {}'.format(first_gal, chunk_size))
if sema is None:
return
sema.release()
#exit(0)
return
lsst_bp_dict = self_dict['lsst_bp_dict']
galaxy_ids = []
ra = []
dec = []
redshift = []
ebv_vals = None
ebv_vals_init = False # does this belong somewhere else?
ccm_w = None
total_gals = self_dict['total_gals']
chunk_start = first_gal
chunk_end = min(first_gal + chunk_size, total_gals)
with h5py.File(sed_fit_name, 'r') as sed_fit_file:
sed_names = sed_fit_file['sed_names'][()]
        sed_names = [s.decode() for s in sed_names]  # because stored as bytes
gals_this_chunk = chunk_end - chunk_start
subset = slice(chunk_start, chunk_end)
galaxy_ids = sed_fit_file['galaxy_id'][()][subset]
to_log = 'Start with galaxy #{}, id={}\n# galaxies for _process_chunk: {}\n'.format(first_gal, galaxy_ids[0], len(galaxy_ids))
_logit(log_lock, logfile, to_log)
# get the cross-match between the sed fit and cosmoDC2
cosmo_len = len(cosmoDC2_data['galaxy_id'])
crossmatch_dex = np.searchsorted(cosmoDC2_data['galaxy_id'],
galaxy_ids)
np.testing.assert_array_equal(
galaxy_ids, cosmoDC2_data['galaxy_id'][crossmatch_dex])
ra = sed_fit_file['ra'][()][subset]
dec = sed_fit_file['dec'][()][subset]
np.testing.assert_array_equal(ra,
cosmoDC2_data['ra'][crossmatch_dex])
np.testing.assert_array_equal(dec,
cosmoDC2_data['dec'][crossmatch_dex])
good_ixes = _good_indices(galaxy_ids.tolist(), bad_gals[0])
if (len(good_ixes) == 0):
if sema is not None:
sema.release()
return
else:
_logit(log_lock, logfile,
'Found {} good indices for chunk starting with {}\n'.format(len(good_ixes), chunk_start))
flux_by_band_MW = {}
flux_by_band_noMW = {}
# Calculate E(B-V) for dust extinction in Milky Way along relevant
# lines of sight
band_print="Processing band {}, first gal {}, time {}\n"
if not ebv_vals_init:
equatorial_coords = np.array([np.radians(ra), np.radians(dec)])
ebv_model = EBVbase()
ebv_vals = ebv_model.calculateEbv(equatorialCoordinates=equatorial_coords,
interp=True)
ebv_vals_init = True
for i_bp, bp in enumerate('ugrizy'):
if (i_bp == 0 or i_bp == 5):
_logit(log_lock, logfile,
band_print.format(bp, first_gal, dt.now()))
fluxes_noMW = {}
fluxes = {}
for component in ['disk', 'bulge']:
fluxes_noMW[component] = np.zeros(gals_this_chunk, dtype=float)
fluxes[component] = np.zeros(gals_this_chunk, dtype=float)
for component in ['disk', 'bulge']:
#print(" Processing component ", component)
sed_arr = sed_fit_file['%s_sed' % component][()][subset]
av_arr = sed_fit_file['%s_av' % component][()][subset]
rv_arr = sed_fit_file['%s_rv' % component][()][subset]
mn_arr = sed_fit_file['%s_magnorm' % component][()][i_bp,:][subset]
z_arr = cosmoDC2_data['redshift'][crossmatch_dex]
gii = 0
done = False
for i_gal, (s_dex, mn, av,
rv, zz, ebv) in enumerate(zip(sed_arr, mn_arr,
av_arr, rv_arr,
z_arr, ebv_vals)):
if done: break
while good_ixes[gii] < i_gal :
gii += 1
if gii == len(good_ixes): # ran out of good ones
done = True
break
if done: break
if good_ixes[gii] > i_gal : # skipped over it; it's bad
continue
# Leave space for it in the arrays, but values
# for all the fluxes will be left at 0
# read in the SED file from the library
sed_file_name = os.path.join(self_dict['sed_lib_dir'],
sed_names[s_dex])
sed = sims_photUtils.Sed()
sed.readSED_flambda(sed_file_name)
# find and apply normalizing flux
fnorm = sims_photUtils.getImsimFluxNorm(sed, mn)
sed.multiplyFluxNorm(fnorm)
# add internal dust
if ccm_w is None or not np.array_equal(sed.wavelen, ccm_w):
ccm_w = np.copy(sed.wavelen)
a_x, b_x = sed.setupCCM_ab()
sed.addDust(a_x, b_x, A_v=av, R_v=rv)
# apply redshift
sed.redshiftSED(zz, dimming=True)
# flux, in Janskys, without Milky Way dust extinction
f_noMW = sed.calcFlux(lsst_bp_dict[bp])
# apply Milky Way dust
# (cannot reuse a_x, b_x because wavelength grid changed
# when we called redshiftSED)
a_x_mw, b_x_mw = sed.setupCCM_ab()
sed.addDust(a_x_mw, b_x_mw, R_v=3.1, ebv=ebv)
f_MW = sed.calcFlux(lsst_bp_dict[bp])
fluxes_noMW[component][i_gal] = f_noMW
fluxes[component][i_gal] = f_MW
if (component == 'disk') and (bp == 'r'):
redshift = z_arr
# Sum components and convert to nanojansky
total_fluxes = (fluxes_noMW['disk'] + fluxes_noMW['bulge']) * 10**9
total_fluxes_MW = (fluxes['disk'] + fluxes['bulge']) * 10**9
dummy_sed = sims_photUtils.Sed()
# add magnification due to weak lensing
kappa = cosmoDC2_data['convergence'][crossmatch_dex]
gamma_sq = (cosmoDC2_data['shear_1'][crossmatch_dex]**2
+ cosmoDC2_data['shear_2'][crossmatch_dex]**2)
magnification = 1.0/((1.0-kappa)**2-gamma_sq)
magnified_fluxes = magnification*total_fluxes
magnified_fluxes_MW = magnification*total_fluxes_MW
flux_by_band_noMW[bp] = magnified_fluxes
flux_by_band_MW[bp] = magnified_fluxes_MW
# Open connection to sqlite db and write
#print('Time before db write is {}, first gal={}'.format(dt.now(), first_gal))
#sys.stdout.flush()
if not db_lock.acquire(timeout=120.0):
_logit(log_lock, logfile,
"Failed to acquire db lock, first gal=", first_gal)
if sema is None:
return
sema.release()
exit(1)
try:
_write_sqlite(dbfile, galaxy_ids, ra, dec, redshift,
flux_by_band_MW, flux_by_band_noMW, good_ixes)
db_lock.release()
if sema is not None:
sema.release()
_logit(log_lock, logfile,
'Time after db write: {}, first_gal={}\n'.format(dt.now(),first_gal))
exit(0)
except Exception as ex:
db_lock.release()
if sema is not None:
sema.release()
raise(ex)
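# Hedged helper (name invented; mirrors the weak-lensing math applied in
# _process_chunk above): fluxes are scaled by the magnification
# 1 / ((1 - kappa)^2 - (shear_1^2 + shear_2^2)).
def _lensing_magnification(kappa, shear_1, shear_2):
    gamma_sq = np.asarray(shear_1)**2 + np.asarray(shear_2)**2
    return 1.0 / ((1.0 - np.asarray(kappa))**2 - gamma_sq)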
class GalaxyTruthWriter(object):
'''
Writes truth catalog for static galaxies to sqlite3 file for a
single healpixel
'''
def __init__(self, output_dir, hpid, sed_fit_dir, mag_cut, chunk_size,
start=0, nchunk=None, parallel=10, dry=False, call=False,
knl=False):
#self.sed_fit_dir = os.path.join(sed_fit_dir,
# 'DC2/cosmoDC2_v1.1.4/sedLookup')
self.sed_fit_dir = sed_fit_dir
assert os.path.isdir(self.sed_fit_dir)
self.sed_fit_name = os.path.join(self.sed_fit_dir,
'sed_fit_%d.h5' % hpid)
assert os.path.isfile(self.sed_fit_name)
assert os.path.isdir(output_dir)
self.dbfile = os.path.join(output_dir,
'truth_summary_hp{}.sqlite3'.format(hpid))
self.logfile = os.path.join(output_dir,
'truth_summary_log_hp{}.txt'.format(hpid))
logfile = self.logfile
self.hpid = hpid
self.mag_cut = mag_cut
self.chunk_size=chunk_size
self.start = start
self.nchunk = nchunk
self.dry = dry
self.parallel = parallel
self.call = call
self.knl = knl
logstring='GalaxyTruthWriter invoked with arguments\n output-dir={}\n'
logstring += 'healpixel={}\nsed_fit_dir={}\nmag_cut={}\nchunk_size={}\nchunk={}\nparallel={}\ndry\n'
_logit(None, logfile, logstring.format(output_dir, hpid, sed_fit_dir,
mag_cut,chunk_size, nchunk,
parallel,dry))
def do_indices(dbfile):
with sqlite3.connect(dbfile) as conn:
cursor = conn.cursor()
cmd = '''CREATE INDEX IF NOT EXISTS ra_dec ON truth_summary (ra,dec)'''
cursor.execute(cmd)
cmd = '''CREATE UNIQUE INDEX IF NOT EXISTS gal_id ON truth_summary (id)'''
cursor.execute(cmd)
            conn.commit()
#print("created indexes")
def write(self):
"""
Do all the 'shared' work (get cosmoDC2 information, figure
out how to chunk, etc.)
"""
_logit(None, self.logfile,
'Enter GalaxyTruthWriter.write, time {}\n'.format(dt.now()))
db_lock = Lock()
log_lock = Lock()
self.log_lock = log_lock
sema = BoundedSemaphore(self.parallel)
self_dict = {}
self_dict['dry'] = self.dry
self_dict['chunk_size'] = self.chunk_size
self_dict['dbfile'] = self.dbfile
self_dict['logfile'] = self.logfile
bad_gals = []
cosmoDC2_data = {}
# read in LSST bandpasses
lsst_bp_dict=sims_photUtils.BandpassDict.loadTotalBandpassesFromFiles()
if self.dry:
self.total_gals = 17e6
bad_gals = list()
self.cosmoDC2_data = {}
else:
cat = GCRCatalogs.load_catalog('cosmoDC2_v1.1.4_image')
_logit(log_lock, self.logfile,"cosmoDC2 catalog loaded")
# get galaxy_id and redshift for crossmatching with SED fit files;
# we will also get the magnitudes that should be reproduced
# by our synthetic photometry (hp_query makes sure we only load
# the healpixel we are interested in)
hp_query = GCRQuery('healpix_pixel==%d' % self.hpid)
cosmoDC2_data = cat.get_quantities(
| |
        scan_j: :class:`~.ScanBase`
Returns
-------
:class:`float`
'''
peak_set_a = self.peak_getter(scan_i)
peak_set_b = self.peak_getter(scan_j)
return peak_set_similarity(
peak_set_a, peak_set_b)
def find_best_cluster_for_scan(self, scan):
'''Locate the best cluster to add ``scan`` to according to
precursor mass and peak set similarity.
Parameters
----------
scan: :class:`~.ScanBase`
Returns
-------
:class:`SpectrumCluster`
'''
best_cluster = None
best_similarity = 0.0
n = len(self.clusters)
if n == 0:
return best_cluster
center_i = self._binsearch_simple(scan.precursor_information.neutral_mass)
i = center_i
while i >= 0:
cluster = self.clusters[i]
if abs(_ppm_error(scan.precursor_information.neutral_mass,
cluster.neutral_mass)) > self.precursor_error_tolerance:
break
similarity = self.peak_set_similarity(scan, cluster[0])
i -= 1
if similarity > best_similarity and similarity > self.minimum_similarity:
best_similarity = similarity
best_cluster = cluster
i = center_i + 1
while i < n:
cluster = self.clusters[i]
if abs(_ppm_error(scan.precursor_information.neutral_mass,
cluster.neutral_mass)) > self.precursor_error_tolerance:
break
similarity = self.peak_set_similarity(scan, cluster[0])
i += 1
if similarity > best_similarity and similarity > self.minimum_similarity:
best_similarity = similarity
best_cluster = cluster
return best_cluster
def add_scan(self, scan):
'''Add ``scan`` to the cluster collection, adding it to the best
matching cluster, or starting a new cluster around it if no good
match can be found.
Parameters
----------
scan: :class:`~.ScanBase`
'''
best_cluster = self.find_best_cluster_for_scan(scan)
if best_cluster:
best_cluster.append(scan, incremental_similarity=self.track_incremental_similarity)
else:
self.clusters.add(SpectrumCluster([scan]))
def __iter__(self):
return iter(self.clusters)
def __len__(self):
return len(self.clusters)
def __getitem__(self, i):
return self.clusters[i]
def _get_tic(self, scan):
try:
return self.peak_getter.tic(scan)
except AttributeError:
return sum(p.intensity for p in self.peak_getter(scan))
@classmethod
def cluster_scans(cls, scans, precursor_error_tolerance=1e-5, minimum_similarity=0.1,
peak_getter=None, sort=True, track_incremental_similarity=False):
'''Cluster scans by precursor mass and peak set similarity.
Parameters
----------
scans: :class:`Iterable`
An iterable of :class:`Scan`-like objects
precursor_error_tolerance: :class:`float`
The PPM mass accuracy threshold for precursor mass differences to
tolerate when deciding whether to compare spectra. Defaults to 1e-5.
minimum_similarity: :class:`float`
The minimum peak set similarity required to consider adding a spectrum
to a cluster. Defaults to 0.1
peak_getter: :class:`Callable`
A callable object used to get peaks from elements of ``scans``.
sort: :class:`bool`
Whether or not to sort spectra by their total ion current before clustering.
'''
self = cls([], precursor_error_tolerance, minimum_similarity,
peak_getter, track_incremental_similarity)
if sort:
scans = self._sort_by_tic(scans)
if len(scans) > 100:
self.log("Clustering %d Scans" % (len(scans), ))
n = len(scans)
report_interval = max(min(n // 10, 1000), 50)
for i, scan in enumerate(scans):
if i % report_interval == 0 and i:
self.log("... Handled %d Scans (%0.2f%%)" % (i, i * 100.0 / n))
self.add_scan(scan)
return self.clusters
def _sort_by_tic(self, scans):
should_log = len(scans) > 100
if should_log:
self.log("Sorting Scans By TIC")
augmented = []
n = len(scans)
for i, scan in enumerate(scans):
if i % 1000 == 0 and i > 0:
self.log("... Loaded TIC for %d Scans (%0.2f%%)" % (i, i * 100.0 / n))
augmented.append((self._get_tic(scan), scan))
augmented.sort(key=lambda x: x[0], reverse=True)
scans = [a[1] for a in augmented]
return scans
@classmethod
def iterative_clustering(cls, scans, precursor_error_tolerance=1e-5, similarity_thresholds=None,
peak_getter=None):
'''Cluster scans by precursor mass and peak set similarity, iteratively refining
clusters with increasing similarity threshold requirements.
Parameters
----------
scans: :class:`Iterable`
An iterable of :class:`Scan`-like objects
precursor_error_tolerance: :class:`float`
The PPM mass accuracy threshold for precursor mass differences to
tolerate when deciding whether to compare spectra. Defaults to 1e-5.
similarity_thresholds: :class:`Sequence` of :class:`float`
A series of similarity thresholds to apply as spectra are added to clusters
and as clusters are iteratively refined.
peak_getter: :class:`Callable`
A callable object used to get peaks from elements of ``scans``.
'''
peak_getter = cls._guess_peak_getter(peak_getter)
if similarity_thresholds is None:
similarity_thresholds = [0.1, .4, 0.7]
singletons = []
to_bisect = [scans]
logger = LogUtilsMixin()
track_similarity = [False] * (len(similarity_thresholds) - 1)
track_similarity.append(True)
for similarity_threshold, track in zip(similarity_thresholds, track_similarity):
logger.log("Clustering with Threshold %0.2f" % (similarity_threshold, ))
next_to_bisect = []
if len(to_bisect) > 1:
logger.log("Refining %d Clusters" % (len(to_bisect)))
elif to_bisect:
logger.log("Clustering %d Scans" % (len(to_bisect[0])))
else:
logger.log("Nothing to cluster...")
break
n = len(to_bisect)
report_interval = max(min(n // 10, 1000), 1)
for i, group in enumerate(to_bisect):
if i % report_interval == 0:
logger.log("... Handling Batch %d (%d Scans)" % (i, len(group)))
clusters = cls.cluster_scans(
group, precursor_error_tolerance,
minimum_similarity=similarity_threshold,
peak_getter=peak_getter, sort=True, track_incremental_similarity=track)
for cluster in clusters:
if len(cluster) == 1:
singletons.append(cluster)
else:
next_to_bisect.append(cluster)
logger.log("%d Singletons and %d Groups" % (len(singletons), len(next_to_bisect)))
to_bisect = next_to_bisect
return SpectrumClusterCollection(sorted(list(singletons) + list(to_bisect)))
cluster_scans = ScanClusterBuilder.cluster_scans
iterative_clustering = ScanClusterBuilder.iterative_clustering
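# Hedged usage sketch (function name invented): the module-level aliases above let
# callers run the whole pipeline in one call; a thin wrapper might look like this,
# with the peak getter left to the caller.
def _cluster_scans_example(scans, peak_getter=None):
    return iterative_clustering(scans, precursor_error_tolerance=1e-5,
                                similarity_thresholds=[0.1, 0.4, 0.7],
                                peak_getter=peak_getter)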
class ScanClusterWriterBase(object):
'''A base class for writing :class:`ScanCluster` objects to an
I/O stream like a file.
Attributes
----------
stream: :class:`io.IOBase`
The stream to write the clusters to
metadata: :class:`dict`
A set of key-value pairs that describe this
collection.
'''
def __init__(self, stream, metadata=None):
self.stream = stream
self.metadata = metadata or {}
self._wrote_metadata = False
def _write(self, data):
self.stream.write(data)
def save(self, cluster):
'''Write ``cluster`` to the output stream, recording its
        members and calculating its average similarity.
Parameters
----------
cluster: :class:`SpectrumCluster`
The spectrum cluster to write out
'''
if not self._wrote_metadata:
self.write_metadata()
self._wrote_metadata = True
self._save(cluster)
def _save(self, cluster):
raise NotImplementedError()
def save_all(self, clusters):
'''Write each :class:`SpectrumCluster` in ``clusters`` out,
calling :meth:`save` on each one.
Parameters
----------
clusters: :class:`collections.Iterable` of :class:`SpectrumCluster`
The spectrum clusters to write out
'''
raise NotImplementedError()
def add_metadata(self, key, value):
'''Add metadata to the writer. That metadata will be flushed
out upon starting to write clusters out.
Parameters
----------
key: :class:`str`
The metadata element's name
value: :class:`str`, :class:`float`
The metadata element's value
'''
        if self._wrote_metadata:
raise TypeError(
"Cannot add additional metadata, the metadata has already been written")
self.metadata[key] = value
def write_metadata(self):
'''Write the accumulated metadata out in a format-appropriate
manner at the top of the file.
'''
if self._wrote_metadata:
raise TypeError("Already wrote metadata!")
class ScanClusterWriter(ScanClusterWriterBase):
'''Writes :class:`ScanCluster` objects to a hierarchical text stream
Parameters
----------
stream: :class:`io.IOBase`
The stream to write the clusters to
'''
def __init__(self, stream, metadata=None):
super(ScanClusterWriter, self).__init__(stream, metadata)
def write_metadata(self):
for key, value in self.metadata.items():
self._write("#%s = %s\n" % (key, value))
self._write("\n")
def _save(self, cluster):
'''Write ``cluster`` as a tab delimited tree, recording its
        members and calculating its average similarity.
Parameters
----------
cluster: :class:`SpectrumCluster`
The spectrum cluster to write out
'''
self._write("%f\t%d\t%f\n" % (cluster.neutral_mass, len(cluster), cluster.average_similarity()))
for member in cluster:
member_source = member.source
if member_source is not None:
source_name = member_source.source_file
if not isinstance(source_name, basestring):
if hasattr(source_name, 'name'):
source_name = source_name.name
else:
source_name = ":detatched:"
self._write("\t%s\t%s\n" % (source_name, member.id))
self._write('\n')
def save_all(self, clusters):
'''Write each :class:`SpectrumCluster` in ``clusters`` out,
calling :meth:`save` on each one.
Parameters
----------
clusters: :class:`collections.Iterable` of :class:`SpectrumCluster`
The spectrum clusters to write out
'''
for cluster in clusters:
self.save(cluster)
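# Hedged usage sketch (stream path and metadata values invented): ScanClusterWriter
# emits "#key = value" header lines, then one block per cluster consisting of a
# "neutral_mass<TAB>size<TAB>average_similarity" line followed by one
# "<TAB>source<TAB>scan_id" line per member and a blank separator line.
def _write_clusters_example(clusters, path="clusters.txt"):
    with open(path, "w") as handle:
        writer = ScanClusterWriter(handle, metadata={"tool": "example"})
        writer.save_all(clusters)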
class JSONScanClusterWriter(ScanClusterWriterBase):
'''A :class:`ScanClusterWriterBase` that uses JSON
lines.
'''
def write_metadata(self):
json.dump(self.metadata, self.stream)
self._write("\n")
def _save(self, cluster):
json.dump(cluster.to_dict(), self.stream)
self._write("\n")
def save_all(self, clusters):
for cluster in clusters:
self.save(cluster)
class ScanClusterReaderBase(object):
'''Base class for reading spectrum clusters from disk.
Attributes
----------
stream: :class:`io.IOBase`
The stream to read the clusters from
resolver_map: :class:`dict`
A mapping from scan source name to a :class:`Callable`
which will return a :class:`~.ScanBase` object representing
that spectrum.
metadata: :class:`dict`
A set of key-value pairs that describe this
collection.
clusters: :class:`list`
The read clusters.
'''
def __init__(self, stream, resolver_map):
self.stream = stream
self.resolver_map = resolver_map
self.clusters = list()
self.metadata = {}
self._generator = None
def _resolve(self, source, scan_id):
resolver = self.resolver_map[source]
try:
return resolver.get_scan_by_id(scan_id)
except AttributeError:
return resolver(scan_id)
def _parse(self):
'''Parse the cluster collection from :attr:`stream`
'''
self._load_metadata()
return self._load_clusters()
def _load_metadata(self):
'''Read the metadata header from :attr:`stream`.
'''
raise NotImplementedError()
def _load_clusters(self):
'''Read the data describing :class:`SpectrumCluster` objects from
:attr:`stream`.
'''
raise NotImplementedError()
def __iter__(self):
return self
def __next__(self):
'''Advance the iterator, retrieving the next :class:`SpectrumCluster`
Returns
-------
:class:`SpectrumCluster`
'''
if self._generator is None:
self._generator = self._parse()
return next(self._generator)
def next(self):
'''Advance the iterator, retrieving the next :class:`SpectrumCluster`
Returns
-------
:class:`SpectrumCluster`
'''
return self.__next__()
class ScanClusterReader(ScanClusterReaderBase):
'''Reads :class:`SpectrumCluster` objects from hierarchical text files written by
:class:`ScanClusterWriter`.
'''
def __init__(self, stream, resolver_map):
super(ScanClusterReader, self).__init__(stream, resolver_map)
self._line_buffer = deque()
def _next_line(self):
if self._line_buffer:
return self._line_buffer.popleft()
return self.stream.readline()
def _return_line(self, line):
self._line_buffer.append(line)
def _stream_lines(self):
line = self._next_line()
while line:
yield line
line = self._next_line()
def _load_metadata(self):
line = self._next_line()
while line.startswith("#"):
key, value = line.strip().split(" = ", 1)
try:
value = float(value)
except ValueError:
value = str(value)
self.metadata[key] = value
self._return_line(line)
def _load_clusters(self):
current_cluster = []
mass = None
| |
# Source repository: RohiBaner/Beijing-Air-Quality-Prediction
''' --------------------------------------------IMPORTING NECESSARY LIBRARIES------------------------------------------- '''
import numpy as np
import pandas as pd
from math import radians, cos, sin, asin, sqrt
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold
from itertools import cycle
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import time
start_time = time.time()
pd.options.mode.chained_assignment = None # default='warn'
''' ---------------------------FUNCTIONS TO FIND NEAREST DISTANCE BETWEEN ALL NECESSARY STATIONS------------------------ '''
# Function to find nearest station between two points using Haversine Distance
def haversine_dist(lon1, lat1, lon2, lat2):
# Calculate the great circle distance between two points on the earth
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) # Convert to radians
# Haversine distance formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 6371 #Radius of earth in kilometers
return c * r
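# Hedged sanity check (coordinates approximate, roughly central Beijing vs. the
# Capital Airport): haversine_dist takes lon/lat in degrees and returns kilometres,
# so this should come out on the order of a couple of tens of km.
def _haversine_example():
    return haversine_dist(116.397, 39.909, 116.584, 40.080)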
# Find nearest AQ to AQ station
def near_aq_to_aq(lat, long):
distances = station_aq.apply(lambda row: haversine_dist(lat, long, row['latitude'], row['longitude']), axis=1)
distance = distances[distances!=0]
return station_aq.loc[distance.idxmin(), 'station']
# Find nearest GW to GW station
def near_gw_to_gw(lat, long):
distances = gw_station.apply(lambda row: haversine_dist(lat, long, row['latitude'], row['longitude']), axis=1)
distance = distances[distances!=0]
return gw_station.loc[distance.idxmin(), 'station_id']
# Find nearest OBW to OBW station
def near_obw_to_obw(lat, long):
distances = obw_station.apply(lambda row: haversine_dist(lat, long, row['latitude'], row['longitude']), axis=1)
distance = distances[distances!=0]
return obw_station.loc[distance.idxmin(), 'station_id']
# Find nearest AQ to OBW station
def near_aq_to_obw(lat, long):
distances = obw_station.apply(lambda row: haversine_dist(lat, long, row['latitude'], row['longitude']), axis=1)
return obw_station.loc[distances.idxmin(), 'station_id']
# Find nearest AQ to GW station
def near_aq_to_gw(lat, long):
distances = gw_station.apply(lambda row: haversine_dist(lat, long, row['latitude'], row['longitude']), axis=1)
return gw_station.loc[distances.idxmin(), 'station_id']
# Function to calculate the model error via SMAPE
def smape(actual, predicted):
dividend= np.abs(np.array(actual) - np.array(predicted))
denominator = np.array(actual) + np.array(predicted)
return 2 * np.mean(np.divide(dividend, denominator, out=np.zeros_like(dividend), where=denominator!=0, casting='unsafe'))
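# Hedged illustration (numbers invented): SMAPE as defined above is symmetric,
# lies in [0, 2], and the where= guard keeps 0/0 pairs from producing NaNs.
def _smape_example():
    actual = [10.0, 0.0, 5.0]
    predicted = [10.0, 0.0, 10.0]
    return smape(actual, predicted)  # only the last pair contributes: 2 * mean([0, 0, 1/3]) ~ 0.222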
''' ------------------------------------------TRAIN: AIR QUALITY PREPROCESSING------------------------------------------ '''
print('Preprocessing and cleaning the train Air Quality Dataset!')
# Read all the air quality datasets
aq_2017 = pd.read_csv("airQuality_201701-201801.csv")
aq_2018 = pd.read_csv("airQuality_201802-201803.csv")
aq_2018a = pd.read_csv("aiqQuality_201804.csv")
# Renaming the header of April AQ dataset to match the other AQ datasets
aq_2018a.rename(columns={'station_id': 'stationId', 'time': 'utc_time', 'PM25_Concentration':'PM2.5'\
,'PM10_Concentration':'PM10','NO2_Concentration':'NO2'\
,'CO_Concentration':'CO', 'O3_Concentration':'O3'\
,'SO2_Concentration':'SO2'}, inplace=True)
aq_2018a= aq_2018a.drop(columns=['id'], axis=1)
# Merge all AQ datasets together into a single dataframe
aq_train = aq_2017.append(aq_2018, ignore_index=True)
aq_train = aq_train.append(aq_2018a, ignore_index=True)
# Convert the entire 'utc_time' column into the same format
aq_train["utc_time"] = pd.to_datetime(aq_train["utc_time"])
# Delete unnecessary dataframes to save space
del(aq_2017)
del(aq_2018)
del(aq_2018a)
# Set the time column as the index of the dataframe
aq_train.set_index("utc_time", inplace = True)
# Get the entire span of the time in the AQ dataframe
min_date=aq_train.index.min()
max_date=aq_train.index.max()
# Drop any duplicates present in the AQ dataframe
aq_train.drop_duplicates(subset= None, keep= "first", inplace= True)
# Read the AQ station location file and find nearest station for each AQ station
# This dataset was created by us
station_aq = pd.read_csv("Beijing_AirQuality_Stations.csv")
station_aq["nearest_station"] = station_aq.apply(lambda row: near_aq_to_aq(row['latitude'], row['longitude']), axis=1)
# Create an empty dataframe with all hourly time stamps in the above found range
time_hours = pd.DataFrame({"date": pd.date_range(min_date, max_date, freq='H')})
# Perform a cartesian product of all AQ stations and the above dataframe
aq_all_time = pd.merge(time_hours.assign(key=0), station_aq.assign(key=0), on='key').drop('key', axis=1)
# Join the AQ dataset with the dataframe containing all the timestamps for each AQ station
aq_train1 = pd.merge(aq_train, aq_all_time, how='right', left_on=['stationId','utc_time'], right_on = ['station','date'])
aq_train1 = aq_train1.drop('stationId', axis=1)
aq_train1.drop_duplicates(subset= None, keep= "first", inplace= True)
# Create a copy of the above dataframe keeping all required columns
# This dataframe will be used to refer all data for the nearest AQ station (same time interval)
aq_train_copy = aq_train1.copy()
aq_train_copy = aq_train_copy.drop(['nearest_station','longitude', 'latitude', 'type'], axis=1)
aq_train_copy.rename(columns={'PM2.5': 'n_PM2.5','PM10': 'n_PM10', "NO2":"n_NO2","CO":"n_CO","O3":"n_O3",
"SO2":"n_SO2", "date":"n_date", "station":"n_station" }, inplace=True)
# Merge original AQ data and the copy AQ data to get all attributes of a particular AQ station and its nearest AQ station
aq_train2 = pd.merge(aq_train1, aq_train_copy, how='left', left_on=['nearest_station','date'], right_on = ['n_station','n_date'])
# Sort the final dataframe based on AQ station and then time
aq_train2 = aq_train2.sort_values(by=['n_station', 'date'], ascending=[True,True])
aq_train2 = aq_train2.reset_index(drop=True)
# Drop all unncessary attributes
aq_train2.drop(['n_station', 'longitude', 'latitude', 'n_date'], axis=1, inplace=True)
# Create two attributes - month and hour
aq_train2['month'] = pd.DatetimeIndex(aq_train2['date']).month
aq_train2['hour'] = pd.DatetimeIndex(aq_train2['date']).hour
# Fill in missing values of attributes with their corresponding values in the nearest AQ station (within same time)
aq_train2['PM10'].fillna(aq_train2['n_PM10'], inplace=True)
aq_train2['PM2.5'].fillna(aq_train2['n_PM2.5'], inplace=True)
aq_train2['NO2'].fillna(aq_train2['n_NO2'], inplace=True)
aq_train2['CO'].fillna(aq_train2['n_CO'], inplace=True)
aq_train2['O3'].fillna(aq_train2['n_O3'], inplace=True)
aq_train2['SO2'].fillna(aq_train2['n_SO2'], inplace=True)
# Fill in any remaining missing value by the mean of the attribute within the same station, month and hour
aq_train2[['PM2.5', 'PM10', 'NO2', 'CO', 'O3', 'SO2']] = aq_train2.groupby(["station","month","hour"])[['PM2.5', 'PM10', 'NO2', 'CO', 'O3', 'SO2']].transform(lambda x: x.fillna(x.mean()))
# Create final AQ dataset after dropping all unnecessary attributes
aq_train_final = aq_train2.drop(['type','nearest_station','n_PM2.5','n_PM10','n_NO2','n_CO','n_O3','n_SO2'],axis=1)
# Delete unnecessary dataframes to save space
del(aq_train1)
del(aq_train2)
del(aq_train_copy)
del(aq_all_time)
print('Done!')
print('-'*50)
''' ------------------------------------------TRAIN: GRID DATASET PREPROCESSING------------------------------------------ '''
print('Preprocessing and cleaning the train Grid Weather Dataset!')
# Read all the grid weather train datasets
gw_2017 = pd.read_csv("gridWeather_201701-201803.csv")
gw_2018 = pd.read_csv("gridWeather_201804.csv")
# Renaming the headers of the GW data to match each other
gw_2017.rename(columns={'stationName': 'station_id', 'wind_speed/kph': 'wind_speed'}, inplace=True)
gw_2018.rename(columns={'station_id':'station_id', 'time':'utc_time'}, inplace=True)
# Merge all GW train datasets into a single dataframe
gw_train = gw_2017.append(gw_2018, ignore_index=True)
gw_train = gw_train.drop(columns=['id','weather'], axis=1)
# Delete unnecessary dataframes to save space
del(gw_2017)
del(gw_2018)
# Set the time column as the index of the dataframe
gw_train.set_index("utc_time", inplace = True)
# Get the entire span of the time in the GW dataframe
min_date = gw_train.index.min()
max_date = gw_train.index.max()
# Drop any duplicates present in the GW dataframe
gw_train.drop_duplicates(subset= None, keep= "first", inplace= True)
# Read the GW station location file and find nearest station for each GW station
gw_station = pd.read_csv("Beijing_grid_weather_station.csv", header=None, names=['station_id','latitude','longitude'])
gw_station["nearest_station"] = gw_station.apply(lambda row: near_gw_to_gw(row['latitude'], row['longitude']), axis=1)
# Create an empty dataframe with all hourly time stamps in the above found range
gw_time_hours = pd.DataFrame({"time": pd.date_range(min_date, max_date, freq='H')})
# Perform a cartesian product of all GW stations and the above dataframe
gw_all_time = pd.merge(gw_time_hours.assign(key=0), gw_station.assign(key=0), on='key').drop('key', axis=1)
gw_all_time['time'] = gw_all_time['time'].astype(str) # Make all time stamps in the same format
# Join the GW dataset with the dataframe containing all the timestamps for each GW station
gw_train1 = pd.merge(gw_train, gw_all_time, how='right', left_on=['station_id','utc_time'], right_on = ['station_id','time'])
gw_train1.drop_duplicates(subset= None, keep= "first", inplace= True)
# Create a copy of the above dataframe keeping all required columns
# This dataframe will be used to refer all data for the nearest GW station (same time interval)
gw_train_copy = gw_train1.copy()
gw_train_copy.drop(['nearest_station','longitude_x', 'latitude_y','latitude_x','longitude_y'], axis=1, inplace=True)
gw_train_copy.rename(columns={'humidity': 'n_humidity','pressure': 'n_pressure', "temperature":"n_temperature",\
"wind_direction":"n_wind_dir","wind_speed":"n_wind_speed",\
"time":"n_time", "station_id":"n_station_id" }, inplace=True)
# Merge original GW data and the copy GW data to get all attributes of a particular GW station and its nearest GW station
gw_train2 = pd.merge(gw_train1, gw_train_copy, how='left', left_on=['nearest_station','time'], right_on = ['n_station_id','n_time'])
# Sort the final dataframe based on GW station and then time
gw_train2 = gw_train2.sort_values(by=['station_id', 'time'], ascending=[True,True])
gw_train2 = gw_train2.reset_index(drop=True)
# Drop all unncessary attributes
gw_train2.drop(['n_station_id', 'n_time','longitude_x', 'latitude_y','latitude_x','longitude_y'], axis=1, inplace=True)
# Create two attributes - month and hour
gw_train2['month'] = pd.DatetimeIndex(gw_train2['time']).month
gw_train2['hour'] = pd.DatetimeIndex(gw_train2['time']).hour
# Fill in missing values of attributes with their corresponding values in the nearest GW station (within same time)
gw_train2['humidity'].fillna(gw_train2['n_humidity'], inplace=True)
gw_train2['pressure'].fillna(gw_train2['n_pressure'], inplace=True)
gw_train2['temperature'].fillna(gw_train2['n_temperature'], inplace=True)
gw_train2['wind_speed'].fillna(gw_train2['n_wind_speed'], inplace=True)
gw_train2['wind_direction'].fillna(gw_train2['n_wind_dir'], inplace=True)
# Fill in any remaining missing value by the mean of the attribute within the same station, month and hour
gw_train2[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']] = gw_train2.groupby(["station_id","month","hour"])[['humidity', 'pressure', 'temperature', 'wind_direction', 'wind_speed']].transform(lambda x: x.fillna(x.mean()))
# Create final GW dataset after dropping all unnecessary attributes
gw_train_final = gw_train2.drop(['nearest_station','n_humidity','n_pressure','n_temperature','n_wind_dir','n_wind_speed'],axis=1)
# Delete unnecessary dataframes to save space
del(gw_train1)
del(gw_train2)
del(gw_train_copy)
del(gw_all_time)
print('Done!')
print('-'*50)
''' -----------------------------------TRAIN: OBSERVED WEATHER DATASET PREPROCESSING------------------------------------ '''
print('Preprocessing and cleaning the train Observed Weather Dataset!')
# Read all the observed weather train datasets
obw_2017 = pd.read_csv("observedWeather_201701-201801.csv")
obw_2018 = pd.read_csv("observedWeather_201802-201803.csv")
obw_2018a = pd.read_csv("observedWeather_201804.csv")
obw_2018a.rename(columns={'time': 'utc_time'}, inplace=True)
# Read the time stamp in the April observed weather data in the same format as the other datasets
#obw_2018a['utc_time'] = pd.to_datetime(obw_2018a['utc_time'], format='%d-%m-%Y %H:%M:%S')
obw_2018a['utc_time'] = obw_2018a['utc_time'].astype(str)
# Merge all OBW train datasets into a single dataframe
obw_train = obw_2017.append(obw_2018, ignore_index=True)
obw_train = obw_train.append(obw_2018a, ignore_index=True)
obw_train.drop(['id','weather'],axis=1, inplace=True) # Drop unnecessary columns
# Delete unnecessary dataframes to save space
del(obw_2017)
del(obw_2018)
del(obw_2018a)
# Set the time column as the index of the dataframe
obw_train.set_index("utc_time", inplace = True)
# Get the entire span of the time in the OBW dataframe
min_date = obw_train.index.min()
max_date = obw_train.index.max()
# Drop any duplicates present in the OBW dataframe
obw_train.drop_duplicates(subset= None, keep= "first", inplace= True)
# Read the OBW station location file
obw_station = obw_train[["station_id","latitude","longitude"]]
obw_station = obw_station.drop_duplicates().dropna()
obw_station = obw_station.reset_index(drop=True)
# Find nearest station for each OBW station
obw_station["nearest_station"] = obw_station.apply(lambda row: near_obw_to_obw(row['latitude'], row['longitude']), axis=1)
# Create an empty dataframe with all hourly time stamps in the above found range
obw_time_hours = pd.DataFrame({"time": pd.date_range(min_date, max_date, freq='H')})
# Perform a cartesian product of all OBW stations and the above dataframe
obw_all_time = pd.merge(obw_time_hours.assign(key=0), obw_station.assign(key=0), on='key').drop('key', axis=1)
obw_all_time['time'] = obw_all_time['time'].astype(str) # Make all time stamps in the same format
# Join the OBW dataset with the dataframe containing all the timestamps for each OBW station
obw_train1 = pd.merge(obw_train, obw_all_time, how='right', left_on=['station_id','utc_time'], right_on = ['station_id','time'])
obw_train1.drop_duplicates(subset= None, keep= "first", inplace= True)
# Create a copy of the above dataframe keeping all required columns
# This dataframe will be used to reference all data for the nearest OBW station (same time interval)
#!/usr/bin/env python
import added_routines as AR, itertools, pickle, hashlib
from operator import itemgetter
###########################################################################
# This module creates a cell dictionary from added_routines and a SolutionObject
# which is an object based on the concept of taking each cell in the cell
# dictionary and creating an object related to a specific cell (focus_cell)
# and a number of cells in a play direction (right or down). A SolutionObject
# will be created for each of the possible 15 x 15 cells for 1 to 7 spaces in
# the 2 play directions. The concept is to populate the cell dictionary with
# the current board state and then evaluate each SolutionObject against it.
#
#
###########################################################################
xxx = AR.create_cell_dict(AR.word_multipliers, AR.letter_multipliers)
ifile = open('words_hashed.pkl','rb')
yyy = pickle.load(ifile)
ifile.close()
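# Illustrative note (an assumption about the pickle's contents, not verified here):
# words_hashed.pkl is expected to hold the md5 hexdigest of every legal word (as dict
# keys or set members), so the membership test in search_for_valid_solutions() below
# is a constant-time lookup, e.g.
#   hashlib.md5('cat').hexdigest() in yyy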
class SolutionObject(object):
'contains solution value for one focal cell with numbers of cells at play'
scores = []
available_empty_spaces_in_main = 0
current_far_edge = (-1,-1)
current_string_begin = (-1,-1)
main_string_begin = (-1,-1)
valid_and_initalized = False
strings_contact_points = []
current_number_free_cells = 0
open_cells_map_list = []
contains_valid_solution = False
solution_letters_list = []
scored_solution_letters_list = []
top_score = (-1,'zzz')
def __init__(self, cell_dict, word_dict, focus_cell, number_of_cells_in_play, direction, letters_in_play):
'SolutionObject initializer - takes game dict, location of focus cell, number of cells in play, and direction (right/down)'
if not (direction == 'down' or direction == 'right') :
raise ValueError('direction must be down or right')
self.scores = []
self.letter_value_dict = {'a':1, 'b':4, 'c':4, 'd':2, 'e':1, 'f':4, 'g':3, 'h':3, 'i':1, 'j':10, 'k':5, 'l':2, 'm':4, 'n':2, 'o':1, 'p':4, 'q':10, 'r':1, 's':1, 't':1, 'u':2, 'v':5, 'w':4, 'x':8, 'y':3, 'z':10}
self.strings_contact_points = []
self.cell_dict = cell_dict
self.word_dict = word_dict
self.focus_cell = focus_cell
self.number_of_cells_in_play = number_of_cells_in_play
self.direction = direction
self.letters_in_play = letters_in_play
self.discover_far_edge(cell_dict,focus_cell,direction)
self.discover_string_begin(cell_dict,focus_cell,self.get_opposite_direction(direction))
self.main_string_begin = self.current_string_begin
self.discover_available_empty_spaces_in_main(cell_dict,focus_cell,direction)
if self.available_empty_spaces_in_main < number_of_cells_in_play: #stop and leave valid_and_initalized as False
return
if cell_dict[focus_cell]['current_letter'] != '': #stop and leave valid_and_initalized as False
return
self.current_number_free_cells = self.number_of_cells_in_play
self.discover_main_string_contact_points(focus_cell)
if self.strings_contact_points == []: #no potential strings so stop and leave valid_and_initalized as False
return
self.valid_and_initalized = True
self.create_strings_template()
self.search_for_valid_solutions()
if self.contains_valid_solution:
self.score_solution()
self.top_score = self.get_top_score()
def get_opposite_direction(self,direction):
if not (direction == 'down' or direction == 'right') :
raise ValueError('direction must be down or right')
if direction == 'right':
return 'left'
else:
return 'up'
def get_perpendicular_back_direction(self,direction):
if not (direction == 'down' or direction == 'right') :
raise ValueError('direction must be down or right')
if direction == 'right':
return 'up'
else:
return 'left'
def get_perpendicular_forward_direction(self,direction):
if not (direction == 'down' or direction == 'right') :
raise ValueError('direction must be down or right')
if direction == 'right':
return 'down'
else:
return 'right'
def discover_string_begin(self,cell_dict,location,direction):
'find the beginning of the current string and record its location in current_string_begin'
if not (direction == 'up' or direction == 'left') :
raise ValueError('direction must be up or left')
if cell_dict[location]['adjacent_dict'][direction][0]:
if cell_dict[cell_dict[location]['adjacent_dict'][direction][1]]['current_letter'] != "":
self.discover_string_begin(cell_dict, cell_dict[location]['adjacent_dict'][direction][1], direction)
else:
self.current_string_begin = location
else:
self.current_string_begin = location
def get_top_score(self):
return sorted(self.scored_solution_letters_list, key=itemgetter(1), reverse=True)[0]
def discover_far_edge(self,cell_dict,location,direction):
'''assumes the dict structure is populated as expected; updates self.current_far_edge with the location of the far edge'''
if not (direction == 'down' or direction == 'right') :
raise ValueError('direction must be down or right')
current_far_edge = (-1,-1)
if cell_dict[location]['adjacent_dict'][direction][0]:
self.discover_far_edge(cell_dict, cell_dict[location]['adjacent_dict'][direction][1],direction)
else:
self.current_far_edge = location
def discover_available_empty_spaces_in_main(self,cell_dict,location,direction):
'used on object creation to determine if the object is even viable in the scored_solution set, since the number of cells in play must be equal to or less than the available empty spaces on the main string'
if not (direction == 'down' or direction == 'right') :
raise ValueError('direction must be down or right')
if cell_dict[location]['current_letter'] == '':
self.available_empty_spaces_in_main += 1
if cell_dict[location]['adjacent_dict'][direction][0]:
self.discover_available_empty_spaces_in_main(cell_dict, cell_dict[location]['adjacent_dict'][direction][1],direction)
def discover_main_string_contact_points(self,location):
'self.current_number_free_cells must be updated prior to call - finds empty cells on the main string that touch cells with characters set and adds them to strings_contact_points'
while self.current_number_free_cells > 0:
found_at_least_one = False
for x in ['up','down','right','left']:
if self.cell_dict[location]['adjacent_dict'][x][0]:
if self.cell_dict[self.cell_dict[location]['adjacent_dict'][x][1]]['current_letter'] != '':
found_at_least_one = True
if self.cell_dict[location]['current_letter'] == '':
self.current_number_free_cells = self.current_number_free_cells - 1
if found_at_least_one:
self.strings_contact_points.append(location)
if self.cell_dict[location]['adjacent_dict'][self.direction][0]:
location = self.cell_dict[location]['adjacent_dict'][self.direction][1]
else:
return
self.discover_main_string_contact_points(location)
def create_strings_template(self):
'object must be valid and initialized as internals are used'
if not self.valid_and_initalized:
raise ValueError('create_strings_template called on an object that is not valid and initialized')
self.current_string_list = [] # used to hold sequential locations of a string created by object and later added to strings_list as entry
self.strings_list = [] # contains the locations for all strings created by object as sequential entries
self.open_cells_map_list = [] # contains a mapping to dictionary of open cells in strings_list
self.current_number_free_cells = self.number_of_cells_in_play
current_location = self.current_string_begin #start main template object which contains all open variable spaces
while self.current_number_free_cells > 0:
self.current_string_list.append(current_location)
if self.cell_dict[current_location]['current_letter'] == '':
self.current_number_free_cells = self.current_number_free_cells - 1
self.open_cells_map_list.append(self.cell_dict[current_location])
current_location = self.cell_dict[current_location]['adjacent_dict'][self.direction][1] # no validity test needed as object valid and initalized
keep_going = True
if current_location == (-1,-1): # if we moved to end above and are queued one cell beyond grid
keep_going = False
while keep_going: # so far we have the main string from its beginning through the last empty space; next, check for a following string
if self.cell_dict[current_location]['current_letter'] != '':
self.current_string_list.append(current_location)
else:
keep_going = False
if self.cell_dict[current_location]['adjacent_dict'][self.direction][0]:
current_location = self.cell_dict[current_location]['adjacent_dict'][self.direction][1]
else:
keep_going = False
self.strings_list.append(self.current_string_list)
for x in self.strings_contact_points: # main string done now to perpendicular strings
self.current_string_list = [] # we captured the main string mapping; now reset and do the same with perpendicular side strings which, if they exist, will be off strings_contact_points
p_back_direction = self.get_perpendicular_back_direction(self.direction)
p_forward_direction = self.get_perpendicular_forward_direction(self.direction)
string_after = False
if self.cell_dict[x]['adjacent_dict'][p_back_direction][0]:
if self.cell_dict[self.cell_dict[x]['adjacent_dict'][p_back_direction][1]]['current_letter'] != '' : #we have a perpendicular string before our contact point
self.discover_string_begin(self.cell_dict,x,p_back_direction)
known_empty_spaces = 1 # we are on the perpendicular before our space and will move to, decrement, and stop that while loop
current_location = self.current_string_begin
self.current_string_list.append(current_location)
while known_empty_spaces > 0:
current_location = self.cell_dict[current_location]['adjacent_dict'][p_forward_direction][1] #no validity check as must exist here
self.current_string_list.append(current_location)
if self.cell_dict[current_location]['current_letter'] == '':
known_empty_spaces = known_empty_spaces - 1
keep_going = True # captured details before and through empty cell now check past same
while keep_going:
if self.cell_dict[current_location]['adjacent_dict'][p_forward_direction][0]:
current_location = self.cell_dict[current_location]['adjacent_dict'][p_forward_direction][1]
if self.cell_dict[current_location]['current_letter'] != '':
string_after = True
self.current_string_list.append(current_location)
else:
keep_going = False
else:
keep_going = False
if self.current_string_list != []:
self.strings_list.append(self.current_string_list) # added a string perpendicular to, before, and maybe after the cell
if not string_after: # i.e. we did not already capture this with a string before and after above
self.current_string_list = [] # reset
initial_pass = True
keep_going = True
current_location = x
while keep_going:
if self.cell_dict[current_location]['adjacent_dict'][p_forward_direction][0]:
current_location = self.cell_dict[current_location]['adjacent_dict'][p_forward_direction][1]
if self.cell_dict[current_location]['current_letter'] != '':
if initial_pass:
self.current_string_list.append(x)
initial_pass = False
self.current_string_list.append(current_location)
else:
keep_going = False
else:
keep_going = False
if self.current_string_list != []:
self.strings_list.append(self.current_string_list)
def search_for_valid_solutions(self):
'after validating strings_list contains entries check permutations of letters for solutions, toggle contains_valid_solution, and populate solution_letters_list'
self.contains_valid_solution = False
self.solution_letters_list = []
if self.strings_list == []:
return
for permuted_letters_iter in itertools.permutations(self.letters_in_play,self.number_of_cells_in_play):
targets_not_yet_passed = len(self.strings_list)
for x in range(len(self.open_cells_map_list)):
self.open_cells_map_list[x]['current_letter'] = permuted_letters_iter[x]
for word_maps in self.strings_list:
target_word = []
for test_char_loc in word_maps:
target_word.append(self.cell_dict[test_char_loc]['current_letter'])
if hashlib.md5("".join(target_word)).hexdigest() in self.word_dict:
targets_not_yet_passed = targets_not_yet_passed - 1
if targets_not_yet_passed == 0:
validated_word = "".join(permuted_letters_iter)
for count in range(len(self.open_cells_map_list)):
if self.open_cells_map_list[count]['letter_multiplier'] != 0 or self.open_cells_map_list[count]['word_multiplier'] != 0:
validated_word = list(validated_word)
validated_word[count] = validated_word[count].upper()
validated_word = "".join(validated_word)
count += 1
self.contains_valid_solution = True
self.solution_letters_list.append(validated_word)
self.solution_letters_list = list(set(self.solution_letters_list)) #removing identical copies
for x in range(len(self.open_cells_map_list)):
self.open_cells_map_list[x]['current_letter'] = ''
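# Illustrative note: letters in a stored solution string are upper-cased when they were
# placed on a multiplier cell, so a solution such as 'cAt' (example only) would mean the
# second placed letter landed on a cell with a letter or word multiplier.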
def score_solution(self):
if not self.contains_valid_solution:
return
empty_slots = [] #list for cells that were updated and thus valid for multiples of letter and word if present
for x in self.open_cells_map_list:
empty_slots.append(x['location'])
self.scored_solution_letters_list = []
self.scores = []
for sol_ltrs in self.solution_letters_list:
count = 0 #used to grab letters to place in open cells
for x in self.open_cells_map_list:
x['current_letter'] = sol_ltrs[count]
count += 1
total_score = 0 #we have dict ready to score for this letter set
for x in self.strings_list:
current_score = 0
word_multiple = 1
for y in x: #individual cells in solution list
letter_multiple = 1
if y in empty_slots: #thus multiple factor counts
if self.cell_dict[y]['word_multiplier'] != 0:
word_multiple = word_multiple * self.cell_dict[y]['word_multiplier']
if self.cell_dict[y]['letter_multiplier'] != 0:
| |
of the segment, which is done outside of this class).
#
# This function may reduce start_index and/or increase end_index by
# including a single adjacent 'tainted' line from the ctm-edits file. This
# is only done if the lines at the boundaries of the segment are currently
# real non-silence words and not non-scored words. The idea is that we
# probably don't want to start or end the segment right at the boundary of a
# real word, we want to add some kind of padding.
def PossiblyAddTaintedLines(self):
global non_scored_words
split_lines_of_utt = self.split_lines_of_utt
# we're iterating over the segment (start, end)
for b in [False, True]:
if b:
boundary_index = self.end_index - 1
adjacent_index = self.end_index
else:
boundary_index = self.start_index
adjacent_index = self.start_index - 1
if adjacent_index >= 0 and adjacent_index < len(split_lines_of_utt):
# only consider merging the adjacent word into the segment if we're not
# at a segment boundary.
adjacent_line_is_tainted = IsTainted(split_lines_of_utt[adjacent_index])
# if the adjacent line wasn't tainted, then there must have been
# another stronger reason why we didn't include it in the core
# of the segment (probably that it was an ins, del or sub), so
# there is no point considering it.
if adjacent_line_is_tainted:
boundary_edit_type = split_lines_of_utt[boundary_index][7]
boundary_ref_word = split_lines_of_utt[boundary_index][6]
# we only add the tainted line to the segment if the word at
# the boundary was a non-silence word that was correctly
# decoded and not fixed [see modify_ctm_edits.py.]
if (
boundary_edit_type == "cor"
and not boundary_ref_word in non_scored_words
):
# Add the adjacent tainted line to the segment.
if b:
self.end_index += 1
else:
self.start_index -= 1
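# Illustrative note on the ctm-edits field layout assumed by the indexing above and
# below (the utterance-id/channel/confidence positions are an assumption; the start,
# duration, reference-word and edit-type positions follow from how this class uses
# them): a split line looks roughly like
#   ['utt-1', '1', '7.30', '0.25', 'hello', '1.0', 'hello', 'cor']
# so field [2] is the start time, [3] the duration, [6] the reference word and
# [7] the edit type ('cor', 'sub', 'ins', 'del', 'sil', ...).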
# This is stage 2 of segment processing.
# This function will split a segment into multiple pieces if any of the
# internal [non-boundary] silences or non-scored words are longer
# than the allowed values --max-internal-silence-length and
# --max-internal-non-scored-length. This function returns a
# list of segments. In the normal case (where there is no splitting)
# it just returns an array with a single element 'self'.
def PossiblySplitSegment(self):
global non_scored_words, args
# make sure the segment hasn't been processed more than we expect.
assert (
self.start_unk_padding == 0.0
and self.end_unk_padding == 0.0
and self.start_keep_proportion == 1.0
and self.end_keep_proportion == 1.0
)
segments = [] # the answer
cur_start_index = self.start_index
cur_start_is_split = False
# only consider splitting at non-boundary lines. [we'd just truncate
# the boundary lines.]
for index_to_split_at in range(cur_start_index + 1, self.end_index - 1):
this_split_line = self.split_lines_of_utt[index_to_split_at]
this_duration = float(this_split_line[3])
this_edit_type = this_split_line[7]
this_ref_word = this_split_line[6]
if (
this_edit_type == "sil"
and this_duration > args.max_internal_silence_length
) or (
this_ref_word in non_scored_words
and this_duration > args.max_internal_non_scored_length
):
# We split this segment at this index, dividing the word in two
# [later on, in PossiblyTruncateBoundaries, it may be further
# truncated.]
# Note: we use 'index_to_split_at + 1' because the Segment constructor
# takes an 'end-index' which is interpreted as one past the end.
new_segment = Segment(
self.split_lines_of_utt,
cur_start_index,
index_to_split_at + 1,
self.debug_str,
)
if cur_start_is_split:
new_segment.start_keep_proportion = 0.5
new_segment.end_keep_proportion = 0.5
cur_start_is_split = True
cur_start_index = index_to_split_at
segments.append(new_segment)
if len(segments) == 0: # We did not split.
segments.append(self)
else:
# We did split. Add the very last segment.
new_segment = Segment(
self.split_lines_of_utt, cur_start_index, self.end_index, self.debug_str
)
assert cur_start_is_split
new_segment.start_keep_proportion = 0.5
segments.append(new_segment)
return segments
# This is stage 3 of segment processing. It will truncate the silences and
# non-scored words at the segment boundaries if they are longer than the
# --max-edge-silence-length and --max-edge-non-scored-length respectively
# (and to the extent that this wouldn't take us below the
# --min-segment-length or --min-new-segment-length).
def PossiblyTruncateBoundaries(self):
for b in [True, False]:
if b:
this_index = self.start_index
else:
this_index = self.end_index - 1
this_split_line = self.split_lines_of_utt[this_index]
truncated_duration = None
this_duration = float(this_split_line[3])
this_edit = this_split_line[7]
this_ref_word = this_split_line[6]
if this_edit == "sil" and this_duration > args.max_edge_silence_length:
truncated_duration = args.max_edge_silence_length
elif (
this_ref_word in non_scored_words
and this_duration > args.max_edge_non_scored_length
):
truncated_duration = args.max_edge_non_scored_length
if truncated_duration != None:
keep_proportion = truncated_duration / this_duration
if b:
self.start_keep_proportion = keep_proportion
else:
self.end_keep_proportion = keep_proportion
# This relaxes the segment-boundary truncation of
# PossiblyTruncateBoundaries(), if it would take us below
# min-new-segment-length or min-segment-length. Note: this does not relax
# the boundary truncation for a particular boundary (start or end) if that
# boundary corresponds to a 'tainted' line of the ctm (because it's
# dangerous to include too much 'tainted' audio).
def RelaxBoundaryTruncation(self):
# this should be called before adding unk padding.
assert self.start_unk_padding == self.end_unk_padding == 0.0
if self.start_keep_proportion == self.end_keep_proportion == 1.0:
return # nothing to do there was no truncation.
length_cutoff = max(args.min_new_segment_length, args.min_segment_length)
length_with_truncation = self.Length()
if length_with_truncation >= length_cutoff:
return # Nothing to do.
orig_start_keep_proportion = self.start_keep_proportion
orig_end_keep_proportion = self.end_keep_proportion
if not IsTainted(self.split_lines_of_utt[self.start_index]):
self.start_keep_proportion = 1.0
if not IsTainted(self.split_lines_of_utt[self.end_index - 1]):
self.end_keep_proportion = 1.0
length_with_relaxed_boundaries = self.Length()
if length_with_relaxed_boundaries <= length_cutoff:
# Completely undo the truncation [to the extent allowed by the
# presence of tainted lines at the start/end] if, even without
# truncation, we'd be below the length cutoff. This segment may be
# removed later on (but it may not, if removing truncation makes us
# identical to the input utterance, and the length is between
# min_segment_length and min_new_segment_length).
return
# Next, compute an interpolation constant a such that the
# {start,end}_keep_proportion values will equal a *
# [values-computed-by-PossiblyTruncateBoundaries()] + (1-a) * [completely-relaxed-values].
# we're solving the equation:
# length_cutoff = a * length_with_truncation + (1-a) * length_with_relaxed_boundaries
# -> length_cutoff - length_with_relaxed_boundaries =
# a * (length_with_truncation - length_with_relaxed_boundaries)
# -> a = (length_cutoff - length_with_relaxed_boundaries) / (length_with_truncation - length_with_relaxed_boundaries)
a = (length_cutoff - length_with_relaxed_boundaries) / (
length_with_truncation - length_with_relaxed_boundaries
)
if a < 0.0 or a > 1.0:
print(
"segment_ctm_edits.py: bad 'a' value = {0}".format(a), file=sys.stderr
)
return
self.start_keep_proportion = (
a * orig_start_keep_proportion + (1 - a) * self.start_keep_proportion
)
self.end_keep_proportion = (
a * orig_end_keep_proportion + (1 - a) * self.end_keep_proportion
)
if not abs(self.Length() - length_cutoff) < 0.01:
print(
"segment_ctm_edits.py: possible problem relaxing boundary "
"truncation, length is {0} vs {1}".format(self.Length(), length_cutoff),
file=sys.stderr,
)
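# Worked numeric check of the interpolation above (illustrative values only):
# with length_cutoff = 2.0, length_with_truncation = 1.5 and
# length_with_relaxed_boundaries = 3.0 we get a = (2.0 - 3.0) / (1.5 - 3.0) = 2/3,
# and the resulting length is 2/3 * 1.5 + 1/3 * 3.0 = 2.0 = length_cutoff, as required.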
# This is stage 4 of segment processing.
# This function may set start_unk_padding and end_unk_padding to nonzero
# values. This is done if the current boundary words are real, scored
# words and we're not next to the beginning or end of the utterance.
def PossiblyAddUnkPadding(self):
for b in [True, False]:
if b:
this_index = self.start_index
else:
this_index = self.end_index - 1
this_split_line = self.split_lines_of_utt[this_index]
this_start_time = float(this_split_line[2])
this_ref_word = this_split_line[6]
this_edit = this_split_line[7]
if this_edit == "cor" and not this_ref_word in non_scored_words:
# we can consider adding unk-padding.
if b: # start of utterance.
unk_padding = args.unk_padding
if unk_padding > this_start_time: # close to beginning of file
unk_padding = this_start_time
# If we could add less than half of the specified
# unk-padding, don't add any (because when we add
# unk-padding we add the unknown-word symbol '<unk>', and if
# there isn't enough space to traverse the HMM we don't want
# to do it at all.
if unk_padding < 0.5 * args.unk_padding:
unk_padding = 0.0
self.start_unk_padding = unk_padding
else: # end of utterance.
this_end_time = this_start_time + float(this_split_line[3])
last_line = self.split_lines_of_utt[-1]
utterance_end_time = float(last_line[2]) + float(last_line[3])
max_allowable_padding = utterance_end_time - this_end_time
assert max_allowable_padding > -0.01
unk_padding = args.unk_padding
if unk_padding > max_allowable_padding:
unk_padding = max_allowable_padding
# If we could add less than half of the specified
# unk-padding, don't add any (because when we add
# unk-padding we add the unknown-word symbol '<unk>', and if
# there isn't enough space to traverse the HMM we don't want
# to do it at all.
if unk_padding < 0.5 * args.unk_padding:
unk_padding = 0.0
self.end_unk_padding = unk_padding
# This function will merge the segment in | |
while attempting to work with dir={} stdout=\"{}\" stderr=\"{}\"".format(args.gitWorkingDir, output, stderr))
shutil.rmtree(args.gitWorkingDir)
os.makedirs(args.gitWorkingDir)
logger.debug("Attempting clone again from url={}".format(args.gitRepoURL))
if args.gitFirstConnection:
# This is a once-off - make it a switch?!
(output, stderr, res) = utility.run_os_process("ssh -n -o \"BatchMode yes\" -o StrictHostKeyChecking=no " + args.gitRepoURL[:args.gitRepoURL.find(":")], logger)
if res == False:
logger.warn("Unexpected failure while attempting to trust the remote git repo. stdout=\"%s\", stderr=\"%s\"" % (output, stderr))
(output, stderr, res) = utility.run_os_process("cd %s; git clone %s" % (args.gitWorkingDir, args.gitRepoURL), logger, timeout=120)
if res == False:
logger.warn("git clone failed for some reason...on url=%s stdout=\"%s\", stderr=\"%s\"" % (args.gitRepoURL, output, stderr))
else:
logger.debug("git command result is res=%s" % (res))
logger.info("Success, git is working as expected")
else:
if not os.path.isdir(args.gitWorkingDir):
os.makedirs(args.gitWorkingDir)
(output, stderr, res) = utility.run_os_process("cd %s; git clone %s" % (args.gitWorkingDir, args.gitRepoURL), logger, timeout=120)
if res == False:
logger.warn("git clone failed for some reason...on url %s, output is '%s', stderr is '%s'" % (args.gitRepoURL, output, stderr))
git_path = args.gitWorkingDir + args.gitRoot
# Always start from master and the current version
(output, stderr, res) = utility.run_os_process("cd %s; git checkout master; git pull" % (git_path), logger)
if res == False:
logger.warn("git checkout master or git pull failed, stdout=\"%s\" stderr=\"%s\"" % (output, stderr))
# TODO below is copy and paste, should be a method/function
logger.warn("git error occurred while attempting to work with dir={} stdout=\"{}\" stderr=\"{}\"".format(args.gitWorkingDir, output, stderr))
shutil.rmtree(args.gitWorkingDir)
os.makedirs(args.gitWorkingDir)
logger.debug("git attempting clone again from url={}".format(args.gitRepoURL))
if args.gitFirstConnection:
# This is a once-off - make it a switch?!
(output, stderr, res) = utility.run_os_process("ssh -n -o \"BatchMode yes\" -o StrictHostKeyChecking=no " + args.gitRepoURL[:args.gitRepoURL.find(":")], logger)
if res == False:
logger.warn("git unexpected failure while attempting to trust the remote git repo url=%s, stdout=\"%s\", stderr=\"%s\"" % (args.gitRepoURL, output, stderr))
(output, stderr, res) = utility.run_os_process("cd %s; git clone %s" % (args.gitWorkingDir, args.gitRepoURL), logger, timeout=120)
if res == False:
logger.warn("git clone failed for some reason...on url=%s stdout=\"%s\", stderr=\"%s\"" % (args.gitRepoURL, output, stderr))
# At this point we've written out the potential updates
indextuning_indextempoutput.output_index_files_into_temp_dir(conf_files_requiring_changes, index_list, git_path, indexes_requiring_changes, replace_slashes=False)
(output, stderr, res) = utility.run_os_process("cd %s; git status | grep \"nothing to commit\"" % (git_path), logger)
if res == False:
# We have one or more files to commit, do something
# Then we git checkout -b indextuning_20181220_1120
todays_date = datetime.datetime.now().strftime("%Y-%m-%d_%H%M")
(output, stderr, res) = utility.run_os_process("cd {0}; git checkout -b {1}_{2} 2>&1; git commit -am \"Updated by index auto-tuning algorithm on {2}\" 2>&1; git push origin {1}_{2} 2>&1".format(git_path, args.gitBranch, todays_date), logger)
if res == False:
logger.warn("git failure while creating new branch and pushing to remote git repo stdout=\"%s\" stderr=\"%s\"" % (output, stderr))
else:
logger.info("Changes commited into git and pushed without warnings, stdout=\"%s\", stderr=\"%s\"" % (output, stderr))
if args.gitLabToken and args.gitLabURL:
res = requests.post(args.gitLabURL,
headers = { 'Private-Token': args.gitLabToken },
data={ 'target_branch' : 'master',
'source_branch' : args.gitBranch + "_" + todays_date,
'title' : 'Automated merge request from index tuning script on ' + todays_date },
verify=False)
if res.status_code != requests.codes.ok and res.status_code != 201:
logger.error("git url=%s statuscode=%s reason=%s response=\"%s\"" % (args.gitLabURL, res.status_code, res.reason, res.text))
else:
logger.debug("gitlab res=\"%s\"" % (res.text))
elif args.gitLabToken:
logger.warn("gitLabToken supplied but the gitLabURL has not been provided, will not create a merge request")
else:
logger.info("No changes to be checked into git")
# If we asked for sizing estimates only, and we're not running the dead index check only option
if args.sizingEstimates:
total_index_allocation = 0
total_estimated_index_allocation = 0
#Ugly hack until I find a better way to do this
total_growth_per_day_mb = [0 for i in range(0,365)]
for index in list(index_list.keys()):
max_total_data_size_mb = index_list[index].max_total_data_size_mb
total_index_allocation = total_index_allocation + max_total_data_size_mb
logger.debug("index=%s max_total_data_size_mb=%s" % (index, max_total_data_size_mb))
if hasattr(index_list[index], "estimated_total_data_size"):
estimated_total_data_size = index_list[index].estimated_total_data_size
logger.info("index=%s estimated_total_data_size=%s, perc_of_current_disk_utilised=%s, days_until_full_compared_to_frozen=%s, days_until_full_disk_calculation=%s, "\
" current_max_on_disk=%s, estimated_total_data_size_with_contingency=%s, perc_utilised_on_estimate=%s, days_until_full_disk_calculation_on_estimate=%s"
% (index, estimated_total_data_size, index_list[index].perc_utilised, index_list[index].days_until_full,
index_list[index].days_until_full_disk_calculation, index_list[index].splunk_max_disk_usage_mb,
index_list[index].estimated_total_data_size_with_contingency, index_list[index].perc_utilised_on_estimate,
index_list[index].days_until_full_disk_calculation_on_estimate))
# If the index is not yet full it will likely consume further disk space on the indexing tier...
if index_list[index].days_until_full > 0:
total_growth_per_day_calc = estimated_total_data_size / ((index_list[index].frozen_time_period_in_secs)/60/60/24)
if index_list[index].days_until_full > 365:
days_until_full = 365
else:
days_until_full = index_list[index].days_until_full
for entry in range(days_until_full):
total_growth_per_day_mb[entry] = total_growth_per_day_mb[entry] + total_growth_per_day_calc
total_estimated_index_allocation = total_estimated_index_allocation + estimated_total_data_size
for index in list(indexes_not_getting_sized.keys()):
max_total_data_size_mb = indexes_not_getting_sized[index].max_total_data_size_mb
total_index_allocation = total_index_allocation + max_total_data_size_mb
logger.debug("index=%s max_total_data_size_mb=%s (indexes_not_getting_sized)" % (index, max_total_data_size_mb))
if hasattr(indexes_not_getting_sized[index], "estimated_total_data_size"):
estimated_total_data_size = indexes_not_getting_sized[index].estimated_total_data_size
logger.info("index=%s estimated_total_data_size=%s (indexes_not_getting_sized), perc_of_current_disk_utilised=%s, days_until_full_compared_to_frozen=%s, " \
" days_until_full_disk_calculation=%s, current_max_on_disk=%s, estimated_total_data_size_with_contingency=%s, perc_utilised_on_estimate=%s, " \
" days_until_full_disk_calculation_on_estimate=%s"
% (index, estimated_total_data_size, indexes_not_getting_sized[index].perc_utilised, indexes_not_getting_sized[index].days_until_full,
indexes_not_getting_sized[index].days_until_full_disk_calculation, indexes_not_getting_sized[index].splunk_max_disk_usage_mb,
indexes_not_getting_sized[index].estimated_total_data_size_with_contingency, indexes_not_getting_sized[index].perc_utilised_on_estimate,
indexes_not_getting_sized[index].days_until_full_disk_calculation_on_estimate))
# If the index is not yet full it will likely consume further disk space on the indexing tier...
if indexes_not_getting_sized[index].days_until_full > 0:
total_growth_per_day_calc = estimated_total_data_size / ((indexes_not_getting_sized[index].frozen_time_period_in_secs)/60/60/24)
if indexes_not_getting_sized[index].days_until_full > 365:
days_until_full = 365
else:
days_until_full = indexes_not_getting_sized[index].days_until_full
for entry in range(days_until_full):
total_growth_per_day_mb[entry] = total_growth_per_day_mb[entry] + total_growth_per_day_calc
total_estimated_index_allocation = total_estimated_index_allocation + estimated_total_data_size
total_vol_size = 0
total_in_use_currently = 0
for vol in list(vol_list.keys()):
if hasattr(vol_list[vol], "max_vol_data_size_mb"):
vol_size = vol_list[vol].max_vol_data_size_mb
# Determine current disk utilisation for this volume
stat = os.statvfs(vol_list[vol].path)
used_in_mb = ((stat.f_blocks-stat.f_bfree)*stat.f_bsize)/1024/1024
if vol != "_splunk_summaries":
total_vol_size = total_vol_size + vol_size
total_in_use_currently = total_in_use_currently + used_in_mb
logger.info("volume=%s max_vol_data_size_mb=%s used_in_mb=%s" % (vol, vol_size, used_in_mb))
else:
logger.info("volume=%s, has no maxVolumedata_size_mb setting" % (vol))
logger.info("Summary: total_index_allocated=%s total_volume_allocated=%s (excluding _splunk_summaries)" % (total_index_allocation, total_vol_size))
total_available = total_vol_size - total_in_use_currently
logger.debug("total_available=%s, total_vol_size=%s, total_in_use_currently=%s" % (total_available, total_vol_size, total_in_use_currently))
day_counter = 0
while total_available > 0 and day_counter < 365:
total_available = total_available - total_growth_per_day_mb[day_counter]
day_counter = day_counter + 1
if day_counter >= 365:
logger.info("Based on a combined available volume size of %s with %s in use currently, leaving %s available, I am calculating we will not run out of disk in the next 365 days"
% (total_vol_size, total_in_use_currently, total_available))
else:
logger.info("Based on a combined available volume size of %s with %s in use currently, leaving %s available, I am calculating %s days before we run out of disk"
% (total_vol_size, total_in_use_currently, total_available, day_counter))
if total_estimated_index_allocation > 0:
if args.indexLimit < len(index_list):
logger.warn("Estimated size cannot be accurate as we have looked at index_limit=%s of total_indexes=%s" % (args.indexLimit, len(index_list)))
logger.info("estimated_index_allocation=%s" % (total_estimated_index_allocation))
if index_dir_check_res:
hot_dirs_checked = list(index_dir_check_res["hot_dirs_checked"].keys())
cold_dirs_checked = list(index_dir_check_res["cold_dirs_checked"].keys())
summaries_dirs_checked = list(index_dir_check_res["summaries_dirs_checked"].keys())
dead_hot_dirs = index_dir_check_res["hotDirsDead"]
dead_cold_dirs = index_dir_check_res["coldDirsDead"]
dead_summary_dirs = index_dir_check_res["summariesDirsDead"]
#Note that duplicate values can be returned if the same path is used for hot/cold or summaries, for example /opt/splunk/var/lib/splunk
#may be duplicated...
#output the remaining data around "dead" indexes which have directories on the filesystem but no matching config
logger.info("The following directories were checked to ensure that they are still in use by Splunk indexes in the indexes hot path=\"%s\"" % (hot_dirs_checked))
if len(list(dead_hot_dirs.keys())) > 0:
logger.info("The below list were located in the above directories but no mention in the btool output, these should likely be removed from the filesystem:")
for line in list(dead_hot_dirs.keys()):
for entry in dead_hot_dirs[line]:
#We escaped spaces for the shell, but we do not want spaces escaped for python
line = line.replace('\\ ',' ')
thedir = entry + "/" + line
if args.deadIndexDelete and not line == "\\$_index_name":
if os.path.isdir(thedir):
logger.info("Wiping directory %s" % (thedir))
shutil.rmtree(thedir)
else:
logger.warn("dir=%s does not exist, no deletion required" % (thedir))
else:
logger.info(thedir)
else:
logger.info("No dead hot dirs found")
#output the remaining data around "dead" indexes which have directories on the filesystem but no matching config
logger.info("The following directories were checked to ensure that they are still in use by Splunk indexes in the indexes cold path=\"%s\"" % (cold_dirs_checked))
if len(list(dead_cold_dirs.keys())) > 0:
logger.info("The below list were located in the above directories but no mention in the btool output, these should likely be removed from the filesystem:")
for line in list(dead_cold_dirs.keys()):
for entry in dead_cold_dirs[line]:
#We escaped spaces for the shell, but we do not want spaces escaped for python
line = line.replace('\\ ',' ')
thedir = entry + "/" + line
if args.deadIndexDelete and not line == "\\$_index_name":
if os.path.isdir(thedir):
logger.info("dir=%s deleted due to not exisitng and deadIndexDelete flag enabled" % (thedir))
shutil.rmtree(thedir)
else:
logger.warn("dir=%s does not exist, no deletion required" % (thedir))
else:
logger.info(thedir)
else:
logger.info("No dead cold dirs found")
#output the | |
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
%(verbose)s
See Also
--------
mne.time_frequency.psd_multitaper
"""
def __init__(self, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,
adaptive=False, low_bias=True, n_jobs=1,
normalization='length', verbose=None): # noqa: D102
self.sfreq = sfreq
self.fmin = fmin
self.fmax = fmax
self.bandwidth = bandwidth
self.adaptive = adaptive
self.low_bias = low_bias
self.n_jobs = n_jobs
self.verbose = verbose
self.normalization = normalization
def fit(self, epochs_data, y):
"""Compute power spectrum density (PSD) using a multi-taper method.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : array, shape (n_epochs,)
The label for each epoch
Returns
-------
self : instance of PSDEstimator
returns the modified instance
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
return self
def transform(self, epochs_data):
"""Compute power spectrum density (PSD) using a multi-taper method.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data
Returns
-------
psd : array, shape (n_signals, n_freqs) or (n_freqs,)
The computed PSD.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
psd, _ = psd_array_multitaper(
epochs_data, sfreq=self.sfreq, fmin=self.fmin, fmax=self.fmax,
bandwidth=self.bandwidth, adaptive=self.adaptive,
low_bias=self.low_bias, normalization=self.normalization,
n_jobs=self.n_jobs)
return psd
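if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module; the sampling rate,
    # data shape and frequency band below are assumptions chosen only to demonstrate
    # the fit/transform API on synthetic data.
    import numpy as np
    _rng = np.random.RandomState(0)
    _epochs = _rng.randn(3, 2, 400)  # (n_epochs, n_channels, n_times)
    _psd = PSDEstimator(sfreq=200., fmin=1., fmax=40.).fit(_epochs, None).transform(_epochs)
    print(_psd.shape)  # one PSD per epoch and channel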
@fill_doc
class FilterEstimator(TransformerMixin):
"""Estimator to filter RtEpochs.
Applies a zero-phase low-pass, high-pass, band-pass, or band-stop
filter to the channels selected by "picks".
l_freq and h_freq are the frequencies below which and above which,
respectively, to filter out of the data. Thus the uses are:
- l_freq < h_freq: band-pass filter
- l_freq > h_freq: band-stop filter
- l_freq is not None, h_freq is None: low-pass filter
- l_freq is None, h_freq is not None: high-pass filter
If n_jobs > 1, more memory is required as "len(picks) * n_times"
additional time points need to be temporarily stored in memory.
Parameters
----------
info : instance of Info
Measurement info.
l_freq : float | None
Low cut-off frequency in Hz. If None the data are only low-passed.
h_freq : float | None
High cut-off frequency in Hz. If None the data are only
high-passed.
%(picks_good_data)s
filter_length : str (Default: '10s') | int | None
Length of the filter to use. If None or "len(x) < filter_length",
the filter length used is len(x). Otherwise, if int, overlap-add
filtering with a filter of the specified length in samples) is
used (faster for long signals). If str, a human-readable time in
units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted
to the shortest power-of-two length at least that duration.
l_trans_bandwidth : float
Width of the transition band at the low cut-off frequency in Hz.
h_trans_bandwidth : float
Width of the transition band at the high cut-off frequency in Hz.
n_jobs : int | str
Number of jobs to run in parallel.
Can be 'cuda' if ``cupy`` is installed properly and method='fir'.
method : str
'fir' will use overlap-add FIR filtering, 'iir' will use IIR
forward-backward filtering (via filtfilt).
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
fir_design : str
Can be "firwin" (default in 0.16) to use
:func:`scipy.signal.firwin`, or "firwin2" (default in 0.15 and
before) to use :func:`scipy.signal.firwin2`. "firwin" uses a
time-domain design technique that generally gives improved
attenuation using fewer samples than "firwin2".
.. versionadded:: 0.15
%(verbose)s
See Also
--------
TemporalFilter
Notes
-----
This is primarily meant for use in conjunction with
:class:`mne.realtime.RtEpochs`. In general it is not recommended in a
normal processing pipeline as it may result in edge artifacts. Use with
caution.
"""
def __init__(self, info, l_freq, h_freq, picks=None, filter_length='auto',
l_trans_bandwidth='auto', h_trans_bandwidth='auto', n_jobs=1,
method='fir', iir_params=None, fir_design='firwin',
verbose=None): # noqa: D102
self.info = info
self.l_freq = l_freq
self.h_freq = h_freq
self.picks = _picks_to_idx(info, picks)
self.filter_length = filter_length
self.l_trans_bandwidth = l_trans_bandwidth
self.h_trans_bandwidth = h_trans_bandwidth
self.n_jobs = n_jobs
self.method = method
self.iir_params = iir_params
self.fir_design = fir_design
def fit(self, epochs_data, y):
"""Filter data.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : array, shape (n_epochs,)
The label for each epoch.
Returns
-------
self : instance of FilterEstimator
Returns the modified instance
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
if self.picks is None:
self.picks = pick_types(self.info, meg=True, eeg=True,
ref_meg=False, exclude=[])
if self.l_freq == 0:
self.l_freq = None
if self.h_freq is not None and self.h_freq > (self.info['sfreq'] / 2.):
self.h_freq = None
if self.l_freq is not None and not isinstance(self.l_freq, float):
self.l_freq = float(self.l_freq)
if self.h_freq is not None and not isinstance(self.h_freq, float):
self.h_freq = float(self.h_freq)
if self.info['lowpass'] is None or (self.h_freq is not None and
(self.l_freq is None or
self.l_freq < self.h_freq) and
self.h_freq <
self.info['lowpass']):
self.info['lowpass'] = self.h_freq
if self.info['highpass'] is None or (self.l_freq is not None and
(self.h_freq is None or
self.l_freq < self.h_freq) and
self.l_freq >
self.info['highpass']):
self.info['highpass'] = self.l_freq
return self
def transform(self, epochs_data):
"""Filter data.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The data after filtering
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
epochs_data = np.atleast_3d(epochs_data)
return filter_data(
epochs_data, self.info['sfreq'], self.l_freq, self.h_freq,
self.picks, self.filter_length, self.l_trans_bandwidth,
self.h_trans_bandwidth, method=self.method,
iir_params=self.iir_params, n_jobs=self.n_jobs, copy=False,
fir_design=self.fir_design, verbose=False)
class UnsupervisedSpatialFilter(TransformerMixin, BaseEstimator):
"""Use unsupervised spatial filtering across time and samples.
Parameters
----------
estimator : instance of sklearn.base.BaseEstimator
Estimator using some decomposition algorithm.
average : bool, default False
If True, the estimator is fitted on the average across samples
(e.g. epochs).
"""
def __init__(self, estimator, average=False): # noqa: D102
# XXX: Use _check_estimator #3381
for attr in ('fit', 'transform', 'fit_transform'):
if not hasattr(estimator, attr):
raise ValueError('estimator must be a scikit-learn '
'transformer, missing %s method' % attr)
if not isinstance(average, bool):
raise ValueError("average parameter must be of bool type, got "
"%s instead" % type(bool))
self.estimator = estimator
self.average = average
def fit(self, X, y=None):
"""Fit the spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The data to be filtered.
y : None | array, shape (n_samples,)
Used for scikit-learn compatibility.
Returns
-------
self : instance of UnsupervisedSpatialFilter
Return the modified instance.
"""
if self.average:
X = np.mean(X, axis=0).T
else:
n_epochs, n_channels, n_times = X.shape
# trial as time samples
X = np.transpose(X, (1, 0, 2)).reshape((n_channels, n_epochs *
n_times)).T
self.estimator.fit(X)
return self
def fit_transform(self, X, y=None):
"""Transform the data to its filtered components after fitting.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The data to be filtered.
y : None | array, shape (n_samples,)
Used for scikit-learn compatibility.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The transformed data.
"""
return self.fit(X).transform(X)
def transform(self, X):
"""Transform the data to its spatial filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The data to be filtered.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The transformed data.
"""
return self._apply_method(X, 'transform')
def inverse_transform(self, X):
"""Inverse transform the data to its original space.
Parameters
----------
X : array, shape (n_epochs, n_components, n_times)
The data to be inverted.
Returns
-------
X : array, shape (n_epochs, n_channels, n_times)
The transformed data.
"""
return self._apply_method(X, 'inverse_transform')
def _apply_method(self, X, method):
"""Vectorize time samples as trials, apply method and reshape back.
Parameters
----------
X : array, shape (n_epochs, n_dims, n_times)
The data to be inverted.
Returns
-------
X : array, shape (n_epochs, n_dims, n_times)
The transformed data.
"""
n_epochs, n_channels, n_times = X.shape
# trial as time samples
X = np.transpose(X, [1, 0, 2])
X = np.reshape(X, [n_channels, n_epochs * n_times]).T
# apply method
method = getattr(self.estimator, method)
X = method(X)
# put it back to n_epochs, n_dimensions
X = np.reshape(X.T, [-1, n_epochs, n_times]).transpose([1, 0, 2])
return X
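if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module; PCA and the data
    # shape below are assumptions used only to demonstrate the wrapper.
    import numpy as np
    from sklearn.decomposition import PCA
    _rng = np.random.RandomState(0)
    _X = _rng.randn(4, 8, 50)  # (n_epochs, n_channels, n_times)
    _usf = UnsupervisedSpatialFilter(PCA(n_components=3), average=False)
    _X_red = _usf.fit_transform(_X)  # -> shape (4, 3, 50)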
@fill_doc
class TemporalFilter(TransformerMixin):
"""Estimator to filter data array along the last dimension.
Applies a zero-phase low-pass, high-pass, band-pass, or band-stop
filter | |
network.offset:
o_row = logit_mean(O[i,:])
if np.isfinite(o_row):
theta[B + 1 + i] -= o_row
for j in range(N-1):
theta[B + 1 + (M-1) + j] += \
logit((c[j] + 1.0) / (M + 2.0))
if network.offset:
o_col = logit_mean(O[:,j])
if np.isfinite(o_col):
theta[B + 1 + (M-1) + j] -= o_col
alpha_out = network.row_covariates['alpha_out']
alpha_in = network.col_covariates['alpha_in']
def obj(theta):
if np.any(np.isnan(theta)):
print 'Warning: computing objective for nan-containing vector.'
return np.Inf
alpha_out[0:M-1] = theta[(B + 1):(B + 1 + (M-1))]
alpha_in[0:N-1] = theta[(B + 1 + (M-1)):(B + 1 + (M-1) + (N-1))]
for b, b_n in enumerate(self.beta):
self.beta[b_n] = theta[b]
self.kappa = theta[B]
nll = self.nll(network)
self.fit_info['nll_evals'] += 1
return nll
def grad(theta):
if np.any(np.isnan(theta)):
print 'Warning: computing gradient for nan-containing vector.'
return np.zeros(B + 1 + (M-1) + (N-1))
alpha_out[0:M-1] = theta[(B + 1):(B + 1 + (M-1))]
alpha_in[0:N-1] = theta[(B + 1 + (M-1)):(B + 1 + (M-1) + (N-1))]
for b, b_n in enumerate(self.beta):
self.beta[b_n] = theta[b]
self.kappa = theta[B]
ET = np.empty(B + 1 + (M-1) + (N-1))
P = self.edge_probabilities(network)
Er = P.sum(1)[0:(M-1)]
Ec = P.sum(0)[0:(N-1)]
ET[(B + 1):(B + 1 + (M-1))] = Er
ET[(B + 1 + (M-1)):(B + 1 + (M-1) + (N-1))] = Ec
if fix_beta:
ET[0:B] = 0.0
else:
for b, b_n in enumerate(self.beta):
ET[b] = (P * network.edge_covariates[b_n].matrix()).sum()
ET[B] = P.sum()
g = ET - T
if fix_beta:
g[0:B] = 0.0
self.fit_info['grad_nll_evals'] += 1
self.fit_info['grad_nll_final'][:] = g
if verbose:
abs_grad = np.abs(g)
print '|ET - T|: %.2f, %.2f, %.2f (min, mean, max)' % \
(np.min(abs_grad), np.mean(abs_grad), np.max(abs_grad))
return g
bounds = [(-8,8)] * B + [(-15,15)] + [(-8,8)] * ((M-1) + (N-1))
theta_opt = opt.fmin_l_bfgs_b(obj, theta, grad, bounds = bounds)[0]
if (np.any(theta_opt == [b[0] for b in bounds]) or
np.any(theta_opt == [b[1] for b in bounds])):
print 'Warning: some constraints active in model fitting.'
if verbose:
for b, b_n in enumerate(self.beta):
if theta_opt[b] in (bounds[b][0], bounds[b][1]):
print '%s: %.2f (T = %.2f)' % (b_n, theta_opt[b], T[b])
if theta_opt[B] in (bounds[B][0], bounds[B][1]):
print 'kappa: %.2f (T = %.2f)' % (theta_opt[B], T[B])
for i in range(M-1):
r_i = B + 1 + i
if theta_opt[r_i] in (bounds[r_i][0], bounds[r_i][1]):
print 'alpha_%d: %.2f (T = %.2f)' % \
(i, theta_opt[r_i], T[r_i])
for j in range(N-1):
c_j = B + 1 + (M-1) + j
if theta_opt[c_j] in (bounds[c_j][0], bounds[c_j][1]):
print 'beta_%d: %.2f (T = %.2f)' % \
(j, theta_opt[c_j], T[c_j])
alpha_out[0:M-1] = theta_opt[(B + 1):(B + 1 + (M-1))]
alpha_in[0:N-1] = theta_opt[(B + 1 + (M-1)):(B + 1 + (M-1) + (N-1))]
alpha_out_mean = np.mean(alpha_out[:])
alpha_in_mean = np.mean(alpha_in[:])
alpha_out[:] -= alpha_out_mean
alpha_in[:] -= alpha_in_mean
for b, b_n in enumerate(self.beta):
self.beta[b_n] = theta_opt[b]
self.kappa = theta_opt[B] + alpha_out_mean + alpha_in_mean
self.fit_info['wall_time'] = time() - start_time
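# Illustrative note on the packed parameter vector used above (the layout follows
# directly from the slicing in obj()/grad()):
#   theta = [beta_1..beta_B, kappa, alpha_out_1..alpha_out_{M-1}, alpha_in_1..alpha_in_{N-1}]
# For example, with B = 2, M = 4, N = 3 there are 2 + 1 + 3 + 2 = 8 entries;
# theta[3:6] holds the row effects and theta[6:8] the column effects.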
def fit_conditional(self, network, **kwargs):
StationaryLogistic.fit_conditional(self, network, **kwargs)
start_time = time()
self.fit_convex_opt(network, fix_beta = True)
self.fit_info['wall_time'] += time() - start_time
def fit_irls(self, network, verbose = False, perturb = 1e-4):
M = network.M
N = network.N
B = len(self.beta)
P = B + 1 + (M-1) + (N-1)
start_time = time()
alpha_zero(network)
alpha_out = network.row_covariates['alpha_out']
alpha_in = network.col_covariates['alpha_in']
# Construct response and design matrices
y = np.asarray(network.as_dense(), dtype='float64')
y = y.reshape((M*N,1))
X = np.zeros((M*N,P))
for b, b_n in enumerate(self.beta):
X[:,b] = network.edge_covariates[b_n].matrix().reshape((M*N,))
X[:,B] = 1.0
for r in range(M-1):
X_row = np.zeros((M,N))
X_row[r,:] = 1.0
X[:,B + 1 + r] = X_row.reshape((M*N,))
for c in range(N-1):
X_col = np.zeros((M,N))
X_col[:,c] = 1.0
X[:,B + 1 + (M-1) + c] = X_col.reshape((M*N,))
theta = np.zeros((P,1))
def fitted_p(theta):
theta_vec = np.reshape(theta, (P,))
alpha_out[0:M-1] = theta_vec[(B + 1):(B + 1 + (M-1))]
alpha_in[0:N-1] = theta_vec[(B + 1 + (M-1)):P]
for b, b_n in enumerate(self.beta):
self.beta[b_n] = theta_vec[b]
self.kappa = theta_vec[B]
return self.edge_probabilities(network).reshape((M*N,1))
for iter in range(10):
p = fitted_p(theta)
X_tilde = X * p
del p
for j in range(P):
X_tilde[:,j] += np.random.uniform(-perturb, perturb, M*N)
X_t = np.transpose(X)
X_t_X_tilde = np.dot(X_t, X_tilde)
del X_tilde
hat = solve(X_t_X_tilde, X_t, overwrite_a = True)
p = fitted_p(theta)
theta += np.dot(hat, (y - p))
theta_vec = np.reshape(theta, (P,))
alpha_out[0:M-1] = theta_vec[(B + 1):(B + 1 + (M-1))]
alpha_in[0:N-1] = theta_vec[(B + 1 + (M-1)):P]
alpha_out_mean = np.mean(alpha_out[:])
alpha_in_mean = np.mean(alpha_in[:])
alpha_out[:] -= alpha_out_mean
alpha_in[:] -= alpha_in_mean
for b, b_n in enumerate(self.beta):
self.beta[b_n] = theta_vec[b]
self.kappa = theta_vec[B] + alpha_out_mean + alpha_in_mean
self.fit_info['wall_time'] = time() - start_time
def fit_logistic(self, network):
import statsmodels.api as sm
M = network.M
N = network.N
B = len(self.beta)
# Set up outcome and design matrix for fit
y = network.as_dense().reshape((M*N,))
Phi = np.zeros((M*N,B + 1 + (M-1) + (N-1)))
for b, b_n in enumerate(self.beta):
Phi[:,b] = network.edge_covariates[b_n].matrix().reshape((M*N,))
Phi[:,B] = 1.0
for r in range(M-1):
phi_row = np.zeros((M,N))
phi_row[r,:] = 1.0
Phi[:,B + 1 + r] = phi_row.reshape((M*N,))
for c in range(N-1):
phi_col = np.zeros((M,N))
phi_col[:,c] = 1.0
Phi[:,B + 1 + (M-1) + c] = phi_col.reshape((M*N,))
# Do fit, defaulting to beta = 0 in case of problems
try:
if network.offset:
offset = network.offset.matrix().reshape((M*N,))
fit = sm.GLM(y, Phi, sm.families.Binomial(), offset).fit()
else:
fit = sm.Logit(y, Phi).fit()
coefs = fit.params
except:
print 'Warning: logistic fit failed.'
coefs = np.zeros(B + 1 + (M-1) + (N-1))
alpha_zero(network)
alpha_out = network.row_covariates['alpha_out']
alpha_in = network.col_covariates['alpha_in']
alpha_out[0:M-1] = coefs[(B + 1):(B + 1 + (M-1))]
alpha_in[0:N-1] = coefs[(B + 1 + (M-1)):(B + 1 + (M-1) + (N-1))]
alpha_out_mean = np.mean(alpha_out[:])
alpha_in_mean = np.mean(alpha_in[:])
alpha_out[:] -= alpha_out_mean
alpha_in[:] -= alpha_in_mean
for b, b_n in enumerate(self.beta):
self.beta[b_n] = coefs[b]
self.kappa = coefs[B] + alpha_out_mean + alpha_in_mean
# The network is needed for its covariates and degree
# heterogeneity terms, not for the observed pattern of edges, etc.
#
# Typically, the inverse Fisher information matrix will be more
# useful (it gives a lower bound on the variances/covariances of
# an unbiased estimator), so that is calculated by default.
def fisher_information(self, network, inverse = True):
M = network.M
N = network.N
B = len(self.beta)
P = self.edge_probabilities(network)
x = np.empty((B,M,N))
for i, b in enumerate(self.beta):
x[i] = network.edge_covariates[b].matrix()
P_bar = P * (1.0 - P)
I = np.zeros(((M-1) + (N-1) + 1 + B, (M-1) + (N-1) + 1 + B))
for i in range(M-1):
for j in range(N-1):
v = P_bar[i,j]
I[i,(M-1)+j] = v
I[(M-1)+j,i] = v
for i in range(M-1):
v = (P_bar[i,:]).sum()
I[i,i] = v
I[(M-1) + (N-1),i] = v
I[i,(M-1) + (N-1)] = v
for b in range(B):
v = (x[b,i,:] * P_bar[i,:]).sum()
I[(M-1) + (N-1) + 1 + b,i] = v
I[i,(M-1) + (N-1) + 1 + b] = v
for j in range(N-1):
v = (P_bar[:,j]).sum()
I[(M-1) + j,(M-1) + j] = v
I[(M-1) + (N-1),(M-1) + j] = v
I[(M-1) + j,(M-1) + (N-1)] = v
for b in range(B):
v = (x[b,:,j] * P_bar[:,j]).sum()
I[(M-1) + (N-1) + 1 + b,(M-1) + j] = v
I[(M-1) + j,(M-1) + (N-1) + 1 + b] = v
I[(M-1) + (N-1),(M-1) + (N-1)] = P_bar.sum()
for b in range(B):
v = (x[b] * P_bar).sum()
I[(M-1) + (N-1) + 1 + b,(M-1) + (N-1)] = v
I[(M-1) + (N-1),(M-1) + (N-1) + 1 + b] = v
for b_1 in range(B):
for b_2 in range(B):
v = (x[b_1] * x[b_2] * P_bar).sum()
I[(M-1) + (N-1) + 1 + b_1,(M-1) + (N-1) + 1 + b_2] = v
I[(M-1) + (N-1) + 1 + b_2,(M-1) + (N-1) + 1 + b_1] = v
names_alpha = ['alpha_{%s}' % n for n in network.rnames[0:(M-1)]]
names_beta = ['beta_{%s}' % n for n in network.cnames[0:(N-1)]]
names_theta = ['theta_{%s}' % b for b in self.beta]
I_names = np.array(names_alpha + names_beta + ['kappa'] + names_theta)
self.I = {}
for i in range((M-1) + (N-1) + 1 + B):
for j in range((M-1) + (N-1) + 1 + B):
if i == j:
self.I[I_names[i]] = I[i,i]
else:
self.I[(I_names[i],I_names[j])] = I[i,j]
if inverse:
self._invert_fisher_information(I, I_names)
# | |
import functools
import urlparse
import urllib
import json
import endpoint
import protocol
from response import Response, ResponseException
from temporal.validate import check_time_param
def make_series_url(key):
"""For internal use. Given a series key, generate a valid URL to the series
endpoint for that key.
:param string key: the series key
:rtype: string"""
url = urlparse.urljoin(endpoint.SERIES_ENDPOINT, 'key/')
url = urlparse.urljoin(url, urllib.quote(key))
return url
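# Illustrative example (the concrete value of endpoint.SERIES_ENDPOINT is an assumption
# here): make_series_url('temperature.1') would yield something like
# '<SERIES_ENDPOINT>key/temperature.1', with unsafe characters in the key
# percent-encoded by urllib.quote.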
class with_response_type(object):
"""For internal use. Decorator for ensuring the Response object returned by
the :class:`Client` object has a data attribute that corresponds to the
object type expected from the TempoDB API. This class should not be
used by user code.
The "t" argument should be a string corresponding to the name of a class
from the :mod:`tempodb.protocol.objects` module, or a single element list
with the element being the name of a class from that module if the API
endpoint will return a list of those objects.
:param t: the type of object to cast the TempoDB response to
:type t: list or string"""
def __init__(self, t):
self.t = t
def __call__(self, f, *args, **kwargs):
@functools.wraps(f)
def wrapper(*args, **kwargs):
resp = f(*args, **kwargs)
#dont try this at home kids
session = args[0].session
resp_obj = Response(resp, session)
if resp_obj.status == 200:
resp_obj._cast_payload(self.t)
else:
raise ResponseException(resp_obj)
return resp_obj
return wrapper
class with_cursor(object):
"""For internal use. Decorator class for automatically transforming a
response into a Cursor of the required type.
:param class cursor_type: the cursor class to use
:param class data_type: the data type that cursor should generate"""
def __init__(self, cursor_type, data_type):
self.cursor_type = cursor_type
self.data_type = data_type
def __call__(self, f, *args, **kwargs):
@functools.wraps(f)
def wrapper(*args, **kwargs):
resp = f(*args, **kwargs)
session = args[0].session
resp_obj = Response(resp, session)
if resp_obj.status == 200:
data = json.loads(resp_obj.body)
if self.cursor_type in [protocol.SeriesCursor,
protocol.SingleValueCursor]:
return self.cursor_type(data, self.data_type, resp_obj)
else:
return self.cursor_type(data, self.data_type, resp_obj,
kwargs.get('tz'))
raise ResponseException(resp_obj)
return wrapper
class Client(object):
"""Entry point class into the TempoDB API. The client should be
initialized with your API key and secret obtained from your TempoDB
login.
The methods are grouped as follows:
SERIES
* :meth:`create_series`
* :meth:`delete_series`
* :meth:`get_series`
* :meth:`list_series`
* :meth:`update_series`
READING DATA
* :meth:`read_data`
* :meth:`find_data`
* :meth:`aggregate_data`
* :meth:`read_multi`
* :meth:`read_multi_rollups`
* :meth:`get_summary`
WRITING DATA
* :meth:`write_data`
* :meth:`write_multi`
DELETING
* :meth:`delete`
SINGLE VALUE
* :meth:`single_value`
* :meth:`multi_series_single_value`
:param string database_id: 32-character identifier for your database
:param string key: your API key, currently the same as database_id
:param string secret: your API secret"""
def __init__(self, database_id, key, secret, base_url=endpoint.BASE_URL):
self.database_id = database_id
self.session = endpoint.HTTPEndpoint(database_id, key, secret,
base_url)
#SERIES METHODS
@with_response_type('Nothing')
def create_series(self, key=None, tags=[], attrs={}):
"""Create a new series with an optional string key. A list of tags
and a map of attributes can also be optionally supplied.
:param string key: (optional) a string key for the series
:param list tags: (optional) the tags to create the series with
:param dict attrs: (optional) the attributes to the create the series
with
:rtype: :class:`tempodb.response.Response` object"""
body = protocol.make_series_key(key, tags, attrs)
resp = self.session.post(endpoint.SERIES_ENDPOINT, body)
return resp
@with_response_type('Nothing')
def delete_series(self, keys=None, tags=None, attrs=None,
allow_truncation=False):
"""Delete a series according to the given criteria.
**Note:** for the key argument, the filter will return the *union* of
those values. For the tag and attr arguments, the filter will return
the *intersection* of those values.
:param keys: filter by one or more series keys
:type keys: list or string
:param tags: filter by one or more tags
:type tags: list or string
:param dict attrs: filter by one or more key-value attributes
:param bool allow_truncation: whether to allow full deletion of a
database. Default is False.
:rtype: :class:`tempodb.response.Response` object"""
params = {
'key': keys,
'tag': tags,
'attr': attrs,
'allow_truncation': str(allow_truncation).lower()
}
url_args = endpoint.make_url_args(params)
url = '?'.join([endpoint.SERIES_ENDPOINT, url_args])
resp = self.session.delete(url)
return resp
@with_response_type('Series')
def get_series(self, key):
"""Get a series object from TempoDB given its key.
:param string key: a string name for the series
:rtype: :class:`tempodb.response.Response` with a
:class:`tempodb.protocol.objects.Series` data payload"""
url = make_series_url(key)
resp = self.session.get(url)
return resp
@with_cursor(protocol.SeriesCursor, protocol.Series)
def list_series(self, keys=None, tags=None, attrs=None,
limit=1000):
"""Get a list of all series matching the given criteria.
**Note:** for the key argument, the filter will return the *union* of
those values. For the tag and attr arguments, the filter will return
the *intersection* of those values.
:param keys: filter by one or more series keys
:type keys: list or string
:param tags: filter by one or more tags
:type tags: list or string
:param dict attrs: filter by one or more key-value attributes
:rtype: :class:`tempodb.protocol.cursor.SeriesCursor` with an
iterator over :class:`tempodb.protocol.objects.Series`
objects"""
params = {
'key': keys,
'tag': tags,
'attr': attrs,
'limit': limit
}
url_args = endpoint.make_url_args(params)
url = '?'.join([endpoint.SERIES_ENDPOINT, url_args])
resp = self.session.get(url)
return resp
@with_response_type('Series')
def update_series(self, series):
"""Update a series with new attributes. This does not change
any of the data written to this series. The recommended workflow for
series updates is to pull a Series object down using the
:meth:`get_series` method, change its attributes, then pass it into
this method.
:param series: the series to update
:type series: `tempodb.protocol.Series` object
:rtype: :class:`tempodb.response.Response` object with the updated
:class:`tempodb.protocol.objects.Series` as the data payload"""
url = make_series_url(series.key)
resp = self.session.put(url, series.to_json())
return resp
#DATA READING METHODS
@with_cursor(protocol.DataPointCursor, protocol.DataPoint)
def read_data(self, key, start=None, end=None, rollup=None,
period=None, interpolationf=None, interpolation_period=None,
tz=None, limit=1000):
"""Read data from a series given its ID or key. Start and end times
must be supplied. They can either be ISO8601 encoded strings (i.e.
2012-01-08T00:21:54.000+0000) or Python Datetime objects, which will
be converted for you.
The rollup parameter is optional and can include string values such
as "sum" and "avg". Below is a list of valid rollup functions:
* count
* sum
* mult
* min
* max
* stddev
* ss
* range
* percentile,N (where N is what percentile to calculate)
This will apply a rollup function to your raw dataset. The
optional period parameter will downsample your data according to the
given resolution ("1min", "2day", etc).
The optional interpolation parameters can be used to resample your
data to a regular interval interpolation_period according to an
interpolation function interpolationf. Valid values for
interpolation_period are the same as for the period parameter, and
valid values for interpolationf include "zoh" and "linear".
Finally, the optional tz parameter can be used to specify a time zone
for your output. Please see
`here <https://tempo-db.com/docs/api/timezone/>`_ for a list of a
valid timezone values.
:param string key: the series key to use
:param start: the start time for the data points
:type start: string or Datetime
:param end: the end time for the data points
:type end: string or Datetime
:param string rollup: (optional) the name of a rollup function to use
:param string period: (optional) downsampling rate for the data
:param string interpolationf: (optional) an interpolation function
to run over the series
:param string interpolation_period: (optional) the period to
interpolate data into
:param string tz: (optional) the timezone to place the data into
:rtype: :class:`tempodb.protocol.cursor.DataPointCursor` with an
iterator over :class:`tempodb.protocol.objects.DataPoint`
objects"""
url = make_series_url(key)
url = urlparse.urljoin(url + '/', 'segment')
vstart = check_time_param(start)
vend = check_time_param(end)
params = {
'start': vstart,
'end': vend,
'rollup.fold': rollup,
'rollup.period': period,
'interpolation.function': interpolationf,
'interpolation.period': interpolation_period,
'tz': tz,
'limit': limit
}
url_args = endpoint.make_url_args(params)
url = '?'.join([url, url_args])
resp = self.session.get(url)
return resp
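# A minimal usage sketch, not from the original source; the database id, key,
# secret and series key below are placeholders.
#
#     client = Client('database-id', 'api-key', 'api-secret')
#     cursor = client.read_data('temperature-1',
#                               start='2012-01-08T00:00:00.000+0000',
#                               end='2012-01-09T00:00:00.000+0000',
#                               rollup='avg', period='1hour', tz='UTC')
#     for point in cursor:
#         print(point)    # each item is a protocol.DataPoint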
@with_response_type('SeriesSummary')
def get_summary(self, key, start, end, tz=None):
"""Get a summary for the series from *start* to *end*. The summary is
a map containing keys *count*, *min*, *max*, *mean*, *sum*, and
*stddev*.
:param string key: the series key to use
:param start: the start time for the data points
:type start: string or Datetime
:param end: the end time for the data points
:type end: string or Datetime
:param string tz: (optional) the timezone to place the data into
:rtype: :class:`tempodb.response.Response` with a
:class:`tempodb.protocol.objects.SeriesSummary` data payload"""
url = make_series_url(key)
url = urlparse.urljoin(url + '/', 'summary')
vstart = check_time_param(start)
vend = check_time_param(end)
params = {
'start': vstart,
'end': vend,
'tz':
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 1 22:28 2016
@author: scott
[
20B14:
This module basically implements my own version of JSON, which I did not
know about when I wrote it. It is in ToDo.txt to change uses of Object_Files
to uses of JSON.
Because:
Simple is better than complex.
There should be one-- and preferably only one --obvious way to do it.
Special cases aren't special enough to break the rules.
If the implementation is hard to explain, it's a bad idea.
]
This module provides badass functions for coordinating between complex
objects and easily readable files. The compromise for so awesome a toolbox
is that the tools themselves aren't easily readable. Good luck!
"""
from __future__ import print_function, division
import os, re
import datetime
float_match = r"\s[-]?\d+[\.]?\d*(e[-]?\d+)?\s" # matches floats like -3.57e4
def group_lines(lines, indent="\t", removecomments=True):
"""
Groups indentation blocks into list elements. The line before the
indentation block is included.
"""
if removecomments:
lines = remove_comments(lines)
nest = 0 # to keep track of how deep we are in the indentation block
grouped_lines = []
whitespace = re.compile(r"\s+")
for (i, line) in enumerate(lines):
# print(line)
if len(re.sub(whitespace, "", line)) == 0:
# print('... skipped!')
# fix 17C23 to protect against empty lines
continue
line = line[:-1] # to get rid of the '\n'
while line[0:nest] != indent * nest:
nest -= 1
group = eval("grouped_lines" + "[-1]" * nest) # actually works!
if line[0 : (nest + 1)] == indent * (nest + 1):
nest += 1
group[-1] = [group[-1], line[nest:]]
elif len(line) > nest: # to drop empty lines
group += [line[nest:]]
return grouped_lines
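# A small, hypothetical example (not part of the original module) showing how an
# indentation block is folded into a nested list by group_lines.
def _group_lines_example():
    """Sketch only: returns [['settings', 'mode: fast', 'retries\\t=\\t3']]."""
    lines = [
        "settings\n",
        "\tmode: fast\n",
        "\tretries\t=\t3\n",
    ]
    return group_lines(lines)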
def remove_comments(lines):
new_lines = []
for line in lines:
if "#" in line:
line = re.search(r"^.*#", line).group()[:-1]
if re.search(r"\w", line): # to drop lines that only have comments
new_lines += [line]
else:
new_lines += [line] # I don't want to get rid of empty lines here
return new_lines
def structure_to_lines(
structure, nest=0, indent="\t", toplevel=False, preamble=None, title_key=None
):
"""
Formats a dictionary or list, which can have nested dictionaries or lists,
into a set of properly indented lines to write to file.
"""
lines = []
intro = ""
if preamble is not None:
intro += preamble
if type(structure) is dict:
if title_key in structure.keys(): # changed 'intro' to 'title_key' 16L14
intro += indent + "-" + indent + structure[title_key]
if not toplevel:
if len(intro) == 0:
intro += "<Dictionary>"
lines += [nest * indent + intro + "\n"]
if not toplevel:
nest += 1
for (key, value) in structure.items():
if key == title_key:
continue
lines += structure_to_lines(
value, nest, indent, preamble=key, title_key="title"
)
elif type(structure) is list:
if not toplevel:
if len(intro) == 0:
intro += "<List>"
lines += [nest * indent + intro + ":\n"]
if not toplevel:
nest += 1
for value in structure:
if type(value) is tuple and len(value) == 2:
lines += structure_to_lines(value[1], nest, indent, preamble=value[0])
# added 16L14 to enable writing of lists of (key, value)
else:
lines += structure_to_lines(value, nest, indent)
elif type(structure) is str:
if len(intro) > 0:
intro += ": "
lines += [nest * indent + intro + structure + "\n"]
else:
if len(intro) > 0:
intro += indent + "=" + indent
lines += [nest * indent + intro + str(structure) + "\n"]
return lines
def dictionary_to_lines(dictionary, indent="\t"):
return structure_to_lines(dictionary, indent=indent, toplevel=True)
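# A small, hypothetical example (not part of the original module): a flat
# dictionary becomes one line per key, using ': ' for string values and
# '\t=\t' for values that go through eval() on the way back in.
def _dictionary_to_lines_example():
    """Sketch only: returns ['mode: fast\\n', 'retries\\t=\\t3\\n'] (up to key order)."""
    return dictionary_to_lines({"mode": "fast", "retries": 3})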
def grouped_lines_to_structure(lines, indent="\t"):
"""
The exact inverse of write_lines, but works on grouped lines!
# as of 16L14, '\n' is removed by group_lines and not here.
"""
if type(lines) is str:
line = lines.strip()
if ":" in line: # then we've got a key and string value separated by a ': '
key = re.search(r"^.+:", line).group()[:-1] # don't want the ':'
try:
value = re.search(r":.+$", line).group()[2:] # don't want the ': '
# note: use of '$' means '\n' isn't in group()!
except AttributeError:
value = None
structure = (key, value)
elif (
"=" in line
): # then we've got a key and numerical value separated by a '\t=\t'
key = re.search(r"^.+=", line).group()[:-2] # don't want the '\t='
try:
value = re.search(r"=.+$", line).group()[2:] # don't want the '=\t'
except AttributeError:
value = None
try:
value = eval(value)
except (SyntaxError, NameError):
print("wasn" "t able to evaluate '" + value + "'")
structure = (key, value)
else: # then we've got just a string
structure = line
elif type(lines) is list:
title_line = lines[0]
if ":" in title_line: # then we want to make it into a list
key = re.search(r"^.+:", title_line).group()[:-1]
value = []
for line in lines[1:]:
value += [grouped_lines_to_structure(line)]
else: # then we want to make it into a dictionary
value = {}
if (indent + "-" + indent) in title_line:
key = re.search(r"^.+" + indent + "-", title_line).group()[:-2]
# don't want the '\t-'
title = re.search(r"-" + indent + ".+$", title_line).group()[2:]
# don't want the '-\t'
value["title"] = title
else:
key = title_line
for line in lines[1:]:
item = grouped_lines_to_structure(line)
try:
value[item[0]] = item[1]
except IndexError:
print("missing something. line = " + str(line))
if key == "<list>:" or key == "<dictionary>":
structure = value
else:
structure = (key, value)
return structure
def lines_to_structure(lines, indent="\t", removecomments=True):
"""
Have to group lines separately to avoid messing up the recursion.
This function includes both steps.
"""
# print('lines:\n ' + str(lines))
grouped_lines = group_lines(lines, indent, removecomments=removecomments)
# this is necessary for it to treat the file as a single structure
# print('grouped lines:\n ' + str(grouped_lines))
return grouped_lines_to_structure(grouped_lines, indent)
def lines_to_dictionary(lines, indent="\t", removecomments=True):
lines = ["<dictionary>\n"] + lines
structure = lines_to_structure(lines, removecomments=removecomments)
dictionary = (
structure #'<dictionary>' line is now ignored in grouped_lines_to_structure
)
# this is necessary for it to treat the file as a single structure
# print('grouped lines:\n ' + str(grouped_lines))
return dictionary
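# A small, hypothetical example (not part of the original module): writing a
# dictionary out and reading it back should reproduce the original dictionary.
def _round_trip_example():
    original = {"mode": "fast", "retries": 3}
    lines = dictionary_to_lines(original)
    recovered = lines_to_dictionary(lines)
    assert recovered == original
    return recovered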
def lines_to_attributes(lines, obj, verbose=1, indent="\t"):
if verbose:
print("function 'lines_to_attributes' at your service!")
lines = ["<dictionary>\n"] + lines
# gets lines_to_structure to treat it as one big dictionary
attributes = lines_to_structure(lines, indent)[1]
for (key, value) in attributes.items():
setattr(obj, key, value)
if verbose:
print("function 'lines_to_attributes' finished!")
# return obj #shouldn't be necessary
def file_to_attributes(f, obj, verbose=1, indent="\t"):
lines = f.readlines()
return lines_to_attributes(lines, obj, verbose, indent)
def attributes_to_file(f, obj, verbose=1, indent="\t"):
if verbose:
print("function 'attributes_to_file' at your service!")
attributes = obj.__dict__.copy()
for unwanted_key in ["file_lines", "attr_status", "__str__"]:
if unwanted_key in attributes.keys():
del attributes[unwanted_key] # so I don't write the whole file in itself
lines = structure_to_lines(attributes, indent=indent)
lines = [
line[1:] for line in lines[1:]
] # dropping '<dictionary>\n' and an indentation
for line in lines:
f.write(line)
if verbose:
print("function 'attributes_to_file' finished!")
# return f #shouldn't be necessary
def advanced_update(
dict1, dict2, newstuff=True, oldstuff=False, newkeys=[], oldkeys=[], mask=None
):
"""
updates dict1 with dict2, but with options about which keys to add/update.
Default values give a normal update.
"""
keys2 = list(
dict2.keys()
) # so that I don't have a dictionary changed size during iteration error
if not newstuff:
# then don't add new keys
for key in keys2:
if key not in dict1.keys() and key not in newkeys:
dict2.pop(key, None)
if oldstuff or len(oldkeys) > 0:
# then don't replace values of (evt. select) existing keys
for key in keys2:
if (oldstuff and key in dict1.keys()) or key in oldkeys:
dict2.pop(key, None)
if mask is not None:
# then mask is a function evaluating to True if
# a key shouldn't be added or updated.
for key in keys2:
if mask(key):
dict2.pop(key)
# print(type(dict2))
dict1.update(dict2)
return dict1
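# A small, hypothetical example (not part of the original module): with
# newstuff=False, keys that are new to dict1 are dropped unless whitelisted
# via newkeys.
def _advanced_update_example():
    dict1 = {"a": 1, "b": 2}
    dict2 = {"b": 20, "c": 30, "d": 40}
    result = advanced_update(dict1, dict2, newstuff=False, newkeys=["d"])
    # 'b' is updated, 'c' is dropped (new and not whitelisted), 'd' is added.
    assert result == {"a": 1, "b": 20, "d": 40}
    return result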
def update_lines(lines, dictionary, **kwargs):
"""
Does exactly what you'd think.
"""
dict1 = lines_to_dictionary(lines)
newdict = advanced_update(dict1, dictionary, **kwargs)
newlines = dictionary_to_lines(newdict)
return newlines
def date_scott(date="today"):
"""
Returns the date, default is today's, as Scott writes it.
"""
if date == "today":
a = datetime.date.today()
year = a.year
month = a.month
day = a.day
elif type(date) is str:
if len(date) == 6: # 6-digit-integer dates format
year = date[0:2]
month = date[2:4]
day = date[4:6]
else: # if you insist
return str(date)
else:
return
import struct
import zlib
from zlib import crc32
from collections import deque
from itertools import cycle
from enum import IntEnum
class Chunk():
@classmethod
def from_buffer(cls, buf):
"""read a single chunk from the head of the buffer"""
new = cls()
length = struct.unpack(">I", buf[:4])[0]
new.type = buf[4:8]
new.body = buf[8:8+length]
new.crc = struct.unpack(">I", buf[8+length:8+length+4])[0]
new.valid = new.crc == crc32(new.type+new.body)
return new
@classmethod
def create(cls, ctype, body):
new = cls()
new.type = ctype
new.body = body
new.crc = crc32(new.type+new.body)
new.valid = True
return new
def total(self):
"""return length including type-, length- and checksum fields"""
return len(self.body)+12
def __repr__(self):
return "Chunk[{}]:{}".format(str(self.type), self.total())
def to_bytes(self, recalculate_crc=True):
"""turn Chunk back into bytes
length and checksum are calculated appropriately"""
buf = struct.pack(">I", len(self.body))
buf += self.type
buf += self.body
if recalculate_crc:
buf += struct.pack(">I", crc32(self.type+self.body))
else:
buf += struct.pack(">I", self.crc)
return buf
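# A small, hypothetical example (not part of the original module): a chunk
# survives a serialise/parse round trip, with the CRC recomputed on the way out.
def _chunk_round_trip_example():
    original = Chunk.create(b'tEXt', b'Comment\x00hello')
    parsed = Chunk.from_buffer(original.to_bytes())
    assert parsed.valid and parsed.type == b'tEXt' and parsed.body == original.body
    return parsed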
def create_comment(text, tag='Comment', compressed = False):
body = b''
body += tag.encode('ascii') + b'\x00'
if compressed:
    ctype = b'zTXt'
    # zTXt stores a compression-method byte (0) followed by the deflated text
    body += b'\x00' + zlib.compress(text.encode('ascii'), 9)
else:
    ctype = b'tEXt'
    # tEXt stores the text uncompressed
    body += text.encode('ascii')
return Chunk.create(ctype, body)
IEND = Chunk.create(b'IEND', b'')
CRITICAL_CHUNK_TYPES = [b'IHDR', b'PLTE', b'IDAT', b'IEND']
def parse_chunks(s):
"""parse bytes to list of Chunks"""
i = 0
chunks = []
while i<len(s):
try:
c = Chunk.from_buffer(s[i:])
if not c.valid:
print(f"WARNING: chunk {i} checksum failed")
if c.type[0]&(1<<5) == 0:
if not c.type in CRITICAL_CHUNK_TYPES:
print(f"ERROR: chunk {i} of type {c.type} cannot be interpreted")
except struct.error:
print("WARNING: Orphan data after IEND")
return chunks, s[i:]
chunks.append(c)
i += c.total()
if c.type == b'IEND':
if i<len(s):
print("WARNING: Orphan data after IEND chunk")
return chunks, s[i:]
return chunks, None
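# A small, hypothetical example (not part of the original module): parsing the
# serialised IEND chunk yields exactly one chunk and no orphan data.
def _parse_chunks_example():
    chunks, orphan = parse_chunks(IEND.to_bytes())
    assert len(chunks) == 1 and chunks[0].type == b'IEND' and orphan is None
    return chunks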
def bits_to_bytes(x):
return (x >> 3) + ((x & 7) > 0)
_bits_per_pixel = {}
_bits_per_pixel[0] = {1:1, 2:2, 4:4, 8:8, 16:16}
_bits_per_pixel[2] = {8:24, 16:48}
_bits_per_pixel[3] = {1:1, 2:2, 4:4, 8:8}
_bits_per_pixel[4] = {8:16, 16:32}
_bits_per_pixel[6] = {8:32, 16:64}
class ColorType(IntEnum):
GRAYSCALE = 0
RGB = 2
PALETTE = 3
GRAYSCALE_ALPHA = 4
RGBA = 6
class IHDR():
@classmethod
def create(cls, width, height, bitdepth, colortype, compression=0, filtermethod=0, interlace=0):
new = cls()
new.width = width
new.height = height
new.bitdepth = bitdepth
new.colortype = colortype
new.compression = compression
new.filtermethod = filtermethod
new.interlace = interlace
assert new.width>0, "ERROR: width must be positive"
assert new.height>0, "ERROR: height must be positive"
assert new.colortype in set([0, 2, 3, 4, 6]), "ERROR: {} is not a valid colortype".format(new.colortype)
if new.colortype == 0:
assert new.bitdepth in set([1,2,4,8,16]), "ERROR: {} is not a valid bitdepth for colortype {}".format(new.bitdepth, ColorType(new.colortype).name)
elif new.colortype == 2:
assert new.bitdepth in set([8,16]), "ERROR: {} is not a valid bitdepth for colortype {}".format(new.bitdepth, ColorType(new.colortype).name)
elif new.colortype == 3:
assert new.bitdepth in set([1,2,4,8]), "ERROR: {} is not a valid bitdepth for colortype {}".format(new.bitdepth, ColorType(new.colortype).name)
elif new.colortype == 4:
assert new.bitdepth in set([8,16]), "ERROR: {} is not a valid bitdepth for colortype {}".format(new.bitdepth, ColorType(new.colortype).name)
elif new.colortype == 6:
assert new.bitdepth in set([8,16]), "ERROR: {} is not a valid bitdepth for colortype {}".format(new.bitdepth, ColorType(new.colortype).name)
assert new.compression==0, "ERROR: {} is not a valid compression type".format(new.compression)
assert new.filtermethod==0, "ERROR: {} is not a valid filter method".format(new.filtermethod)
assert new.interlace in set([0, 1]), "ERROR: {} is not a valid interlace method".format(new.interlace)
new.bytes_per_pixel = bits_to_bytes(_bits_per_pixel[new.colortype][new.bitdepth])
new.width_bytes = bits_to_bytes(new.width * _bits_per_pixel[new.colortype][new.bitdepth])
return new
@classmethod
def from_buffer(cls, buf):
length, expected = len(buf), struct.calcsize(">IIBBBBB")
if length > expected:
print(f"WARNING: IHDR too large? (found: {length}, expected: {expected})")
raise ValueError
elif length < expected:
print(f"ERROR: IHDR too small? (found: {length}, expected: {expected})")
raise ValueError
width, height, bitdepth, colortype, compression, filtermethod, interlace = struct.unpack(">IIBBBBB", buf)
new = cls.create(width, height, bitdepth, colortype, compression, filtermethod, interlace)
return new
def __repr__(self):
data = [("width", self.width), ("height", self.height), ("bitdepth", self.bitdepth), ("colortype", ColorType(self.colortype).name), ("compression", self.compression), ("filtermethod", self.filtermethod), ("interlace", ["false", "adam7"][self.interlace])]
return '\n'.join("{} : {}".format(y, x) for x, y in data)
def to_chunk(self):
c = Chunk()
c.type = b'IHDR'
c.body = struct.pack(">IIBBBBB", self.width, self.height, self.bitdepth, self.colortype, self.compression, self.filtermethod, self.interlace)
return c
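# A small, hypothetical example (not part of the original module): an 8-bit RGBA
# header works out to 4 bytes per pixel, so a 640-pixel row occupies 2560 bytes.
def _ihdr_example():
    hdr = IHDR.create(width=640, height=480, bitdepth=8, colortype=ColorType.RGBA)
    assert hdr.bytes_per_pixel == 4 and hdr.width_bytes == 2560
    return hdr.to_chunk()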
def group(seq, n):
while seq:
yield seq[:n]
seq = seq[n:]
def split_to_chunks(data, size):
"""
split encoded image data into IDAT chunks of given size
"""
out = []
for i in range(0, len(data), size):
c = Chunk()
c.type = b"IDAT"
c.body = data[i:min(i + size, len(data))]
out.append(c)
return out
class FilterType(IntEnum):
NONE = 0
SUB = 1
UP = 2
AVG = 3
PAETH = 4
def undo_sub(scanline, bytes_per_pixel):
old = deque([0] * bytes_per_pixel)
out = []
for b in scanline:
new = (b + old.pop()) & 0xff
out.append(new)
old.appendleft(new)
return bytes(out)
def undo_up(scanline, prev, bytes_per_pixel):
return bytes((x + b) & 0xff for x, b in zip(scanline, prev))
def undo_avg(scanline, prev, bytes_per_pixel):
out = []
old = deque([0] * bytes_per_pixel)
for x, b in zip(scanline, prev):
new = (x + (b + old.pop()) // 2) & 0xff
out.append(new)
old.appendleft(new)
return bytes(out)
def undo_paeth(scanline, prev, bytes_per_pixel):
out = []
old = deque([0] * bytes_per_pixel)
old_up = deque([0] * bytes_per_pixel)
for x, b in zip(scanline, prev):
a = old.pop()
c = old_up.pop()
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
rho = a
elif pb <= pc:
rho = b
else:
rho = c
new = (x + rho) & 0xff
out.append(new)
old.appendleft(new)
old_up.appendleft(b)
return bytes(out)
def undo_filter(pixels, width, bytes_per_pixel):
out = b''
prev = bytes([0] * width)
for row in group(pixels, width + 1):
filter_type, *scanline = row
if filter_type == FilterType.NONE:
cur = bytes(scanline)
elif filter_type == FilterType.SUB:
cur = undo_sub(scanline, bytes_per_pixel)
elif filter_type == FilterType.UP:
cur = undo_up(scanline, prev, bytes_per_pixel)
elif filter_type == FilterType.AVG:
cur = undo_avg(scanline, prev, bytes_per_pixel)
elif filter_type == FilterType.PAETH:
cur = undo_paeth(scanline, prev, bytes_per_pixel)
else:
print(f"ERROR: Invalid filter type {filter_type}")
raise ValueError
assert len(cur) == width, f"ERROR: scanline wrong, is {len(cur)} should be {width}"
out += cur
prev = cur
return bytearray(out)
def apply_sub(scanline, bytes_per_pixel):
old = deque([0] * bytes_per_pixel)
out = []
for x in scanline:
new = (x - old.pop()) & 0xff
out.append(new)
old.appendleft(x)  # the Sub filter references raw (unfiltered) bytes, not filtered output
return bytes(out)
def apply_up(scanline, prev, bytes_per_pixel):
return bytes((x - b) & 0xff for x, b in zip(scanline, prev))
def apply_avg(scanline, prev, bytes_per_pixel):
out = []
old = deque([0] * bytes_per_pixel)
for x, b in zip(scanline, prev):
new = (x - (b + old.pop()) // 2) & 0xff
out.append(new)
old.appendleft(x)  # the Average filter references raw (unfiltered) bytes
return bytes(out)
def apply_paeth(scanline, prev, bytes_per_pixel):
out = []
old = deque([0] * bytes_per_pixel)
old_up = deque([0] * bytes_per_pixel)
for x, b in zip(scanline, prev):
a = old.pop()
c = old_up.pop()  # up-left neighbour comes from the previous scanline's deque
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
rho = a
elif pb <= pc:
rho = b
else:
rho = c
new = (x - rho) & 0xff
out.append(new)
old.appendleft(x)  # Paeth also references raw bytes of the current scanline
old_up.appendleft(b)
return bytes(out)
def apply_filter(pixels, width, bytes_per_pixel, method):
out = b''
prev = bytes([0] * width)
for filter_type, scanline in zip(method, group(pixels, width)):
if filter_type == FilterType.NONE:
cur = scanline
elif filter_type == FilterType.SUB:
cur = apply_sub(scanline, bytes_per_pixel)
elif filter_type == FilterType.UP:
cur = apply_up(scanline, prev, bytes_per_pixel)
elif filter_type == FilterType.AVG:
cur = apply_avg(scanline, prev, bytes_per_pixel)
elif filter_type == FilterType.PAETH:
cur = apply_paeth(scanline, prev, bytes_per_pixel)
else:
print(f"ERROR: Invalid filter type {filter_type}")
raise ValueError
# prepend the filter-type byte; bytes objects have no .format() method
row = bytes([filter_type]) + bytes(cur)
out += row
# the next row's filters must reference the raw (unfiltered) previous scanline
prev = bytes(scanline)
return out  # (after the loop over all scanlines)
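# A small, hypothetical round-trip check (assumes the fixes above): filtering a
# raw scanline and then undoing the filter reproduces the original bytes.
def _filter_round_trip_example():
    raw = bytes(range(16))                      # one 4-pixel RGBA row, 16 bytes
    filtered = apply_filter(raw, 16, 4, [FilterType.PAETH])
    assert undo_filter(filtered, 16, 4) == bytearray(raw)
    return filtered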
def read_nth(seq, n, offset=0):
for i in range(offset, len(seq), n):
yield seq[i]
def merge_data(chunks):
"""concat content of all chunks of type IDAT"""
return b''.join(chunk.body for chunk in chunks if chunk.type == b'IDAT')
def decompress(data):
D = zlib.decompressobj()
data = D.decompress(data)
return data, D.unused_data
class PNG():
MAGIC = b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a"
@classmethod
def from_chunks(cls, chunks, ihdr=None, orphan=None):
new = cls()
new.chunks = chunks
if not orphan is None:
new.orphan = orphan
if ihdr is None:
try:
ihdr, *_ = new.get_chunks_by_type(b'IHDR')
except ValueError:
print("ERROR: Please supply a IHDR chunk")
if len(_) > 0:
print("WARNING: Multiple IHDR chunks found, used first")
new.ihdr = IHDR.from_buffer(ihdr.body)
else:
new.ihdr = ihdr
new.chunks = [ihdr.to_chunk()] + chunks
if len(new.get_chunks_by_type(b'IHDR')) > 1:
print("WARNING: Multiple IHDR chunks found, used the selected one")
iend_chunks = new.get_chunks_by_type(b'IEND')
if len(iend_chunks)==0:
print("INFO: autogenerated | |
<filename>poker_server.py
import sys
import socket
import struct
import threading
import time
import argparse
import re
#from pynput import keyboard
import select
from math import sqrt, log, ceil
from random import randrange
import pyDes
import base64
import pickle
import game_utils
SIZE = 1024
PORT = 12345
# Maximum number to check while generating a random prime number
LIMIT = 999999999
SECRET_KEY_MAX_NUMBER = 99999
# CONSTANTS
POKER_MESSAGE_TYPE_INIT = "init"
POKER_MESSAGE_TYPE_PLAY = "play"
POKER_MESSAGE_TYPE_FOLD = "fold"
POKER_MESSAGE_TYPE_UPDATE = "update"
POKER_MESSAGE_TYPE_INVALID_BET = "invalid-bet"
POKER_MESSAGE_TYPE_VALID_BET = "valid-bet"
POKER_MESSAGE_TYPE_WATCH = "watch"
POKER_MESSAGE_TYPE_SPEC = "spectator"
POKER_MESSAGE_TYPE_SIT = "spectator-sit"
POKER_MESSAGE_TYPE_TURN = "turn"
POKER_MESSAGE_TYPE_TABLE = "table"
POKER_MESSAGE_TYPE_CARDS = "cards"
POKER_MESSAGE_TYPE_CHIPS = "chips"
POKER_MESSAGE_TYPE_INIT_RESPONSE = "init-response"
# Server is announcing that it is serving a poker game
POKER_MESSAGE_TYPE_ANNOUNCE = "announce"
# A message type that the clients broadcast to find game servers available
POKER_MESSAGE_TYPE_CLIENTCAST = "clientcast"
#########
IS_WINDOWS = (len(re.findall('[Ww]in', sys.platform)) != 0)
def is_prime(num: int) -> bool:
if num == 2:
return True
if num % 2 == 0 or num < 2:
return False
for i in range(3, ceil(sqrt(num)) + 1, 2):  # include sqrt(num) so perfect squares (9, 25, ...) are rejected
if num % i == 0:
return False
return True
def generate_prime(limit: int) -> int:
while 1:
candidate = randrange(2, limit)
if is_prime(candidate):
return candidate
def encrypt_message(data, key):
"""
Encodes data
:param data: Data to be encoded
:type data: str
:returns: bytes -- the base64-encoded ciphertext
"""
key_handle = pyDes.triple_des(
str(key).ljust(24),
pyDes.CBC,
"\0\0\0\0\0\0\0\0",
padmode=pyDes.PAD_PKCS5)
encrypted = key_handle.encrypt(
data=data)
return base64.b64encode(s=encrypted)
def decrypt_message(data, key):
"""
Decodes data
:param data: Data to be decoded
:type data: str
:returns: bytes -- the decrypted plaintext
"""
key_handle = pyDes.triple_des(
str(key).ljust(24),
pyDes.CBC,
"\0\0\0\0\0\0\0\0",
padmode=pyDes.PAD_PKCS5)
decrypted = key_handle.decrypt(
data=base64.b64decode(s=data))
return decrypted
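# Illustrative sketch only, not part of the original module: the two helpers are
# inverses for a given shared key (the key value below is arbitrary).
#
#     token = encrypt_message('hello table', key=12345)
#     assert decrypt_message(token, key=12345) == b'hello table'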
class PokerMessage(object):
def __init__(self, _type, username: str = None, data=None, g: int = None, p: int = None, A: int = None, table: list = None, chips: int = None,
active: bool = None, order: int = None, total_bet:int=None, high_bet: int = None, key: int=None):
self.type_ = _type
self.username_ = username
self.data_ = data
self.g_ = g
self.p_ = p
self.A_ = A
self.table_ = table
#self.seat = seat
self.chips_ = chips
self.order_ = order
self.total_bet_ = total_bet
self.high_bet_ = high_bet
self.key_ = key
def __str__(self):
return "Type: {}, Data: {}, username: {}, g-p-A: {}-{}-{}, table: {}, chips: {}, order: {}".format(
self.type_, self.data_, self.username_, self.g_, self.p_, self.A_, self.table_ , self.chips_, self.order_)
class Player(object):
def __init__(self, name: str, ip: str, g: int = None, p: int = None, B: int = None, spectating: bool=None, order: int = None , chips: int = None , high_bet: int = None, folded: bool=None): #, cards: list = None):
self.name_ = name
self.ip_ = ip
self.g_ = g if g is not None else generate_prime(LIMIT)
self.p_ = p if p is not None else generate_prime(LIMIT)
self.a_ = randrange(SECRET_KEY_MAX_NUMBER)
self.B_ = B
self._calculate_A()
self.cards_ = []
self.socket_ = None
self.chips_ = chips
self.order_ = order
self.table_id = None
self.high_bet_ = high_bet
self.folded_ = folded
self.spectating_ = spectating
if self.B_ is not None:
self.calculate_key()
print("{} has key: {}".format(self.name_, self.key_))
def _calculate_A(self):
self.A_ = pow(self.g_, self.a_, self.p_)  # modular exponentiation; g ** a alone would be astronomically large
def calculate_key(self):
self.key_ = pow(self.B_, self.a_, self.p_)
def __str__(self): return "Player: uname: {}, ip: {}, key: {}".format(
self.name_, self.ip_, self.key_)
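# Illustrative sketch only, not part of the original module: two Player objects
# arrive at the same Diffie-Hellman key once each has the other's public value.
#
#     alice = Player('alice', '127.0.0.1')
#     bob = Player('bob', '127.0.0.1', g=alice.g_, p=alice.p_, B=alice.A_)
#     alice.B_ = bob.A_
#     alice.calculate_key()
#     assert alice.key_ == bob.key_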
class Spectator(object):
def __init__(self, name: str, ip: str):
self.name_ = name
self.ip_ = ip
class Game(object):
def StartGameThread(self):
while True:
#print("--")
if self.CAN_START_GAME:
# print("Type \"start\" to start the game.")
# user_input = input()
# if user_input == "start":
self.begin_game = threading.Thread(target=self.start_game)
self.begin_game.start()
self.CAN_START_GAME = False
# else:
# import time
# time.sleep(3)
def __init__(self, table_id, nbr_of_players: int = 0): #, CAN_START_GAME: bool = None, PLAYER_COUNT: int = None, TOTAL_PLAYERS: int = None):
self.deck_ = game_utils.get_deck(shuffle_deck=True)
self.player_dict_ = {}
self.spect_dict_ = {}
self.player_order_list_ = []
self.table_ = []
self.is_started_ = False
self.waiting_list_ = []
self.CAN_START_GAME = False
self.PLAYER_COUNT = 0
self.TOTAL_PLAYERS = 0
self.LOCK_WAIT = False
self.multicast_addresses = []
def seat_available(self):
return len(self.player_order_list_) + len(self.waiting_list_) <= 4
def parse_raw_msg(self, payload: bytes) -> PokerMessage:
return pickle.loads(payload)
def add_player(self, player: Player):
if self.PLAYER_COUNT == 0:
StartGame = threading.Thread(target=self.StartGameThread)
StartGame.start()
print("Adding player {}".format(player.name_))
print(player.socket_.getsockname())
#self.multicast_addresses.append(player.socket_.getsockname()[0])
self.multicast_addresses.append(player.socket_.getsockname())
#PLAYER_COUNT = 0
if self.is_started_:
self.waiting_list_.append(player)
elif player.spectating_ == False:
self.deck_, player.cards_ = game_utils.draw_cards_from_deck(
self.deck_, 2)
player.chips_ = 10000
player.order_ = self.PLAYER_COUNT
self.PLAYER_COUNT = self.PLAYER_COUNT + 1
print("Drawn cards for {} => {}".format(player.name_, player.cards_))
self.player_dict_[player.name_] = player
self.player_order_list_.append(player.name_)
if len(self.player_order_list_) >= 2 and not self.is_started_:
print("Game has more than 2 players. You can start the game.")
self.TOTAL_PLAYERS = len(self.player_order_list_)
#print(PLAYER_COUNT)
self.CAN_START_GAME = True
else:
self.spect_dict_[player.name_] = player
#self.start_game()
def table_update(self):
table_msg = PokerMessage(POKER_MESSAGE_TYPE_TABLE, table=self.table_)
print("[INFO]: sending table update: ", self.table_)
table_msg_bin = pickle.dumps(table_msg)
#print("yo: "+ str(table_msg_bin))
for uname in self.player_dict_:
player = self.player_dict_[uname]
player.socket_.send(table_msg_bin)
for sname in self.spect_dict_:
spectator = self.spect_dict_[sname]
spectator.socket_.send(table_msg_bin)
def bring_new_players(self):
for player in self.waiting_list_:
self.add_player(player)
self.waiting_list_.clear()
def start_game(self):
players_in_game = []
TABLE_BET = 0
HIGH_BET = 0
PLAYER_BET = 0
player_bets = []
PLAYER_FOLDED = False
self.PLAYER_COUNT = 0
self.is_started_ = True
bets_on_table = 0
#round zero, distribute cards to players
for uname in self.player_dict_:
#PLAYER_COUNT = PLAYER_COUNT + 1
print(self.player_dict_[uname].cards_)
print(self.player_dict_[uname].chips_)
player = self.player_dict_[uname]
cards_msg = PokerMessage(POKER_MESSAGE_TYPE_CARDS, table=self.player_dict_[uname].cards_,
chips=self.player_dict_[uname].chips_, order=self.PLAYER_COUNT, key = player.key_)
players_in_game.append(1)
self.PLAYER_COUNT = self.PLAYER_COUNT + 1
cards_msg_bin = pickle.dumps(cards_msg)
player.folded_ = False
encrypted_msg = encrypt_message(cards_msg_bin, key = player.key_)
player.socket_.send(encrypted_msg)
for sname in self.spect_dict_:
cards_msg = PokerMessage(POKER_MESSAGE_TYPE_CARDS, username = uname, table=self.player_dict_[uname].cards_)
cards_msg_bin = pickle.dumps(cards_msg)
spectator = self.spect_dict_[sname]
spectator.socket_.send(cards_msg_bin)
cards_on_table = []
for round in range(3):
# rounds
HIGH_BET = 0
# first round, 3 cards on table
if round == 0:
self.deck_, self.table_ = game_utils.draw_cards_from_deck(
self.deck_, 3)
for x in range(3):
cards_on_table.append(self.table_[x])
# second & third round, 1 card on table
else:
self.deck_, self.table_ = game_utils.draw_cards_from_deck(
self.deck_, 1)
cards_on_table.append(self.table_[0])
print((self.PLAYER_COUNT))
self.table_update()
# ask each player their bet
for uname in self.player_dict_:
name = uname
player = self.player_dict_[uname]
print(player.folded_)
if player.folded_ == False:
name = uname
win = True
for other_name in self.player_dict_:
if other_name != name:
other_player = self.player_dict_[other_name]
if(other_player.folded_ == False):
win = False
if win:
print("player " + name + " has won!")
break
cards_msg = PokerMessage(POKER_MESSAGE_TYPE_TURN, username = uname)
#player_bets.append(0)
cards_msg_bin = pickle.dumps(cards_msg)
player.socket_.send(cards_msg_bin)
self.LOCK_WAIT = True
bets_on_table = TABLE_BET
print("Waiting for player " + uname)
player_bets.append(0)
while(self.LOCK_WAIT):
data = player.socket_.recv(1024)
client_msg = self.parse_raw_msg(data)
if client_msg.type_ == POKER_MESSAGE_TYPE_TURN:
#print("NEREDE 22")
order = client_msg.order_
name = client_msg.username_
print("!---!")
#print(client_msg.chips_ )
#print( player_bets[order])
#print("---")
print(order)
print(client_msg.chips_ )
print( player_bets[order])
print("!---!")
if client_msg.chips_ + player_bets[order] > HIGH_BET:
HIGH_BET = client_msg.chips_ + player_bets[order]
PLAYER_BET = client_msg.chips_
TABLE_BET = TABLE_BET + client_msg.chips_
player.chips_ = player.chips_ - (client_msg.chips_ + player_bets[order])
cards_msg = PokerMessage(POKER_MESSAGE_TYPE_VALID_BET, username = uname, chips=(client_msg.chips_))
cards_msg_bin = pickle.dumps(cards_msg)
player.socket_.send(cards_msg_bin)
self.LOCK_WAIT = False
elif client_msg.chips_ + player_bets[order] == HIGH_BET:
TABLE_BET = TABLE_BET + client_msg.chips_
PLAYER_BET = client_msg.chips_ + player_bets[order]
player.chips_ = player.chips_ - (client_msg.chips_ + player_bets[order])
cards_msg = PokerMessage(POKER_MESSAGE_TYPE_VALID_BET, username = uname, chips=(client_msg.chips_ ))
cards_msg_bin = pickle.dumps(cards_msg)
player.socket_.send(cards_msg_bin)
self.LOCK_WAIT = False
else:
msg = PokerMessage(POKER_MESSAGE_TYPE_INVALID_BET, high_bet=HIGH_BET, chips=player_bets[order])
msg_bin = pickle.dumps(msg)
player.socket_.send(msg_bin)
msg = PokerMessage(POKER_MESSAGE_TYPE_TURN, username = name)
msg_bin = pickle.dumps(msg)
player.socket_.send(msg_bin)
elif client_msg.type_ == POKER_MESSAGE_TYPE_FOLD:
order = client_msg.order_
name = client_msg.username_
print("folded client: " + str(order))
print(client_msg)
for uname in self.player_dict_:
if name == uname:
print("heh")
players_in_game[order] = 0
self.LOCK_WAIT = False
PLAYER_FOLDED = True
elif client_msg.type_ == POKER_MESSAGE_TYPE_INVALID_BET:
cards_msg = PokerMessage(POKER_MESSAGE_TYPE_TURN, username = uname)
cards_msg_bin = pickle.dumps(cards_msg)
player.socket_.send(cards_msg_bin)
print("YENIDEN")
self.LOCK_WAIT = True
if PLAYER_FOLDED == True:
player.folded_ = True
PLAYER_FOLDED = False
player.folded_ = True
PLAYER_FOLDED = False
fold_msg = PokerMessage(POKER_MESSAGE_TYPE_FOLD, username = name)
print(fold_msg)
fold_msg_bin = pickle.dumps(fold_msg)
for oname in self.player_dict_:
other_player = self.player_dict_[oname]
other_player.socket_.send(fold_msg_bin)
for sname in self.spect_dict_:
spectator = self.spect_dict_[sname]
spectator.socket_.send(fold_msg_bin)
player_bets.pop()
player_bets.append(PLAYER_BET)
#while
player.high_bet_ = PLAYER_BET
#if players_in_game[order-1] == 0:
#print()
msg = PokerMessage(POKER_MESSAGE_TYPE_UPDATE, username = name, chips = (TABLE_BET-bets_on_table), total_bet = TABLE_BET)
msg_bin = pickle.dumps(msg)
print("GECTI")
for uname in self.player_dict_:
player = self.player_dict_[uname]
player.socket_.send(msg_bin)
for sname in self.spect_dict_:
spectator = self.spect_dict_[sname]
spectator.socket_.send(msg_bin)
check_high_bets = True
i = 0
# ask for bets again to match the highest bid
while check_high_bets:
for uname in self.player_dict_:
player = self.player_dict_[uname]
print(player.high_bet_)
if player.high_bet_ < HIGH_BET and player.folded_ == False:
name = uname
cards_msg = PokerMessage(POKER_MESSAGE_TYPE_TURN, username = uname, high_bet=(HIGH_BET-player.high_bet_))
| |
used for this
lookup. If a mark filtering set is provided,
`LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
flags.
"""
def __init__(self, font, location):
LookupBuilder.__init__(self, font, location, "GPOS", 6)
self.marks = {} # glyphName -> (markClassName, anchor)
self.baseMarks = {} # glyphName -> {markClassName: anchor}
def equals(self, other):
return (
LookupBuilder.equals(self, other)
and self.marks == other.marks
and self.baseMarks == other.baseMarks
)
def inferGlyphClasses(self):
result = {glyph: 3 for glyph in self.baseMarks}
result.update({glyph: 3 for glyph in self.marks})
return result
def build(self):
"""Build the lookup.
Returns:
An ``otTables.Lookup`` object representing the mark-to-mark
positioning lookup.
"""
markClasses = self.buildMarkClasses_(self.marks)
markClassList = sorted(markClasses.keys(), key=markClasses.get)
marks = {
mark: (markClasses[mc], anchor) for mark, (mc, anchor) in self.marks.items()
}
st = ot.MarkMarkPos()
st.Format = 1
st.ClassCount = len(markClasses)
st.Mark1Coverage = buildCoverage(marks, self.glyphMap)
st.Mark2Coverage = buildCoverage(self.baseMarks, self.glyphMap)
st.Mark1Array = buildMarkArray(marks, self.glyphMap)
st.Mark2Array = ot.Mark2Array()
st.Mark2Array.Mark2Count = len(st.Mark2Coverage.glyphs)
st.Mark2Array.Mark2Record = []
for base in st.Mark2Coverage.glyphs:
anchors = [self.baseMarks[base].get(mc) for mc in markClassList]
st.Mark2Array.Mark2Record.append(buildMark2Record(anchors))
return self.buildLookup_([st])
class ReverseChainSingleSubstBuilder(LookupBuilder):
"""Builds a Reverse Chaining Contextual Single Substitution (GSUB8) lookup.
Users are expected to manually add substitutions to the ``rules``
attribute after the object has been initialized, e.g.::
# reversesub [a e n] d' by d.alt;
prefix = [ ["a", "e", "n"] ]
suffix = []
mapping = { "d": "d.alt" }
builder.rules.append( (prefix, suffix, mapping) )
Attributes:
font (``fontTools.TTLib.TTFont``): A font object.
location: A string or tuple representing the location in the original
source which produced this lookup.
rules: A list of three-element tuples, each consisting of a prefix
sequence, a suffix sequence, and a dictionary of single substitutions.
lookupflag (int): The lookup's flag
markFilterSet: Either ``None`` if no mark filtering set is used, or
an integer representing the filtering set to be used for this
lookup. If a mark filtering set is provided,
`LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
flags.
"""
def __init__(self, font, location):
LookupBuilder.__init__(self, font, location, "GSUB", 8)
self.rules = [] # (prefix, suffix, mapping)
def equals(self, other):
return LookupBuilder.equals(self, other) and self.rules == other.rules
def build(self):
"""Build the lookup.
Returns:
An ``otTables.Lookup`` object representing the chained
contextual substitution lookup.
"""
subtables = []
for prefix, suffix, mapping in self.rules:
st = ot.ReverseChainSingleSubst()
st.Format = 1
self.setBacktrackCoverage_(prefix, st)
self.setLookAheadCoverage_(suffix, st)
st.Coverage = buildCoverage(mapping.keys(), self.glyphMap)
st.GlyphCount = len(mapping)
st.Substitute = [mapping[g] for g in st.Coverage.glyphs]
subtables.append(st)
return self.buildLookup_(subtables)
def add_subtable_break(self, location):
# Nothing to do here, each substitution is in its own subtable.
pass
class SingleSubstBuilder(LookupBuilder):
"""Builds a Single Substitution (GSUB1) lookup.
Users are expected to manually add substitutions to the ``mapping``
attribute after the object has been initialized, e.g.::
# sub x by y;
builder.mapping["x"] = "y"
Attributes:
font (``fontTools.TTLib.TTFont``): A font object.
location: A string or tuple representing the location in the original
source which produced this lookup.
mapping: A dictionary mapping a single glyph name to another glyph name.
lookupflag (int): The lookup's flag
markFilterSet: Either ``None`` if no mark filtering set is used, or
an integer representing the filtering set to be used for this
lookup. If a mark filtering set is provided,
`LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
flags.
"""
def __init__(self, font, location):
LookupBuilder.__init__(self, font, location, "GSUB", 1)
self.mapping = OrderedDict()
def equals(self, other):
return LookupBuilder.equals(self, other) and self.mapping == other.mapping
def build(self):
"""Build the lookup.
Returns:
An ``otTables.Lookup`` object representing the multiple
substitution lookup.
"""
subtables = self.build_subst_subtables(self.mapping, buildSingleSubstSubtable)
return self.buildLookup_(subtables)
def getAlternateGlyphs(self):
return {glyph: set([repl]) for glyph, repl in self.mapping.items()}
def add_subtable_break(self, location):
self.mapping[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
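# A minimal usage sketch, not from the original source; ``font`` is assumed to be
# a TTFont whose glyph order contains "x" and "y".
#
#     builder = SingleSubstBuilder(font, location=None)
#     builder.mapping["x"] = "y"
#     lookup = builder.build()    # an otTables.Lookup wrapping a SingleSubst subtable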
class ClassPairPosSubtableBuilder(object):
"""Builds class-based Pair Positioning (GPOS2 format 2) subtables.
Note that this does *not* build a GPOS2 ``otTables.Lookup`` directly,
but builds a list of ``otTables.PairPos`` subtables. It is used by the
:class:`PairPosBuilder` below.
Attributes:
builder (PairPosBuilder): A pair positioning lookup builder.
"""
def __init__(self, builder):
self.builder_ = builder
self.classDef1_, self.classDef2_ = None, None
self.values_ = {} # (glyphclass1, glyphclass2) --> (value1, value2)
self.forceSubtableBreak_ = False
self.subtables_ = []
def addPair(self, gc1, value1, gc2, value2):
"""Add a pair positioning rule.
Args:
gc1: A set of glyph names for the "left" glyph
value1: An ``otTables.ValueRecord`` object for the left glyph's
positioning.
gc2: A set of glyph names for the "right" glyph
value2: An ``otTables.ValueRecord`` object for the right glyph's
positioning.
"""
mergeable = (
not self.forceSubtableBreak_
and self.classDef1_ is not None
and self.classDef1_.canAdd(gc1)
and self.classDef2_ is not None
and self.classDef2_.canAdd(gc2)
)
if not mergeable:
self.flush_()
self.classDef1_ = ClassDefBuilder(useClass0=True)
self.classDef2_ = ClassDefBuilder(useClass0=False)
self.values_ = {}
self.classDef1_.add(gc1)
self.classDef2_.add(gc2)
self.values_[(gc1, gc2)] = (value1, value2)
def addSubtableBreak(self):
"""Add an explicit subtable break at this point."""
self.forceSubtableBreak_ = True
def subtables(self):
"""Return the list of ``otTables.PairPos`` subtables constructed."""
self.flush_()
return self.subtables_
def flush_(self):
if self.classDef1_ is None or self.classDef2_ is None:
return
st = buildPairPosClassesSubtable(self.values_, self.builder_.glyphMap)
if st.Coverage is None:
return
self.subtables_.append(st)
self.forceSubtableBreak_ = False
class PairPosBuilder(LookupBuilder):
"""Builds a Pair Positioning (GPOS2) lookup.
Attributes:
font (``fontTools.TTLib.TTFont``): A font object.
location: A string or tuple representing the location in the original
source which produced this lookup.
pairs: An array of class-based pair positioning tuples. Usually
manipulated with the :meth:`addClassPair` method below.
glyphPairs: A dictionary mapping a tuple of glyph names to a tuple
of ``otTables.ValueRecord`` objects. Usually manipulated with the
:meth:`addGlyphPair` method below.
lookupflag (int): The lookup's flag
markFilterSet: Either ``None`` if no mark filtering set is used, or
an integer representing the filtering set to be used for this
lookup. If a mark filtering set is provided,
`LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
flags.
"""
def __init__(self, font, location):
LookupBuilder.__init__(self, font, location, "GPOS", 2)
self.pairs = [] # [(gc1, value1, gc2, value2)*]
self.glyphPairs = {} # (glyph1, glyph2) --> (value1, value2)
self.locations = {} # (gc1, gc2) --> (filepath, line, column)
def addClassPair(self, location, glyphclass1, value1, glyphclass2, value2):
"""Add a class pair positioning rule to the current lookup.
Args:
location: A string or tuple representing the location in the
original source which produced this rule. Unused.
glyphclass1: A set of glyph names for the "left" glyph in the pair.
value1: A ``otTables.ValueRecord`` for positioning the left glyph.
glyphclass2: A set of glyph names for the "right" glyph in the pair.
value2: A ``otTables.ValueRecord`` for positioning the right glyph.
"""
self.pairs.append((glyphclass1, value1, glyphclass2, value2))
def addGlyphPair(self, location, glyph1, value1, glyph2, value2):
"""Add a glyph pair positioning rule to the current lookup.
Args:
location: A string or tuple representing the location in the
original source which produced this rule.
glyph1: A glyph name for the "left" glyph in the pair.
value1: A ``otTables.ValueRecord`` for positioning the left glyph.
glyph2: A glyph name for the "right" glyph in the pair.
value2: A ``otTables.ValueRecord`` for positioning the right glyph.
"""
key = (glyph1, glyph2)
oldValue = self.glyphPairs.get(key, None)
if oldValue is not None:
# the Feature File spec explicitly allows specific pairs generated
# by an 'enum' rule to be overridden by preceding single pairs
otherLoc = self.locations[key]
log.debug(
"Already defined position for pair %s %s at %s; "
"choosing the first value",
glyph1,
glyph2,
otherLoc,
)
else:
self.glyphPairs[key] = (value1, value2)
self.locations[key] = location
def add_subtable_break(self, location):
self.pairs.append(
(
self.SUBTABLE_BREAK_,
self.SUBTABLE_BREAK_,
self.SUBTABLE_BREAK_,
self.SUBTABLE_BREAK_,
)
)
def equals(self, other):
return (
LookupBuilder.equals(self, other)
and self.glyphPairs == other.glyphPairs
and self.pairs == other.pairs
)
def build(self):
"""Build the lookup.
Returns:
An ``otTables.Lookup`` object representing the pair positioning
lookup.
"""
builders = {}
builder = None
for glyphclass1, value1, glyphclass2, value2 in self.pairs:
if glyphclass1 is self.SUBTABLE_BREAK_:
if builder is not None:
builder.addSubtableBreak()
continue
valFormat1, valFormat2 = 0, 0
if value1:
valFormat1 = value1.getFormat()
if value2:
valFormat2 = value2.getFormat()
builder = builders.get((valFormat1, valFormat2))
if builder is None:
builder = ClassPairPosSubtableBuilder(self)
builders[(valFormat1, valFormat2)] = builder
builder.addPair(glyphclass1, value1, glyphclass2, value2)
subtables = []
if self.glyphPairs:
subtables.extend(buildPairPosGlyphs(self.glyphPairs, self.glyphMap))
for key in sorted(builders.keys()):
subtables.extend(builders[key].subtables())
lookup = self.buildLookup_(subtables)
# Compact the lookup
# This is a good moment to do it because the compaction should create
# smaller subtables, which may prevent overflows from happening.
| |
[0.1, 0.05, 1.0, 1.0]
self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625]
def test_max_distance_for_overlap(self):
"""Test that the distance ensures the IoU with random boxes."""
# TODO(vighneshb) remove this after the `_smallest_positive_root`
# function is fixed.
self.skipTest(('Skipping test because we are using an incorrect version of '
'the `max_distance_for_overlap` function to reproduce'
' results.'))
rng = np.random.RandomState(0)
n_samples = 100
width = rng.uniform(1, 100, size=n_samples)
height = rng.uniform(1, 100, size=n_samples)
min_iou = rng.uniform(0.1, 1.0, size=n_samples)
def graph_fn():
max_dist = targetassigner.max_distance_for_overlap(height, width, min_iou)
return max_dist
max_dist = self.execute(graph_fn, [])
xmin1 = np.zeros(n_samples)
ymin1 = np.zeros(n_samples)
xmax1 = np.zeros(n_samples) + width
ymax1 = np.zeros(n_samples) + height
xmin2 = max_dist * np.cos(rng.uniform(0, 2 * np.pi))
ymin2 = max_dist * np.sin(rng.uniform(0, 2 * np.pi))
xmax2 = width + max_dist * np.cos(rng.uniform(0, 2 * np.pi))
ymax2 = height + max_dist * np.sin(rng.uniform(0, 2 * np.pi))
boxes1 = np.vstack([ymin1, xmin1, ymax1, xmax1]).T
boxes2 = np.vstack([ymin2, xmin2, ymax2, xmax2]).T
iou = np.diag(np_box_ops.iou(boxes1, boxes2))
self.assertTrue(np.all(iou >= min_iou))
def test_max_distance_for_overlap_centernet(self):
"""Test the version of the function used in the CenterNet paper."""
def graph_fn():
distance = targetassigner.max_distance_for_overlap(10, 5, 0.5)
return distance
distance = self.execute(graph_fn, [])
self.assertAlmostEqual(2.807764064, distance)
def test_assign_size_and_offset_targets(self):
"""Test the assign_size_and_offset_targets function."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center_offset]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
assigner = targetassigner.CenterNetBoxTargetAssigner(4)
indices, hw, yx_offset, weights = assigner.assign_size_and_offset_targets(
80, 80, box_batch)
return indices, hw, yx_offset, weights
indices, hw, yx_offset, weights = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (5, 3))
self.assertEqual(hw.shape, (5, 2))
self.assertEqual(yx_offset.shape, (5, 2))
self.assertEqual(weights.shape, (5,))
np.testing.assert_array_equal(
indices,
[[0, 10, 10], [0, 15, 5], [1, 11, 10], [2, 10, 10], [2, 7, 11]])
np.testing.assert_array_equal(
hw, [[20, 20], [10, 10], [18, 19], [10, 10], [8, 15]])
np.testing.assert_array_equal(
yx_offset, [[0, 0], [0, 0], [0, 0.5], [0, 0], [0.25, 0.75]])
np.testing.assert_array_equal(weights, 1)
def test_assign_size_and_offset_targets_weights(self):
"""Test the assign_size_and_offset_targets function with box weights."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_lower_left, self._box_center_small]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
cn_assigner = targetassigner.CenterNetBoxTargetAssigner(4)
weights_batch = [
tf.constant([0.0, 1.0]),
tf.constant([1.0, 1.0]),
tf.constant([0.0, 0.0])
]
indices, hw, yx_offset, weights = cn_assigner.assign_size_and_offset_targets(
80, 80, box_batch, weights_batch)
return indices, hw, yx_offset, weights
indices, hw, yx_offset, weights = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (6, 3))
self.assertEqual(hw.shape, (6, 2))
self.assertEqual(yx_offset.shape, (6, 2))
self.assertEqual(weights.shape, (6,))
np.testing.assert_array_equal(indices,
[[0, 10, 10], [0, 15, 5], [1, 15, 5],
[1, 10, 10], [2, 10, 10], [2, 7, 11]])
np.testing.assert_array_equal(
hw, [[20, 20], [10, 10], [10, 10], [10, 10], [10, 10], [8, 15]])
np.testing.assert_array_equal(
yx_offset, [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0.25, 0.75]])
np.testing.assert_array_equal(weights, [0, 1, 1, 1, 0, 0])
def test_get_batch_predictions_from_indices(self):
"""Test the get_batch_predictions_from_indices function.
This test verifies that the indices returned by
assign_size_and_offset_targets function work as expected with a predicted
tensor.
"""
def graph_fn():
pred_array = np.ones((2, 40, 20, 2), dtype=np.int32) * -1000
pred_array[0, 20, 10] = [1, 2]
pred_array[0, 30, 5] = [3, 4]
pred_array[1, 20, 10] = [5, 6]
pred_array[1, 14, 11] = [7, 8]
pred_tensor = tf.constant(pred_array)
indices = tf.constant([
[0, 20, 10],
[0, 30, 5],
[1, 20, 10],
[1, 14, 11]
], dtype=tf.int32)
preds = targetassigner.get_batch_predictions_from_indices(
pred_tensor, indices)
return preds
preds = self.execute(graph_fn, [])
np.testing.assert_array_equal(preds, [[1, 2], [3, 4], [5, 6], [7, 8]])
def test_get_batch_predictions_from_indices_with_class(self):
"""Test the get_batch_predictions_from_indices function with class axis.
This test verifies that the indices returned by
assign_size_and_offset_targets function work as expected with a predicted
tensor.
"""
def graph_fn():
pred_array = np.ones((2, 40, 20, 5, 2), dtype=np.int32) * -1000
pred_array[0, 20, 10, 0] = [1, 2]
pred_array[0, 30, 5, 2] = [3, 4]
pred_array[1, 20, 10, 1] = [5, 6]
pred_array[1, 14, 11, 4] = [7, 8]
pred_tensor = tf.constant(pred_array)
indices = tf.constant([
[0, 20, 10, 0],
[0, 30, 5, 2],
[1, 20, 10, 1],
[1, 14, 11, 4]
], dtype=tf.int32)
preds = targetassigner.get_batch_predictions_from_indices(
pred_tensor, indices)
return preds
preds = self.execute(graph_fn, [])
np.testing.assert_array_equal(preds, [[1, 2], [3, 4], [5, 6], [7, 8]])
class CenterNetKeypointTargetAssignerTest(test_case.TestCase):
def test_keypoint_heatmap_targets(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 1.0],
[0.4, 0.1, 0.4, 0.2, 0.1],
[float('nan'), 0.1, 0.5, 0.7, 0.6]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
gt_boxes_list = [
tf.constant(
np.array([[0.0, 0.0, 0.3, 0.3],
[0.0, 0.0, 0.5, 0.5],
[0.0, 0.0, 0.5, 0.5],
[0.0, 0.0, 1.0, 1.0]]),
dtype=tf.float32)
]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2])
(targets, num_instances_batch,
valid_mask) = cn_assigner.assign_keypoint_heatmap_targets(
120,
80,
gt_keypoints_list,
gt_classes_list,
gt_boxes_list=gt_boxes_list)
return targets, num_instances_batch, valid_mask
targets, num_instances_batch, valid_mask = self.execute(graph_fn, [])
# keypoint (0.5, 0.5) is selected. The peak is expected to appear at the
# center of the image.
self.assertEqual((15, 10), _array_argmax(targets[0, :, :, 1]))
self.assertAlmostEqual(1.0, targets[0, 15, 10, 1])
# No peak for the first class since NaN is selected.
self.assertAlmostEqual(0.0, targets[0, 15, 10, 0])
# Verify the output heatmap shape.
self.assertAllEqual([1, 30, 20, 2], targets.shape)
# Verify the number of instances is correct.
np.testing.assert_array_almost_equal([[0, 1]],
num_instances_batch)
# When calling the function, we specify class id 1 (the 2nd and 4th
# instances) and keypoint indices [0, 2], meaning that the 2nd instance
# belongs to the target class but has no valid keypoints. As a result,
# the region of the 2nd instance's bounding box should be blacked out
# (0.0, 0.0, 0.5, 0.5), which maps to (0, 0, 15, 10) in absolute output
# space.
self.assertAlmostEqual(np.sum(valid_mask[:, 0:15, 0:10]), 0.0)
# All other values are 1.0 so the sum is: 30 * 20 - 15 * 10 = 450.
self.assertAlmostEqual(np.sum(valid_mask), 450.0)
def test_assign_keypoints_offset_targets(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2])
(indices, offsets, weights) = cn_assigner.assign_keypoints_offset_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list)
return indices, weights, offsets
indices, weights, offsets = self.execute(graph_fn, [])
# Only the last element has positive weight.
np.testing.assert_array_almost_equal(
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], weights)
# Validate the last element's indices and offsets.
np.testing.assert_array_equal([0, 3, 2], indices[7, :])
np.testing.assert_array_almost_equal([0.6, 0.4], offsets[7, :])
def test_assign_keypoint_depths_target(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, 0.7, 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
depths = tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[0.5, 0.0, 7.0, 0.7, 0.4]]),
dtype=tf.float32)
gt_keypoint_depths_list = [depths]
gt_keypoint_depth_weights = tf.constant(
np.array([[1.0, 1.0, 1.0, 1.0, 1.0],
[float('nan'), 0.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 0.5, 1.0, 1.0]]),
dtype=tf.float32)
gt_keypoint_depth_weights_list = [gt_keypoint_depth_weights]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2],
peak_radius=1)
(indices, depths, weights) = cn_assigner.assign_keypoints_depth_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_keypoint_depths_list=gt_keypoint_depths_list,
gt_keypoint_depth_weights_list=gt_keypoint_depth_weights_list)
return indices, depths, weights
indices, depths, weights = self.execute(graph_fn, [])
# Only the last 5 elements have positive weight.
np.testing.assert_array_almost_equal([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.5
], weights)
# Validate the last 5 elements' depth values.
np.testing.assert_array_almost_equal(
[7.0, 7.0, 7.0, 7.0, 7.0], depths[35:, 0])
self.assertEqual((40, 3), indices.shape)
np.testing.assert_array_equal([0, 2, 2], indices[35, :])
def test_assign_keypoint_depths_per_keypoints(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, 0.7, 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
depths = tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[0.5, 0.0, 7.0, 0.7, 0.4]]),
dtype=tf.float32)
gt_keypoint_depths_list = [depths]
gt_keypoint_depth_weights = tf.constant(
np.array([[1.0, 1.0, 1.0, 1.0, 1.0],
[float('nan'), 0.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 0.5, 1.0, 1.0]]),
dtype=tf.float32)
gt_keypoint_depth_weights_list = [gt_keypoint_depth_weights]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
# scripts/vcp.py
#!/usr/bin/python
# -*-coding=utf-8-*-
import os
import re
import time
import datetime
from datetime import datetime
from datetime import timedelta
import requests
import pandas as pd
from pandas import Series
import numpy as np
from requests.api import get
import tushare as ts
import queue
import threading
import logging
import matplotlib as mpl
import matplotlib.pyplot as plt
import mplfinance as mpfs
from matplotlib import ticker
from matplotlib.pylab import date2num
from mplfinance.original_flavor import candlestick_ochl
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(filename='my.log', level=logging.INFO, format=LOG_FORMAT)
# Fix garbled Chinese characters in mplfinance chart output
plt.rcParams['font.sans-serif'] = ['Songti SC']
plt.rcParams['axes.unicode_minus'] = False
s = mpfs.make_mpf_style(base_mpf_style='yahoo', rc={
'font.family': 'Songti SC'})
debug = False
exitFlag = 0
# pd.set_option() configures how pandas frames are displayed in the console/PyCharm output
pd.set_option('expand_frame_repr', False)  # True allows the frame to wrap across lines; False disables wrapping
pd.set_option('display.max_columns', None)  # show all columns
pd.set_option('display.max_rows', 4)  # show at most 4 rows
pd.set_option('colheader_justify', 'centre')  # centre column headers
pd.options.display.float_format = '{:,.2f}'.format
date = datetime.now().strftime('%Y%m%d')
# filename = "data/vcp/"+date+".txt"
filename = date + ".txt"
f = open(filename, "a+", buffering=8096)
code_list = []
class VCP():
def __init__(self, key, **kwargs):
self.pro = ts.pro_api(key)
self.queueLock = threading.Lock()
self.workQueue = queue.Queue(5000)
def get_stock_basic(self, ts_code=''):
'''
Fetch the basic info of all listed stocks.
'''
# Local cache
file = 'stock_basic_' + filename
if os.path.exists(file):
df = pd.read_csv(file, index_col=0)
if df is not None:
logging.info("stock basic read from csv:\n{}".format(df))
return df
for _ in range(300):
try:
df = self.pro.stock_basic(ts_code=ts_code, exchange='', list_status='L')
logging.info("stock basic read from remote:\n{}".format(df))
# Add an exchange column
# df.loc[df['ts_code'].str.startswith('3'), 'exchange'] = 'CY'
# df.loc[df['ts_code'].str.startswith('688'), 'exchange'] = 'KC'
# df.loc[df['ts_code'].str.startswith('60'), 'exchange'] = 'SH'
# df.loc[df['ts_code'].str.startswith('0'), 'exchange'] = 'SZ'
except Exception as e:
# logging.info('fetch failed in get_stock_basic')
# logging.info(e.args)
time.sleep(15)
else:
df.to_csv(file)
return df
logging.info('fetch failed: get_stock_basic')
return
def get_daily_basic(self, ts_code='', trade_date=''):
''' Fetch daily indicators for all stocks.
turnover_rate float turnover rate (%)
turnover_rate_f float turnover rate based on free float
volume_ratio float volume ratio
pe float P/E (total market value / net profit; empty when loss-making)
pe_ttm float P/E TTM (empty when loss-making)
pb float P/B (total market value / net assets)
ps float P/S
ps_ttm float P/S (TTM)
dv_ratio float dividend yield (%)
dv_ttm float dividend yield TTM (%)
total_share float total shares (10k shares)
float_share float floating shares (10k shares)
free_share float free-float shares (10k shares)
total_mv float total market value (10k CNY)
circ_mv float circulating market value (10k CNY)
'''
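# Usage note (illustrative, not from the original source): pro.daily_basic(trade_date='20210104')
# returns one row per listed stock with the columns documented above; this wrapper caches the
# result to a local CSV named 'daily_basic_<trade_date>.csv' and retries on API errors.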
# Local cache
file = 'daily_basic_' + trade_date + '.csv'
if os.path.exists(file):
df = pd.read_csv(file, index_col=0)
if df is not None:
logging.info("daily basic read from csv:\n{}".format(df))
return df
for _ in range(300):
try:
df = self.pro.daily_basic(ts_code=ts_code, trade_date=trade_date)
logging.info("daily basic read form remote:\n{}".format(df))
except Exception as e:
# logging.info('fetch failed in get_daily_basic')
# logging.info(e.args)
time.sleep(15)
else:
df.to_csv(file)
return df
logging.info('fetch failed: get_daily_basic')
return
def get_stock_daily(self, ts_code, start_date='', end_date='', adj='qfq'):
# Fetch daily bars for the given stock
for _ in range(300):
try:
df = self.pro.daily(ts_code=ts_code,
start_date=start_date, end_date=end_date, adj=adj)
# if debug:
# logging.info("ts_code:{}, start_date:{}, end_date:{}, df: {}".format(
# ts_code, start_date, end_date, df))
except Exception as e:
# logging.info('fetch failed in get_stock_daily, ts_code:{}'.format(ts_code))
# logging.info(e.args)
time.sleep(30)
else:
return df
logging.info('fetch failed: get_stock_daily, ts_code:{}'.format(ts_code))
return
def get_fina_indicator(self, ts_code, start_date=''):
# Fetch financial indicators for the given stock
for _ in range(300):
try:
df = self.pro.fina_indicator(ts_code=ts_code, start_date=start_date)
# if debug:
# logging.info("ts_code:{}, df: {}".format(ts_code, df))
# logging.info("ts_code:{}, start_date:{}, end_date:{}, df: {}".format(
# ts_code, start_date, end_date, df))
except Exception as e:
# logging.info('fetch failed in get_fina_indicator, ts_code:{}'.format(ts_code))
# logging.info(e.args)
time.sleep(30)
else:
return df
logging.info('fetch failed: get_fina_indicator, ts_code:{}'.format(ts_code))
return
def get_pro_bar(self, ts_code, start_date='', end_date='', freq='D', ma=[50, 150, 200], factors=[]):
for _ in range(300):
try:
df = ts.pro_bar(
ts_code=ts_code,
start_date=start_date,
end_date=end_date,
adj='qfq',
freq=freq,
ma=ma,
factors=factors)
# logging.info("df:{}".format(df))
except Exception as e:
# logging.info('fetch failed in get_pro_bar')
# logging.info(e.args)
time.sleep(30)
else:
return df
logging.info('fetch failed: get_pro_bar, ts_code:{}'.format(ts_code))
return
def convert_date_to_int(self, dt):
t = dt.year * 10000 + dt.month * 100 + dt.day
# t *= 1000000
return t
def vcp_search(self, timeframe=260, volTf=50, baseLowerLimit=0.6, pivotLen=5, pivotLimit=0.1):
df = self.get_stock_basic()
# IPO Date Filter
# Filter out stocks listed within the last 52 weeks
st_count_before_filter = len(df)
now = datetime.now()
deadline = self.convert_date_to_int(now - timedelta(timeframe))
df.list_date = df.list_date.replace('.', '').astype(int)
df = df[(df.list_date < deadline)]
logging.info("IPO Date Filter, before:{}, after:{}".format(
st_count_before_filter, len(df)))
# logging.info("df:\n{}\n".format(df))
# Market Cap Filter
# Filter out stocks whose total_mv is below 1e2 million (100M CNY) or above 1e4 million (1B CNY); range to be confirmed
st_count_before_filter = len(df)
trade_date = now.strftime('%Y%m%d')
if now.hour < 15:
trade_date = (now - timedelta(1)).strftime('%Y%m%d')
logging.info("[FAIL] 我整的昨天的数据哈:{}".format(trade_date))
daily_basic_df = self.get_daily_basic(trade_date=trade_date)
daily_basic_df.drop(daily_basic_df[daily_basic_df.close < 10].index, inplace=True)
daily_basic_df.drop(daily_basic_df[(daily_basic_df.total_mv > 100 *
100) & (daily_basic_df.total_mv < 10000 * 100)].index, inplace=True)
df = pd.merge(df, daily_basic_df, on=['ts_code'], how='inner')
logging.info("Market Cap Filter, before:{}, after:{}".format(
st_count_before_filter, len(df)))
# logging.info("df:\n{}\n".format(df))
# i = 0
# for ts_code in df.ts_code:
# df = df.drop(df[(df.ts_code == ts_code)].index)
# df = df[fina_indicator_df[(fina_indicator_df.q_roe > 17)].ts_code]
# logging.info("ROE Filter, df:\n{}".format(
# fina_indicator_df[(fina_indicator_df.q_roe > 17)]))
# i = i+1
# if i > 10:
# break
# logging.info("df:\n{}".format(df))
# return
# Sales QoQ
# Create the thread pool and fill the work queue
threads = self.batch()
self.queueLock.acquire()
for idx, ts_code in enumerate(df['ts_code']):
# if int(ts_code[0:6]) < 590:
# logging.info('skip ts_code:{}'.format(ts_code))
# continue
self.workQueue.put(["self.vcp_analyse",
{"ts_code": ts_code,
"timeframe": timeframe,
"volTf": volTf,
"baseLowerLimit": baseLowerLimit,
"pivotLen": pivotLen,
"pivotLimit": pivotLimit}])
if idx % 120 == 100:
self.queueLock.release()
time.sleep(25)
self.queueLock.acquire()
self.queueLock.release()
# Wait for the queue to drain
while not self.workQueue.empty():
pass
# Tell the worker threads it is time to exit
global exitFlag
exitFlag = 1
# Wait for all threads to finish
for t in threads:
t.join()
logging.info("退出主线程")
f.close()
class myThread (threading.Thread):
def __init__(self, that, threadID, name, q):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.q = q
self.that = that
def run(self):
logging.info("开启线程:" + self.name)
self.that.process_data(self.name, self.q)
logging.info("退出线程:" + self.name)
def process_data(self, threadName, q):
while not exitFlag:
if not q.empty():
self.queueLock.acquire()
if not q.empty():
data = q.get()
# logging.info("data: {}".format(data))
target = data[0]
kwargs = data[1]
self.queueLock.release()
logging.info("%s processing %s, queue size: %s" %
(threadName, target, q.qsize()))
eval(target)(**kwargs)
else:
self.queueLock.release()
# logging.info("%s waiting, queue size: %s" %
# (threadName, q.qsize()))
# Create new worker threads
def batch(self, ):
threadList = ["Thread-0", "Thread-1", "Thread-2", "Thread-3"]
# threadList = ["Thread-0", "Thread-1", "Thread-2", "Thread-3", "Thread-4",
# "Thread-5", "Thread-6", "Thread-7", "Thread-8", "Thread-9"]
threads = []
threadID = 1
for tName in threadList:
thread = self.myThread(self, threadID, tName, self.workQueue)
thread.start()
threads.append(thread)
threadID += 1
return threads
def calcSlope(self, src, len):
sumX = 0.0
sumY = 0.0
sumXSqr = 0.0
sumXY = 0.0
for i in range(0, len - 1):
val = src[i]
per = i + 1.0
sumX = sumX + per
sumY = sumY + val
sumXSqr = sumXSqr + per * per
sumXY = sumXY + val * per
slope = (len * sumXY - sumX * sumY) / (len * sumXSqr - sumX * sumX)
average = sumY / len
intercept = average - slope * sumX / len + slope
return slope, average, intercept
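# Note: calcSlope() is an ordinary least-squares fit of the series against its bar index;
# the sign of the slope is what vcp_analyse() uses to test whether the volume moving
# average is trending down. A roughly equivalent check (assuming numpy is acceptable here)
# would be: slope = np.polyfit(np.arange(1, series.size + 1), series, 1)[0]. Also note the
# loop above only iterates over the first len - 1 samples of the input.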
def vcp_analyse(
self,
ts_code,
df=pd.DataFrame(),
timeframe=252,
volTf=50,
baseLowerLimit=0.6,
pivotLen=5,
pivotLimit=0.1):
now = datetime.now()
if len(df) == 0:
end_date = now.strftime('%Y%m%d')
start_date = (now - timedelta(timeframe * 2)).strftime('%Y%m%d')
df = self.get_pro_bar(ts_code, start_date, end_date)
if df is None:
logging.info("[FAIL] ts_code:{} is None".format(ts_code))
return
if df['close'][0] < 10:
logging.error("[FAIL] ts_code:{} price lower then ¥10".format(ts_code))
return
# ROE Filter
# TODO >17
last_fina_date = (now - timedelta(180)).strftime('%Y%m%d')
fina_indicator_df = self.get_fina_indicator(ts_code, last_fina_date)
fina_indicator_df = fina_indicator_df.head(1)
# logging.info("fina_indicator_df:{}\n".format(fina_indicator_df))
if fina_indicator_df.q_roe[0] <= 5:
# logging.info("[FAIL] ts_code:{} roe lower then 5".format(ts_code))
return
if len(df) > timeframe:
df = df.head(timeframe)
if not self.stage2(ts_code, df):
return
# 1. Latest close is below the period high, but the pullback must not be too deep
close = df['close'][0]
highest = df['close'].max()
# lowest = df['close'].min()
# logging.info("ts_code:{}, highest:{}, close:{}".format(ts_code, highest, close))
nearHigh = close < highest and close > highest * baseLowerLimit
if not nearHigh:
logging.info("[FAIL] ts_code:{} is in stage2, not near w52Highest".format(ts_code))
return
# 2. Average trading volume is declining
vma = df['vol'].sort_index(ascending=False).rolling(
window=volTf).mean().tail(volTf).sort_index(ascending=True)
# logging.info("ts_code:{}, vma:{}, close:{}".format(ts_code, vma, close))
if vma.size < volTf:
logging.warn("ts_code:{}, vma.size:{}, volTf:{}".format(
ts_code, vma.size, volTf))
return
(volSlope, average, intercept) = self.calcSlope(vma, volTf)
volDecreasing = volSlope < 0
if not volDecreasing:
logging.info(
"[FAIL] ts_code:{} is in stage2, vol not decreasing, volSlope:{:0.2f}, average:{:0.2f}, intercept:{:0.2f}".format(
ts_code,
volSlope,
average,
intercept))
return
# 3.Pivot Quality
pivotHighPrice = df['high'].head(pivotLen).max()
pivotLowPrice = df['low'].head(pivotLen).min()
pivotWidth = (pivotHighPrice - pivotLowPrice) / close
pivotStartHP = df['high'][pivotLen - 1]
isPivot = pivotWidth < pivotLimit and pivotHighPrice <= pivotStartHP * 1.05
if not isPivot:
logging.info(
"[FAIL] ts_code:{} is in stage2, not isPivot, pivotWidth:{}, pivotHighPrice:{}, pivotStartHP:{}" .format(
ts_code,
pivotWidth,
pivotHighPrice,
pivotStartHP))
return
# 4. Ensure volume is below average
volDryUp = True
for i in range(0, pivotLen - 1):
volDryUp = volDryUp and df['vol'][i] < vma[i]
logging.info(
"[FAIL] ts_code:{} volDryUp:{}, vol:{}, vma:{}".format(
ts_code, volDryUp, df['vol'][i], vma[i]))
if not volDryUp:
logging.info("[FAIL] ts_code:{} is in stage2, not volDryUp".format(ts_code))
return
f.write("{}\n".format(ts_code))
logging.info("[SUCCESS] ts_code:{} is in stage2, vis volDryUp:{}".format(ts_code, volDryUp))
code_list.append(ts_code)
return volDryUp
def stage2(
self,
ts_code,
df=pd.DataFrame(),
timeframe=252,
volTf=50,
baseLowerLimit=0.6,
pivotLen=5,
pivotLimit=0.1):
if len(df) == 0:
now = datetime.now()
end_date = now.strftime('%Y%m%d')
start_date = (now - timedelta(timeframe * 2)).strftime('%Y%m%d')
df = self.get_pro_bar(ts_code, start_date, end_date)
if len(df) > timeframe:
df = df.head(timeframe)
# Trading history too short
if len(df) < timeframe:
logging.debug("[FAIL] ts_code:{} trading history too short, len:{}\n".format(ts_code, len(df)))
return False
close = df['close'][0]
import collections
import ctypes
import functools
import os.path
import subprocess
from six import iteritems, itervalues, integer_types, PY2
from six.moves import UserDict
from .lexer import AssemblyLexer
from .parser import AssemblyParser
from .ast import FileNode, LabelNode, GlobalDirectiveNode, FileDirectiveNode, SectionDirectiveNode, DataSectionDirectiveNode, TextSectionDirectiveNode, SetDirectiveNode
from .ast import StringNode, AsciiNode, SpaceNode, AlignNode, ByteNode, ShortNode, WordNode, InstructionNode, SourceLocation, ExpressionNode
from ..cpu.instructions import EncodingContext, encoding_to_u32
from .. import cpu
from .. import mm
from ..cpu.coprocessor.math_copro import MathCoprocessorInstructionSet # noqa - it's not unused, SIS instruction may need it but that's hidden from flake
from ..mm import PAGE_SIZE, UINT32_FMT
from ..mm.binary import SectionTypes, SectionFlags, RelocFlags
from ..util import align, LoggingCapable, str2bytes, bytes2str
from ..errors import ConflictingNamesError, UnknownInstructionError
from functools import partial
from collections import OrderedDict
align_to_next_page = functools.partial(align, PAGE_SIZE)
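# align_to_next_page(addr) is a partial application of util.align with PAGE_SIZE pre-bound;
# it rounds an address or size up to the next page boundary when laying out sections.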
if PY2:
def decode_string(s):
return s.decode('string_escape')
else:
def decode_string(s):
return str2bytes(s).decode('unicode_escape')
class Section(object):
__slots__ = ('name', 'type', 'flags', 'content', 'base', 'ptr')
def __init__(self, s_name, s_type = SectionTypes.PROGBITS, s_flags = None):
super(Section, self).__init__()
self.name = s_name
self.type = s_type
self.flags = SectionFlags.from_int(0) if s_flags is None else SectionFlags.from_string(s_flags)
self.content = []
self.base = 0
self.ptr = 0
def __getattr__(self, name):
if name == 'data_size':
return self.ptr - self.base
if name == 'file_size':
if self.flags.bss is True:
return 0
return self.data_size
def __repr__(self):
return '<Section: name=%s, type=%s, flags=%s, base=%s, ptr=%s, data_size=%s, file_size=%s>' % (self.name, self.type, self.flags.to_string(), UINT32_FMT(self.base) if self.base is not None else '', UINT32_FMT(self.ptr), self.data_size, self.file_size)
class SymtabSection(Section):
def __init__(self):
super(SymtabSection, self).__init__('.symtab', s_type = SectionTypes.SYMBOLS)
class RelocSection(Section):
def __init__(self):
super(RelocSection, self).__init__('.reloc', s_type = SectionTypes.RELOC)
class Label(object):
__slots__ = ('name', 'section', 'location', 'globally_visible')
def __init__(self, name, section, location):
super(Label, self).__init__()
self.name = name
self.section = section
self.location = location
self.globally_visible = False
def __repr__(self):
return '<label %s:%s>' % (self.section.name, self.name)
class Slot(object):
"""
Base class of all items the sections can contain.
"""
__slots__ = ('ctx', 'size', 'value', 'refers_to', 'section', 'section_ptr', 'location', 'labels')
def __init__(self, ctx, size = None, value = None, section = None, section_ptr = None, location = None, labels = None):
self.ctx = ctx
if size is not None:
self.size = size
self.value = value
self.refers_to = None
self.section = section
self.section_ptr = section_ptr
self.location = location.copy() if location is not None else None
self.labels = labels or []
def __repr__(self):
d = OrderedDict()
d['size'] = str(self.size)
d['section'] = self.section.name
if self.refers_to is not None:
d['refers_to'] = self.refers_to
if self.value is not None:
d['value'] = self.value
if self.labels:
d['labels'] = ', '.join(['%s:%s' % (l.section.name, l.name) for l in self.labels])
return '<%s: %s>' % (self.__class__.__name__, ', '.join(['%s=%s' % (k, v) for k, v in iteritems(d)]))
def place_in_section(self, section, sections, references):
pass
def resolve_reference(self, section, sections, references):
raise NotImplementedError()
def do_finalize_value(self):
pass
def finalize_value(self):
assert self.section is not None
if self.section.flags.bss is True:
if self.value is not None and any((b != 0 for b in self.value)):
self.ctx.WARN('%s: Slot has non-zero initial value that will be lost since it is located in BSS section', self.location)
self.value = None
return
self.do_finalize_value()
class RelocSlot(object):
__slots__ = ('name', 'flags', 'patch_section', 'patch_address', 'patch_offset', 'patch_size', 'patch_add', 'size')
def __init__(self, name, flags = None, patch_section = None, patch_address = None, patch_offset = None, patch_size = None, patch_add = None):
super(RelocSlot, self).__init__()
self.name = name
self.flags = flags or RelocFlags.create()
self.patch_section = patch_section
self.patch_address = patch_address
self.patch_offset = patch_offset
self.patch_size = patch_size
self.patch_add = patch_add
self.size = 0
def __repr__(self):
return '<RelocSlot: name=%s, flags=%s, section=%s, address=%s, offset=%s, size=%s, add=%s>' % (self.name, self.flags.to_string(), self.patch_section, UINT32_FMT(self.patch_address), self.patch_offset, self.patch_size, self.patch_add)
class Reference(object):
__slots__ = ('refers_to',)
def __init__(self, refers_to):
self.refers_to = refers_to
def __repr__(self):
return '<%s: refers to "%s">' % (self.__class__.__name__, self.refers_to)
class NumberPayloadSlot(Slot):
def unpack_value(self):
raise NotImplementedError()
def __init__(self, *args, **kwargs):
super(NumberPayloadSlot, self).__init__(*args, **kwargs)
assert isinstance(self.value, ExpressionNode), repr(self.value)
if self.value.is_int():
self.unpack_value()
else:
self.refers_to = Reference(self.value)
self.value = None
def resolve_reference(self, section, sections, references):
assert self.refers_to is not None
reference, self.refers_to = self.refers_to.refers_to, None
if reference.is_str():
re = RelocSlot(reference.value, flags = RelocFlags.create(relative = False, inst_aligned = False),
patch_section = section, patch_address = section.ptr, patch_size = self.size * 8, patch_offset = 0)
else:
lh, op, rh = reference.value
if rh.is_str():
lh, rh = rh, lh
assert lh.is_str()
assert rh.is_int()
assert op == '+'
re = RelocSlot(lh.value, flags = RelocFlags.create(relative = False, inst_aligned = False),
patch_section = section, patch_address = section.ptr, patch_size = self.size * 8, patch_offset = 0, patch_add = rh.value)
sections['.reloc'].content.append(re)
self.value = [0x79] * self.size
class ByteSlot(NumberPayloadSlot):
symbol_type = mm.binary.SymbolDataTypes.CHAR
size = 1
def unpack_value(self):
self.value = [(self.value.value & 0xFF) or 0]
class ShortSlot(NumberPayloadSlot):
symbol_type = mm.binary.SymbolDataTypes.SHORT
size = 2
def unpack_value(self):
v = self.value.value
self.value = [v & 0xFF, (v >> 8) & 0xFF]
class WordSlot(NumberPayloadSlot):
symbol_type = mm.binary.SymbolDataTypes.INT
size = 4
def unpack_value(self):
v = self.value.value
self.value = [v & 0xFF, (v >> 8) & 0xFF, (v >> 16) & 0xFF, (v >> 24) & 0xFF]
class CharSlot(NumberPayloadSlot):
symbol_type = mm.binary.SymbolDataTypes.CHAR
size = 1
def unpack_value(self):
self.value = [ord(self.value or '\0') & 0xFF]
class SpaceSlot(Slot):
symbol_type = mm.binary.SymbolDataTypes.ASCII
def __init__(self, *args, **kwargs):
super(SpaceSlot, self).__init__(*args, **kwargs)
self.size = self.value
self.value = None
def finalize_value(self):
if self.section.flags.bss is False:
self.value = [0 for _ in range(0, self.size)]
class AsciiSlot(Slot):
symbol_type = mm.binary.SymbolDataTypes.ASCII
def __init__(self, *args, **kwargs):
super(AsciiSlot, self).__init__(*args, **kwargs)
v = decode_string(self.value) or ''
self.value = [ord(c) & 0xFF for c in v]
self.size = len(self.value)
class StringSlot(Slot):
symbol_type = mm.binary.SymbolDataTypes.STRING
def __init__(self, *args, **kwargs):
super(StringSlot, self).__init__(*args, **kwargs)
v = decode_string(self.value) or ''
self.value = [ord(c) & 0xFF for c in v] + [0]
self.size = len(self.value)
class BytesSlot(Slot):
symbol_type = mm.binary.SymbolDataTypes.ASCII
def __init__(self, *args, **kwargs):
super(BytesSlot, self).__init__(*args, **kwargs)
v = self.value or ''
self.value = [b & 0xFF for b in v]
self.size = len(self.value)
class AlignSlot(Slot):
size = 0
def place_in_section(self, section, sections, references):
assert isinstance(self.value, ExpressionNode)
assert self.value.is_int()
aligned_ptr = align(self.value.value, section.ptr)
self.size = aligned_ptr - section.ptr
self.value = [0] * self.size
class FunctionSlot(Slot):
symbol_type = mm.binary.SymbolDataTypes.FUNCTION
size = 0
class InstrSlot(Slot):
symbol_type = mm.binary.SymbolDataTypes.FUNCTION
size = 4
def resolve_reference(self, section, sections, references):
assert self.refers_to is not None
instr = self.value
reference, instr.refers_to = instr.refers_to, None
reloc = RelocSlot(reference.operand, flags = RelocFlags.create(relative = instr.desc.relative_address, inst_aligned = instr.desc.inst_aligned), patch_section = section, patch_address = section.ptr)
instr.fill_reloc_slot(instr, reloc)
sections['.reloc'].content.append(reloc)
def place_in_section(self, section, sections, references):
# labels in data sections were handled by their parenting slots,
# for labels in text sections we must create necessary slots
for label in self.labels:
slot = FunctionSlot(ctx = self.ctx, labels = [label], section = section, section_ptr = section.ptr, location = label.location)
sections['.symtab'].content.append(slot)
def finalize_value(self):
v = encoding_to_u32(self.value)
self.value = [v & 0xFF, (v >> 8) & 0xFF, (v >> 16) & 0xFF, (v >> 24) & 0xFF]
class SymbolList(UserDict):
def __init__(self, ctx):
UserDict.__init__(self)
self._ctx = ctx
def touch(self, name, loc):
if name in self.data:
raise self._ctx.get_error(ConflictingNamesError, name = name, prev_location = self.data[name])
self.data[name] = loc
def sizeof(o):
if isinstance(o, RelocSlot):
return 0
if isinstance(o, Slot):
return o.size
if isinstance(o, ctypes.LittleEndianStructure):
return ctypes.sizeof(o)
if isinstance(o, integer_types):
return 1
return ctypes.sizeof(o)
class AssemblerProcess(LoggingCapable, object):
def __init__(self, filepath, base_address = None, writable_sections = False, defines = None, includes = None, logger = None):
super(AssemblerProcess, self).__init__(logger)
self._filepath = filepath
self.base_address = base_address or 0x00000000
self.defines = defines or {}
self.includes = includes or []
self.preprocessed = None
self.ast_root = None
self.sections_pass1 = None
self.labels = None
self.global_symbols = None
self.sections_pass2 = None
self.includes.insert(0, os.getcwd())
def get_error(self, cls, location = None, **kwargs):
kwargs['location'] = location.copy()
return cls(**kwargs)
def preprocess(self):
includes = ['-I %s' % i for i in self.includes]
defines = ['-D%s' % i for i in self.defines]
cmd = '/usr/bin/cpp %s %s %s' % (' '.join(includes), ' '.join(defines), self._filepath)
self.preprocessed = bytes2str(subprocess.check_output(cmd, shell = True))
def parse(self):
assert self.preprocessed is not None
self.ast_root = FileNode(self._filepath)
lexer = AssemblyLexer()
parser = AssemblyParser(lexer, logger = self._logger)
parser.parse(self.preprocessed, self.ast_root)
def pass1(self):
"""
Pass #1 transforms list of AST nodes into a multiple lists of Slots,
grouped by sections. It preserves necessary information for later
resolution of slots referencing each other. Also, list of known
labels is created.
"""
assert self.ast_root is not None
class Context(object):
curr_section = None
labels = []
instruction_set = cpu.instructions.DuckyInstructionSet
D = self.DEBUG
D('Pass #1')
sections = collections.OrderedDict([
('.text', Section('.text', s_flags = 'lrx')),
The URI that can be used to request the next set of paged results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Task]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TaskListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class TaskRun(ProxyResource):
"""The task run that has the ARM resource and properties.
The task run will have the information of request and result of a run.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.SystemData
:param identity: Identity for the resource.
:type identity: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.IdentityProperties
:param location: The location of the resource.
:type location: str
:ivar provisioning_state: The provisioning state of this task run. Possible values include:
"Creating", "Updating", "Deleting", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2019_06_01_preview.models.ProvisioningState
:param run_request: The request (parameters) for the run.
:type run_request: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.RunRequest
:ivar run_result: The result of this task run.
:vartype run_result: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.Run
:param force_update_tag: How the run should be forced to rerun even if the run request
configuration has not changed.
:type force_update_tag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
'run_result': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'identity': {'key': 'identity', 'type': 'IdentityProperties'},
'location': {'key': 'location', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'run_request': {'key': 'properties.runRequest', 'type': 'RunRequest'},
'run_result': {'key': 'properties.runResult', 'type': 'Run'},
'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TaskRun, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.location = kwargs.get('location', None)
self.provisioning_state = None
self.run_request = kwargs.get('run_request', None)
self.run_result = None
self.force_update_tag = kwargs.get('force_update_tag', None)
class TaskRunListResult(msrest.serialization.Model):
"""The collection of task runs.
:param value: The collection value.
:type value: list[~azure.mgmt.containerregistry.v2019_06_01_preview.models.TaskRun]
:param next_link: The URI that can be used to request the next set of paged results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[TaskRun]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TaskRunListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class TaskRunRequest(RunRequest):
"""The parameters for a task run request.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of the run request.Constant filled by server.
:type type: str
:param is_archive_enabled: The value that indicates whether archiving is enabled for the run or
not.
:type is_archive_enabled: bool
:param agent_pool_name: The dedicated agent pool for the run.
:type agent_pool_name: str
:param log_template: The template that describes the repository and tag information for run log
artifact.
:type log_template: str
:param task_id: Required. The resource ID of task against which run has to be queued.
:type task_id: str
:param override_task_step_properties: Set of overridable parameters that can be passed when
running a Task.
:type override_task_step_properties:
~azure.mgmt.containerregistry.v2019_06_01_preview.models.OverrideTaskStepProperties
"""
_validation = {
'type': {'required': True},
'task_id': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'is_archive_enabled': {'key': 'isArchiveEnabled', 'type': 'bool'},
'agent_pool_name': {'key': 'agentPoolName', 'type': 'str'},
'log_template': {'key': 'logTemplate', 'type': 'str'},
'task_id': {'key': 'taskId', 'type': 'str'},
'override_task_step_properties': {'key': 'overrideTaskStepProperties', 'type': 'OverrideTaskStepProperties'},
}
def __init__(
self,
**kwargs
):
super(TaskRunRequest, self).__init__(**kwargs)
self.type = 'TaskRunRequest' # type: str
self.task_id = kwargs['task_id']
self.override_task_step_properties = kwargs.get('override_task_step_properties', None)
class TaskRunUpdateParameters(msrest.serialization.Model):
"""The parameters for updating a task run.
:param identity: Identity for the resource.
:type identity: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.IdentityProperties
:param location: The location of the resource.
:type location: str
:param tags: A set of tags. The ARM resource tags.
:type tags: dict[str, str]
:param run_request: The request (parameters) for the new run.
:type run_request: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.RunRequest
:param force_update_tag: How the run should be forced to rerun even if the run request
configuration has not changed.
:type force_update_tag: str
"""
_attribute_map = {
'identity': {'key': 'identity', 'type': 'IdentityProperties'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'run_request': {'key': 'properties.runRequest', 'type': 'RunRequest'},
'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TaskRunUpdateParameters, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
self.run_request = kwargs.get('run_request', None)
self.force_update_tag = kwargs.get('force_update_tag', None)
class TaskUpdateParameters(msrest.serialization.Model):
"""The parameters for updating a task.
:param identity: Identity for the resource.
:type identity: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.IdentityProperties
:param tags: A set of tags. The ARM resource tags.
:type tags: dict[str, str]
:param status: The current status of task. Possible values include: "Disabled", "Enabled".
:type status: str or ~azure.mgmt.containerregistry.v2019_06_01_preview.models.TaskStatus
:param platform: The platform properties against which the run has to happen.
:type platform:
~azure.mgmt.containerregistry.v2019_06_01_preview.models.PlatformUpdateParameters
:param agent_configuration: The machine configuration of the run agent.
:type agent_configuration:
~azure.mgmt.containerregistry.v2019_06_01_preview.models.AgentProperties
:param agent_pool_name: The dedicated agent pool for the task.
:type agent_pool_name: str
:param timeout: Run timeout in seconds.
:type timeout: int
:param step: The properties for updating a task step.
:type step: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.TaskStepUpdateParameters
:param trigger: The properties for updating trigger properties.
:type trigger: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.TriggerUpdateParameters
:param credentials: The parameters that describes a set of credentials that will be used when
this run is invoked.
:type credentials: ~azure.mgmt.containerregistry.v2019_06_01_preview.models.Credentials
:param log_template: The template that describes the repository and tag information for run log
artifact.
:type log_template: str
"""
_attribute_map = {
'identity': {'key': 'identity', 'type': 'IdentityProperties'},
'tags': {'key': 'tags', 'type': '{str}'},
'status': {'key': 'properties.status', 'type': 'str'},
'platform': {'key': 'properties.platform', 'type': 'PlatformUpdateParameters'},
'agent_configuration': {'key': 'properties.agentConfiguration', 'type': 'AgentProperties'},
'agent_pool_name': {'key': 'properties.agentPoolName', 'type': 'str'},
'timeout': {'key': 'properties.timeout', 'type': 'int'},
'step': {'key': 'properties.step', 'type': 'TaskStepUpdateParameters'},
'trigger': {'key': 'properties.trigger', 'type': 'TriggerUpdateParameters'},
'credentials': {'key': 'properties.credentials', 'type': 'Credentials'},
'log_template': {'key': 'properties.logTemplate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TaskUpdateParameters, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.tags = kwargs.get('tags', None)
self.status = kwargs.get('status', None)
self.platform = kwargs.get('platform', None)
self.agent_configuration = kwargs.get('agent_configuration', None)
self.agent_pool_name = kwargs.get('agent_pool_name', None)
self.timeout = kwargs.get('timeout', None)
self.step = kwargs.get('step', None)
self.trigger = kwargs.get('trigger', None)
self.credentials = kwargs.get('credentials', None)
self.log_template = kwargs.get('log_template', None)
class TimerTrigger(msrest.serialization.Model):
"""The properties of a timer trigger.
All required parameters must be populated in order to send to Azure.
:param schedule: Required. The CRON expression for the task schedule.
:type schedule: str
:param status: The current status of trigger. Possible values include: "Disabled", "Enabled".
:type status: str or ~azure.mgmt.containerregistry.v2019_06_01_preview.models.TriggerStatus
:param name: Required. The name of the trigger.
:type name: str
"""
_validation = {
'schedule': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'schedule': {'key': 'schedule', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TimerTrigger, self).__init__(**kwargs)
self.schedule = kwargs['schedule']
self.status = kwargs.get('status', None)
self.name = kwargs['name']
class TimerTriggerDescriptor(msrest.serialization.Model):
"""TimerTriggerDescriptor.
:param timer_trigger_name: The timer trigger name that caused the run.
:type timer_trigger_name: str
:param schedule_occurrence: The occurrence that triggered the run.
:type schedule_occurrence: str
"""
_attribute_map = {
'timer_trigger_name': {'key': 'timerTriggerName', 'type': 'str'},
'schedule_occurrence': {'key': 'scheduleOccurrence', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TimerTriggerDescriptor, self).__init__(**kwargs)
self.timer_trigger_name = kwargs.get('timer_trigger_name', None)
self.schedule_occurrence = kwargs.get('schedule_occurrence', None)
class TimerTriggerUpdateParameters(msrest.serialization.Model):
"""The properties for updating a timer trigger.
All required parameters must be populated in order to send to Azure.
:param schedule: The CRON expression for the task schedule.
:type schedule: str
:param status: The current status of trigger. Possible values include: "Disabled", "Enabled".
:type status: str or ~azure.mgmt.containerregistry.v2019_06_01_preview.models.TriggerStatus
:param name: Required. The name of the trigger.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'schedule': {'key': 'schedule', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TimerTriggerUpdateParameters, self).__init__(**kwargs)
self.schedule = kwargs.get('schedule', None)
self.status = kwargs.get('status', None)
self.name = kwargs['name']
class TriggerProperties(msrest.serialization.Model):
"""The properties of a trigger.
:param timer_triggers: The collection of timer triggers.
:type timer_triggers:
list[~azure.mgmt.containerregistry.v2019_06_01_preview.models.TimerTrigger]
:param source_triggers: The collection of triggers based on source code repository.
:type source_triggers:
list[~azure.mgmt.containerregistry.v2019_06_01_preview.models.SourceTrigger]
:param base_image_trigger: The trigger based on base image dependencies.
:type base_image_trigger:
~azure.mgmt.containerregistry.v2019_06_01_preview.models.BaseImageTrigger
"""
= np.zeros((self.num_models, num_windows, self.get_caffe_output_size()))
num_mini_batches = int(np.ceil(np.float(num_windows)/self.caffe_batch_size))
for i in range(num_mini_batches):
batch_start = i*self.caffe_batch_size
batch_end = (i+1)*self.caffe_batch_size
if batch_end > num_windows:
batch_end_resize = num_windows
else:
batch_end_resize = batch_end
classify_start_time = time.time()
# Perform classification on the full mini batch size
for model_num in range(self.num_models):
result = self.get_scores_model(window_batch[batch_start:batch_end,:,:,:], model_num)
# Store only the valid scores
batch_scores[model_num, batch_start:batch_end_resize,:] = result[0:batch_end_resize-batch_start,:]
log.info('Classification of batch size {} took {} seconds.'.format(self.caffe_batch_size, (time.time() - classify_start_time)))
return batch_scores
def classify_batch(self, batch, batch_size):
class_names = np.empty((batch_size, self.num_models, self.top_n), dtype="S32")
class_vals = np.zeros((batch_size, self.num_models, self.top_n), dtype=np.float32)
scores = self.classify_batch_all_models(batch, batch_size)
for model_num in range(self.num_models):
# TODO: Can we improve data access?
class_names[:, model_num, :], class_vals[:, model_num, :] = classify_from_scores(scores[model_num,:,:], self.labels)
return class_names, class_vals
def read_labels(labels_file):
if not labels_file:
log.info('WARNING: No labels file provided. Results will be difficult to interpret.')
return None
labels = []
with open(labels_file) as infile:
for line in infile:
label = line.strip()
if label:
labels.append(label)
if len(labels) == 0:
raise ValueError("No labels found")
return labels
def classify_from_scores(scores, labels):
top_n = 5
if scores.shape[1] < top_n:
top_n = scores.shape[1]
indices = (-scores).argsort()[:, 0:top_n] # take top n results
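# Negating the scores makes argsort ascend on -score, i.e. descend on score, so the
# first top_n column indices per window are the highest-scoring classes.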
num_windows = scores.shape[0]
class_names = np.empty((num_windows, top_n), dtype="S32")
class_vals = np.zeros((num_windows, top_n), dtype=np.float32)
for i_window in range(num_windows):
for i_index in range(top_n):
index = indices[i_window, i_index]
label = labels[index]
class_names[i_window, i_index] = label
class_vals[i_window, i_index] = round(100.0*scores[i_window, index], 4)
return class_names, class_vals
def process_results(gdal_image, x0, y0, x_vals, y_vals,
window_sizes, image_name, item_date, sat_id, cat_id,
num_ensembles, caffemodels,
threshold, class_names, class_vals, threshold_dict, output_vectors_dir):
# Get the model names
model_names = []
for caffemodel in caffemodels:
model_names.append(get_model_name_from_path(caffemodel))
num_windows = class_names.shape[0]
tn = 5 if class_names.shape[2] >5 else class_names.shape[2]
for i_window in range(num_windows):
all_match = True
for i_ens in range(1, num_ensembles):
all_match = all_match and class_names[i_window, 0, 0] == class_names[i_window, i_ens, 0]
if not all_match:
log.info("Ensemble differs")
continue
top_cat = class_names[i_window, 0, 0]
avg = 0.0
for i_ens in range(num_ensembles):
avg += class_vals[i_window, i_ens, 0]
avg /= num_ensembles
log.info("Found match for category '%s' with avg score: %s" % (top_cat, avg))
if avg >= threshold_dict[top_cat.lower()]:
x = x_vals[i_window]
y = y_vals[i_window]
window_size = window_sizes[i_window]
top_n_geojson = []
for i_ens in range(num_ensembles):
ens_top_n = []
for i_n in range(tn):
top = [class_names[i_window, i_ens, i_n], float(class_vals[i_window, i_ens, i_n])]
ens_top_n.append(top)
top_n_geojson.append(ens_top_n)
geom = get_polygon_array(gdal_image, x0, y0, x, x + window_size, y, y + window_size)
geojson_item = generate_geojson_item(geom, top_n_geojson, model_names, image_name, item_date, sat_id, cat_id, caffemodels)
geojson_item = dict(geojson_item)
write_vector_file(geojson_item, os.path.join( output_vectors_dir, OUTPUT_VECTORS_FILE ) )
else:
log.info("Average of the scores is below threshold")
class Pyramid(object):
def __init__(self, max_window_size=DEFAULT_WIN_SIZE,
max_window_step=DEFAULT_STEP_SIZE,
min_window_size=DEFAULT_MIN_PYRAMID_SIZE,
window_scale_factor=DEFAULT_PYRAMID_SCALE_FACTOR,
window_sizes=None,
step_sizes=None):
"""
Constructor
@param max_window_size size of the largest window
@param max_window_step largest window step size
@param max_window_size size of the largest window
@param max_window_size size of the largest window
"""
# Specifying windows_sizes overrides other parameters
if not window_sizes is None:
assert isinstance(window_sizes, (list, tuple))
self.num_sizes = len(window_sizes)
self.window_sizes = np.zeros((self.num_sizes), dtype=np.int)
self.window_sizes[:] = window_sizes[:]
else:
val = np.log(float(min_window_size)/max_window_size)/np.log(1./window_scale_factor)
self.num_sizes = int(val)+1
self.window_sizes = np.zeros((self.num_sizes), dtype=np.int)
self.window_sizes[0] = max_window_size
for i in range(1, self.num_sizes):
self.window_sizes[i] = self.window_sizes[i-1]/window_scale_factor
if not step_sizes is None:
assert isinstance(step_sizes, (list, tuple))
self.step_sizes = np.zeros((self.num_sizes), dtype=np.int)
self.step_sizes[:] = step_sizes[:]
else:
self.step_sizes = np.zeros((self.num_sizes), dtype=np.int)
self.step_sizes[0] = max_window_step
for i in range(1, self.num_sizes):
#self.step_sizes[i] = self.step_sizes[i-1]/window_scale_factor
step = int(self.step_sizes[i-1]/window_scale_factor)
if step == 0:
# Smallest possible step size is one
step = 1
self.step_sizes[i] = step
self.current = 0
def calc_pyramiding(self, image_shape):
window_counter = 0
pyramid_histogram = {}
# Iterator over self
for win_size, win_step in self:
num_windows = num_sliding_windows(image_shape[1:], step_size=win_step, window_size=(win_size, win_size))
window_counter += num_windows
pyramid_histogram[(win_size,win_step)] = num_windows
num_windows = window_counter
return num_windows, pyramid_histogram
def get_window_histogram(self, image_shape):
_, pyr_hist = self.calc_pyramiding(image_shape)
return pyr_hist
def get_num_windows(self, image_shape):
num_windows, _ = self.calc_pyramiding(image_shape)
return num_windows
def __iter__(self):
return self
def next(self):
if self.current < self.num_sizes:
self.current += 1
return self.window_sizes[self.current-1], self.step_sizes[self.current-1]
else:
self.current = 0
raise StopIteration
def get_window_sizes(self):
return self.window_sizes
def get_step_sizes(self):
return self.step_sizes
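# Worked example (hypothetical parameter values): Pyramid(max_window_size=150,
# max_window_step=32, min_window_size=30, window_scale_factor=1.5) yields
# num_sizes = int(log(30/150) / log(1/1.5)) + 1 = 4 pyramid levels, with window
# sizes [150, 100, 66, 44] and step sizes [32, 21, 14, 9] after integer truncation.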
def num_sliding_windows(image_shape, step_size, window_size):
num_windows = ((image_shape[0]-window_size[0])/step_size+1)*((image_shape[1]-window_size[1])/step_size+1)
return num_windows
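# Example: a 1000x1000 tile scanned with 150-pixel windows and a 75-pixel step gives
# ((1000 - 150) / 75 + 1) ** 2 = 12 * 12 = 144 window positions (integer division).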
def get_polygon_array(gdal_image, x0, y0, pixel_left, pixel_right, pixel_top, pixel_bottom):
new_left, new_top, _ = gdal_image.tfRasterToGeo(x0+pixel_left, y0+pixel_top)
new_right, new_bottom, _ = gdal_image.tfRasterToGeo(x0+pixel_right, y0+pixel_bottom)
return [[[new_left, new_bottom], [new_left, new_top], [new_right, new_top], [new_right, new_bottom],
[new_left, new_bottom]]]
def convert_model_top_n_to_hash(top_n, model_names):
top_n_hash = {}
for i in range(len(top_n)):
model_name = model_names[i]
for entries in top_n[i]:
key = "_".join([model_name, strip_label_id(entries[0])]).replace(" ", "_") + '_dbl'
top_n_hash[key] = entries[1]
return top_n_hash
def strip_label_id(value):
return re.sub(LABEL_ID_REGEX, '', value)
def generate_global_top_n(local_top_ns):
return generate_global_top_items(local_top_ns)[:5]
def generate_global_top_items(local_top_ns, prefix=None):
global_top_hash = {}
for i in range(len(local_top_ns)):
for j in range(len(local_top_ns[i])):
label = strip_label_id(local_top_ns[i][j][0])
if prefix:
label = prefix + label
label = label.replace(" ", "_") + '_dbl'
if label in global_top_hash.keys():
value = global_top_hash[label]
if local_top_ns[i][j][1] > value:
global_top_hash[label] = local_top_ns[i][j][1]
else:
global_top_hash[label] = local_top_ns[i][j][1]
return sorted(global_top_hash.items(), key=lambda x: x[1], reverse=True)
def get_model_name_from_path(model_path):
basename = os.path.basename(model_path)
model_name, extension = os.path.splitext(basename)
return model_name
def generate_geojson_item(geometry, top_n, model_names, image_name, item_date, sat_id, cat_id, models):
global_top_n = generate_global_top_n(top_n)
item_type = global_top_n[0][0]
item_score = global_top_n[0][1]
models = [model.split(INPUT_DIR_PATH)[-1] for model in (models or [])] # remove parent directory
name = os.path.basename(image_name)
values = []
for entry in global_top_n:
values.append(entry[0])
comment = " ".join(values)
return {
'type': 'Feature',
'geometry': {
'type': 'Polygon',
'coordinates': geometry
},
'properties': {
'date': item_date,
'name': name,
'type': item_type,
'score': item_score,
'comment': comment,
'source': 'gbdx-caffe',
'sat_id': sat_id,
'cat_id': cat_id
}
}
def init_vector_dir(output_dir):
try:
# create dir if necessary
if not os.path.exists(output_dir):
log.info("Creating output directory: " + output_dir)
os.makedirs(output_dir)
except Exception, e:
error_msg = "Encountered exception: " + str(e)
log.error(error_msg)
def write_vector_file(geojson_item, output_path):
try:
out = None
if not os.path.exists(output_path):
out = open( output_path, "w")
out.write('{ "type": "FeatureCollection","features": [''')
else:
out = open(output_path, 'a')
out.write(",")
json.dump(geojson_item, out)
out.close()
except Exception, e:
error_msg = "Encountered exception: " + str(e)
log.error(error_msg)
def zip_vectors_file(output_dir, zip_path):
try:
zip_file = zipfile.ZipFile(zip_path, 'w', allowZip64=True)
for file_name in os.listdir(output_dir):
file_path = os.path.join(output_dir, file_name)
zip_file.write(file_path, file_name)
os.remove(file_path)
zip_file.close()
os.rmdir(output_dir)
except Exception, e:
error_msg = "Encountered exception: " + str(e)
log.error(error_msg)
raise RuntimeError(error_msg, e)
def single_tiles_slide_window_classify(gdal_image, image, x0,y0, args,
image_name, item_date, sat_id, cat_id,
mean_files, caffemodels, deploy_files, labels_files,
classifier, gpu_flag, threshold_dict, output_vectors_dir):
image_shape = image.shape
# Calculate the window and step sizes for the pyramiding
pyr = Pyramid(max_window_size=args.win_size,
max_window_step=args.step_size,
min_window_size=args.pyramid_min_size,
window_scale_factor=args.pyramid_scale_factor,
window_sizes=args.pyramid_window_sizes,
step_sizes=args.pyramid_step_sizes)
log.info("Pyramid window sizes: "+ str( pyr.get_window_sizes()) )
log.info("Pyramid step sizes: " + str( pyr.get_step_sizes()) )
log.info("Pyramid num_windows: " + str( pyr.get_num_windows(image_shape) ) )
log.info("Pyramid histogram: " + str( pyr.get_window_histogram(image_shape) ) )
pyr_window_batcher = PyramidWindowBatcher(
pyr,
classifier.get_caffe_num_channels(),
classifier.get_caffe_window_size(),
num_windows=pyr.get_num_windows(image_shape),
max_batch_size=4096,
mult_size=classifier.get_caffe_batch_size(),
transform=classifier.get_transformer())
for batch, x_vals, y_vals, window_sizes, batch_size in pyr_window_batcher.iter_batches(image):
# Perform ensemble classification on the batch
class_names, class_vals = classifier.classify_batch(batch, batch_size)
# Generate geojson files etc.
process_results(gdal_image, x0, y0,
x_vals[:batch_size], y_vals[:batch_size], window_sizes[:batch_size],
image_name, item_date, sat_id, cat_id,
classifier.get_num_models(), caffemodels, args.threshold,
class_names, class_vals, threshold_dict, output_vectors_dir)
def classify_broad_area_multi_process(gdal_image, image, x0,y0, args,
image_name, item_date, sat_id, cat_id,
mean_files, caffemodels, deploy_files, labels_files,
classifier, gpu_flag, threshold_dict):
a = datetime.datetime.now()
single_tiles_slide_window_classify(gdal_image, image, x0,y0, args,
image_name, item_date, sat_id, cat_id,
mean_files, caffemodels, deploy_files, labels_files,
classifier, gpu_flag, threshold_dict, args.output_vectors_dir)
b = datetime.datetime.now()
c = b - a
log.debug("Total Time to process: ["+str(c)+"]")
def parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument("--tif", "-t", help="The geotif to perform analysis on", required=True)
parser.add_argument("--imd", "-i", help="The imd metadata file")
parser.add_argument("--model_paths", "-m", help="The directory holding the model files", required=True, nargs="+")
parser.add_argument("--threshold", "-th",
help="The probability threshold above which an item will be written to the output",
type=float, default=DEFAULT_THRESHOLD)
parser.add_argument("--win_size", "-w", help="The window size in pixels", type=int, default=DEFAULT_WIN_SIZE)
parser.add_argument("--step_size", "-s", help="The step size in pixels", type=int, default=DEFAULT_STEP_SIZE)
parser.add_argument("--status_path", "-sp", help="The output path for the status file", default=DEFAULT_STATUS_JSON_PATH)
parser.add_argument("--pyramid_min_size", "-pms", help="The minimum pyramid size in pixels", type=int, default=DEFAULT_MIN_PYRAMID_SIZE)
parser.add_argument("--pyramid_scale_factor", "-psf", help="The scale factor to scale images in the pyramid", type=float, default=DEFAULT_PYRAMID_SCALE_FACTOR)
parser.add_argument("--bounding_box", "-bb", help="The sub-section of the geotif to analyze", default=None)
parser.add_argument("--gpu_flag", "-gf", help="The flag to set when using gpu", default=DEFAULT_GPU_FLAG)
parser.add_argument("--image_name", "-in", help="The name of image to include in name field of output vectors", default=None)
parser.add_argument("--pyramid_window_sizes", "-pws", help="Sliding window sizes", default=None)
parser.add_argument("--pyramid_step_sizes", "-pss", help="Sliding window step sizes", default=None)
parser.add_argument("--bands", "-b", help="Band", default=None)
parser.add_argument("--num_processes", "-np", help="Number of CPU processes", default=DEFAULT_NUM_PROCESSES)
parser.add_argument("--log_level", "-ll", | |
ret = inst.listvirus()
# If a callback function was given, invoke it
if isinstance(cb_fn, types.FunctionType):
cb_fn(inst.__module__, ret)
else:  # otherwise accumulate the virus list and return it later
vlist += ret
if self.verbose:
print ' [-] %s.listvirus() :' % inst.__module__
for vname in ret:
print ' - %s' % vname
except AttributeError:
continue
return vlist
# ---------------------------------------------------------------------
# scan(self, filename, *callback)
# Requests a malware scan from the plugin engines.
# Input  : filename - file or folder name to scan
#          callback - callback functions for scan output
# Return : 0  - success
#          1  - scan forcibly stopped with Ctrl+C
#          -1 - too many callback functions
# ---------------------------------------------------------------------
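# High-level flow of scan(): pop one path from the work list; directories push their
# children back onto the list; files (or archive members) are unarchived if needed,
# format-analysed, passed to the plugin engines via __scan_file(), and detections are
# counted, reported through the callback and, when enabled, queued for quarantine.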
def scan(self, filename, *callback):
import kernel
# 파일을 한 개씩 검사 요청할 경우 압축으로 인해 self.update_info 정보가 누적 된 경우
self.update_info = []
scan_callback_fn = None # 악성코드 검사 콜백 함수
move_master_file = False # 마스터 파일 격리 필요 여부
t_master_file = '' # 마스터 파일
# 악성코드 검사 결과
ret_value = {
'filename': '', # 파일 이름
'result': False, # 악성코드 발견 여부
'virus_name': '', # 발견된 악성코드 이름
'virus_id': -1, # 악성코드 ID
'engine_id': -1 # 악성코드를 발견한 플러그인 엔진 ID
}
try: # store the callback functions
scan_callback_fn = callback[0]
self.disinfect_callback_fn = callback[1]
self.update_callback_fn = callback[2]
self.quarantine_callback_fn = callback[3]
except IndexError:
pass
# 1. Register the file in the scan target list
file_info = k2file.FileStruct(filename)
file_scan_list = [file_info]
# Search sub-folders only on the first pass
is_sub_dir_scan = True
while len(file_scan_list):
try:
t_file_info = file_scan_list.pop(0) # take one file to scan
real_name = t_file_info.get_filename()
# If it is a folder, register only the files inside it as scan targets
if os.path.isdir(real_name):
# Strip any trailing os.sep first, to handle folders cleanly
real_name = os.path.abspath(real_name)
# Invoke the callback or build the scan return value
ret_value['result'] = False # a folder itself contains no malware
ret_value['filename'] = real_name # scanned file name
ret_value['file_struct'] = t_file_info # scanned file structure
ret_value['scan_state'] = kernel.NOT_FOUND # no malware
self.result['Folders'] += 1 # count folders
if self.options['opt_list']: # list-all option enabled?
self.call_scan_callback_fn(scan_callback_fn, ret_value)
if is_sub_dir_scan:
# Add the files inside the folder to the scan target list
flist = glob.glob1(real_name, '*')
tmp_flist = []
for rfname in flist:
rfname = os.path.join(real_name, rfname)
tmp_info = k2file.FileStruct(rfname)
tmp_flist.append(tmp_info)
file_scan_list = tmp_flist + file_scan_list
if self.options['opt_nor']: # no-recursion option set?
is_sub_dir_scan = False # do not scan sub-folders
elif os.path.isfile(real_name) or t_file_info.is_archive(): # is the target a file, or an archive to extract?
self.result['Files'] += 1 # count files
# Extract the file if it is an archive
if real_name == '': # no real file name means the entry came from an archive
ret, ret_fi = self.unarc(t_file_info)
if ret:
t_file_info = ret_fi # replace the file info with the extracted result
else: # an extraction error occurred
if ret_fi: # is there an error message?
# Invoke the callback or build the scan return value
ret_value['result'] = ret # whether malware was found
ret_value['engine_id'] = -1 # engine ID
ret_value['virus_name'] = ret_fi # replaced with the error message
ret_value['virus_id'] = -1 # malware ID
ret_value['scan_state'] = kernel.ERROR # malware scan state
ret_value['file_struct'] = t_file_info # scanned file structure
if self.options['opt_list']: # list-all option enabled?
self.call_scan_callback_fn(scan_callback_fn, ret_value)
continue
# Mode added to track down files that cause abnormal termination
if self.options['opt_debug']: # debugging mode?
ret_value['result'] = False # whether malware was found
ret_value['engine_id'] = -1 # engine ID
ret_value['virus_name'] = 'debug' # replaced with the error message
ret_value['virus_id'] = -1 # malware ID
ret_value['scan_state'] = kernel.ERROR # malware scan state
ret_value['file_struct'] = t_file_info # scanned file structure
self.call_scan_callback_fn(scan_callback_fn, ret_value)
# 2. Format analysis
ff = self.format(t_file_info)
# Scan the file for malware
ret, vname, mid, scan_state, eid = self.__scan_file(t_file_info, ff)
if self.options['opt_feature'] != 0xffffffff: # feature extraction for AI
self.__feature_file(t_file_info, ff, self.options['opt_feature'])
if ret: # count the number of detections
if scan_state == kernel.INFECTED:
self.result['Infected_files'] += 1
elif scan_state == kernel.SUSPECT:
self.result['Suspect_files'] += 1
elif scan_state == kernel.WARNING:
self.result['Warnings'] += 1
self.identified_virus.update([vname])
# Invoke the callback or build the scan return value
ret_value['result'] = ret # whether malware was found
ret_value['engine_id'] = eid # engine ID
ret_value['virus_name'] = vname # malware name
ret_value['virus_id'] = mid # malware ID
ret_value['scan_state'] = scan_state # malware scan state
ret_value['file_struct'] = t_file_info # scanned file structure
# Time to quarantine?
if move_master_file:
if t_master_file != t_file_info.get_master_filename():
# print 'move 2 :', t_master_file
self.__arcclose()
self.__quarantine_file(t_master_file)
move_master_file = False
if ret_value['result']: # malware found?
t_master_file = t_file_info.get_master_filename()
# Used to name the quarantined copy after the malware
if not self.quarantine_name.get(t_master_file, None):
self.quarantine_name[t_master_file] = ret_value['virus_name']
action_type = self.call_scan_callback_fn(scan_callback_fn, ret_value)
if self.options['opt_move'] or self.options['opt_copy']:
if t_file_info.get_additional_filename() == '':
# print 'move 1 :', t_master_file
self.__arcclose()
self.__quarantine_file(t_master_file)
move_master_file = False
else:
move_master_file = True
else: # the quarantine option takes precedence over the disinfect option
if action_type == k2const.K2_ACTION_QUIT: # quit?
return 0
d_ret = self.__disinfect_process(ret_value, action_type)
if d_ret: # disinfection succeeded?
# If the file still exists after disinfection (it was not deleted), it must be rescanned
if self.options['opt_dis'] or \
(action_type == k2const.K2_ACTION_DISINFECT or action_type == k2const.K2_ACTION_DELETE):
# only runs when a disinfect option is present
if os.path.exists(t_file_info.get_filename()):
t_file_info.set_modify(True)
file_scan_list = [t_file_info] + file_scan_list
else:
# Final disinfection handling for the archive file
self.__update_process(t_file_info)
else:
# Final disinfection handling for the archive file
self.__update_process(t_file_info)
# If the file has already been identified as malware,
# there is no need to extract it and inspect its contents.
# If it is an archive, add its contents to the scan target list
try:
arc_file_list = self.arclist(t_file_info, ff)
if len(arc_file_list):
file_scan_list = arc_file_list + file_scan_list
'''
# If exactly one entry is added and its name has the /<...> form, do not count it as a file.
if len(arc_file_list) == 1 and \
self.disable_path.search(arc_file_list[0].get_additional_filename()):
self.result['Files'] -= 1 # file count
'''
except zipfile.BadZipfile: # corrupt zip header
pass
# Print the scan result
if self.options['opt_list']: # list-all option enabled?
self.call_scan_callback_fn(scan_callback_fn, ret_value)
except KeyboardInterrupt:
return 1 # aborted via keyboard
except:
if k2const.K2DEBUG:
import traceback
print traceback.format_exc()
pass
self.__update_process(None, True) # final file cleanup
# Time to quarantine?
if move_master_file:
# print 'move 3 :', t_master_file
self.__arcclose()
self.__quarantine_file(t_master_file)
move_master_file = False
return 0 # scan finished normally
# ---------------------------------------------------------------------
# call_scan_callback_fn(self, a_scan_callback_fn, ret_value)
# When printing scan results, strip the /<...> markers from the output.
# Input  : a_scan_callback_fn - callback function
#          ret_value - data to output
# Return : return value of the scan callback function
# ---------------------------------------------------------------------
def call_scan_callback_fn(self, a_scan_callback_fn, ret_value):
if isinstance(a_scan_callback_fn, types.FunctionType):
fs = ret_value['file_struct'] # file info to output
rep_path = self.disable_path.sub('', fs.get_additional_filename())
fs.set_additional_filename(rep_path)
ret_value['file_struct'] = fs
return a_scan_callback_fn(ret_value)
# ---------------------------------------------------------------------
# __quarantine_file(self, filename)
# Moves a malware file to the quarantine folder.
# Input  : filename - name of the file to quarantine
# ---------------------------------------------------------------------
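# Behaviour sketch (derived from the code below, not part of the original comments):
# with opt_qname the quarantined copy goes under <infp_path>/<virus_name>/, with
# opt_qhash it is renamed to its SHA-256 hex digest, and name collisions become
# "name (1)", "name (2)", ...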
def __quarantine_file(self, filename):
if self.options['infp_path'] and (self.options['opt_move'] or self.options['opt_copy']):
is_success = False
try:
if self.options['opt_qname']:
x = self.quarantine_name.get(filename, None)
if x:
q_path = os.path.join(self.options['infp_path'], x)
self.quarantine_name.pop(filename)
else:
q_path = self.options['infp_path']
else:
q_path = self.options['infp_path']
if not os.path.exists(q_path):
os.makedirs(q_path) # create intermediate folders as needed
if self.options['opt_qhash']: # quarantine under the file's hash
t_filename = hashlib.sha256(open(filename, 'rb').read()).hexdigest()
else:
t_filename = os.path.split(filename)[-1]
# Check whether a file with the same name already exists in quarantine
fname = os.path.join(q_path, t_filename)
t_quarantine_fname = fname
count = 1
while True:
if os.path.exists(t_quarantine_fname):
t_quarantine_fname = '%s (%d)' % (fname, count) # generate a unique file name
count += 1
else:
break
if self.options['opt_move']:
shutil.move(filename, t_quarantine_fname) # move to quarantine
elif self.options['opt_copy']:
shutil.copy(filename, t_quarantine_fname) # copy to quarantine
q_type = k2const.K2_QUARANTINE_COPY
is_success = True
except (shutil.Error, OSError) as e:
pass
if isinstance(self.quarantine_callback_fn, types.FunctionType):
if self.options['opt_copy']:
q_type = k2const.K2_QUARANTINE_COPY
else:
q_type = k2const.K2_QUARANTINE_MOVE
self.quarantine_callback_fn(filename, is_success, q_type)
# ---------------------------------------------------------------------
# __update_process(self, file_struct, immediately_flag=False)
# Updates update_info.
# Input  : file_struct - file information structure
#          immediately_flag - whether to flush all update_info entries immediately
# ---------------------------------------------------------------------
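# Example of the accumulation logic below (illustrative, file names hypothetical):
# scanning a.zip that contains b.zip which contains c.txt appends entries with
# increasing nesting levels; when the next file returns to a shallower level,
# the deeper entries are folded back into their parent archive via
# __update_arc_file_struct() before the new entry is appended.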
def __update_process(self, file_struct, immediately_flag=False):
# Do not recompress archives immediately; inspect their internal structure first.
if immediately_flag is False:
if len(self.update_info) == 0: # if empty, just append
self.update_info.append(file_struct)
else:
n_file_info = file_struct # file info currently being processed
p_file_info = self.update_info[-1] # previous file info
# Same master file? (only valid when an archive engine is involved)
if p_file_info.get_master_filename() == n_file_info.get_master_filename() and \
n_file_info.get_archive_engine_name() is not None:
if p_file_info.get_level() <= n_file_info.get_level():
# Same master file and the nesting keeps getting deeper: keep accumulating
self.update_info.append(n_file_info)
else:
ret_file_info = p_file_info
while ret_file_info.get_level() != n_file_info.get_level():
# Same master file but the nesting level changed: time to update the inner files
ret_file_info = self.__update_arc_file_struct(ret_file_info)
self.update_info.append(ret_file_info) # append the resulting file
self.update_info.append(n_file_info)
bbox_inches='tight')
plt.close()
# Generate plot showing only elevations, and only the convnet results
width = 8
scale = width/(srtm.shape[1])
height = 1.05 * 3 * srtm.shape[0] * scale
fig, axes = plt.subplots(nrows=3, figsize=(width,height))
# Row 1: LiDAR DTM
axes[0].imshow(dtm, aspect='equal', cmap=elv_cmap, norm=elv_norm)
axes[0].set_title('a) LiDAR (resampled to SRTM resolution)', x=0, ha='left', size=9, pad=4)
axes[0].axis('off')
# Row 2: SRTM DSM
axes[1].imshow(srtm, aspect='equal', cmap=elv_cmap, norm=elv_norm)
axes[1].set_title('b) SRTM: RMSE={:.2f}m (compared to LiDAR)'.format(srtm_RMSE), x=0, ha='left', size=9, pad=4)
axes[1].axis('off')
# Row 3: Convnet - elevations
axes[2].imshow(cn_elv, aspect='equal', cmap=elv_cmap, norm=elv_norm)
axes[2].set_title('c) FCN-corrected SRTM: RMSE={:.2f}m (an improvement of {:.1f}% over raw SRTM)'.format(cn_RMSE, cn_RMSE_reduction), x=0, ha='left', size=9, pad=4)
axes[2].axis('off')
# Add a small north arrow indicator to each map
arrowprops = dict(facecolor='black', width=1.5, headwidth=4, headlength=4)
if zone == 'TSM16_ATG':
x, y, arrow_length = 0.07, 0.95, 0.04
xytext = (x-arrow_length, y)
else:
x, y, arrow_length = 0.97, 0.96, 0.07
xytext = (x, y-arrow_length)
for ax in axes.ravel():
ax.annotate('N', xy=(x,y), xycoords='axes fraction', xytext=xytext, textcoords='axes fraction', arrowprops=arrowprops, ha='center', va='center', fontsize=10)
# Add a simple scale bar to the DTM map, assuming that each grid cell is approx. 23m (SRTM at this latitude)
ncells_1km = 1000/23
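# 1000 m divided by the ~23 m SRTM cell size at this latitude gives roughly 43.5 cells for the 1 km bar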
offset = 8
adjust_y = 10 if zone == 'TSM16_ATG' else 0
axes[0].plot([offset, offset + ncells_1km], [offset + adjust_y, offset + adjust_y], color='black', linewidth=0.8)
axes[0].plot([offset, offset], [offset-1+adjust_y, offset+1+adjust_y], color='black', linewidth=0.8)
axes[0].plot([offset + ncells_1km, offset + ncells_1km], [offset-1+adjust_y, offset+1+adjust_y], color='black', linewidth=0.8)
axes[0].annotate('1km', xy=(offset + 0.5*ncells_1km, 1.3*offset + adjust_y), ha='center', va='top', size=8)
# Tighten layout
fig.tight_layout(pad=1)
# Adjust layout to fit two colourbars at the bottom
fig.subplots_adjust(bottom=0.07, wspace=0.05, hspace=0.07)
# Add colourbar for elevations
elv_cbar = fig.add_axes([0.06, 0.04, 0.88, 0.015]) # [left, bottom, width, height]
fig.colorbar(cm.ScalarMappable(cmap=elv_cmap, norm=elv_norm), cax=elv_cbar, orientation='horizontal').set_label(label='Elevation [m]', size=9)
elv_cbar.tick_params(labelsize=8)
# Save figure
fig.savefig('{}/maps_elv_{}_convnet.png'.format(folder_fig, zone), dpi=300, bbox_inches='tight')
plt.close()
###############################################################################
# 5. Compare overall residuals using boxplots #
###############################################################################
# Read in error residuals calculated earlier for the test dataset
residuals_dict_rf = pickle.load(open('{}/rf_residuals.p'.format(folder_results_rf), 'rb'))
residuals_dict_densenet = pickle.load(open('{}/densenet_residuals_models.p'.format(folder_results_densenet), 'rb'))
residuals_dict_convnet = pickle.load(open('{}/convnet_residuals_models.p'.format(folder_results_convnet), 'rb'))
# Check that initial residuals are the same (in each dictionary)
fig, axes = plt.subplots(figsize=(9,5))
axes.boxplot([d['test']['initial'] for d in [residuals_dict_rf, residuals_dict_densenet, residuals_dict_convnet]], showfliers=False)
# Get residuals to plot
res_initial = residuals_dict_convnet['test']['initial']
res_baseline = residuals_dict_convnet['test']['naive']
res_rf = residuals_dict_rf['test']['rf']
res_densenet = residuals_dict_densenet['test']['densenet_ensemble']
res_convnet = residuals_dict_convnet['test']['convnet_ensemble']
# Boxplots of error residuals
bp_data = [res_initial, res_baseline, res_rf, res_densenet, res_convnet]
bp_colours = ['dimgrey', 'darkgrey'] + [model_colours[m] for m in models]
bp_label_colours = ['dimgrey', 'darkgrey'] + [label_colours[m] for m in models]
# Add boxplots to the figure
fig, axes = plt.subplots(figsize=(9,5))
bps = axes.boxplot(bp_data, showfliers=False, medianprops={'color':'black'}, patch_artist=True)
for patch, colour in zip(bps['boxes'], bp_colours):
patch.set_facecolor(colour)
# Add axis ticks & labels
axes.set_xticks(range(1,6))
axes.set_xticklabels(['Initial','Baseline\ncorrection','RF\ncorrection','DCN\ncorrection','FCN\ncorrection'])
axes.set_ylabel('Residual error before/after correction [m]')
# Turn spines off
[axes.spines[edge].set_visible(False) for edge in ['top','right']]
# Add a horizontal line for zero error
axes.axhline(y=0, linestyle='dashed', color='black', linewidth=0.8, alpha=0.3)
# Add labels for medians & IQR
iqr_label_y = 0
for i, data in enumerate(bp_data):
median = np.median(data)
q75, q25 = np.percentile(data, [75 ,25])
iqr = q75 - q25
iqr_label_y = max(1.02*(q75 + 1.5*iqr), iqr_label_y)
axes.annotate('{:.3f}m'.format(median), xy=(i+1.28, median), ha='left', va='center')
axes.annotate('IQR = {:.3f}m'.format(iqr), xy=(i+1, iqr_label_y), color=bp_label_colours[i], fontweight='bold', ha='center', va='bottom')
fig.tight_layout()
fig.savefig('{}/residuals_bymodel_boxplots.png'.format(folder_fig), dpi=300)
plt.close()
###############################################################################
# 6. Assess correction efficacy by zone, land cover, FLHA class & HAND range #
###############################################################################
# Set up a dictionary to contain SRTM-LiDAR difference values corresponding to each Manaaki Whenua landclass type present in that LiDAR zone coverage
diff_by_landcover = {1:{'label':'Artificial\nsurfaces', 'data':[], 'colour':(78/255, 78/255, 78/255)},
2:{'label':'Bare/lightly-\nvegetated\nsurfaces', 'data':[], 'colour':(255/255, 235/255, 190/255)},
3:{'label':'Water\nbodies', 'data':[], 'colour':(0/255, 197/255, 255/255)},
4:{'label':'Cropland', 'data':[], 'colour':(255/255, 170/255, 0/255)},
5:{'label':'Grassland,\nSedgeland\n& Marshland', 'data':[], 'colour':(255/255, 255/255, 115/255)},
6:{'label':'Scrub &\nShrubland', 'data':[], 'colour':(137/255, 205/255, 102/255)},
7:{'label':'Forest', 'data':[], 'colour':(38/255, 115/255, 0/255)},
8:{'label':'Other', 'data':[], 'colour':'#FF0000'}}
# Initalise dictionary to hold test residuals, classed in different ways
res = {'initial':{'by_zone':{zone:[] for zone in ['All'] + test_zones}, 'by_lcdb':{i:[] for i in range(1,8)}, 'by_flha':{'flood':[], 'noflood':[]}, 'by_hand':{'hand_{}'.format(h):[] for h in range(1,6)}},
'rf':{'by_zone':{zone:[] for zone in ['All'] + test_zones}, 'by_lcdb':{i:[] for i in range(1,8)}, 'by_flha':{'flood':[], 'noflood':[]}, 'by_hand':{'hand_{}'.format(h):[] for h in range(1,6)}},
'dn':{'by_zone':{zone:[] for zone in ['All'] + test_zones}, 'by_lcdb':{i:[] for i in range(1,8)}, 'by_flha':{'flood':[], 'noflood':[]}, 'by_hand':{'hand_{}'.format(h):[] for h in range(1,6)}},
'cn':{'by_zone':{zone:[] for zone in ['All'] + test_zones}, 'by_lcdb':{i:[] for i in range(1,8)}, 'by_flha':{'flood':[], 'noflood':[]}, 'by_hand':{'hand_{}'.format(h):[] for h in range(1,6)}}}
# Loop through the three test zones
for i, zone in enumerate(test_zones):
print('Processing {} zone...'.format(zone))
# Get the test array & SRTM props dictionary
_, _, _, test, _, _, _, srtm_props = get_base_raster_data(zone)
# Land cover classes
lcdb_tif = '{}/LCDB_GroupID_{}.tif'.format(folder_lcdb, zone)
lcdb_ds = gdal.Open(lcdb_tif, gdalconst.GA_ReadOnly)
lcdb = np.array(lcdb_ds.ReadAsArray())
lcdb_ds = None
# Import Random Forest arrays - ONLY test dataset pixels
rf_predictions = np.load('{}/RF_Predictions_ByZone_{}.npy'.format(folder_results_rf, zone))
rf_cor, rf_elv, rf_res, _,_,_,_,_,_ = process_1D_predictions(zone, rf_predictions, 'test_clip', no_data=-9999)
# Import Densenet arrays - ONLY test dataset pixels
dn_predictions = np.load('{}/predictions/densenet_ensemble_{}_prediction.npy'.format(folder_results_densenet, zone))
dn_cor, dn_elv, dn_res, _,_,_,_,_,_ = process_1D_predictions(zone, dn_predictions, 'test_clip', no_data=-9999)
# Import Convnet arrays - ONLY test dataset pixels
cn_predictions = np.load('{}/predictions/convnet_ensemble_{}_prediction_intact.npy'.format(folder_results_convnet, zone))
cn_cor, cn_elv, cn_res, dtm, srtm, diff, merit, flha, hand = process_2D_predictions(zone, cn_predictions, 'test_clip', no_data=-9999)
# Check extent of the test patch coverage, with reference to the zone coverage as a whole
x_min = np.where(test==1)[1].min()
x_max = np.where(test==1)[1].max()
y_min = np.where(test==1)[0].min()
y_max = np.where(test==1)[0].max()
# For the LCDB array, set to np.nan any pixels which aren't in the test patches & clip it to test data extent
lcdb[test==0] = np.nan
lcdb = lcdb[y_min:y_max+1, x_min:x_max+1]
# Mask all arrays wherever no_data values are present
dtm = np.ma.masked_equal(dtm, no_data)
srtm = np.ma.masked_equal(srtm, no_data)
diff = np.ma.masked_equal(diff, no_data)
test = np.ma.masked_equal(test[y_min:y_max+1, x_min:x_max+1], no_data)
lcdb = np.ma.masked_equal(lcdb, no_data)
flha = np.ma.masked_equal(flha, no_data)
hand = np.ma.masked_equal(hand, no_data)
rf_cor = np.ma.masked_equal(rf_cor, no_data)
rf_elv = np.ma.masked_equal(rf_elv, no_data)
rf_res = np.ma.masked_equal(rf_res, no_data)
dn_cor = np.ma.masked_equal(dn_cor, no_data)
dn_elv = np.ma.masked_equal(dn_elv, no_data)
dn_res = np.ma.masked_equal(dn_res, no_data)
cn_cor = np.ma.masked_equal(cn_cor, no_data)
cn_elv = np.ma.masked_equal(cn_elv, no_data)
cn_res = np.ma.masked_equal(cn_res, no_data)
# Check that all arrays have the same shape
if not (dtm.shape == srtm.shape == diff.shape == test.shape == lcdb.shape == flha.shape == hand.shape == rf_cor.shape == dn_cor.shape == cn_cor.shape):
print('Different test array dimensions!')
break
# Class residuals by test zone
# Get list of residuals for that zone (for each model)
res_initial_byzone = diff.flatten().tolist()
res_rf_byzone = rf_res.flatten().tolist()
res_dn_byzone = dn_res.flatten().tolist()
res_cn_byzone = cn_res.flatten().tolist()
# Filter out any None values (masked)
res_initial_byzone = [r for r in res_initial_byzone if (r is not None and not np.isnan(r))]
res_rf_byzone = [r for r in res_rf_byzone if (r is not None and not np.isnan(r))]
res_dn_byzone = [r for r in res_dn_byzone if (r is not None and not np.isnan(r))]
res_cn_byzone = [r for r in res_cn_byzone if (r is not None and not np.isnan(r))]
# Update dictionary of all test residuals
res['initial']['by_zone'][zone] = res_initial_byzone
res['initial']['by_zone']['All'] = np.append(res['initial']['by_zone']['All'], res_initial_byzone)
res['rf']['by_zone'][zone] = res_rf_byzone
res['rf']['by_zone']['All'] = np.append(res['rf']['by_zone']['All'], res_rf_byzone)
res['dn']['by_zone'][zone] = res_dn_byzone
res['dn']['by_zone']['All'] = np.append(res['dn']['by_zone']['All'], res_dn_byzone)
res['cn']['by_zone'][zone] = res_cn_byzone
res['cn']['by_zone']['All'] = np.append(res['cn']['by_zone']['All'], res_cn_byzone)
# Class residuals by land cover class
# Loop through each potential land cover class (as defined in proc_LCDB.py) and calculate elevation residuals for that particular class
for i in range(1,8):
# Get lists of residuals for that land cover class - for each of the input residual arrays
res_initial_byclass = diff[lcdb==i].flatten().tolist()
res_rf_byclass = rf_res[lcdb==i].flatten().tolist()
res_dn_byclass = dn_res[lcdb==i].flatten().tolist()
res_cn_byclass = cn_res[lcdb==i].flatten().tolist()
# Filter out any None values (masked)
res_initial_byclass = [r for r in res_initial_byclass if (r is not None and not np.isnan(r))]
res_rf_byclass = [r for r in res_rf_byclass if (r is not None and not np.isnan(r))]
res_dn_byclass = [r for r in res_dn_byclass if (r is not None and not np.isnan(r))]
res_cn_byclass = [r for r in res_cn_byclass if (r is not None and not np.isnan(r))]
# Update dictionary of all test residuals
res['initial']['by_lcdb'][i] = np.append(res['initial']['by_lcdb'][i], res_initial_byclass)
res['rf']['by_lcdb'][i] = np.append(res['rf']['by_lcdb'][i], res_rf_byclass)
res['dn']['by_lcdb'][i] = np.append(res['dn']['by_lcdb'][i], res_dn_byclass)
res['cn']['by_lcdb'][i] = np.append(res['cn']['by_lcdb'][i], res_cn_byclass)
# Class residuals by NIWA's Flood Hazard susceptibility map
# Loop through the flood / no-flood susceptibility classes and calculate elevation residuals for each
for flha_code, flha_label in zip([1,0], ['flood','noflood']):
# Get lists of residuals for that flood susceptibility - for each of the input residual arrays
res_initial_byflha = diff[flha==flha_code].flatten().tolist()
res_rf_byflha = rf_res[flha==flha_code].flatten().tolist()
res_dn_byflha = dn_res[flha==flha_code].flatten().tolist()
res_cn_byflha = cn_res[flha==flha_code].flatten().tolist()
# Filter out any None values (masked)
res_initial_byflha = [r for
message['t_ToRecipients']['t_Mailbox'] = [recipients_mailbox]
summary = {'subject': message.get('t_Subject'),
'create_time': message.get('t_DateTimeCreated'),
'sent_time': message.get('t_DateTimeSent')}
action_result.update_summary(summary)
# Set the Status
return action_result.set_status(phantom.APP_SUCCESS)
def _delete_email(self, param):
action_result = ActionResult(dict(param))
# Connectivity
self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
message_id = self._handle_py_ver_compat_for_input_unicode(param[EWSONPREM_JSON_ID])
self._target_user = param.get(EWSONPREM_JSON_EMAIL)
message_ids = ph_utils.get_list_from_string(message_id)
data = ews_soap.get_delete_email(message_ids)
ret_val, resp_json = self._make_rest_call(action_result, data, self._check_delete_response)
# Process errors
if phantom.is_fail(ret_val):
self.add_action_result(action_result)
return action_result.get_status()
if not resp_json:
self.add_action_result(action_result)
return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key')
if not isinstance(resp_json, list):
resp_json = [resp_json]
for msg_id, resp_message in zip(message_ids, resp_json):
curr_param = dict(param)
curr_param.update({"id": msg_id})
curr_ar = self.add_action_result(ActionResult(curr_param))
resp_class = resp_message.get('@ResponseClass', '')
if resp_class == 'Error':
curr_ar.set_status(phantom.APP_ERROR, EWSONPREM_ERR_FROM_SERVER.format(**(self._get_error_details(resp_message))))
continue
curr_ar.set_status(phantom.APP_SUCCESS, "Email deleted")
# Set the Status
return phantom.APP_SUCCESS
def _clean_str(self, string):
if not string:
return ''
return string.replace('{', '-').replace('}', '-')
def _extract_folder_path(self, extended_property):
if not extended_property:
return ''
# For now, the folder path is the only extended property the app extracts,
# so the value is parsed directly. Once the app starts parsing other extended
# properties, the 't:ExtendedFieldURI' dictionary will need to be parsed and
# validated as well.
value = extended_property.get('t:Value')
if not value:
return ''
value = value.lstrip('\\')
# Exchange returns the path with '\\' separators even though '\' is a valid
# character inside a folder name, which is confusing and forces extra parsing
# code. The app therefore treats '/' as the folder path separator, which keeps
# things less confusing for users.
value = value.replace('\\', '/')
if not value:
return ''
return value
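# Illustrative example (hypothetical values): an ExtendedProperty value of
# "\Inbox\Suspicious Mail" is returned as "Inbox/Suspicious Mail".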
def _get_root_folder_id(self, user, action_result, is_public_folder=False):
if is_public_folder:
root_folder_id = 'publicfoldersroot'
else:
root_folder_id = 'root'
folder_info = {'id': root_folder_id, 'display_name': root_folder_id, 'children_count': -1, 'folder_path': ''}
return (phantom.APP_SUCCESS, folder_info)
def _get_matching_folder_path(self, folder_list, folder_name, folder_path, action_result):
""" The input folder is a list, meaning the folder name matched multiple folder
Given the folder path, this function will return the one that matches, or fail
"""
if not folder_list:
return (action_result.set_status(phantom.APP_ERROR, "Unable to find info about folder '{0}'. Returned info list empty"
.format(folder_name)), None)
for curr_folder in folder_list:
curr_folder_path = self._extract_folder_path(curr_folder.get('t:ExtendedProperty'))
if self._handle_py_ver_compat_for_input_str(curr_folder_path) == folder_path:
return (phantom.APP_SUCCESS, curr_folder)
return (action_result.set_status(phantom.APP_ERROR, "Folder paths did not match while searching for folder: '{0}'"
.format(folder_path)), None)
def _get_folder_info(self, user, folder_path, action_result, is_public_folder=False):
# In hindsight: the folder path separator was set to '/' on the assumption that folder names only allow '\' as a special char.
# It turns out Office 365 also allows '/' inside folder names, so the action lets the user escape a literal '/' that is part of a folder name.
folder_path = folder_path.replace('\\/', self.REPLACE_CONST)
folder_names = folder_path.split('/')
folder_names = list(filter(None, folder_names))
if not folder_names:
return (action_result.set_status(phantom.APP_ERROR, "Please provide a valid value for folder path"), None)
for i, folder_name in enumerate(folder_names):
folder_names[i] = folder_name.replace(self.REPLACE_CONST, '/')
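# Illustrative example (hypothetical folder names): the input path "Archive/Q1\/Reports"
# is split into ['Archive', 'Q1/Reports'], i.e. an escaped '\/' survives as a literal '/'
# inside a single folder name.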
if is_public_folder:
parent_folder_id = 'publicfoldersroot'
else:
parent_folder_id = 'root'
for i, folder_name in enumerate(folder_names):
curr_valid_folder_path = '/'.join(folder_names[:i + 1])
self.save_progress('Getting info about {0}\\{1}'.format(self._clean_str(user), curr_valid_folder_path))
input_xml = ews_soap.xml_get_children_info(user, child_folder_name=folder_name, parent_folder_id=parent_folder_id)
ret_val, resp_json = self._make_rest_call(action_result, input_xml, self._check_findfolder_response)
if phantom.is_fail(ret_val):
return (action_result.get_status(), None)
total_items = resp_json.get('m:RootFolder', {}).get('@TotalItemsInView', '0')
if total_items == '0':
return (action_result.set_status(phantom.APP_ERROR, "Folder '{0}' not found, possibly not present"
.format(curr_valid_folder_path)), None)
folder = resp_json.get('m:RootFolder', {}).get('t:Folders', {}).get('t:Folder')
if not folder:
return (action_result.set_status(phantom.APP_ERROR, "Information about '{0}' not found in response, possibly not present"
.format(curr_valid_folder_path)), None)
if not isinstance(folder, list):
folder = [folder]
ret_val, folder = self._get_matching_folder_path(folder, folder_name, curr_valid_folder_path, action_result)
if phantom.is_fail(ret_val):
return (action_result.get_status(), None)
if not folder:
return (action_result.set_status(phantom.APP_ERROR,
"Information for folder '{0}' not found in response, possibly not present".format(curr_valid_folder_path)), None)
folder_id = folder.get('t:FolderId', {}).get('@Id')
if not folder_id:
return (action_result.set_status(phantom.APP_ERROR,
"Folder ID information not found in response for '{0}', possibly not present".format(curr_valid_folder_path)), None)
parent_folder_id = folder_id
folder_info = {'id': folder_id,
'display_name': folder.get('t:DisplayName'),
'children_count': folder.get('t:ChildFolderCount'),
'folder_path': self._extract_folder_path(folder.get('t:ExtendedProperty'))}
return (phantom.APP_SUCCESS, folder_info)
def _copy_move_email(self, param, action="copy"):
action_result = self.add_action_result(ActionResult(dict(param)))
# Connectivity
self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
message_id = self._handle_py_ver_compat_for_input_unicode(param[EWSONPREM_JSON_ID])
folder_path = self._handle_py_ver_compat_for_input_str(param[EWSONPREM_JSON_FOLDER])
user = self._handle_py_ver_compat_for_input_str(param[EWSONPREM_JSON_EMAIL])
is_public_folder = param.get(EWS_JSON_IS_PUBLIC_FOLDER, False)
# Set the user to impersonate (i.e. target_user), by default it is the destination user
self._target_user = user
# Use a different email if specified
impersonate_email = self._handle_py_ver_compat_for_input_str(param.get(EWS_JSON_IMPERSONATE_EMAIL))
if impersonate_email:
self._target_user = impersonate_email
# finally see if impersonation has been enabled/disabled for this action
# as of right now copy or move email is the only action that allows over-ride
impersonate = not(param.get(EWS_JSON_DONT_IMPERSONATE, False))
self._impersonate = impersonate
ret_val, folder_info = self._get_folder_info(user, folder_path, action_result, is_public_folder)
if phantom.is_fail(ret_val):
return action_result.get_status()
data = ews_soap.get_copy_email(message_id, folder_info['id'])
response_checker = self._check_copy_response
if action == "move":
data = ews_soap.get_move_email(message_id, folder_info['id'])
response_checker = self._check_move_response
ret_val, resp_json = self._make_rest_call(action_result, data, response_checker)
# Process errors
if phantom.is_fail(ret_val):
return action_result.get_status()
if not resp_json:
return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key')
new_email_id = None
action_verb = 'copied' if action == "copy" else 'moved'
try:
new_email_id = next(iter(resp_json['m:Items'].values()))['t:ItemId']['@Id']
except Exception:
return action_result.set_status(phantom.APP_SUCCESS, "Unable to get {0} Email ID".format(action_verb))
action_result.add_data({'new_email_id': new_email_id})
# Set the Status
return action_result.set_status(phantom.APP_SUCCESS, "Email {0}".format(action_verb.title()))
def _resolve_name(self, param):
action_result = self.add_action_result(ActionResult(dict(param)))
# Connectivity
self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
email = self._handle_py_ver_compat_for_input_str(param[EWSONPREM_JSON_EMAIL])
self._impersonate = False
data = ews_soap.xml_get_resolve_names(email)
ret_val, resp_json = self._make_rest_call(action_result, data, self._check_resolve_names_response)
# Process errors
if phantom.is_fail(ret_val):
message = action_result.get_message()
if 'ErrorNameResolutionNoResults' in message:
message = 'No email found. The input parameter might not be a valid alias or email.'
return action_result.set_status(phantom.APP_SUCCESS, message)
else:
return action_result.set_status(phantom.APP_ERROR, message)
if not resp_json:
return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key')
resolution_set = resp_json.get('m:ResolutionSet', {}).get('t:Resolution')
if not resolution_set:
return action_result.set_summary({'total_entries': 0})
if not isinstance(resolution_set, list):
resolution_set = [resolution_set]
action_result.update_summary({'total_entries': len(resolution_set)})
for curr_resolution in resolution_set:
self._cleanse_key_names(curr_resolution)
contact = curr_resolution.get('t_Contact')
if contact:
email_addresses = contact.get('t_EmailAddresses', {}).get('t_Entry', [])
if email_addresses:
if not isinstance(email_addresses, list):
email_addresses = [email_addresses]
contact['t_EmailAddresses'] = email_addresses
action_result.add_data(curr_resolution)
# Set the Status
return action_result.set_status(phantom.APP_SUCCESS)
def _expand_dl(self, param):
action_result = self.add_action_result(ActionResult(dict(param)))
# Connectivity
self.save_progress(phantom.APP_PROG_CONNECTING_TO_ELLIPSES, self._host)
group = self._handle_py_ver_compat_for_input_str(param[EWSONPREM_JSON_GROUP])
self._group_list.append(group)
self._impersonate = False
data = ews_soap.get_expand_dl(group)
ret_val, resp_json = self._make_rest_call(action_result, data, self._check_expand_dl_response)
# Process errors
if phantom.is_fail(ret_val):
message = action_result.get_message()
if 'ErrorNameResolutionNoResults' in message:
message += ' The input parameter might not be a distribution list.'
action_result.add_data({"t_EmailAddress": group})
return action_result.set_status(phantom.APP_ERROR, message)
if not resp_json:
return action_result.set_status(phantom.APP_ERROR, 'Result does not contain RootFolder key')
mailboxes = resp_json.get('m:DLExpansion', {}).get('t:Mailbox')
if not mailboxes:
action_result.set_summary({'total_entries': 0})
return action_result.set_status(phantom.APP_SUCCESS)
if not isinstance(mailboxes, list):
mailboxes = [mailboxes]
action_result.update_summary({'total_entries': len(mailboxes)})
for mailbox in mailboxes:
value = any(elem in [mailbox['t:EmailAddress'], mailbox['t:Name']] for elem in self._group_list)
if param.get('recursive', False) and "DL" in mailbox['t:MailboxType'] and not value:
param[EWSONPREM_JSON_GROUP] = mailbox['t:EmailAddress']
self._expand_dl(param)
self._cleanse_key_names(mailbox)
action_result.add_data(mailbox)
# Set the Status
return action_result.set_status(phantom.APP_SUCCESS)
def _get_email_epoch(self, resp_json):
return None
def _get_rfc822_format(self, resp_json, action_result):
try:
mime_content = resp_json['m:Items']['t:Message']['t:MimeContent']['#text']
except Exception:
return action_result.set_status(phantom.APP_ERROR, "Email MimeContent missing in response.")
try:
rfc822_email = base64.b64decode(mime_content)
except Exception as e:
error_code, error_msg = self._get_error_message_from_exception(e)
error_text = EWSONPREM_EXCEPTION_ERR_MESSAGE.format(error_code, error_msg)
self.debug_print("Unable to decode Email Mime Content. {0}".format(error_text))
return action_result.set_status(phantom.APP_ERROR, "Unable to decode Email Mime Content")
return (phantom.APP_SUCCESS, rfc822_email)
def _get_attachment_meta_info(self, attachment, curr_key, parent_internet_message_id, parent_guid):
attach_meta_info = dict()
try:
attach_meta_info['attachmentId'] = attachment['t:AttachmentId']['@Id']
except Exception:
pass
try:
attach_meta_info['attachmentType'] = curr_key[2:].replace('Attachment', '').lower()
except Exception:
pass
attach_meta_info['parentInternetMessageId'] = parent_internet_message_id
attach_meta_info['parentGuid'] = parent_guid
# attachmentID, attachmentType
for k, v in attachment.items():
if not isinstance(v, str):
continue
# convert the key to the convention used by cef
cef_key_name = k[2:]
cef_key_name = cef_key_name[0].lower() + cef_key_name[1:]
attach_meta_info[cef_key_name] = v
return attach_meta_info
def _extract_ext_properties_from_attachments(self, resp_json):
email_headers_ret = list()
attach_meta_info_ret = list()
if 'm:Items' not in resp_json:
k = next(iter(resp_json.keys()))
resp_json['m:Items'] = resp_json.pop(k)
data = None
# Get the attachments
try:
for key in EWSONPREM_MAIL_TYPES:
if key in resp_json['m:Items']:
data = resp_json['m:Items'][key]
attachments = data['t:Attachments']
except Exception:
return RetVal3(phantom.APP_SUCCESS)
attachment_ids = list()
internet_message_id = None
try:
internet_message_id = data['t:InternetMessageId']
except Exception:
internet_message_id = None
email_guid = resp_json['emailGuid']
for curr_key in list(attachments.keys()):
attachment_data = attachments[curr_key]
if not isinstance(attachment_data, list):
attachment_data = [attachment_data]
for curr_attachment in attachment_data:
attachment_ids.append(curr_attachment['t:AttachmentId']['@Id'])
# Add the info that we have right now
curr_attach_meta_info = self._get_attachment_meta_info(curr_attachment, curr_key, internet_message_id, email_guid)
if curr_attach_meta_info:
attach_meta_info_ret.append(curr_attach_meta_info)
if not attachment_ids:
return RetVal3(phantom.APP_SUCCESS)
data = ews_soap.xml_get_attachments_data(attachment_ids)
action_result = ActionResult()
ret_val, resp_json = self._make_rest_call(action_result, data, self._check_get_attachment_response)
# Process errors
if phantom.is_fail(ret_val):
return RetVal3(action_result.get_status())
if not isinstance(resp_json, list):
resp_json = [resp_json]
for curr_attachment_data in resp_json:
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy
from scipy.spatial.distance import cdist
from .anchor_head_template import AnchorHeadTemplate
class Self_Attn(nn.Module):
""" Self attention Layer"""
def __init__(self,in_dim,activation):
super(Self_Attn,self).__init__()
self.chanel_in = in_dim
self.activation = activation
self.query_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.key_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.value_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim , kernel_size= 1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1) #
def forward(self,x):
"""
inputs :
x : input feature maps( B X C X W X H)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
m_batchsize,C,width ,height = x.size()
proj_query = self.query_conv(x).view(m_batchsize,-1,width*height).permute(0,2,1) # B x N x (C//8)
proj_key = self.key_conv(x).view(m_batchsize,-1,width*height) # B x (C//8) x N
energy = torch.bmm(proj_query,proj_key) # transpose check; B x N x N
attention = self.softmax(energy) # B x N x N
proj_value = self.value_conv(x).view(m_batchsize,-1,width*height) # B X C X N
out = torch.bmm(proj_value,attention.permute(0,2,1) )
out = out.view(m_batchsize,C,width,height)
out = self.gamma*out + x
return out,attention
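# Minimal usage sketch (shapes only, not part of the original module):
#   attn = Self_Attn(in_dim=256, activation='relu')
#   out, att = attn(torch.randn(2, 256, 32, 32))
#   # out: (2, 256, 32, 32); att: (2, 1024, 1024) since N = 32*32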
class GradReverse(torch.autograd.Function):
def __init__(self, lambd):
self.lambd = lambd
def forward(self, x):
return x.view_as(x)
def backward(self, grad_output):
return (grad_output * self.lambd)
def grad_reverse(x, lambd):
return GradReverse(lambd)(x)
class LocationAttentionLayer(nn.Module):
def __init__(self, num_channels, kernel_size, kernel_size2=0, no_sigmoid=False, detach=False):
super(LocationAttentionLayer, self).__init__()
if kernel_size2 == 0:
kernel_size2 = kernel_size
self.kernel_size = kernel_size
self.kernel_size2 = kernel_size2
self.patch_matrix = nn.Parameter(torch.randn(1, kernel_size, kernel_size2), requires_grad=True)
# self.patch_conv = nn.Conv2d(num_channels, 1, kernel_size, kernel_size) n, 126, 126
self.sigmoid = nn.Sigmoid()
self.no_sigmoid = no_sigmoid
self.detach = detach
def forward(self, input_tensor):
#2, 512, 126, 126
# print("kernel_size", self.kernel_size, self.kernel_size2)
# print("input_tensor", input_tensor.shape)
bt, c, h, w = input_tensor.size()
# print("bt, c, h, w", bt, c, h, w)
# print("input_tensor", input_tensor.shape)
# patch_tensor = self.patch_conv(input_tensor)
# print("patch_tensor", patch_tensor.shape)
# print("self.patch_matrix.repeat(bt*c, 1, 1)", self.patch_matrix.repeat(bt*c, 1, 1).shape)
if self.no_sigmoid:
input_tensor = input_tensor.contiguous().view(-1, h, w) #
input_tensor = input_tensor * self.patch_matrix.repeat(bt*c, 1, 1)
input_tensor = input_tensor.view(bt, c, h, w)
else:
input_tensor = input_tensor.view(-1, h, w) #
att_matrix = self.patch_matrix.repeat(bt*c, 1, 1)
# if self.detach:
# att_matrix = att_matrix.detach()
input_tensor = input_tensor * att_matrix
# z = x * att_matrix.detach()
# z = x.detach() * att_matrix
input_tensor = self.sigmoid(input_tensor).view(bt, c, h, w)
return input_tensor
class LocationAttentionDoubleLayer(nn.Module):
def __init__(self, num_channels, kernel_size, kernel_size2=0, no_sigmoid=False):
super(LocationAttentionDoubleLayer, self).__init__()
if kernel_size2 == 0:
kernel_size2 = kernel_size
self.patch_matrix = nn.Parameter(torch.randn(1, kernel_size, kernel_size2), requires_grad=True)
# self.patch_conv = nn.Conv2d(num_channels, 1, kernel_size, kernel_size) n, 126, 126
self.sigmoid = nn.Sigmoid()
self.no_sigmoid = no_sigmoid
def forward(self, input_tensor, dom_atten):
#2, 512, 126, 126
# print("dom_atten", dom_atten.shape) # 3, 514, 128, 128
# print("input_tensor", input_tensor.shape) # , , 128, 128
bt, c, h, w = input_tensor.size()
# print("bt, c, h, w", bt, c, h, w)
# print("input_tensor", input_tensor.shape)
# patch_tensor = self.patch_conv(input_tensor)
# print("patch_tensor", patch_tensor.shape)
if self.no_sigmoid:
input_tensor = input_tensor.contiguous().view(-1, h, w) #
dom_atten = dom_atten.contiguous().view(-1, h, w)
max_att = torch.max(dom_atten, self.patch_matrix.repeat(bt*c, 1, 1))
input_tensor = input_tensor * max_att
input_tensor = input_tensor.view(bt, c, h, w)
else:
input_tensor = input_tensor.view(-1, h, w) #
dom_atten = dom_atten.view(-1, h, w) #
max_att = torch.max(dom_atten, self.patch_matrix.repeat(bt*c, 1, 1))
input_tensor = input_tensor * max_att
input_tensor = self.sigmoid(input_tensor).view(bt, c, h, w)
return input_tensor
class SpatialSELayer(nn.Module):
"""
Re-implementation of SE block -- squeezing spatially and exciting channel-wise described in:
*Roy et al., Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks, MICCAI 2018*
"""
def __init__(self, num_channels):
"""
:param num_channels: No of input channels
"""
super(SpatialSELayer, self).__init__()
self.conv = nn.Conv2d(num_channels, 1, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_tensor, weights=None):
"""
:param weights: weights for few shot learning
:param input_tensor: X, shape = (batch_size, num_channels, H, W)
:return: output_tensor
"""
# spatial squeeze
batch_size, channel, a, b = input_tensor.size()
# print("input_tensor.size()", input_tensor.size()) #2, 512, 126, 126
if weights is not None:
weights = torch.mean(weights, dim=0)
weights = weights.view(1, channel, 1, 1)
out = F.conv2d(input_tensor, weights)
else:
out = self.conv(input_tensor)
# print("out.size()", out.size()) #2, 1, 126, 126
squeeze_tensor = self.sigmoid(out)
# print("squeeze_tensor.size()", squeeze_tensor.size()) # 2, 1, 126, 126
# spatial excitation
squeeze_tensor = squeeze_tensor.view(batch_size, 1, a, b)
# print("squeeze_tensor 2.size()", squeeze_tensor.size()) # 2, 1, 126, 126
output_tensor = torch.mul(input_tensor, squeeze_tensor)
# print("output_tensor 2.size()", output_tensor.size()) #2, 512, 126, 126
#output_tensor = torch.mul(input_tensor, squeeze_tensor)
return output_tensor
class ChannelSELayer(nn.Module):
"""
Re-implementation of Squeeze-and-Excitation (SE) block described in:
*Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507*
"""
def __init__(self, num_channels, reduction_ratio=2):
"""
:param num_channels: No of input channels
:param reduction_ratio: By how much should the num_channels should be reduced
"""
super(ChannelSELayer, self).__init__()
num_channels_reduced = num_channels // reduction_ratio
self.reduction_ratio = reduction_ratio
self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input_tensor):
"""
:param input_tensor: X, shape = (batch_size, num_channels, H, W)
:return: output tensor
"""
batch_size, num_channels, H, W = input_tensor.size() #2, 512, 126, 126
# Average along each channel
squeeze_tensor = input_tensor.view(batch_size, num_channels, -1).mean(dim=2) #2, 512, 126*126(1)
# channel excitation
fc_out_1 = self.relu(self.fc1(squeeze_tensor))
fc_out_2 = self.sigmoid(self.fc2(fc_out_1))
a, b = squeeze_tensor.size()
output_tensor = torch.mul(input_tensor, fc_out_2.view(a, b, 1, 1))
return output_tensor
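# Minimal usage sketch (shapes only, not part of the original module):
#   se = ChannelSELayer(num_channels=512, reduction_ratio=2)
#   y = se(torch.randn(2, 512, 126, 126))  # same shape as the input; each channel
#   # is rescaled by a sigmoid weight produced by the 512 -> 256 -> 512 FC path.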
class LocalDomainClassifier(nn.Module):
def __init__(self, input_channels=256, context=False):
super(LocalDomainClassifier, self).__init__()
self.conv1 = nn.Conv2d(input_channels, 256, kernel_size=1, stride=1,
padding=0, bias=False)
self.conv2 = nn.Conv2d(256, 128, kernel_size=1, stride=1,
padding=0, bias=False)
self.conv3 = nn.Conv2d(128, 1, kernel_size=1, stride=1,
padding=0, bias=False)
self.context = context
# print("sef context", self.context)
self._init_weights()
def _init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
"""
weight initalizer: truncated normal and random normal.
"""
# x is a parameter
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
#m.bias.data.zero_()
normal_init(self.conv1, 0, 0.01)
normal_init(self.conv2, 0, 0.01)
normal_init(self.conv3, 0, 0.01)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
if self.context:
feat = F.avg_pool2d(x, (x.size(2), x.size(3)))
x = self.conv3(x)
return F.sigmoid(x),feat
else:
x = self.conv3(x)
return F.sigmoid(x)
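# Minimal usage sketch (not part of the original module): a fully convolutional
# 1x1 domain discriminator applied over a feature map.
#   D = LocalDomainClassifier(input_channels=256)
#   p = D(torch.randn(2, 256, 64, 64))  # per-location domain probability, shape (2, 1, 64, 64)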
class AnchorHeadFuseFPNCombineCrossScale(AnchorHeadTemplate):
def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
predict_boxes_when_training=True, nusc=False, input_channels_fpn=None, num_fpn_up=0, num_fpn_down=0, num_fpn_downup=0, fpn_layers=[], voxel_size=[0.1, 0.1, 0.2], **kwargs):
super().__init__(
model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range,
predict_boxes_when_training=predict_boxes_when_training, nusc=nusc,
num_fpn_up=num_fpn_up, num_fpn_down=num_fpn_down, num_fpn_downup=num_fpn_downup, fpn_layers=fpn_layers, voxel_size=voxel_size, **kwargs
)
self.num_anchors_per_location = sum(self.num_anchors_per_location)
#####################################
self.voxel_det_seconv_attention = self.model_cfg.get('VOXEL_DET_SECONV_ATTENTION', False)
self.voxel_det_se_attention = self.model_cfg.get('VOXEL_DET_SE_ATTENTION', False)
self.voxel_det_patch_attention = self.model_cfg.get('VOXEL_DET_PATCH_ATTENTION', False)
self.voxel_dom_seconv_attention = self.model_cfg.get('VOXEL_DOM_SECONV_ATTENTION', False)
self.voxel_dom_se_attention = self.model_cfg.get('VOXEL_DOM_SE_ATTENTION', False)
self.voxel_dom_patch_attention = self.model_cfg.get('VOXEL_DOM_PATCH_ATTENTION', False)
self.joint_attention = self.model_cfg.get('VOXEL_DETDOM_JOINT_ATTENTION', False)
self.dom_patch_first = self.model_cfg.get('DOM_PATCH_FIRST', False)
self.no_sigmoid = self.model_cfg.get('NO_SIGMOID', False)
if self.sep_two_dom or (self.double_pma and not self.joint_pma):
self.input_channels_dom_sep = input_channels
if self.range_guidance:
if self.range_guidance_dom_only:
self.input_channels = input_channels
if self.range_guidance_dist:
self.input_channels_dom = input_channels + 1
else:
self.input_channels_dom = input_channels + 2
else:
if self.range_guidance_dist:
self.input_channels = input_channels + 1
else:
self.input_channels = input_channels + 2
self.input_channels_dom = self.input_channels
else:
self.input_channels = input_channels
self.input_channels_dom = input_channels
if self.joint_two_dom:
if self.dom_patch_first or self.patch_unplug_context:
self.input_channels_dom_joint = input_channels
else:
if self.range_guidance_dist:
self.input_channels_dom_joint = input_channels + 1
else:
self.input_channels_dom_joint = input_channels + 2
if self.joint_pma:
self.input_channels_dom_joint = input_channels + 2
self.num_keypoints_range = self.model_cfg.get('NUM_KEYPOINTS_RANGE', {})
self.range_keys = self.num_keypoints_range.keys()
self.point_fc_range = nn.ModuleDict()
# self.domain_classifier_range = nn.ModuleDict()
for i in self.range_keys:
self.point_fc_range[i] = nn.Sequential(nn.Linear(self.num_keypoints_range[i], input_channels), nn.ReLU(True), nn.Dropout())
self.input_channels_fpn = input_channels_fpn
self.input_channels_dom_fpn = {}
if self.sep_two_dom or (self.double_pma and not self.joint_pma):
self.input_channels_dom_sep_fpn = {}
if self.joint_two_dom or self.joint_pma:
self.input_channels_dom_joint_fpn = {}
for layer in self.fpn_layers:
if self.sep_two_dom or (self.double_pma and not self.joint_pma):
self.input_channels_dom_sep_fpn[layer] = input_channels_fpn[layer]
if self.range_guidance:
if self.range_guidance_dom_only:
if self.range_guidance_dist:
self.input_channels_dom_fpn[layer] = input_channels_fpn[layer] + 1
else:
self.input_channels_dom_fpn[layer] = input_channels_fpn[layer] + 2
else:
if self.range_guidance_dist:
self.input_channels_fpn[layer] = input_channels_fpn[layer] + 1
else:
self.input_channels_fpn[layer] = input_channels_fpn[layer] + 2
self.input_channels_dom_fpn[layer] = self.input_channels_fpn[layer]
else:
self.input_channels_dom_fpn[layer] = input_channels_fpn[layer]
if self.joint_two_dom:
if self.dom_patch_first or self.patch_unplug_context:
self.input_channels_dom_joint_fpn[layer] = input_channels_fpn[layer]
else:
if self.range_guidance_dist:
self.input_channels_dom_joint_fpn[layer] = input_channels_fpn[layer] + 1
else:
self.input_channels_dom_joint_fpn[layer] = input_channels_fpn[layer] + 2
if self.joint_pma:
self.input_channels_dom_joint_fpn[layer] = input_channels_fpn[layer] + 2
######### DOM CONTEXT ######
if self.dom_context:
dom_fc1, dom_fc2 = self.model_cfg.get('DOM_FC', [1024, 256])
else:
dom_fc1, dom_fc2 = self.model_cfg.get('DOM_FC', [1024, 1024])
if self.dom_context:
self.context_num = 1
if not self.sep_fpn_dom_context:
self.context_num += self.num_fpn_up + self.num_fpn_down + self.num_fpn_downup
if self.num_fpn_downup == 1:
self.context_num += 1
#256 512
if self.point_feat_in_voxel_dom:
self.context_num += 2 # point context 256*2=512
self.input_channels += self.context_num*dom_fc2
for layer in self.fpn_layers:
self.input_channels_fpn[layer] += self.context_num*dom_fc2
# print('self.input_channels_fpn[layer] ini', layer, self.input_channels_fpn[layer])
if self.range_guidance_new_conv_dom_context:
self.context_num = 1
if not self.sep_fpn_dom_context:
self.context_num += self.num_fpn_up + self.num_fpn_down + self.num_fpn_downup
if self.num_fpn_downup == 1:
self.context_num += 1
server
# fntag = pfn is used for OutPutFileCatalog.xml that is used by the mover for the stage out
# The SURL will be added to the metadata file for fntag = lfn to allow for server side LFC registration
status = True
flist = []
glist = []
from SiteMover import SiteMover
# get the experiment object
thisExperiment = getExperiment(experiment)
# for metadata.xml prepare the file for potential guid grabbing
if "metadata" in fname and None in fguids:
metadata_filename = prepareMetadata(fname + ".PAYLOAD")
else:
metadata_filename = fname
# add log file
if alog:
flist.append(alog)
if not alogguid:
if not jr:
alogguid = getGUID()
tolog("Generated log guid: %s" % (alogguid))
else:
tolog("!!WARNING!!2999!! Log guid generation not allowed in recovery mode")
alogguid = ''
status = False
glist.append(alogguid)
# add additional output files (only for CERNVM, not NG or any other sites)
if additionalOutputFile:
flist.append(additionalOutputFile)
if not additionalOutputFileGuid:
additionalOutputFileGuid = getGUID()
glist.append(additionalOutputFileGuid)
if fnlist:
flist = flist + fnlist
tolog("fnlist = %s" % str(fnlist))
tolog("fguids = %s" % str(fguids))
for i in range(0, len(fnlist)):
# check for guid
try:
_dummy = fguids[i]
del _dummy
except IndexError, e:
guid = findGuid(analJob, metadata_filename, fnlist[i])
if guid and guid != "":
tolog("Found guid for file: %s (%s)" % (fnlist[i], guid))
else:
if not jr:
guid = getGUID()
tolog("Generated guid for file (%d): %s (%s)" % (i, fnlist[i], guid))
else:
tolog("!!WARNING!!2999!! Guid generation not allowed in recovery mode (file: %s)" % (fnlist[i]))
guid = ''
status = False
fguids.insert(i, guid)
else:
if not fguids[i]:
guid = findGuid(analJob, metadata_filename, fnlist[i])
if guid and guid != "":
tolog("Found guid for file: %s (%s)" % (fnlist[i], guid))
else:
if not jr:
guid = getGUID()
tolog("Generated guid for file (%d): %s (%s)" % (i, fnlist[i], guid))
else:
tolog("!!WARNING!!2999!! Guid generation not allowed in recovery mode (file: %s)" % (fnlist[i]))
guid = ''
status = False
try:
fguids[i] = guid
except:
fguids.insert(i, guid)
if fntag == "lfn":
# check for file size
try:
_dummy = fsize[i]
del _dummy
except IndexError, e:
#print "This item doesn't exist"
fsize.insert(i, "")
# check for checksum
try:
_dummy = checksum[i]
del _dummy
except IndexError, e:
#print "This item doesn't exist"
checksum.insert(i, "")
glist = glist + fguids
if fntag == "pfn":
#create the PoolFileCatalog.xml-like file in the workdir
fd = open(fname, "w")
fd.write('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n')
fd.write("<!-- Edited By POOL -->\n")
fd.write('<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">\n')
fd.write("<POOLFILECATALOG>\n")
for i in range(0, len(flist)): # there's only one file in flist if it is for the object store
fd.write(' <File ID="%s">\n' % (glist[i]))
fd.write(" <physical>\n")
fd.write(' <pfn filetype="ROOT_All" name="%s"/>\n' % (flist[i]))
fd.write(" </physical>\n")
fd.write(" </File>\n")
fd.write("</POOLFILECATALOG>\n")
fd.close()
elif fntag == "lfn":
# create the metadata.xml-like file that's needed by dispatcher jobs
fd=open(fname, "w")
fd.write('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n')
fd.write("<!-- ATLAS file meta-data catalog -->\n")
fd.write('<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">\n')
fd.write("<POOLFILECATALOG>\n")
for i in range(0, len(flist)):
fd.write(' <File ID="%s">\n' % (glist[i]))
fd.write(" <logical>\n")
fd.write(' <lfn name="%s"/>\n' % (flist[i]))
fd.write(" </logical>\n")
# add SURL metadata (not known yet) for server LFC registration
# use the GUID as identifier (the string "<GUID>-surltobeset" will later be replaced with the SURL)
if thisExperiment:
special_xml = thisExperiment.getMetadataForRegistration(glist[i])
if special_xml != "":
fd.write(special_xml)
# add log file metadata later (not known yet)
if flist[i] == alog:
fd.write(' <metadata att_name="fsize" att_value=""/>\n')
fd.write(' <metadata att_name="csumtypetobeset" att_value=""/>\n')
elif (additionalOutputFile and flist[i] == additionalOutputFile):
if ".xml" in additionalOutputFile:
fd.write(' <metadata att_name="fsizeXML" att_value=""/>\n')
fd.write(' <metadata att_name="csumtypetobesetXML" att_value=""/>\n')
else:
fd.write(' <metadata att_name="fsizeAdditional" att_value=""/>\n')
fd.write(' <metadata att_name="csumtypetobesetAdditional" att_value=""/>\n')
else:
if len(fsize) != 0:
fd.write(' <metadata att_name="fsize" att_value="%s"/>\n' % (fsize[i]))
if len(checksum) != 0:
fd.write(' <metadata att_name="%s" att_value="%s"/>\n' %\
(SiteMover.getChecksumType(checksum[i]), checksum[i]))
fd.write(" </File>\n")
fd.write("</POOLFILECATALOG>\n")
fd.close()
else:
tolog("!!WARNING!!1234!! fntag is neither lfn nor pfn, did not manage to create the XML file for output files")
# dump the file to the log
dumpFile(fname, topilotlog=True)
return status
def stageInPyModules(initdir, workdir):
""" copy pilot python modules into pilot workdir from condor initial dir """
status = True
ec = 0
tolog('initdir is %s '%initdir)
tolog('workdir is %s '%workdir)
if workdir and initdir:
for k in getFileList():
if os.path.isfile("%s/%s" % (initdir, k)):
try:
shutil.copy2("%s/%s" % (initdir, k), workdir)
except Exception, e:
tolog("!!WARNING!!2999!! stageInPyModules failed to copy file %s/%s to %s: %s" % (initdir, k, workdir, e))
status = False
break
elif os.path.isdir("%s/%s" % (initdir, k)):
try:
shutil.copytree("%s/%s" % (initdir, k), "%s/%s" % (workdir,k))
except Exception, e:
tolog("!!WARNING!!2999!! stageInPyModules failed to copy directory %s/%s to %s: %s" % (initdir, k, workdir, e))
status = False
break
else:
tolog("!!WARNING!!2999!! File missing during stage in: %s/%s" % (initdir, k))
if status:
tolog("Pilot modules have been copied to %s" % (workdir))
else:
# get error handler
error = PilotErrors()
ec = error.ERR_GENERALERROR
return ec
def removePyModules(_dir):
""" Remove pilot python modules from workdir """
if _dir:
for k in getFileList(path_dir=_dir):
if not "runargs" in k:
try:
os.system("rm -rf %s/%s*" % (_dir, k))
except:
pass
def setTimeConsumed(t_tuple):
""" set the system+user time spent by the job """
# The cpuConsumptionTime is the system+user time while wall time is encoded in pilotTiming, third number.
# Previously the cpuConsumptionTime was "corrected" with a scaling factor but this was deemed outdated and is now set to 1.
t_tot = reduce(lambda x, y:x+y, t_tuple[2:3])
conversionFactor = 1.0
cpuCU = "s" # "kSI2kseconds"
cpuCT = int(t_tot*conversionFactor)
return cpuCU, cpuCT, conversionFactor
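# Illustrative example (assuming t_tuple has the layout of os.times(), which is an
# assumption, not stated by this module): t_tuple = (10.0, 2.0, 150.0, 30.0, 600.0)
# yields t_tot = 150.0, so cpuCT = int(150.0 * 1.0) = 150 and cpuCU = "s".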
def timeStamp():
""" return ISO-8601 compliant date/time format """
tmptz = time.timezone
if tmptz > 0:
signstr = '-'
else:
signstr = '+'
tmptz_hours = int(tmptz/3600)
# use absolute values for the hour/minute fields; the sign is already carried by signstr
return str("%s%s%02d%02d" % (time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime()), signstr, abs(tmptz_hours), abs(int(tmptz/60-tmptz_hours*60))))
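# Example output (illustrative) for a host at UTC+02:00 (time.timezone == -7200): "2016-03-01T14:22:05+0200"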
def timeStampUTC(t=None, format="%d %b %H:%M:%S"):
""" return UTC time stamp """
if not t:
t = time.time()
return time.strftime(format, time.gmtime(t))
def getJobStatus(jobId, pshttpurl, psport, path):
"""
Return the current status of job <jobId> from the dispatcher
typical dispatcher response: 'status=finished&StatusCode=0'
StatusCode 0: succeeded
10: time-out
20: general error
30: failed
In the case of time-out, the dispatcher will be asked one more time after 10s
"""
status = 'unknown'
StatusCode = -1
nod = {}
nod['ids'] = jobId
url = "%s:%s/server/panda/getStatus" % (pshttpurl, repr(psport))
# ask dispatcher about lost job status
trial = 1
max_trials = 2
while trial <= max_trials:
try:
# open connection
ret = httpConnect(nod, url, path=path, mode="GETSTATUS")
response = ret[1]
tolog("response: %s" % str(response))
if response:
try:
# decode the response
# eg. var = ['status=notfound', 'attemptNr=0', 'StatusCode=0']
# = response
# create a dictionary of the response (protects against future updates)
# eg. dic = {'status': 'activated', 'attemptNr': '0', 'StatusCode': '0'}
# dic = {}
# for i in range(len(var)):
# key = var[i].split('=')[0]
# dic[key] = var[i].split('=')[1]
status = response['status'] # e.g. 'holding'
attemptNr = int(response['attemptNr']) # e.g. '0'
StatusCode = int(response['StatusCode']) # e.g. '0'
except Exception, e:
tolog("!!WARNING!!2997!! Exception: Dispatcher did not return allowed values: %s, %s" % (str(ret), e))
status = "unknown"
attemptNr = -1
StatusCode = 20
else:
tolog("!!WARNING!!2998!! Dispatcher did not return allowed values: %s" % str(ret))
status = "unknown"
attemptNr = -1
StatusCode = 20
except Exception,e:
tolog("Could not interpret job status from dispatcher: %s, %s" % (response, e))
status = 'unknown'
attemptNr = -1
StatusCode = -1
break
else:
if StatusCode == 0: # success
break
elif StatusCode == 10: # time-out
trial = trial + 1
time.sleep(10)
continue
elif StatusCode == 20: # other error
if ret[0] == 13056 or ret[0] == '13056':
tolog("Wrong certificate used with curl operation? (encountered error 13056)")
break
else: # general error
break
return status, attemptNr, StatusCode
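# Illustrative call (hedged; argument names are placeholders for values known to the caller):
#     status, attemptNr, StatusCode = getJobStatus(jobId, pshttpurl, psport, path)
#     if StatusCode == 0:
#         tolog("dispatcher status=%s, attemptNr=%d" % (status, attemptNr))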
def getExitCode(path, filename):
""" Try to read the exit code from the pilot stdout log """
ec = -1
# first create a tmp file with only the last few lines of the status file to avoid
# reading a potentially long status file
tmp_file_name = "tmp-tail-dump-file"
try:
os.system("tail %s/%s >%s/%s" % (path, filename, path, tmp_file_name))
except Exception, e:
tolog("Job Recovery could not create tmp file %s: %s" % (tmp_file_name, e))
else:
# open the tmp file and | |
# background/sql_math.py
#! /usr/bin/python3
import mysql.connector
import datetime
import json
import os # for writing to file
mydb = mysql.connector.connect( # connect to database
host="127.0.0.1",
user="root",
password="<PASSWORD>",
auth_plugin='mysql_native_password',
database="SNCF"
)
class clean:
def __init__(self):
self.mydb = mydb
self.mycursor = mydb.cursor()
self.today = datetime.date.today()
self.yesterday = self.today - datetime.timedelta(days = 1)
#self.yesterday = '2022-05-03'
self.yesterday = str(self.yesterday).replace('-', '_')
self.disruption_list = ['begin', 'end', 'id', 'message', 'severity_name', 'trip_id', 'trip_name']
#self.impacted_object_list = ['date', 'begin', 'end', 'amended_arrival_time', 'amended_departure_time', 'departure_status', 'base_arrival_time', 'base_departure_time', 'arrival_status', 'cause', 'is_detour', 'id_impacted_stop', 'label_impacted_stop', 'name_impacted_stop', 'stop_time_effect']
#self.vehicle_journey_list = ['value', 'date', 'begin', 'end', 'headsign', 'id', 'name', 'trip_id', 'trip_name']
self.routes_list = ['id_direction', 'value', 'id_stop_points', 'lat', 'lon', 'name_stop_area', 'closing_time', 'id_commercial_mode', 'name_commercial_mode', 'opening_time', 'id_physical_modes', 'name_physical_modes', 'routes_name']
self.impacted_object_list1 = ['departure_status','arrival_status', 'cause', 'id_impacted_stop', 'name_impacted_stop']
self.impacted_object_list2 = ['lat', 'lon']
self.impacted_object_list3 = ['amended_arrival_time', 'amended_departure_time', 'base_arrival_time', 'base_departure_time']
self.stop_times_list1 = ['headsign_stop', 'id_stop_point', 'name_stop_point']
self.stop_times_list2 = ['lat', 'lon']
self.stop_times_list3 = ['arrival_time', 'departure_time', 'utc_arrival_time', 'utc_departure_time']
def clean_data(self):
sql = f"UPDATE disruptions_{self.yesterday} SET message = NULL WHERE message = 'zzz'"
self.mycursor.execute(sql)
self.mydb.commit()
for i in self.impacted_object_list1:
sql = f"UPDATE impacted_object_{self.yesterday} SET {i} = NULL WHERE {i} = 'zzz'"
self.mycursor.execute(sql)
self.mydb.commit()
for i in self.impacted_object_list2:
sql = f"UPDATE impacted_object_{self.yesterday} SET {i} = NULL WHERE {i} = '0'"
self.mycursor.execute(sql)
self.mydb.commit()
sql = f"UPDATE impacted_object_{self.yesterday} SET cause = NULL WHERE cause = ''"
self.mycursor.execute(sql)
self.mydb.commit()
for i in self.impacted_object_list3:
sql = f"UPDATE impacted_object_{self.yesterday} SET {i} = NULL WHERE {i} = '00:00:00'"
self.mycursor.execute(sql)
self.mydb.commit()
for i in self.stop_times_list1:
sql = f"UPDATE stop_times_{self.yesterday} SET {i} = NULL WHERE {i} = 'zzz'"
self.mycursor.execute(sql)
self.mydb.commit()
for i in self.stop_times_list2:
sql = f"UPDATE stop_times_{self.yesterday} SET {i} = NULL WHERE {i} = '0'"
self.mycursor.execute(sql)
self.mydb.commit()
for i in self.stop_times_list3:
sql = f"UPDATE stop_times_{self.yesterday} SET {i} = NULL WHERE {i} = '00:00:00'"
self.mycursor.execute(sql)
self.mydb.commit()
sql = f"UPDATE stop_times_{self.yesterday} SET headsign_stop = NULL WHERE headsign_stop = '00'"
self.mycursor.execute(sql)
self.mydb.commit()
class write: # uses os to write the daily results to a text file
def __init__(self):
self.mydb = mydb
self.mycursor = mydb.cursor(buffered=True)
self.today = datetime.date.today()
self.yesterday1 = self.today - datetime.timedelta(days = 1)
#self.yesterday1 = '2022-04-30'
self.yesterday = str(self.yesterday1).replace('-', '_')
if not os.path.exists(f"calculus/{self.yesterday1}.txt"): # create file
open(f'calculus/{self.yesterday1}.txt', 'w').close()
clean().clean_data() # to have null
def write_data(self):
# create a dictionary of the data
data_dict = {}
text, result = search.general(self)
for i in range(len(result)):
data_dict.update({text[i]:result[i]})
# add dictionaries to data_dict
dict_to_dict = {}
test = {}
message, result = search.disruptions_message(self)
for i in range(len(result)):
# append the test entries at the end of dict_to_dict
test.update({f'data{i}':message[i], f'value{i}':result[i]})
dict_to_dict.update(test)
data_dict.update({'disruptions_message':dict_to_dict})
dict_to_dict = {}
test = {}
message, result, lat, lon = search.citi_impacted(self)
for i in range(len(result)):
# append the test entries at the end of dict_to_dict
test.update({f'data{i}':message[i], f'value{i}':result[i], f'lat{i}':lat[i], f'lon{i}':lon[i]})
dict_to_dict.update(test)
data_dict.update({'citi_impacted':dict_to_dict})
dict_to_dict = {}
test = {}
message, result, lat, lon, tot = search.citi_time_impacted(self)
for i in range(len(result)):
# append the test entries at the end of dict_to_dict
test.update({f'data{i}':message[i], f'value{i}':result[i], f'lat{i}':lat[i], f'lon{i}':lon[i]})
dict_to_dict.update(test)
data_dict.update({'citi_time_impacted':dict_to_dict})
data_dict.update({'citi_time_impacted_tot':tot})
dict_to_dict = {}
test = {}
message, result = search.disruptions_severity_name(self)
for i in range(len(result)):
# append the test entries at the end of dict_to_dict
test.update({f'data{i}':message[i], f'value{i}':result[i]})
dict_to_dict.update(test)
data_dict.update({'disruptions_severity_name':dict_to_dict})
dict_to_dict = {}
test = {}
message, result, message1, result1, percent = search.routes(self)
for i in range(len(result)):
# append the test entries at the end of dict_to_dict
test.update({f'data{i}':message[i], f'value{i}':result[i]})
dict_to_dict.update(test)
data_dict.update({'routes_max_retard':dict_to_dict})
dict_to_dict = {}
test = {}
for i in range(len(result1)):
# append the test entries at the end of dict_to_dict
test.update({f'data{i}':message1[i], f'value{i}':result1[i]})
dict_to_dict.update(test)
data_dict.update({'routes_min_retard':dict_to_dict})
data_dict.update({'%_routes': percent})
self.write_to_file(data_dict)
def write_to_file(self, data): # data must be dict
with open(f'calculus/{self.yesterday1}.txt', 'a') as f:
json.dump(data, f)
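# Illustrative driver (hedged; mirrors the clean() call made in write.__init__):
#     write().write_data()  # cleans yesterday's tables, aggregates the statistics and
#                           # appends the JSON summary to calculus/<yesterday>.txt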
class search:
def __init__(self):
self.mydb = mydb
self.mycursor = mydb.cursor(buffered=True)
self.today = datetime.date.today()
self.yesterday = self.today - datetime.timedelta(days = 1)
#self.yesterday = '2022-04-27'
self.yesterday = str(self.yesterday).replace('-', '_')
def general(self):
text = ['vehicle', 'stop_times', 'disruptions', '%_vehicle', 'impacted_stop', '%_stop_times','routes']
result = []
# total vehicle journey
self.mycursor.execute(f"SELECT COUNT(*) FROM vehicle_journeys_{self.yesterday}")
result.append(self.mycursor.fetchone()[0])
# total stop times
self.mycursor.execute(f"SELECT COUNT(*) FROM stop_times_{self.yesterday}")
result.append(self.mycursor.fetchone()[0])
# total disruption
self.mycursor.execute(f"SELECT COUNT(*) FROM disruptions_{self.yesterday}")
result.append(self.mycursor.fetchone()[0])
# in % of total vehicle journey
result.append(round(result[2] * 100 / result[0],2))
# total impacted object
self.mycursor.execute(f"SELECT COUNT(*) FROM impacted_object_{self.yesterday}")
result.append(self.mycursor.fetchone()[0])
# in % of total stop times
result.append(round(result[4] * 100 / result[1],2))
# total routes
self.mycursor.execute(f"SELECT COUNT(*) FROM routes_{self.yesterday}")
result.append(self.mycursor.fetchone()[0])
return text,result
def disruptions_message(self):
result = []
message = []
# search all messages and the number of times each appears
self.mycursor.execute(f"SELECT message FROM disruptions_{self.yesterday}")
for i in self.mycursor.fetchall():
if i[0] in message:
index = message.index(i[0])
result[index] += 1
else:
message.append(i[0])
result.append(1)
if None in message:
index = message.index(None)
result.pop(index)
message.pop(index)
# return the 10 most common disruption messages
top_result = []
top_message = []
for i in range(10):
vmax_result = max(result)
imax_result = result.index(vmax_result)
vmax_message = message[imax_result]
top_result.append(vmax_result)
top_message.append(vmax_message)
result[imax_result] = 0
return top_message, top_result
def citi_impacted(self):
result = []
message = []
# search all cities and the number of times each appears
self.mycursor.execute(f"SELECT name_impacted_stop FROM impacted_object_{self.yesterday}")
for i in self.mycursor.fetchall():
if i[0] in message:
index = message.index(i[0])
result[index] += 1
else:
message.append(i[0])
result.append(1)
if None in message:
index = message.index(None)
result.pop(index)
message.pop(index)
# return the 10 most common impacted cities by appearance
top_result = []
top_message = []
for i in range(10):
vmax_result = max(result)
imax_result = result.index(vmax_result)
vmax_message = message[imax_result]
top_result.append(vmax_result)
top_message.append(vmax_message)
result[imax_result] = 0
lon = []
lat = []
for i in range(len(top_message)):
self.mycursor.execute(f"SELECT lat,lon FROM impacted_object_{self.yesterday} WHERE name_impacted_stop = '{top_message[i]}'")
row = self.mycursor.fetchone()
lat.append(row[0])
lon.append(row[1])
return top_message, top_result, lat, lon
def citi_time_impacted(self):
station = []
time = []
# select base_arrival_time and base_departure_time and name_impacted_stop where departure_time is delayed
self.mycursor.execute(f"SELECT base_arrival_time, base_departure_time, name_impacted_stop FROM impacted_object_{self.yesterday} WHERE departure_status = 'delayed' AND base_arrival_time IS NOT NULL AND base_departure_time IS NOT NULL")
for i in self.mycursor.fetchall():
if i[2] in station:
index = station.index(i[2])
time[index] += (i[1] -i[0]).seconds
else:
station.append(i[2])
time.append((i[1] -i[0]).seconds)
tot_time_impacted = sum(time)
tot_time_impacted = datetime.timedelta(seconds = tot_time_impacted)
tot_time_impacted = str(tot_time_impacted)
# return the 10 most common impacted cities by time
top_station = []
top_time = []
for i in range(10):
vmax_time = max(time)
imax_time = time.index(vmax_time)
vmax_station = station[imax_time]
top_time.append(vmax_time)
top_station.append(vmax_station)
time[imax_time] = 0
for i in range(len(top_station)):
conversion = datetime.timedelta(seconds = top_time[i])
top_time[i] = str(conversion)
lon = []
lat = []
for i in range(len(top_station)):
self.mycursor.execute(f"SELECT lat,lon FROM impacted_object_{self.yesterday} WHERE name_impacted_stop = '{top_station[i]}'")
row = self.mycursor.fetchone()
lat.append(row[0])
lon.append(row[1])
return top_station, top_time, lat, lon, tot_time_impacted
def disruptions_severity_name(self):
message = []
result = []
# count all severity_name
self.mycursor.execute(f"SELECT severity_name FROM disruptions_{self.yesterday}")
for i in self.mycursor.fetchall():
if i[0] in message:
index = message.index(i[0])
result[index] += 1
else:
message.append(i[0])
result.append(1)
sort_message = []
sort_result = []
# sort the result by descending order
for i in range(len(result)):
vmax = max(result)
index = result.index(vmax)
sort_result.append(vmax)
sort_message.append(message[index])
result.pop(index)
message.pop(index)
return sort_message, sort_result
def routes(self):
trip_name_disruptions = []
routes_name = []
routes_disruption_count = []
time_trip_name = []
self.mycursor.execute(f"SELECT trip_name FROM impacted_object_{self.yesterday}")
for i in self.mycursor.fetchall():
if i[0] not in trip_name_disruptions:
trip_name_disruptions.append(i[0])
self.mycursor.execute(f"SELECT routes_name FROM routes_{self.yesterday}")
for i in self.mycursor.fetchall():
if i[0] not in routes_name:
routes_name.append(i[0])
routes_disruption_count.append(0)
for i in range(len(trip_name_disruptions)):
self.mycursor.execute(f"SELECT name_impacted_stop FROM impacted_object_{self.yesterday} WHERE name_impacted_stop IS NOT NULL AND trip_name ='{trip_name_disruptions[i]}' ORDER BY Trainid ASC LIMIT 1")
first = self.mycursor.fetchall()
if first == []:
first = 'empty'
first = first[0][0]
self.mycursor.execute(f"SELECT name_impacted_stop FROM impacted_object_{self.yesterday} WHERE name_impacted_stop IS NOT NULL AND trip_name ='{trip_name_disruptions[i]}' ORDER BY amended_arrival_time DESC LIMIT 1")
end = self.mycursor.fetchall()
if end == []:
end = 'empty'
end = end[0][0]
time_trip_name = []
self.mycursor.execute(f"SELECT amended_arrival_time FROM impacted_object_{self.yesterday} WHERE amended_arrival_time IS NOT NULL AND trip_name = '{trip_name_disruptions[i]}' ")
for j in self.mycursor.fetchall():
time_trip_name.append(j[0])
if not time_trip_name:
time_trip_name.append(0)
if time_trip_name[0] > time_trip_name[-1]:
self.mycursor.execute(f"SELECT name_impacted_stop FROM impacted_object_{self.yesterday} WHERE name_impacted_stop IS NOT NULL AND trip_name ='{trip_name_disruptions[i]}' AND amended_arrival_time = '{time_trip_name[-1]}'")
end = self.mycursor.fetchall()
normal = f"{first} - {end}"
reverse = f"{end} - {first}"
if normal in routes_name:
index = routes_name.index(normal)
routes_disruption_count[index] += 1
if reverse in routes_name:
index = routes_name.index(reverse)
routes_disruption_count[index] += 1
sort_message = []
sort_result = []
# sort the result by descending order
for i in range(10):
vmax = max(routes_disruption_count)
index = routes_disruption_count.index(vmax)
sort_result.append(vmax)
sort_message.append(routes_name[index])
routes_disruption_count.pop(index)
routes_name.pop(index)
| |
must be specified")
self.thermodynamic_state = thermodynamic_state
self.sampler_state = sampler_state
# Initialize
self.iteration = 0
# For GHMC / Langevin integrator
self.collision_rate = 1.0 / unit.picoseconds
self.timestep = 2.0 * unit.femtoseconds
self.nsteps = 500 # number of steps per update
self.verbose = True
self.platform = platform
# For writing PDB files
self.pdbfile = None
self.topology = None
self._timing = dict()
self._initializeNetCDF(ncfile)
self._initialized = False
def _initialize(self):
# Create an integrator
integrator_name = 'Langevin'
if integrator_name == 'GHMC':
from openmmtools.integrators import GHMCIntegrator
self.integrator = GHMCIntegrator(
temperature=self.thermodynamic_state.temperature,
collision_rate=self.collision_rate,
timestep=self.timestep)
elif integrator_name == 'Langevin':
from simtk.openmm import LangevinIntegrator
self.integrator = LangevinIntegrator(
self.thermodynamic_state.temperature, self.collision_rate,
self.timestep)
else:
raise Exception(
"integrator_name '%s' not valid." % (integrator_name))
# Create a Context
if self.platform is not None:
self.context = openmm.Context(self.thermodynamic_state.system,
self.integrator, self.platform)
else:
self.context = openmm.Context(self.thermodynamic_state.system,
self.integrator)
self.thermodynamic_state.update_context(self.context, self.integrator)
self.sampler_state.update_context(self.context)
self.context.setVelocitiesToTemperature(
self.thermodynamic_state.temperature)
self._initialized = True
def _initializeNetCDF(self, ncfile):
self.ncfile = ncfile
if self.ncfile == None:
return
natoms = self.thermodynamic_state.system.getNumParticles()
self.ncfile.createDimension('iterations', None)
self.ncfile.createDimension(
'atoms', natoms) # TODO: What do we do if dimension can change?
self.ncfile.createDimension('spatial', 3)
self.ncfile.createVariable(
'positions',
'f4',
dimensions=('iterations', 'atoms', 'spatial'),
zlib=True,
chunksizes=(1, natoms, 3))
self.ncfile.createVariable(
'box_vectors',
'f4',
dimensions=('iterations', 'spatial', 'spatial'),
zlib=True,
chunksizes=(1, 3, 3))
self.ncfile.createVariable(
'potential', 'f8', dimensions=('iterations', ), chunksizes=(1, ))
self.ncfile.createVariable(
'sample_positions_time',
'f4',
dimensions=('iterations', ),
chunksizes=(1, ))
# Weight adaptation information
self.ncfile.createVariable(
'stage', 'i2', dimensions=('iterations', ), chunksizes=(1, ))
self.ncfile.createVariable(
'gamma', 'f8', dimensions=('iterations', ), chunksizes=(1, ))
def update(self):
"""
Update the sampler with one step of sampling.
"""
if not self._initialized:
self._initialize()
if self.verbose:
print("." * 80)
print("MCMC sampler iteration %d" % self.iteration)
initial_time = time.time()
# Reset statistics
if hasattr(self.integrator, 'setGlobalVariableByName'):
self.integrator.setGlobalVariableByName('naccept', 0)
# Take some steps
self.integrator.step(self.nsteps)
# Get new sampler state.
self.sampler_state = SamplerState.createFromContext(self.context)
# Report statistics
if hasattr(self.integrator, 'getGlobalVariableByName'):
naccept = self.integrator.getGlobalVariableByName('naccept')
fraction_accepted = float(naccept) / float(self.nsteps)
if self.verbose:
print("Accepted %d / %d GHMC steps (%.2f%%)." %
(naccept, self.nsteps, fraction_accepted * 100))
final_time = time.time()
elapsed_time = final_time - initial_time
self._timing['sample positions'] = elapsed_time
if self.verbose:
final_energy = self.context.getState(
getEnergy=True).getPotentialEnergy(
) * self.thermodynamic_state.beta
print('Final energy is %12.3f kT' % (final_energy))
print('elapsed time %8.3f s' % elapsed_time)
if self.ncfile:
self.ncfile.variables['positions'][
self.
iteration, :, :] = self.sampler_state.positions[:, :] / unit.nanometers
for k in range(3):
self.ncfile.variables['box_vectors'][
self.iteration,
k, :] = self.sampler_state.box_vectors[k, :] / unit.nanometers
self.ncfile.variables['potential'][
self.
iteration] = self.thermodynamic_state.beta * self.context.getState(
getEnergy=True).getPotentialEnergy()
self.ncfile.variables['sample_positions_time'][
self.iteration] = elapsed_time
# Increment iteration count
self.iteration += 1
if self.verbose:
print("." * 80)
if self.pdbfile is not None:
print("Writing frame...")
from simtk.openmm.app import PDBFile
PDBFile.writeModel(self.topology, self.sampler_state.positions,
self.pdbfile, self.iteration)
self.pdbfile.flush()
def run(self, niterations=1):
"""
Run the sampler for the specified number of iterations
Parameters
----------
niterations : int, optional, default=1
Number of iterations to run the sampler for.
"""
for iteration in range(niterations):
self.update()
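# Illustrative loop (hedged; assumes an instance of this sampler built from a
# ThermodynamicState and a SamplerState, here called `sampler`):
#     sampler.verbose = True
#     sampler.run(niterations=100)  # performs 100 update() calls of nsteps each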
class NCMCSampler(object):
def __init__(self, thermodynamic_state, sampler_state, move):
# Make a deep copy of the state so that initial state is unchanged.
self.thermodynamic_state = copy.deepcopy(thermodynamic_state)
self.sampler_state = copy.deepcopy(sampler_state)
self.move = move
def run(self, n_iterations=1, integrator_idx=0):
"""
Run the sampler for a specified number of iterations.
Parameters
----------
n_iterations : int
Number of iterations of the sampler to run.
"""
# Apply move for n_iterations.
for iteration in range(n_iterations):
self.move.apply(self.thermodynamic_state, self.sampler_state,
integrator_idx)
def minimize(self,
tolerance=1.0 * unit.kilocalories_per_mole / unit.angstroms,
max_iterations=100,
context_cache=None):
"""Minimize the current configuration.
Parameters
----------
tolerance : simtk.unit.Quantity, optional
Tolerance to use for minimization termination criterion (units of
energy/(mole*distance), default is 1*kilocalories_per_mole/angstroms).
max_iterations : int, optional
Maximum number of iterations to use for minimization. If 0, the minimization
will continue until convergence (default is 100).
context_cache : openmmtools.cache.ContextCache, optional
The ContextCache to use for Context creation. If None, the global cache
openmmtools.cache.global_context_cache is used (default is None).
"""
if context_cache is None:
context_cache = cache.global_context_cache
timer = Timer()
# Use LocalEnergyMinimizer
timer.start("Context request")
integrator = openmm.VerletIntegrator(1.0 * unit.femtosecond)
context, integrator = context_cache.get_context(
self.thermodynamic_state, integrator)
self.sampler_state.apply_to_context(context)
logger.debug("LocalEnergyMinimizer: platform is %s" %
context.getPlatform().getName())
logger.debug("Minimizing with tolerance %s and %d max. iterations." %
(tolerance, max_iterations))
timer.stop("Context request")
timer.start("LocalEnergyMinimizer minimize")
openmm.LocalEnergyMinimizer.minimize(context, tolerance,
max_iterations)
timer.stop("LocalEnergyMinimizer minimize")
context_state = context.getState(
getPositions=True,
getVelocities=True,
getEnergy=True,
enforcePeriodicBox=self.thermodynamic_state.is_periodic)
potential_energy = context_state.getPotentialEnergy()
print(potential_energy)
# Retrieve data.
self.sampler_state.update_from_context(context)
#timer.report_timing()
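# Illustrative usage (hedged; thermo_state, sampler_state and ncmc_move are assumed to be
# built elsewhere with openmmtools):
#     sampler = NCMCSampler(thermo_state, sampler_state, ncmc_move)
#     sampler.minimize(max_iterations=50)
#     sampler.run(n_iterations=10, integrator_idx=0)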
class NCMCMove(BaseIntegratorMove):
def __init__(self,
timestep=1.0 * unit.femtosecond,
collision_rate=10.0 / unit.picoseconds,
n_steps=1000,
temperature=300 * unit.kelvin,
reassign_velocities=True,
**kwargs):
super(NCMCMove, self).__init__(n_steps=n_steps, **kwargs)
self.timestep = timestep
self.temperature = temperature
self.collision_rate = collision_rate
self.n_accepted = 0 # Number of accepted steps.
self.n_proposed = 0 # Number of attempted steps.
#self.reporters = [NetCDF4Reporter('test.nc', 1)]
@property
def fraction_accepted(self):
"""Ratio between accepted over attempted moves (read-only).
If the number of attempted steps is 0, this is numpy.NaN.
"""
if self.n_proposed == 0:
return np.NaN
# TODO drop the casting when Python 2 support is dropped
return float(self.n_accepted) / self.n_proposed
@property
def statistics(self):
"""The acceptance statistics as a dictionary."""
return dict(n_accepted=self.n_accepted, n_proposed=self.n_proposed)
@statistics.setter
def statistics(self, value):
self.n_accepted = value['n_accepted']
self.n_proposed = value['n_proposed']
def reset_statistics(self):
"""Reset the internal statistics of number of accepted and attempted moves."""
self.n_accepted = 0
self.n_proposed = 0
def apply(self, thermodynamic_state, sampler_state, integrator_idx):
"""Propagate the state through the integrator.
This updates the SamplerState after the integration. It also logs
benchmarking information through the utils.Timer class.
Parameters
----------
thermodynamic_state : openmmtools.states.ThermodynamicState
The thermodynamic state to use to propagate dynamics.
sampler_state : openmmtools.states.SamplerState
The sampler state to apply the move to. This is modified.
See Also
--------
openmmtools.utils.Timer
"""
move_name = self.__class__.__name__ # shortcut
timer = Timer()
# Check if we have to use the global cache.
if self.context_cache is None:
context_cache = cache.global_context_cache
else:
print('Found context cache')
context_cache = self.context_cache
# Create integrator.
integrator = self._get_integrator(thermodynamic_state)
self.integrator_idx = integrator_idx
# Create context.
timer.start("{}: Context request".format(move_name))
context, integrator = context_cache.get_context(
thermodynamic_state, integrator)
#print('SamplerContext compat:', sampler_state.is_context_compatible(context))
integrator.setCurrentIntegrator(self.integrator_idx)
#integrator = integrator.getIntegrator(integrator_idx)
#RestorableOpenMMObject.restore_interface(integrator)
#integrator.pretty_print()
#print('Current Integrator:', integrator)
timer.stop("{}: Context request".format(move_name))
logger.debug("{}: Context obtained, platform is {}".format(
move_name,
context.getPlatform().getName()))
# Perform the integration.
for attempt_counter in range(self.n_restart_attempts + 1):
#If we reassign velocities, we can ignore the ones in sampler_state.
sampler_state.apply_to_context(
context, ignore_velocities=self.reassign_velocities)
if self.reassign_velocities:
context.setVelocitiesToTemperature(
thermodynamic_state.temperature)
# Subclasses may implement _before_integration().
self._before_integration(context, thermodynamic_state)
#specify nc integrator variables to report in verbose output
self._integrator_keys_ = [
'lambda', 'shadow_work', 'protocol_work', 'Eold', 'Enew'
]
try:
# Run dynamics.
timer.start("{}: step({})".format(move_name, self.n_steps))
test_int = integrator.getIntegrator(self.integrator_idx)
RestorableOpenMMObject.restore_interface(test_int)
print(test_int)
try:
for key in self._integrator_keys_:
print(key, test_int.getGlobalVariableByName(key))
except Exception:
pass
integrator.step(self.n_steps)
try:
for key in self._integrator_keys_:
print(key, test_int.getGlobalVariableByName(key))
except Exception:
pass
except Exception as e:
print(e)
# Catches particle positions becoming nan during integration.
restart = True
else:
timer.stop("{}: step({})".format(move_name, self.n_steps))
# We get also velocities here even if we don't need them because we
# will recycle this State to update the sampler state object. This
# way we won't need a second call to Context.getState().
context_state = context.getState(
getPositions=True,
getVelocities=True,
getEnergy=True,
enforcePeriodicBox=thermodynamic_state.is_periodic)
# Check for NaNs in energies.
potential_energy = context_state.getPotentialEnergy()
print('potential_energy', potential_energy)
restart = np.isnan(
potential_energy.value_in_unit(potential_energy.unit))
# Restart the move if we found NaNs.
if restart:
err_msg = (
'Potential energy is NaN after {} attempts of integration '
'with move {}'.format(attempt_counter,
self.__class__.__name__))
# If we are on our last chance before crash, try to re-initialize context
if attempt_counter == self.n_restart_attempts - 1:
logger.error(
err_msg +
' Trying to reinitialize Context as a last-resort restart attempt...'
)
context.reinitialize()
sampler_state.apply_to_context(context)
thermodynamic_state.apply_to_context(context)
# If we have hit the number of restart attempts, raise an exception.
elif attempt_counter == self.n_restart_attempts:
# Restore the context to the state right before the integration.
sampler_state.apply_to_context(context)
logger.error(err_msg)
raise IntegratorMoveError(err_msg, self, context)
else:
logger.warning(err_msg + ' Attempting a restart...')
else:
break
# Subclasses can read here info from the context to update internal statistics.
self._after_integration(context, thermodynamic_state)
# Updated sampler state.
timer.start("{}: update sampler state".format(move_name))
# This is an optimization around the fact that Collective Variables are not a part of the State,
# but are a part of the Context. We do this call twice to minimize duplicating information fetched from
# the State.
# Update everything but the collective variables from the State object
sampler_state.update_from_context(
context_state, ignore_collective_variables=True)
# Update only the collective variables from the Context
sampler_state.update_from_context(
context,
ignore_positions=True,
ignore_velocities=True,
ignore_collective_variables=False)
timer.stop("{}: update sampler state".format(move_name))
#timer.report_timing()
def | |
# repo: AlainLich/dpath-python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -*- mode: Python -*-
#
from dpath import options
import dpath.segments as api
import dpath
from hypothesis import given, assume, settings, HealthCheck
import hypothesis.strategies as st
import unittest
import os
import sys
import re
# enables modifying some globals
MAX_SAMPLES = None
if __name__ == "__main__":
if "-v" in sys.argv:
MAX_SAMPLES = 30
MAX_LEAVES = 20
# ..............................................................................
# This allows checking that we did not break things by setting
# dpath.options.DPATH_ACCEPT_RE_REGEXP = True
# ..............................................................................
if "--re" in sys.argv:
dpath.options.DPATH_ACCEPT_RE_REGEXP = True
# enable re.regexp support in path expr.
# default is disabled
settings.register_profile("default", suppress_health_check=(HealthCheck.too_slow,))
settings.load_profile(os.getenv(u'HYPOTHESIS_PROFILE', 'default'))
if MAX_SAMPLES is None:
MAX_LEAVES = 50
MAX_SAMPLES = 100
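# Illustrative invocations (hedged; the file name below is an assumption):
#     python3 test_segments.py        # full run: 100 examples, up to 50 leaves
#     python3 test_segments.py -v     # quick run: 30 examples, up to 20 leaves
#     python3 test_segments.py --re   # also accept re.regexp support in path expressions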
random_key_int = st.integers(0, 1000)
random_key_str = st.binary() | st.text()
random_key = random_key_str | random_key_int
random_segments = st.lists(random_key)
random_leaf = st.integers() | st.floats() | st.booleans() | st.binary() | st.text() | st.none()
if options.ALLOW_EMPTY_STRING_KEYS:
random_thing = st.recursive(
random_leaf,
lambda children: (st.lists(children) | st.tuples(children)
| st.dictionaries(st.binary() | st.text(), children)),
max_leaves=MAX_LEAVES)
else:
random_thing = st.recursive(
random_leaf,
lambda children: (st.lists(children) | st.tuples(children)
| st.dictionaries(st.binary(min_size=1) | st.text(min_size=1),
children)),
max_leaves=MAX_LEAVES)
random_node = random_thing.filter(lambda thing: isinstance(thing, (list, tuple, dict)))
if options.ALLOW_EMPTY_STRING_KEYS:
random_mutable_thing = st.recursive(
random_leaf,
lambda children: st.lists(children) | st.dictionaries(st.binary() | st.text(),
children),
max_leaves=MAX_LEAVES)
else:
random_mutable_thing = st.recursive(
random_leaf,
lambda children: (st.lists(children)
| st.dictionaries(st.binary(min_size=1) | st.text(min_size=1),
children)),
max_leaves=MAX_LEAVES)
random_mutable_node = random_mutable_thing.filter(lambda thing: isinstance(thing, (list, dict)))
@st.composite
def mutate(draw, segment):
# Convert number segments.
segment = api.int_str(segment)
# Infer the type constructor for the result.
kind = type(segment)
# Produce a valid kind conversion for our wildcards.
if isinstance(segment, bytes):
def to_kind(v):
try:
return bytes(v, 'utf-8')
except:
return kind(v)
else:
def to_kind(v):
return kind(v)
# Convert to an list of single values.
converted = []
for i in range(len(segment)):
# This carefully constructed nonsense to get a single value
# is necessary to work around limitations in the bytes type
# iteration returning integers instead of byte strings of
# length 1.
c = segment[i:i + 1]
# Check for values that need to be escaped.
if c in tuple(map(to_kind, ('*', '?', '[', ']'))):
c = to_kind('[') + c + to_kind(']')
converted.append(c)
# Start with a non-mutated result.
result = converted
# 50/50 chance we will attempt any mutation.
change = draw(st.sampled_from((True, False)))
if change:
result = []
# For every value in segment maybe mutate, maybe not.
for c in converted:
# If the length isn't 1 then, we know this value is already
# an escaped special character. We will not mutate these.
if len(c) != 1:
result.append(c)
else:
result.append(draw(st.sampled_from((c, to_kind('?'), to_kind('*')))))
combined = kind().join(result)
# If we by chance produce the star-star result, then just revert
# back to the original converted segment. This is not the mutation
# you are looking for.
if combined == to_kind('**'):
combined = kind().join(converted)
return combined
@st.composite
def random_segments_with_glob(draw):
segments = draw(random_segments)
glob = list(map(lambda x: draw(mutate(x)), segments))
# 50/50 chance we will attempt to add a star-star to the glob.
use_ss = draw(st.sampled_from((True, False)))
if use_ss:
# Decide if we are inserting a new segment or replacing a range.
insert_ss = draw(st.sampled_from((True, False)))
if insert_ss:
index = draw(st.integers(0, len(glob)))
glob.insert(index, '**')
else:
start = draw(st.integers(0, len(glob)))
stop = draw(st.integers(start, len(glob)))
glob[start:stop] = ['**']
return (segments, glob)
@st.composite
def random_segments_with_nonmatching_glob(draw):
(segments, glob) = draw(random_segments_with_glob())
# Generate a segment that is not in segments.
invalid = draw(random_key.filter(lambda x: x not in segments and x not in ('*', '**')))
# Do we just have a star-star glob? It matches everything, so we
# need to replace it entirely.
if len(glob) == 1 and glob[0] == '**':
glob = [invalid]
# Do we have a star glob and only one segment? It matches anything
# in the segment, so we need to replace it entirely.
elif len(glob) == 1 and glob[0] == '*' and len(segments) == 1:
glob = [invalid]
# Otherwise we can add something we know isn't in the segments to
# the glob.
else:
index = draw(st.integers(0, len(glob)))
glob.insert(index, invalid)
return (segments, glob)
@st.composite
def random_walk(draw):
node = draw(random_mutable_node)
found = tuple(api.walk(node))
assume(len(found) > 0)
return (node, draw(st.sampled_from(found)))
@st.composite
def random_leaves(draw):
node = draw(random_mutable_node)
found = tuple(api.leaves(node))
assume(len(found) > 0)
return (node, draw(st.sampled_from(found)))
def setup():
# Allow empty strings in segments.
options.ALLOW_EMPTY_STRING_KEYS = True
def teardown():
# Revert back to default.
options.ALLOW_EMPTY_STRING_KEYS = False
#
# Run under unittest
#
class TestSegments(unittest.TestCase):
DO_DEBUG_PRINT = False
@settings(max_examples=MAX_SAMPLES)
@given(random_node)
def test_kvs(self, node):
'''
Given a node, kvs should produce a key that when used to extract
from the node renders the exact same value given.
'''
for k, v in api.kvs(node):
assert node[k] is v
@settings(max_examples=MAX_SAMPLES)
@given(random_leaf)
def test_leaf_with_leaf(self, leaf):
'''
Given a leaf, leaf should return True.
'''
assert api.leaf(leaf) is True
@settings(max_examples=MAX_SAMPLES)
@given(random_node)
def test_leaf_with_node(self, node):
'''
Given a node, leaf should return False.
'''
assert api.leaf(node) is False
@settings(max_examples=MAX_SAMPLES)
@given(random_thing)
def test_walk(self, thing):
'''
Given a thing to walk, walk should yield key, value pairs where key
is a tuple of non-zero length.
'''
for k, v in api.walk(thing):
assert isinstance(k, tuple)
assert len(k) > 0
@settings(max_examples=MAX_SAMPLES)
@given(random_node)
def test_get(self, node):
'''
Given a node, get should return the exact value given a key for all
key, value pairs in the node.
'''
for k, v in api.walk(node):
assert api.get(node, k) is v
@settings(max_examples=MAX_SAMPLES)
@given(random_node)
def test_has(self, node):
'''
Given a node, has should return True for all paths, False otherwise.
'''
for k, v in api.walk(node):
assert api.has(node, k) is True
# If we are at a leaf, then we can create a value that isn't
# present easily.
if api.leaf(v):
assert api.has(node, k + (0,)) is False
@settings(max_examples=MAX_SAMPLES)
@given(random_segments)
def test_expand(self, segments):
'''
Given segments, expand should produce as many results as there were
segments and the last result should equal the given segments.
'''
count = len(segments)
result = list(api.expand(segments))
assert count == len(result)
if count > 0:
assert segments == result[-1]
@settings(max_examples=MAX_SAMPLES)
@given(random_node)
def test_types(self, node):
'''
Given a node, types should yield a tuple of key, type pairs and the
type indicated should equal the type of the value.
'''
for k, v in api.walk(node):
ts = api.types(node, k)
ta = ()
for tk, tt in ts:
ta += (tk,)
assert type(api.get(node, ta)) is tt
@settings(max_examples=MAX_SAMPLES)
@given(random_node)
def test_leaves(self, node):
'''
Given a node, leaves should yield only leaf key, value pairs.
'''
for k, v in api.leaves(node):
assert api.leafy(v)
@settings(max_examples=MAX_SAMPLES)
@given(random_segments_with_glob())
def test_match(self, pair):
'''
Given segments and a known good glob, match should be True.
'''
(segments, glob) = pair
assert api.match(segments, glob) is True
@settings(max_examples=MAX_SAMPLES)
@given(random_segments_with_nonmatching_glob())
def test_match_nonmatching(self, pair):
'''
Given segments and a known bad glob, match should be False.
'''
(segments, glob) = pair
assert api.match(segments, glob) is False
@settings(max_examples=MAX_SAMPLES)
@given(walkable=random_walk(), value=random_thing)
def test_set_walkable(self, walkable, value):
'''
Given a walkable location, set should be able to update any value.
'''
(node, (segments, found)) = walkable
api.set(node, segments, value)
assert api.get(node, segments) is value
@settings(max_examples=MAX_SAMPLES)
@given(walkable=random_leaves(),
kstr=random_key_str,
kint=random_key_int,
value=random_thing,
extension=random_segments)
def test_set_create_missing(self, walkable, kstr, kint, value, extension):
'''
Given a walkable non-leaf, set should be able to create missing
nodes and set a new value.
'''
(node, (segments, found)) = walkable
assume(api.leaf(found))
parent_segments = segments[:-1]
parent = api.get(node, parent_segments)
if isinstance(parent, list):
assume(len(parent) < kint)
destination = parent_segments + (kint,) + tuple(extension)
elif isinstance(parent, dict):
assume(kstr not in parent)
destination = parent_segments + (kstr,) + tuple(extension)
else:
raise Exception('mad mad world')
api.set(node, destination, value)
assert api.get(node, destination) is value
@settings(max_examples=MAX_SAMPLES)
@given(thing=random_thing)
def test_fold(self, thing):
'''
Given a thing, count paths with fold.
'''
def f(o, p, a):
a[0] += 1
[count] = api.fold(thing, f, [0])
assert count == len(tuple(api.walk(thing)))
# ..............................................................................
# This allows handling the rare case documented in file: issues/err_walk.py
#
rex_rarecase = re.compile(r"\[[^[]+\]")
def excuseRareCase(segments):
for s in segments:
if TestSegments.rex_rarecase.match(s):
return True
return False
#
# ..............................................................................
@settings(max_examples=MAX_SAMPLES)
@given(walkable=random_walk())
def test_view(self, walkable):
'''
Given a walkable location, view that location.
'''
(node, (segments, found)) = walkable
assume(found == found) # Hello, nan! We don't want you here.
view = api.view(node, segments)
ag1 = api.get(view, | |
FrequencyFW(Firework):
def __init__(
self,
molecule=None,
name="frequency calculation",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
db_file=None,
parents=None,
**kwargs
):
"""
Optimize the given structure.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
multimode (str): Parallelization scheme, either openmp or mpi. Defaults to openmp.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set, such as dft_rung,
basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See
pymatgen/io/qchem/sets.py for default values of all input parameters. For
instance, if a user wanted to use a more advanced DFT functional, include a pcm
with a dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set":
"6-311++g**"}. However, more advanced customization of the input is also
possible through the overwrite_inputs key which allows the user to directly
modify the rem, pcm, smd, and solvent dictionaries that QChemDictSet passes to
inputs.py to print an actual input file. For instance, if a user wanted to set
the sym_ignore flag in the rem section of the input file to true, then they
would set qchem_input_params = {"overwrite_inputs": "rem": {"sym_ignore":
"true"}}. Of course, overwrite_inputs could be used in conjunction with more
typical modifications, as seen in the test_double_FF_opt workflow test.
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
input_file = "mol.qin"
output_file = "mol.qout"
t = []
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="FreqSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="normal",
)
)
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={"task_label": name},
)
)
super().__init__(t, parents=parents, name=name, **kwargs)
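# Illustrative construction (hedged; `mol` is assumed to be a pymatgen Molecule and the
# env_chk placeholders are resolved from the worker configuration):
#     fw = FrequencyFW(molecule=mol,
#                      qchem_input_params={"dft_rung": 4, "basis_set": "6-311++g**"},
#                      db_file=">>db_file<<")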
class PESScanFW(Firework):
def __init__(
self,
molecule=None,
name="potential energy surface scan",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
scan_variables=None,
db_file=None,
parents=None,
**kwargs
):
"""
Perform a potential energy surface scan by varying bond lengths, angles,
and/or dihedral angles in a molecule.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set,
such as dft_rung, basis_set, pcm_dielectric, scf_algorithm,
or max_scf_cycles. See pymatgen/io/qchem/sets.py for default
values of all input parameters. For instance, if a user wanted
to use a more advanced DFT functional, include a pcm with a
dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30,
"basis_set": "6-311++g**"}. However, more advanced customization
of the input is also possible through the overwrite_inputs key
which allows the user to directly modify the rem, pcm, smd, and
solvent dictionaries that QChemDictSet passes to inputs.py to
print an actual input file.
scan_variables (dict): dict {str: list}, where the key is the type of variable ("stre"
for bond length, "bend" for angle, "tors" for dihedral angle),
and the list contains all of the variable set information
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
if scan_variables is None:
raise ValueError(
"Some variable input must be given! Provide some "
"bond, angle, or dihedral angle information."
)
qchem_input_params = qchem_input_params or dict()
qchem_input_params["scan_variables"] = scan_variables
input_file = "mol.qin"
output_file = "mol.qout"
t = list()
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="PESScanSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="normal",
)
)
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={"task_label": name},
)
)
super().__init__(t, parents=parents, name=name, **kwargs)
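# Illustrative construction (hedged; the scan-string format below is an assumption in the
# spirit of Q-Chem's $scan section: atom indices, start, stop, step):
#     fw = PESScanFW(molecule=mol,
#                    scan_variables={"stre": ["1 2 1.0 2.0 0.1"]},
#                    db_file=">>db_file<<")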
class FrequencyFlatteningOptimizeFW(Firework):
def __init__(
self,
molecule=None,
name="frequency flattening structure optimization",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
max_iterations=10,
max_molecule_perturb_scale=0.3,
linked=True,
freq_before_opt=False,
perturb_geometry=False,
mode=None,
scale=1.0,
db_file=None,
parents=None,
**kwargs
):
"""
Iteratively optimize the given structure and flatten imaginary frequencies to ensure that
the resulting structure is a true minima and not a saddle point.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set, such as dft_rung,
basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See
pymatgen/io/qchem/sets.py for default values of all input parameters. For
instance, if a user wanted to use a more advanced DFT functional, include a pcm
with a dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set":
"6-311++g**"}. However, more advanced customization of the input is also
possible through the overwrite_inputs key which allows the user to directly
modify the rem, pcm, smd, and solvent dictionaries that QChemDictSet passes to
inputs.py to print an actual input file. For instance, if a user wanted to set
the sym_ignore flag in the rem section of the input file to true, then they
would set qchem_input_params = {"overwrite_inputs": "rem": {"sym_ignore":
"true"}}. Of course, overwrite_inputs could be used in conjunction with more
typical modifications, as seen in the test_double_FF_opt workflow test.
max_iterations (int): Number of perturbation -> optimization -> frequency
iterations to perform. Defaults to 10.
max_molecule_perturb_scale (float): The maximum scaled perturbation that can be
applied to the molecule. Defaults to 0.3.
freq_before_opt (bool): If True (default False), run a frequency
calculation before any opt/ts searches to improve understanding
of the local potential energy surface. Only use this option if
linked=True.
perturb_geometry (bool): If True (default False), then modify the input geometry by some
translation matrix (N x 3, where N is the number of atoms) before optimizing.
mode (np.ndarray): If not None (default), then perturb the geometry by this matrix.
This will be ignored if perturb_geometry is False.
scale (float): Scaling factor for perturbation
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
input_file = "mol.qin"
output_file = "mol.qout"
t = []
if perturb_geometry:
t.append(PerturbGeometry(molecule=molecule, mode=mode, scale=scale))
# Make sure that subsequent firetasks use the perturbed Molecule
molecule = None
if freq_before_opt:
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="FreqSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
else:
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="OptSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="opt_with_frequency_flattener",
max_iterations=max_iterations,
max_molecule_perturb_scale=max_molecule_perturb_scale,
linked=linked,
freq_before_opt=freq_before_opt,
)
)
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={
"task_label": name,
"special_run_type": "frequency_flattener",
"linked": linked,
},
)
)
super().__init__(t, parents=parents, name=name, **kwargs)
class FrequencyFlatteningTransitionStateFW(Firework):
def __init__(
self,
molecule=None,
name="frequency flattening transition state optimization",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
max_iterations=3,
max_molecule_perturb_scale=0.3,
linked=True,
freq_before_opt=True,
perturb_geometry=False,
mode=None,
scale=1,
db_file=None,
parents=None,
**kwargs
):
"""
Iteratively optimize the transition state structure and flatten imaginary frequencies to
ensure that the resulting structure is a true transition state.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set, such as dft_rung,
basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See
pymatgen/io/qchem/sets.py for default values of all input parameters. For
instance, if a user wanted to use a more advanced DFT functional, include a pcm
with a dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set":
"6-311++g**"}. However, more advanced customization of the input is also
possible through | |
# repo: gitter-badger/goes2go, file: goes2go/rgb.py
## <NAME>
## August 8, 2019
"""
===========
RGB Recipes
===========
.. image:: /_static/RGB_sample.png
These functions take GOES-East or GOES-West multichannel data on a
fixed grid (files named ``ABI-L2-MCMIPC``) and generate a 3D
Red-Green-Blue (RGB) array for various GOES RGB products.
RGB recipes are based on the `GOES Quick Guides
<http://rammb.cira.colostate.edu/training/visit/quick_guides/>`_
and include the following:
- NaturalColor
- TrueColor
- FireTemperature
- AirMass
- DayCloudPhase
- DayConvection
- DayCloudConvection
- DayLandCloud
- DayLandCloudFire
- WaterVapor
- DifferentialWaterVapor
- DaySnowFog
- NighttimeMicrophysics
- Dust
- SulfurDioxide
- Ash
- SplitWindowDifference
- NightFogDifference
- RocketPlume ✨New - July 9, 2021
The returned RGB can easily be viewed with ``plt.imshow(RGB)``.
For imshow to show an RGB image, the values must range between 0 and 1.
Values are normalized using the range specified in the Quick Guides.
This normalization is synonymous to `contrast or histogram stretching
<https://micro.magnet.fsu.edu/primer/java/digitalimaging/processing/histogramstretching/index.html>`_
(`more info here
<https://staff.fnwi.uva.nl/r.vandenboomgaard/IPCV20162017/LectureNotes/IP/PointOperators/ImageStretching.html>`_)
and follows the formula:
.. code-block:: python
NormalizedValue = (OriginalValue-LowerLimit)/(UpperLimit-LowerLimit)
`Gamma correction <https://en.wikipedia.org/wiki/Gamma_correction>`_
darkens or lightens an image (`more info
<https://www.cambridgeincolour.com/tutorials/gamma-correction.htm>`_)
and follows the decoding formula:
.. code-block:: python
R_corrected = R**(1/gamma)
The input for all these functions are denoted by ``C`` for "channels" which
represents the GOES ABI multichannel file opened with xarray. For example:
.. code-block:: python
FILE = 'OR_ABI-L2-MCMIPC-M6_G17_s20192201631196_e20192201633575_c20192201634109.nc'
C = xarray.open_dataset(FILE)
All RGB products are demonstrated in the `make_RGB_Demo
<https://github.com/blaylockbk/goes2go/tree/master/notebooks>`_ notebook.
Note: I don't have a `GeoColor <https://journals.ametsoc.org/view/journals/atot/37/3/JTECH-D-19-0134.1.xml>`_
RGB, because it is much more involved than simply stacking RGB channels. If anyone does do
something similar to a GeoColor image, let me know!
ABI Band Reference
------------------
https://www.weather.gov/media/crp/GOES_16_Guides_FINALBIS.pdf
http://cimss.ssec.wisc.edu/goes/GOESR_QuickGuides.html
https://www.goes-r.gov/mission/ABI-bands-quick-info.html
=============== ================== ============================================== ======================================
ABI Band Number Central Wavelength Name Type
=============== ================== ============================================== ======================================
1 0.47 μm "Blue" Band Visible
2 0.64 μm "Red" Band Visible
3 0.86 μm "Veggie" Band Near-IR
4 1.37 μm "Cirrus" Band Near-IR
5 1.6 μm "Snow/Ice" Band Near-IR
6 2.2 μm "Cloud Particle Size" Band Near-IR
7 3.9 μm "Shortwave Window" Band IR (with reflected daytime component)
8 6.2 μm "Upper-Level Tropospheric Water Vapor" Band IR
9 6.9 μm "Mid-Level Tropospheric Water Vapor" Band IR
10 7.3 μm "Lower-level Water Vapor" Band IR
11 8.4 μm "Cloud-Top Phase" Band IR
12 9.6 μm "Ozone Band" IR
13 10.3 μm "Clean" IR Longwave Window Band IR
14 11.2 μm IR Longwave Window Band IR
15 12.3 μm "Dirty" Longwave Window Band IR
16 13.3 μm "CO2" Longwave infrared IR
=============== ================== ============================================== ======================================
"""
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import xarray as xr
from goes2go.tools import field_of_view
def get_imshow_kwargs(ds):
"""
Help determine the ``plt.imshow`` arguments.
Parameters
----------
ds : xarray.Dataset
Returns
-------
kwargs for the ``plt.imshow`` with the correct image extent limits.
Examples
--------
.. code:: python
r = TrueColor(G)
ax = common_features(r.crs)
ax.imshow(r.TrueColor, **get_imshow_kwargs(r))
"""
return dict(
extent=[ds.x2.data.min(), ds.x2.data.max(), ds.y2.data.min(), ds.y2.data.max()],
transform=ds.crs,
origin="upper",
interpolation="none",
)
def rgb_as_dataset(G, RGB, description, latlon=False):
"""
Assemble a dataset with the RGB array with other data from the file.
Parameters
----------
G : xarray.Dataset
GOES ABI data from multispectral channel
RGB : array
A 3D array of R, G, and B values at each pixel
description : str
A description of what the RGB data represents.
latlon : bool
Derive the latitude and longitude of each pixel.
"""
# Assemble a new xarray.Dataset for the RGB data
ds = xr.Dataset({description.replace(" ", ""): (["y", "x", "rgb"], RGB)})
ds.attrs["description"] = description
# Convert x, y points to latitude/longitude
_, crs = field_of_view(G)
sat_h = G.goes_imager_projection.perspective_point_height
x2 = G.x * sat_h
y2 = G.y * sat_h
ds.coords["x2"] = x2
ds.coords["y2"] = y2
ds["x2"].attrs["long_name"] = "x sweep in crs units (m); x * sat_height"
ds["y2"].attrs["long_name"] = "y sweep in crs units (m); y * sat_height"
ds.attrs["crs"] = crs
if latlon:
X, Y = np.meshgrid(x2, y2)
a = ccrs.PlateCarree().transform_points(crs, X, Y)
lons, lats, _ = a[:, :, 0], a[:, :, 1], a[:, :, 2]
ds.coords["longitude"] = (("y", "x"), lons)
ds.coords["latitude"] = (("y", "x"), lats)
# Copy some coordinates and attributes of interest from the original data
for i in ["x", "y", "t", "geospatial_lat_lon_extent"]:
ds.coords[i] = G[i]
for i in [
"orbital_slot",
"platform_ID",
"scene_id",
"spatial_resolution",
"instrument_type",
"title",
]:
ds.attrs[i] = G.attrs[i]
## Provide some helpers to plot with imshow
ds.attrs["imshow_kwargs"] = get_imshow_kwargs(ds)
## Provide some helpers to plot with imshow and pcolormesh
## Not super useful, because pcolormesh doesn't allow nans in x, y dimension
# pcolormesh_kwargs = dict(
# color = RGB.reshape(np.shape(RGB)[0] * np.shape(RGB)[1], np.shape(RGB)[2])
# shading='nearest'
# )
# ds.attrs['pcolormesh_kwargs'] = pcolormesh_kwargs
return ds
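# Illustrative usage (hedged; `G` is an opened ABI-L2-MCMIP dataset and `RGB` a 3D array
# produced by one of the recipes below):
#     ds = rgb_as_dataset(G, RGB, "TrueColor")
#     plt.imshow(ds.TrueColor, **get_imshow_kwargs(ds))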
def load_RGB_channels(C, channels):
"""
    Return the R, G, and B arrays for the three channels requested. This
    function will convert any data in units of Kelvin to Celsius.
Parameters
----------
C : xarray.Dataset
The GOES multi-channel file opened with xarray.
channels : tuple of size 3
A tuple of the channel number for each (R, G, B).
For example ``channel=(2, 3, 1)`` is for the true color RGB
Returns
-------
A list with three items that are used for R, G, and B.
>>> R, G, B = load_RGB_channels(C, (2,3,1))
"""
# Units of each channel requested
units = [C["CMI_C%02d" % c].units for c in channels]
RGB = []
for u, c in zip(units, channels):
if u == "K":
            # Convert from Kelvin to Celsius
RGB.append(C["CMI_C%02d" % c].data - 273.15)
else:
RGB.append(C["CMI_C%02d" % c].data)
return RGB
def gamma_correction(a, gamma, verbose=False):
"""
Darken or lighten an image with `gamma correction
<https://en.wikipedia.org/wiki/Gamma_correction>`_.
Parameters
----------
a : array-like
An array of values, typically the RGB array of values in
an image.
gamma : float
Gamma value to decode the image by.
Values > 1 will lighten an image.
Values < 1 will darken an image.
"""
if verbose:
if gamma > 1:
print("Gamma Correction: 🌔 Lighten image")
elif gamma < 1:
print("Gamma Correction: 🌒 Darken image")
else:
print("Gamma Correction: 🌓 Gamma=1. No correction made.")
return a
# Gamma decoding formula
return np.power(a, 1 / gamma)
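# For example (illustrative numbers only): ``gamma_correction(0.25, 2.2)``
# returns 0.25 ** (1 / 2.2) ~= 0.53, lightening a mid-dark pixel, while a
# gamma below 1 would push the same pixel darker.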
def normalize(value, lower_limit, upper_limit, clip=True):
"""
Normalize values between 0 and 1.
Normalize between a lower and upper limit. In other words, it
converts your number to a value in the range between 0 and 1.
Follows `normalization formula
<https://stats.stackexchange.com/a/70807/220885>`_
This is the same concept as `contrast or histogram stretching
<https://staff.fnwi.uva.nl/r.vandenboomgaard/IPCV20162017/LectureNotes/IP/PointOperators/ImageStretching.html>`_
.. code:: python
NormalizedValue = (OriginalValue-LowerLimit)/(UpperLimit-LowerLimit)
Parameters
----------
value :
The original value. A single value, vector, or array.
upper_limit :
The upper limit.
lower_limit :
The lower limit.
clip : bool
- True: Clips values between 0 and 1 for RGB.
        - False: Retain numbers that extend outside the 0-1 range.
Output:
Values normalized between the upper and lower limit.
"""
norm = (value - lower_limit) / (upper_limit - lower_limit)
if clip:
norm = np.clip(norm, 0, 1)
return norm
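# Illustrative sketch (hypothetical values, not part of the original module):
# the RGB recipes below follow the same basic pattern of normalizing a raw
# channel into the 0-1 range and then gamma-correcting it for display.
def _demo_normalize_then_gamma():
    """Sketch only: scale made-up brightness temperatures into a display band."""
    sample_K = np.linspace(200.0, 300.0, 5)     # hypothetical brightness temperatures (K)
    scaled = normalize(sample_K, 200.0, 300.0)  # map onto 0-1 and clip
    return gamma_correction(scaled, 2.2)        # lighten for display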
# ======================================================================
# ======================================================================
def TrueColor(C, gamma=2.2, pseudoGreen=True, night_IR=True, **kwargs):
"""
True Color RGB:
(See `Quick Guide <http://cimss.ssec.wisc.edu/goes/OCLOFactSheetPDFs/ABIQuickGuide_CIMSSRGB_v2.pdf>`__ for reference)
This is similar to the NaturalColor RGB, but uses a different gamma
correction and does not apply contrast stretching. I think these
images look a little "washed out" when compared to the NaturalColor
RGB. So, I would recommend using the NaturalColor RGB.
    For more details on combining RGB and making the pseudo green channel, refer to
`Bah et al. 2018 <https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2018EA000379>`_.
.. image:: /_static/TrueColor.png
.. image:: /_static/gamma_demo_TrueColor.png
.. image:: /_static/Color-IR_demo.png
Parameters
----------
C : xarray.Dataset
A GOES ABI multichannel file opened with xarray.
gamma : float
Darken or lighten an image with `gamma correction
<https://en.wikipedia.org/wiki/Gamma_correction>`_.
Values > 1 will lighten an image.
Values < 1 will darken an image.
pseudoGreen : bool
True: returns the calculated "True" green color
False: returns the "veggie" channel
night_IR : bool
If True, use Clean IR (channel 13) as maximum RGB value overlay
so that cold clouds show up at night. (Be aware that some
daytime clouds might appear brighter).
\*\*kwargs :
Keyword arguments for ``rgb_as_dataset`` function.
- latlon : derive latitude and longitude of each pixel
"""
# Load the three channels into appropriate R, G, and B variables
R, G, B = load_RGB_channels(C, (2, 3, 1))
# Apply | |
# File: cHTTPClientSideProxyServer.py (from the SkyLined/mHTTP repository)
import re, time;
try: # mDebugOutput use is Optional
from mDebugOutput import *;
except: # Do nothing if not available.
ShowDebugOutput = lambda fxFunction: fxFunction;
fShowDebugOutput = lambda sMessage: None;
fEnableDebugOutputForModule = lambda mModule: None;
fEnableDebugOutputForClass = lambda cClass: None;
fEnableAllDebugOutput = lambda: None;
cCallStack = fTerminateWithException = fTerminateWithConsoleOutput = None;
from mHTTPConnections import cHTTPConnection, cHTTPResponse, cHTTPHeaders, mExceptions, cURL;
from mMultiThreading import cLock, cThread, cWithCallbacks;
from mNotProvided import *;
from mTCPIPConnections import cTransactionalBufferedTCPIPConnection;
from .cHTTPServer import cHTTPServer;
from .cHTTPClient import cHTTPClient;
from .cHTTPClientUsingProxyServer import cHTTPClientUsingProxyServer;
# To turn access to data store in multiple variables into a single transaction, we will create locks.
# These locks should only ever be locked for a short time; if one is locked for too long, it is considered a "deadlock"
# bug, where "too long" is defined by the following value:
gnDeadlockTimeoutInSeconds = 1; # We're not doing anything time consuming, so this should suffice.
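# The pattern used throughout this module for such a "transaction" is a short
# acquire/release around the related reads and writes, e.g. (illustrative
# sketch only, not executable on its own):
#
#   oSelf.__oPropertyAccessTransactionLock.fAcquire();
#   try:
#     ... read or modify the related attributes ...
#   finally:
#     oSelf.__oPropertyAccessTransactionLock.fRelease();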
def foGetErrorResponse(sVersion, uStatusCode, sBody):
return cHTTPResponse(
szVersion = sVersion,
uzStatusCode = uStatusCode,
o0zHeaders = cHTTPHeaders.foFromDict({
"Connection": "Close",
"Content-Type": "text/plain",
}),
s0Body = sBody,
bAutomaticallyAddContentLengthHeader = True,
);
def foGetResponseForException(oException, sHTTPVersion):
if isinstance(oException, (mExceptions.cDNSUnknownHostnameException, mExceptions.cTCPIPInvalidAddressException)):
return foGetErrorResponse(sHTTPVersion, 400, "The server cannot be found.");
if isinstance(oException, mExceptions.cTCPIPConnectTimeoutException):
return foGetErrorResponse(sHTTPVersion, 504, "Connecting to the server timed out.");
if isinstance(oException, mExceptions.cTCPIPDataTimeoutException):
return foGetErrorResponse(sHTTPVersion, 504, "The server did not respond before the request timed out.");
if isinstance(oException, mExceptions.cHTTPOutOfBandDataException):
return foGetErrorResponse(sHTTPVersion, 502, "The server send out-of-band data.");
if isinstance(oException, mExceptions.cTCPIPConnectionRefusedException):
return foGetErrorResponse(sHTTPVersion, 502, "The server did not accept our connection.");
if isinstance(oException, (mExceptions.cTCPIPConnectionShutdownException, mExceptions.cTCPIPConnectionDisconnectedException)):
return foGetErrorResponse(sHTTPVersion, 502, "The server disconnected before sending a response.");
if isinstance(oException, mExceptions.cHTTPInvalidMessageException):
return foGetErrorResponse(sHTTPVersion, 502, "The server send an invalid HTTP response.");
if mExceptions.cSSLException and isinstance(oException, mExceptions.cSSLSecureTimeoutException):
return foGetErrorResponse(sHTTPVersion, 504, "The connection to the server could not be secured before the request timed out.");
if mExceptions.cSSLException and isinstance(oException, (mExceptions.cSSLSecureHandshakeException, mExceptions.cSSLIncorrectHostnameException)):
return foGetErrorResponse(sHTTPVersion, 504, "The connection to the server could not be secured.");
raise;
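# Note: this helper is intended to be called from inside an ``except`` block,
# so the bare ``raise`` above re-raises any exception that is not translated
# into an HTTP error response. Illustrative usage (hypothetical surrounding
# code):
#
#   try:
#     oResponse = ...; # forward the request to the server
#   except Exception as oException:
#     oResponse = foGetResponseForException(oException, sHTTPVersion);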
class cHTTPClientSideProxyServer(cWithCallbacks):
u0DefaultMaxNumberOfConnectionsToChainedProxy = 10;
n0DefaultSecureConnectionToChainedProxyTimeoutInSeconds = 5;
n0DefaultSecureTimeoutInSeconds = zNotProvided; # Let mHTTPConnection pick a default.
n0DefaultTransactionTimeoutInSeconds = 10;
n0DefaultSecureConnectionPipeTotalDurationTimeoutInSeconds = None;
n0DefaultSecureConnectionPipeIdleTimeoutInSeconds = 20;
n0DefaultConnectionTerminateTimeoutInSeconds = 10;
@ShowDebugOutput
def __init__(oSelf,
szHostname = zNotProvided, uzPort = zNotProvided,
o0ServerSSLContext = None,
o0zCertificateStore = zNotProvided,
o0ChainedProxyURL = None,
o0ChainedProxyHTTPClient = None,
bAllowUnverifiableCertificatesForChainedProxy = False,
bCheckChainedProxyHostname = True,
u0zMaxNumberOfConnectionsToChainedProxy = zNotProvided,
# Connections to proxy use nzConnectTimeoutInSeconds
n0zSecureConnectionToChainedProxyTimeoutInSeconds = zNotProvided,
# Connections to proxy use nzTransactionTimeoutInSeconds
o0InterceptSSLConnectionsCertificateAuthority = None,
n0zConnectTimeoutInSeconds = zNotProvided,
n0zSecureTimeoutInSeconds = zNotProvided,
n0zTransactionTimeoutInSeconds = zNotProvided,
bAllowUnverifiableCertificates = False,
bCheckHostname = True,
n0zSecureConnectionPipeTotalDurationTimeoutInSeconds = zNotProvided,
n0zSecureConnectionPipeIdleTimeoutInSeconds = zNotProvided,
u0zMaxNumberOfConnectionsToServer = zNotProvided,
):
oSelf.__o0InterceptSSLConnectionsCertificateAuthority = o0InterceptSSLConnectionsCertificateAuthority;
oSelf.__n0zConnectTimeoutInSeconds = n0zConnectTimeoutInSeconds;
oSelf.__n0zSecureTimeoutInSeconds = fxzGetFirstProvidedValueIfAny(n0zSecureTimeoutInSeconds, oSelf.n0DefaultSecureTimeoutInSeconds);
oSelf.__n0zTransactionTimeoutInSeconds = fxzGetFirstProvidedValueIfAny(n0zTransactionTimeoutInSeconds, oSelf.n0DefaultTransactionTimeoutInSeconds);
oSelf.__bAllowUnverifiableCertificates = bAllowUnverifiableCertificates;
oSelf.__bCheckHostname = bCheckHostname;
oSelf.__n0SecureConnectionPipeTotalDurationTimeoutInSeconds = fxGetFirstProvidedValue( \
n0zSecureConnectionPipeTotalDurationTimeoutInSeconds, oSelf.n0DefaultSecureConnectionPipeTotalDurationTimeoutInSeconds);
oSelf.__n0SecureConnectionPipeIdleTimeoutInSeconds = fxGetFirstProvidedValue( \
n0zSecureConnectionPipeIdleTimeoutInSeconds, oSelf.n0DefaultSecureConnectionPipeIdleTimeoutInSeconds);
oSelf.__oPropertyAccessTransactionLock = cLock(
"%s.__oPropertyAccessTransactionLock" % oSelf.__class__.__name__,
n0DeadlockTimeoutInSeconds = gnDeadlockTimeoutInSeconds
);
oSelf.__aoSecureConnectionsFromClient = [];
oSelf.__aoSecureConnectionThreads = [];
oSelf.__bStopping = False;
oSelf.__oTerminatedLock = cLock(
"%s.__oTerminatedLock" % oSelf.__class__.__name__,
bLocked = True
);
oSelf.fAddEvents(
"new connection from client",
"connect to server failed", "new connection to server",
"request received from client", "request sent to server",
"connection piped between client and server", "connection intercepted between client and server",
"response received from server", "response sent to client",
"request sent to and response received from server", "request received from and response sent to client",
"connection to server terminated", "connection from client terminated",
"client terminated", "server terminated",
"terminated"
);
# Create client
if o0ChainedProxyHTTPClient:
assert not o0ChainedProxyURL, \
"Cannot provide both a chained proxy URL (%s) and HTTP client (%s)" % \
(o0ChainedProxyURL, o0ChainedProxyHTTPClient);
      # Ideally, we want to check the caller did not provide any inapplicable arguments here, but that's a lot of
# work, so I've pushed this out until it makes sense to add these checks
oSelf.oHTTPClient = o0ChainedProxyHTTPClient;
oSelf.__bUsingChainedProxy = True;
elif o0ChainedProxyURL:
      # Ideally, we want to check the caller did not provide any inapplicable arguments here, but that's a lot of
# work, so I've pushed this out until it makes sense to add these checks
oSelf.oHTTPClient = cHTTPClientUsingProxyServer(
oProxyServerURL = o0ChainedProxyURL,
bAllowUnverifiableCertificatesForProxy = bAllowUnverifiableCertificatesForChainedProxy,
bCheckProxyHostname = bCheckChainedProxyHostname,
o0zCertificateStore = o0zCertificateStore,
u0zMaxNumberOfConnectionsToProxy = fxGetFirstProvidedValue( \
u0zMaxNumberOfConnectionsToChainedProxy, oSelf.u0DefaultMaxNumberOfConnectionsToChainedProxy),
n0zConnectToProxyTimeoutInSeconds = n0zConnectTimeoutInSeconds,
n0zSecureConnectionToProxyTimeoutInSeconds = fxGetFirstProvidedValue( \
n0zSecureConnectionToChainedProxyTimeoutInSeconds, oSelf.n0DefaultSecureConnectionToChainedProxyTimeoutInSeconds),
n0zSecureConnectionToServerTimeoutInSeconds = oSelf.__n0zSecureTimeoutInSeconds,
n0zTransactionTimeoutInSeconds = oSelf.__n0zTransactionTimeoutInSeconds,
bAllowUnverifiableCertificates = bAllowUnverifiableCertificates,
bCheckHostname = bCheckHostname,
);
oSelf.__bUsingChainedProxy = True;
else:
      # Ideally, we want to check the caller did not provide any inapplicable arguments here, but that's a lot of
# work, so I've pushed this out until it makes sense to add these checks
oSelf.oHTTPClient = cHTTPClient(
o0zCertificateStore = o0zCertificateStore,
u0zMaxNumberOfConnectionsToServer = u0zMaxNumberOfConnectionsToServer,
n0zConnectTimeoutInSeconds = n0zConnectTimeoutInSeconds,
n0zSecureTimeoutInSeconds = oSelf.__n0zSecureTimeoutInSeconds,
n0zTransactionTimeoutInSeconds = oSelf.__n0zTransactionTimeoutInSeconds,
bAllowUnverifiableCertificates = bAllowUnverifiableCertificates,
bCheckHostname = bCheckHostname,
);
oSelf.__bUsingChainedProxy = False;
# Create server
oSelf.oHTTPServer = cHTTPServer(
ftxRequestHandler = oSelf.__ftxRequestHandler,
szHostname = szHostname,
uzPort = uzPort,
o0SSLContext = o0ServerSSLContext,
);
# Forward events from client
oSelf.oHTTPClient.fAddCallback("connect failed", lambda oHTTPServer, sHostname, uPort, oException:
oSelf.fFireCallbacks("connect to server failed", sHostname, uPort, oException));
oSelf.oHTTPClient.fAddCallback("new connection", lambda oHTTPServer, oConnection:
oSelf.fFireCallbacks("new connection to server", oConnection));
oSelf.oHTTPClient.fAddCallback("request sent", lambda oHTTPServer, oConnection, oRequest:
oSelf.fFireCallbacks("request sent to server", oConnection, oRequest));
oSelf.oHTTPClient.fAddCallback("response received", lambda oHTTPServer, oConnection, oResponse:
oSelf.fFireCallbacks("response received from server", oConnection, oResponse));
oSelf.oHTTPClient.fAddCallback("request sent and response received", lambda oHTTPServer, oConnection, oRequest, oResponse:
oSelf.fFireCallbacks("request sent to and response received from server", oConnection, oRequest, oResponse));
oSelf.oHTTPClient.fAddCallback("connection terminated", lambda oHTTPServer, oConnection:
oSelf.fFireCallbacks("connection to server terminated", oConnection));
oSelf.oHTTPClient.fAddCallback("terminated",
oSelf.__fHandleTerminatedCallbackFromClient);
# Forward events from server
oSelf.oHTTPServer.fAddCallback("new connection",
lambda oHTTPServer, oConnection: oSelf.fFireCallbacks("new connection from client", oConnection));
oSelf.oHTTPServer.fAddCallback("request received",
lambda oHTTPServer, oConnection, oRequest: oSelf.fFireCallbacks("request received from client", oConnection, oRequest));
oSelf.oHTTPServer.fAddCallback("response sent",
lambda oHTTPServer, oConnection, oResponse: oSelf.fFireCallbacks("response sent to client", oConnection, oResponse));
oSelf.oHTTPServer.fAddCallback("request received and response sent",
lambda oHTTPServer, oConnection, oRequest, oResponse: oSelf.fFireCallbacks("request received from and response sent to client", oConnection, oRequest, oResponse));
oSelf.oHTTPServer.fAddCallback("connection terminated",
lambda oHTTPServer, oConnection: oSelf.fFireCallbacks("connection from client terminated", oConnection));
oSelf.oHTTPServer.fAddCallback("terminated",
oSelf.__fHandleTerminatedCallbackFromServer);
@ShowDebugOutput
def __fHandleTerminatedCallbackFromServer(oSelf, oHTTPServer):
assert oSelf.__bStopping, \
"HTTP server terminated unexpectedly";
oSelf.fFireCallbacks("server terminated", oHTTPServer);
oSelf.__fCheckForTermination();
@ShowDebugOutput
def __fHandleTerminatedCallbackFromClient(oSelf, oHTTPClient):
assert oSelf.__bStopping, \
"HTTP client terminated unexpectedly";
oSelf.fFireCallbacks("client terminated", oHTTPClient);
oSelf.__fCheckForTermination();
@ShowDebugOutput
def __fCheckForTermination(oSelf):
oSelf.__oPropertyAccessTransactionLock.fAcquire();
try:
if oSelf.bTerminated:
return fShowDebugOutput("Already terminated.");
if not oSelf.oHTTPServer.bTerminated:
return fShowDebugOutput("Not terminated: server still running.");
if not oSelf.oHTTPClient.bTerminated:
return fShowDebugOutput("Not terminated: client still running.");
if oSelf.__aoSecureConnectionsFromClient:
return fShowDebugOutput("Not terminated: %d open connections." % len(oSelf.__aoSecureConnectionsFromClient));
if oSelf.__aoSecureConnectionThreads:
return fShowDebugOutput("Not terminated: %d running thread." % len(oSelf.__aoSecureConnectionThreads));
oSelf.__oTerminatedLock.fRelease();
finally:
oSelf.__oPropertyAccessTransactionLock.fRelease();
fShowDebugOutput("%s terminating." % oSelf.__class__.__name__);
oSelf.fFireCallbacks("terminated");
  # These property accesses are passed through to the server part of the proxy
@property
def bTerminated(oSelf):
return not oSelf.__oTerminatedLock.bLocked;
@property
def sAddress(oSelf):
return oSelf.oHTTPServer.sAddress;
@property
def bSecure(oSelf):
return oSelf.oHTTPServer.bSecure;
@property
def sURL(oSelf):
return oSelf.oHTTPServer.sURL;
@ShowDebugOutput
def fStop(oSelf):
oSelf.__bStopping = True;
fShowDebugOutput("Stopping HTTP server...");
oSelf.oHTTPServer.fStop();
fShowDebugOutput("Stopping HTTP client...");
oSelf.oHTTPClient.fStop();
oSelf.__oPropertyAccessTransactionLock.fAcquire();
try:
aoSecureConnections = oSelf.__aoSecureConnectionsFromClient[:];
finally:
oSelf.__oPropertyAccessTransactionLock.fRelease();
for oSecureConnection in aoSecureConnections:
fShowDebugOutput("Stopping secure connection %s..." % oSecureConnection);
oSecureConnection.fStop();
@ShowDebugOutput
def fTerminate(oSelf):
if oSelf.bTerminated:
fShowDebugOutput("Already terminated.");
return True;
# Prevent any new connections from being accepted.
oSelf.__bStopping = True;
fShowDebugOutput("Terminating HTTP server...");
oSelf.oHTTPServer.fTerminate();
fShowDebugOutput("Terminating HTTP client...");
oSelf.oHTTPClient.fTerminate();
oSelf.__oPropertyAccessTransactionLock.fAcquire();
try:
aoSecureConnections = oSelf.__aoSecureConnectionsFromClient[:];
finally:
oSelf.__oPropertyAccessTransactionLock.fRelease();
for oSecureConnection in aoSecureConnections:
fShowDebugOutput("Terminating secure connection %s..." % oSecureConnection);
oSecureConnection.fTerminate();
@ShowDebugOutput
def fWait(oSelf):
    # We could just wait for the terminated lock, but while debugging, we may want
# to know exactly what it is we're waiting for:
if oSelf.__oTerminatedLock.bLocked:
fShowDebugOutput("Waiting for HTTP server...");
oSelf.oHTTPServer.fWait();
fShowDebugOutput("Waiting for HTTP client...");
oSelf.oHTTPClient.fWait();
oSelf.__oPropertyAccessTransactionLock.fAcquire();
try:
aoSecureConnectionThreads = oSelf.__aoSecureConnectionThreads[:];
finally:
oSelf.__oPropertyAccessTransactionLock.fRelease();
for oSecureConnectionThread in aoSecureConnectionThreads:
fShowDebugOutput("Waiting for secure connection thread %s..." % oSecureConnectionThread);
oSecureConnectionThread.fWait();
@ShowDebugOutput
def fbWait(oSelf, nTimeoutInSeconds):
    # We could just wait for the terminated lock, but while debugging, we may want
# to know exactly what it is we're waiting for:
if oSelf.__oTerminatedLock.bLocked:
nEndTime = time.clock() + nTimeoutInSeconds;
fShowDebugOutput("Waiting for HTTP server...");
if not oSelf.oHTTPServer.fbWait(nTimeoutInSeconds):
fShowDebugOutput("Timeout.");
return False;
fShowDebugOutput("Waiting for HTTP client...");
nRemainingTimeoutInSeconds = nEndTime - time.clock();
if not oSelf.oHTTPClient.fbWait(nRemainingTimeoutInSeconds):
fShowDebugOutput("Timeout.");
return False;
oSelf.__oPropertyAccessTransactionLock.fAcquire();
try:
aoSecureConnectionThreads = oSelf.__aoSecureConnectionThreads[:];
finally:
oSelf.__oPropertyAccessTransactionLock.fRelease();
for oSecureConnectionThread in aoSecureConnectionThreads:
fShowDebugOutput("Waiting for secure connection thread %s..." % oSecureConnectionThread);
| |
# From the ewengillies/sqlalchemy repository
import logging.handlers
import sqlalchemy as tsa
from sqlalchemy import bindparam
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import util
from sqlalchemy.sql import util as sql_util
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assert_raises_return
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import eq_regex
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
from sqlalchemy.testing.util import lazy_gc
def exec_sql(engine, sql, *args, **kwargs):
with engine.connect() as conn:
return conn.exec_driver_sql(sql, *args, **kwargs)
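# For example (illustrative only): ``exec_sql(engine, "select 1")`` opens a
# short-lived connection and runs the raw, driver-level SQL string without
# going through SQLAlchemy's statement compilation.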
class LogParamsTest(fixtures.TestBase):
__only_on__ = "sqlite"
__requires__ = ("ad_hoc_engines",)
def setup(self):
self.eng = engines.testing_engine(options={"echo": True})
self.no_param_engine = engines.testing_engine(
options={"echo": True, "hide_parameters": True}
)
exec_sql(self.eng, "create table if not exists foo (data string)")
exec_sql(
self.no_param_engine,
"create table if not exists foo (data string)",
)
self.buf = logging.handlers.BufferingHandler(100)
for log in [logging.getLogger("sqlalchemy.engine")]:
log.addHandler(self.buf)
def teardown(self):
exec_sql(self.eng, "drop table if exists foo")
for log in [logging.getLogger("sqlalchemy.engine")]:
log.removeHandler(self.buf)
def test_log_large_list_of_dict(self):
exec_sql(
self.eng,
"INSERT INTO foo (data) values (:data)",
[{"data": str(i)} for i in range(100)],
)
eq_(
self.buf.buffer[1].message,
"[{'data': '0'}, {'data': '1'}, {'data': '2'}, {'data': '3'}, "
"{'data': '4'}, {'data': '5'}, {'data': '6'}, {'data': '7'}"
" ... displaying 10 of 100 total bound "
"parameter sets ... {'data': '98'}, {'data': '99'}]",
)
def test_repr_params_large_list_of_dict(self):
eq_(
repr(
sql_util._repr_params(
[{"data": str(i)} for i in range(100)],
batches=10,
ismulti=True,
)
),
"[{'data': '0'}, {'data': '1'}, {'data': '2'}, {'data': '3'}, "
"{'data': '4'}, {'data': '5'}, {'data': '6'}, {'data': '7'}"
" ... displaying 10 of 100 total bound "
"parameter sets ... {'data': '98'}, {'data': '99'}]",
)
def test_log_no_parameters(self):
exec_sql(
self.no_param_engine,
"INSERT INTO foo (data) values (:data)",
[{"data": str(i)} for i in range(100)],
)
eq_(
self.buf.buffer[1].message,
"[SQL parameters hidden due to hide_parameters=True]",
)
def test_log_large_list_of_tuple(self):
exec_sql(
self.eng,
"INSERT INTO foo (data) values (?)",
[(str(i),) for i in range(100)],
)
eq_(
self.buf.buffer[1].message,
"[('0',), ('1',), ('2',), ('3',), ('4',), ('5',), "
"('6',), ('7',) ... displaying 10 of 100 total "
"bound parameter sets ... ('98',), ('99',)]",
)
def test_log_positional_array(self):
with self.eng.connect() as conn:
exc_info = assert_raises_return(
tsa.exc.DBAPIError,
conn.execute,
tsa.text("SELECT * FROM foo WHERE id IN :foo AND bar=:bar"),
{"foo": [1, 2, 3], "bar": "hi"},
)
assert (
"[SQL: SELECT * FROM foo WHERE id IN ? AND bar=?]\n"
"[parameters: ([1, 2, 3], 'hi')]\n" in str(exc_info)
)
eq_(self.buf.buffer[1].message, "([1, 2, 3], 'hi')")
def test_repr_params_positional_array(self):
eq_(
repr(
sql_util._repr_params(
[[1, 2, 3], 5], batches=10, ismulti=False
)
),
"[[1, 2, 3], 5]",
)
def test_repr_params_unknown_list(self):
# not known if given multiparams or not. repr params with
# straight truncation
eq_(
repr(
sql_util._repr_params(
[[i for i in range(300)], 5], batches=10, max_chars=80
)
),
"[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, ... "
"(1315 characters truncated) ... , 293, 294, 295, 296, "
"297, 298, 299], 5]",
)
def test_repr_params_positional_list(self):
# given non-multi-params in a list. repr params with
# per-element truncation, mostly does the exact same thing
eq_(
repr(
sql_util._repr_params(
[[i for i in range(300)], 5],
batches=10,
max_chars=80,
ismulti=False,
)
),
"[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1 ... "
"(1310 characters truncated) ... "
"292, 293, 294, 295, 296, 297, 298, 299], 5]",
)
def test_repr_params_named_dict(self):
# given non-multi-params in a list. repr params with
# per-element truncation, mostly does the exact same thing
params = {"key_%s" % i: i for i in range(10)}
eq_(
repr(
sql_util._repr_params(
params, batches=10, max_chars=80, ismulti=False
)
),
repr(params),
)
def test_repr_params_ismulti_named_dict(self):
# given non-multi-params in a list. repr params with
# per-element truncation, mostly does the exact same thing
param = {"key_%s" % i: i for i in range(10)}
eq_(
repr(
sql_util._repr_params(
[param for j in range(50)],
batches=5,
max_chars=80,
ismulti=True,
)
),
"[%(param)r, %(param)r, %(param)r ... "
"displaying 5 of 50 total bound parameter sets ... "
"%(param)r, %(param)r]" % {"param": param},
)
def test_repr_params_ismulti_list(self):
# given multi-params in a list. repr params with
# per-element truncation, mostly does the exact same thing
eq_(
repr(
sql_util._repr_params(
[
[[i for i in range(300)], 5],
[[i for i in range(300)], 5],
[[i for i in range(300)], 5],
],
batches=10,
max_chars=80,
ismulti=True,
)
),
"[[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1 ... "
"(1310 characters truncated) ... 292, 293, 294, 295, 296, 297, "
"298, 299], 5], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1 ... "
"(1310 characters truncated) ... 292, 293, 294, 295, 296, 297, "
"298, 299], 5], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1 ... "
"(1310 characters truncated) ... 292, 293, 294, 295, 296, 297, "
"298, 299], 5]]",
)
def test_log_large_parameter_single(self):
import random
largeparam = "".join(chr(random.randint(52, 85)) for i in range(5000))
exec_sql(self.eng, "INSERT INTO foo (data) values (?)", (largeparam,))
eq_(
self.buf.buffer[1].message,
"('%s ... (4702 characters truncated) ... %s',)"
% (largeparam[0:149], largeparam[-149:]),
)
def test_log_large_multi_parameter(self):
import random
lp1 = "".join(chr(random.randint(52, 85)) for i in range(5))
lp2 = "".join(chr(random.randint(52, 85)) for i in range(8))
lp3 = "".join(chr(random.randint(52, 85)) for i in range(670))
exec_sql(self.eng, "SELECT ?, ?, ?", (lp1, lp2, lp3))
eq_(
self.buf.buffer[1].message,
"('%s', '%s', '%s ... (372 characters truncated) ... %s')"
% (lp1, lp2, lp3[0:149], lp3[-149:]),
)
def test_log_large_parameter_multiple(self):
import random
lp1 = "".join(chr(random.randint(52, 85)) for i in range(5000))
lp2 = "".join(chr(random.randint(52, 85)) for i in range(200))
lp3 = "".join(chr(random.randint(52, 85)) for i in range(670))
exec_sql(
self.eng,
"INSERT INTO foo (data) values (?)",
[(lp1,), (lp2,), (lp3,)],
)
eq_(
self.buf.buffer[1].message,
"[('%s ... (4702 characters truncated) ... %s',), ('%s',), "
"('%s ... (372 characters truncated) ... %s',)]"
% (lp1[0:149], lp1[-149:], lp2, lp3[0:149], lp3[-149:]),
)
def test_exception_format_dict_param(self):
exception = tsa.exc.IntegrityError("foo", {"x": "y"}, None)
eq_regex(
str(exception),
r"\(.*.NoneType\) None\n\[SQL: foo\]\n\[parameters: {'x': 'y'}\]",
)
def test_exception_format_hide_parameters(self):
exception = tsa.exc.IntegrityError(
"foo", {"x": "y"}, None, hide_parameters=True
)
eq_regex(
str(exception),
r"\(.*.NoneType\) None\n\[SQL: foo\]\n"
r"\[SQL parameters hidden due to hide_parameters=True\]",
)
def test_exception_format_hide_parameters_dbapi_round_trip(self):
assert_raises_message(
tsa.exc.DBAPIError,
r".*INSERT INTO nonexistent \(data\) values \(:data\)\]\n"
r"\[SQL parameters hidden due to hide_parameters=True\]",
lambda: exec_sql(
self.no_param_engine,
"INSERT INTO nonexistent (data) values (:data)",
[{"data": str(i)} for i in range(10)],
),
)
def test_exception_format_hide_parameters_nondbapi_round_trip(self):
foo = Table("foo", MetaData(), Column("data", String))
with self.no_param_engine.connect() as conn:
assert_raises_message(
tsa.exc.StatementError,
r"\(sqlalchemy.exc.InvalidRequestError\) A value is required "
r"for bind parameter 'the_data_2'\n"
r"\[SQL: SELECT foo.data \nFROM foo \nWHERE "
r"foo.data = \? OR foo.data = \?\]\n"
r"\[SQL parameters hidden due to hide_parameters=True\]",
conn.execute,
select([foo]).where(
or_(
foo.c.data == bindparam("the_data_1"),
foo.c.data == bindparam("the_data_2"),
)
),
{"the_data_1": "some data"},
)
def test_exception_format_unexpected_parameter(self):
# test that if the parameters aren't any known type, we just
# run through repr()
exception = tsa.exc.IntegrityError("foo", "bar", "bat")
eq_regex(
str(exception),
r"\(.*.str\) bat\n\[SQL: foo\]\n\[parameters: 'bar'\]",
)
def test_exception_format_unexpected_member_parameter(self):
# test that if the parameters aren't any known type, we just
# run through repr()
exception = tsa.exc.IntegrityError("foo", ["bar", "bat"], "hoho")
eq_regex(
str(exception),
r"\(.*.str\) hoho\n\[SQL: foo\]\n\[parameters: \['bar', 'bat'\]\]",
)
def test_result_large_param(self):
import random
largeparam = "".join(chr(random.randint(52, 85)) for i in range(5000))
self.eng.echo = "debug"
result = exec_sql(self.eng, "SELECT ?", (largeparam,))
row = result.first()
eq_(
self.buf.buffer[1].message,
"('%s ... (4702 characters truncated) ... %s',)"
% (largeparam[0:149], largeparam[-149:]),
)
if util.py3k:
eq_(
self.buf.buffer[3].message,
"Row ('%s ... (4702 characters truncated) ... %s',)"
% (largeparam[0:149], largeparam[-149:]),
)
else:
eq_(
self.buf.buffer[3].message,
"Row (u'%s ... (4703 characters truncated) ... %s',)"
% (largeparam[0:148], largeparam[-149:]),
)
if util.py3k:
eq_(
repr(row),
"('%s ... (4702 characters truncated) ... %s',)"
% (largeparam[0:149], largeparam[-149:]),
)
else:
eq_(
repr(row),
"(u'%s ... (4703 characters truncated) ... %s',)"
% (largeparam[0:148], largeparam[-149:]),
)
def test_error_large_dict(self):
assert_raises_message(
tsa.exc.DBAPIError,
r".*INSERT INTO nonexistent \(data\) values \(:data\)\]\n"
r"\[parameters: "
r"\[{'data': '0'}, {'data': '1'}, {'data': '2'}, "
r"{'data': '3'}, {'data': '4'}, {'data': '5'}, "
r"{'data': '6'}, {'data': '7'} ... displaying 10 of "
r"100 total bound parameter sets ... {'data': '98'}, "
r"{'data': '99'}\]",
lambda: exec_sql(
self.eng,
"INSERT INTO nonexistent (data) values (:data)",
[{"data": str(i)} for i in range(100)],
),
)
def test_error_large_list(self):
assert_raises_message(
tsa.exc.DBAPIError,
r".*INSERT INTO nonexistent \(data\) values "
r"\(\?\)\]\n\[parameters: \[\('0',\), \('1',\), \('2',\), "
r"\('3',\), \('4',\), \('5',\), \('6',\), \('7',\) "
r"... displaying "
r"10 of 100 total bound parameter sets ... "
r"\('98',\), \('99',\)\]",
lambda: exec_sql(
self.eng,
"INSERT INTO | |
instead
See http://trac.sagemath.org/14801 for details.
sage: h = f.convolution(g)
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: P = f.plot(); Q = g.plot(rgbcolor=(1,1,0)); R = h.plot(rgbcolor=(0,1,1));
sage: # Type show(P+Q+R) to view
sage: f = Piecewise([[(0,1),1*x^0],[(1,2),2*x^0],[(2,3),1*x^0]]) ## example 1
sage: g = f.convolution(f)
sage: h = f.convolution(g)
sage: P = f.plot(); Q = g.plot(rgbcolor=(1,1,0)); R = h.plot(rgbcolor=(0,1,1));
sage: # Type show(P+Q+R) to view
sage: f = Piecewise([[(-1,1),1]]) ## example 2
sage: g = Piecewise([[(0,3),x]])
sage: f.convolution(g)
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
Piecewise defined function with 3 parts, [[(-1, 1), 0], [(1, 2), -3/2*x], [(2, 4), -3/2*x]]
sage: g = Piecewise([[(0,3),1*x^0],[(3,4),2*x^0]])
sage: f.convolution(g)
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
Piecewise defined function with 5 parts, [[(-1, 1), x + 1], [(1, 2), 3], [(2, 3), x], [(3, 4), -x + 8], [(4, 5), -2*x + 10]]
"""
f = self
g = other
M = min(min(f.end_points()),min(g.end_points()))
N = max(max(f.end_points()),max(g.end_points()))
R2 = PolynomialRing(QQ,2,names=["tt","uu"])
tt,uu = R2.gens()
conv = 0
f0 = f.functions()[0]
g0 = g.functions()[0]
R1 = f0.parent()
xx = R1.gen()
var = repr(xx)
if len(f.intervals())==1 and len(g.intervals())==1:
f = f.unextend()
g = g.unextend()
a1 = f.intervals()[0][0]
a2 = f.intervals()[0][1]
b1 = g.intervals()[0][0]
b2 = g.intervals()[0][1]
i1 = repr(f0).replace(var,repr(uu))
i2 = repr(g0).replace(var,"("+repr(tt-uu)+")")
cmd1 = "integrate((%s)*(%s),%s,%s,%s)"%(i1,i2, uu, a1, tt-b1) ## if a1+b1 < tt < a2+b1
cmd2 = "integrate((%s)*(%s),%s,%s,%s)"%(i1,i2, uu, tt-b2, tt-b1) ## if a1+b2 < tt < a2+b1
cmd3 = "integrate((%s)*(%s),%s,%s,%s)"%(i1,i2, uu, tt-b2, a2) ## if a1+b2 < tt < a2+b2
cmd4 = "integrate((%s)*(%s),%s,%s,%s)"%(i1,i2, uu, a1, a2) ## if a2+b1 < tt < a1+b2
conv1 = maxima.eval(cmd1)
conv2 = maxima.eval(cmd2)
conv3 = maxima.eval(cmd3)
conv4 = maxima.eval(cmd4)
# this is a very, very, very ugly hack
x = PolynomialRing(QQ,'x').gen()
fg1 = sage_eval(conv1.replace("tt",var), {'x':x}) ## should be = R2(conv1)
fg2 = sage_eval(conv2.replace("tt",var), {'x':x}) ## should be = R2(conv2)
fg3 = sage_eval(conv3.replace("tt",var), {'x':x}) ## should be = R2(conv3)
fg4 = sage_eval(conv4.replace("tt",var), {'x':x}) ## should be = R2(conv4)
if a1-b1<a2-b2:
if a2+b1!=a1+b2:
h = Piecewise([[(a1+b1,a1+b2),fg1],[(a1+b2,a2+b1),fg4],[(a2+b1,a2+b2),fg3]])
else:
h = Piecewise([[(a1+b1,a1+b2),fg1],[(a1+b2,a2+b2),fg3]])
else:
if a1+b2!=a2+b1:
h = Piecewise([[(a1+b1,a2+b1),fg1],[(a2+b1,a1+b2),fg2],[(a1+b2,a2+b2),fg3]])
else:
h = Piecewise([[(a1+b1,a2+b1),fg1],[(a2+b1,a2+b2),fg3]])
return h
if len(f.intervals())>1 or len(g.intervals())>1:
z = Piecewise([[(-3*abs(N-M),3*abs(N-M)),0*xx**0]])
ff = f.functions()
gg = g.functions()
intvlsf = f.intervals()
intvlsg = g.intervals()
for i in range(len(ff)):
for j in range(len(gg)):
f0 = Piecewise([[intvlsf[i],ff[i]]])
g0 = Piecewise([[intvlsg[j],gg[j]]])
h = g0.convolution(f0)
z = z + h
return z.unextend()
def derivative(self):
r"""
Returns the derivative (as computed by maxima)
Piecewise(I,`(d/dx)(self|_I)`), as I runs over the
intervals belonging to self. self must be piecewise polynomial.
EXAMPLES::
sage: f1(x) = 1
sage: f2(x) = 1-x
sage: f = Piecewise([[(0,1),f1],[(1,2),f2]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: f.derivative()
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
Piecewise defined function with 2 parts, [[(0, 1), x |--> 0], [(1, 2), x |--> -1]]
sage: f1(x) = -1
sage: f2(x) = 2
sage: f = Piecewise([[(0,pi/2),f1],[(pi/2,pi),f2]])
sage: f.derivative()
Piecewise defined function with 2 parts, [[(0, 1/2*pi), x |--> 0], [(1/2*pi, pi), x |--> 0]]
::
sage: f = Piecewise([[(0,1), (x * 2)]], x)
sage: f.derivative()
Piecewise defined function with 1 parts, [[(0, 1), x |--> 2]]
"""
x = self.default_variable()
dlist = [[(a, b), derivative(f(x), x).function(x)] for (a,b),f in self.list()]
return Piecewise(dlist)
def tangent_line(self, pt):
"""
Computes the linear function defining the tangent line of the
piecewise function self.
EXAMPLES::
sage: f1(x) = x^2
sage: f2(x) = 5-x^3+x
sage: f = Piecewise([[(0,1),f1],[(1,2),f2]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: tf = f.tangent_line(0.9) ## tangent line at x=0.9
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: P = f.plot(rgbcolor=(0.7,0.1,0.5), plot_points=40)
sage: Q = tf.plot(rgbcolor=(0.7,0.2,0.2), plot_points=40)
sage: P + Q
Graphics object consisting of 4 graphics primitives
"""
pt = QQ(pt)
R = QQ[self.default_variable()]
x = R.gen()
der = self.derivative()
tanline = (x-pt)*der(pt)+self(pt)
dlist = [[(a, b), tanline] for (a,b),f in self.list()]
return Piecewise(dlist)
def plot(self, *args, **kwds):
"""
Returns the plot of self.
Keyword arguments are passed onto the plot command for each piece
of the function. E.g., the plot_points keyword affects each
segment of the plot.
EXAMPLES::
sage: f1(x) = 1
sage: f2(x) = 1-x
sage: f3(x) = exp(x)
sage: f4(x) = sin(2*x)
sage: f = Piecewise([[(0,1),f1],[(1,2),f2],[(2,3),f3],[(3,10),f4]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: P = f.plot(rgbcolor=(0.7,0.1,0), plot_points=40)
sage: P
Graphics object consisting of 4 graphics primitives
Remember: to view this, type show(P) or P.save("path/myplot.png")
and then open it in a graphics viewer such as GIMP.
TESTS:
We should not add each piece to the legend individually, since
this creates duplicates (:trac:`12651`). This tests that only
one of the graphics objects in the plot has a non-``None``
``legend_label``::
sage: f1 = sin(x)
sage: f2 = cos(x)
sage: f = piecewise([[(-1,0), f1],[(0,1), f2]])
sage: p = f.plot(legend_label='$f(x)$')
sage: lines = [
... line
... for line in p._objects
... if line.options()['legend_label'] is not None ]
sage: len(lines)
1
"""
from sage.plot.all import plot, Graphics
g = Graphics()
for i, ((a,b), f) in enumerate(self.list()):
# If it's the first piece, pass all arguments. Otherwise,
# filter out 'legend_label' so that we don't add each
# piece to the legend separately (trac #12651).
if i != 0 and 'legend_label' in kwds:
del kwds['legend_label']
g += plot(f, a, b, *args, **kwds)
return g
def fourier_series_cosine_coefficient(self,n,L):
r"""
Returns the n-th Fourier series coefficient of
`\cos(n\pi x/L)`, `a_n`.
INPUT:
        - ``self`` - the function f(x), defined over -L < x < L
        - ``n`` - an integer n >= 0
- ``L`` - (the period)/2
OUTPUT:
`a_n = \frac{1}{L}\int_{-L}^L f(x)\cos(n\pi x/L)dx`
EXAMPLES::
sage: f(x) = x^2
sage: f = Piecewise([[(-1,1),f]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: f.fourier_series_cosine_coefficient(2,1)
pi^(-2)
sage: f(x) = x^2
sage: f = Piecewise([[(-pi,pi),f]])
sage: f.fourier_series_cosine_coefficient(2,pi)
1
sage: f1(x) = -1
sage: f2(x) = 2
sage: f = Piecewise([[(-pi,pi/2),f1],[(pi/2,pi),f2]])
sage: f.fourier_series_cosine_coefficient(5,pi)
-3/5/pi
"""
from sage.all import cos, pi
x = var('x')
result = sum([(f(x)*cos(pi*x*n/L)/L).integrate(x, a, b)
for (a,b), f in self.list()])
if is_Expression(result):
return result.simplify_trig()
return result
def fourier_series_sine_coefficient(self,n,L):
r"""
Returns the n-th Fourier series coefficient of
`\sin(n\pi x/L)`, `b_n`.
INPUT:
        - ``self`` - the function f(x), defined over -L < x < L
        - ``n`` - an integer n > 0
- ``L`` - (the period)/2
OUTPUT:
`b_n = \frac{1}{L}\int_{-L}^L f(x)\sin(n\pi x/L)dx`
EXAMPLES::
sage: f(x) = x^2
sage: f = Piecewise([[(-1,1),f]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: f.fourier_series_sine_coefficient(2,1) # L=1, n=2
0
"""
from sage.all import sin, pi
x = var('x')
result = sum([(f(x)*sin(pi*x*n/L)/L).integrate(x, a, b)
for (a,b), f in self.list()])
if is_Expression(result):
return result.simplify_trig()
return result
def _fourier_series_helper(self, N, L, scale_function):
r"""
A helper function for the construction of Fourier series. The
argument scale_function is a function which takes in n,
        representing the `n^{th}` coefficient, and returns an
expression to scale the sine and cosine coefficients by.
EXAMPLES::
sage: f(x) = x^2
sage: f = Piecewise([[(-1,1),f]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
sage: f._fourier_series_helper(3, 1, lambda n: 1)
cos(2*pi*x)/pi^2 - 4*cos(pi*x)/pi^2 + 1/3
"""
from sage.all import pi, sin, cos, srange
x = self.default_variable()
a0 = self.fourier_series_cosine_coefficient(0,L)
result = a0/2 + sum([(self.fourier_series_cosine_coefficient(n,L)*cos(n*pi*x/L) +
self.fourier_series_sine_coefficient(n,L)*sin(n*pi*x/L))*
scale_function(n)
for n in srange(1,N)])
return result.expand()
def fourier_series_partial_sum(self,N,L):
r"""
Returns the partial sum
.. math::
f(x) \sim \frac{a_0}{2} + \sum_{n=1}^N [a_n\cos(\frac{n\pi x}{L}) + b_n\sin(\frac{n\pi x}{L})],
as a string.
EXAMPLE::
sage: f(x) = x^2
sage: f = Piecewise([[(-1,1),f]])
doctest:...: DeprecationWarning: use lower-case piecewise instead
See http://trac.sagemath.org/14801 for details.
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import unittest.mock
import numpy as np
from tests.unit import utils
UNIT_TRIANGLE = np.asfortranarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
SPACING = np.spacing # pylint: disable=no-member
class Test_newton_refine_solve(unittest.TestCase):
@staticmethod
def _call_function_under_test(
jac_both, x_val, triangle_x, y_val, triangle_y
):
from bezier.hazmat import triangle_intersection
return triangle_intersection.newton_refine_solve(
jac_both, x_val, triangle_x, y_val, triangle_y
)
def test_it(self):
jac_both = np.asfortranarray([[1.0], [1.0], [-2.0], [2.0]])
delta_s, delta_t = self._call_function_under_test(
jac_both, 0.5, 0.25, 0.75, 1.25
)
self.assertEqual(delta_s, -0.125)
self.assertEqual(delta_t, -0.1875)
class Test_newton_refine(unittest.TestCase):
@staticmethod
def _call_function_under_test(nodes, degree, x_val, y_val, s, t):
from bezier.hazmat import triangle_intersection
return triangle_intersection.newton_refine(
nodes, degree, x_val, y_val, s, t
)
def test_improvement(self):
nodes = np.asfortranarray(
[
[0.0, 0.5, 1.0, 0.0, 0.5, -0.25],
[0.0, -0.25, 0.0, 0.5, 0.5, 0.875],
]
)
# This triangle is given by
# [(4 s - t^2) / 4, (4 s^2 + 4 s t - t^2 - 4 s + 8 t) / 8]
s = 0.25
t = 0.5
# At our points, the Jacobian is
# [1, -1/4]
# [0, 1 ]
# hence there will be no round-off when applying the inverse.
# (x_val, y_val), = triangle.evaluate_cartesian(0.5, 0.25)
x_val = 0.484375
y_val = 0.1796875
new_s, new_t = self._call_function_under_test(
nodes, 2, x_val, y_val, s, t
)
self.assertEqual(new_s, 247.0 / 512.0)
self.assertEqual(new_t, 31.0 / 128.0)
def test_at_solution(self):
nodes = np.asfortranarray(
[[0.0, 0.5, 1.0, 0.0, 0.5, 0.0], [0.0, 0.0, 0.0, 0.5, 0.5, 1.0]]
)
# This triangle is given by [s, t].
s = 0.375
t = 0.75
# Since x(s) = s and y(t) = t, we simply use the same x/y and s/t.
x_val = s
y_val = t
new_s, new_t = self._call_function_under_test(
nodes, 2, x_val, y_val, s, t
)
self.assertEqual(new_s, s)
self.assertEqual(new_t, t)
class Test_update_locate_candidates(unittest.TestCase):
@staticmethod
def _call_function_under_test(
candidate, next_candidates, x_val, y_val, degree
):
from bezier.hazmat import triangle_intersection
return triangle_intersection.update_locate_candidates(
candidate, next_candidates, x_val, y_val, degree
)
@unittest.mock.patch(
"bezier.hazmat.triangle_helpers.subdivide_nodes",
return_value=(
unittest.mock.sentinel.nodes_a,
unittest.mock.sentinel.nodes_b,
unittest.mock.sentinel.nodes_c,
unittest.mock.sentinel.nodes_d,
),
)
def test_contained(self, subdivide_nodes):
nodes = np.asfortranarray(
[[0.0, 0.5, 1.0, 0.0, 0.75, 0.0], [0.0, 0.25, 0.0, 0.5, 0.75, 1.0]]
)
candidate = (1.25, 1.25, -0.25, nodes)
next_candidates = []
ret_val = self._call_function_under_test(
candidate, next_candidates, 0.5625, 0.375, 2
)
self.assertIsNone(ret_val)
expected = [
(1.375, 1.375, -0.125, unittest.mock.sentinel.nodes_a),
(1.25, 1.25, 0.125, unittest.mock.sentinel.nodes_b),
(1.0, 1.375, -0.125, unittest.mock.sentinel.nodes_c),
(1.375, 1.0, -0.125, unittest.mock.sentinel.nodes_d),
]
self.assertEqual(next_candidates, expected)
subdivide_nodes.assert_called_once_with(nodes, 2)
def test_not_contained(self):
nodes = np.asfortranarray([[0.0, 2.0, -1.0], [0.0, 3.0, 2.0]])
candidate = (2.0, 0.5, 0.5, nodes)
next_candidates = []
ret_val = self._call_function_under_test(
candidate, next_candidates, 9.0, 9.0, 1
)
self.assertIsNone(ret_val)
self.assertEqual(next_candidates, [])
class Test_mean_centroid(unittest.TestCase):
@staticmethod
def _call_function_under_test(candidates):
from bezier.hazmat import triangle_intersection
return triangle_intersection.mean_centroid(candidates)
def test_it(self):
candidates = (
(1.0, 1.0, None, None),
(2.25, 1.5, None, None),
(1.25, 4.25, None, None),
)
centroid_x, centroid_y = self._call_function_under_test(candidates)
self.assertEqual(centroid_x, 0.5)
self.assertEqual(centroid_y, 0.75)
class Test_locate_point(unittest.TestCase):
@staticmethod
def _call_function_under_test(nodes, degree, x_val, y_val):
from bezier.hazmat import triangle_intersection
return triangle_intersection.locate_point(nodes, degree, x_val, y_val)
def test_it(self):
nodes = UNIT_TRIANGLE.copy(order="F")
degree = 1
x_val = 0.25
y_val = 0.625
s, t = self._call_function_under_test(nodes, degree, x_val, y_val)
self.assertEqual(s, x_val)
self.assertEqual(t, y_val)
def test_extra_newton_step(self):
# x(s, t) = -2 (s + 2 t) (t - 1)
# y(s, t) = 2 (s + 1) t
nodes = np.asfortranarray(
[[0.0, 1.0, 2.0, 2.0, 2.0, 0.0], [0.0, 0.0, 0.0, 1.0, 2.0, 2.0]]
)
degree = 2
x_val = 0.59375
y_val = 0.25
s, t = self._call_function_under_test(nodes, degree, x_val, y_val)
# NOTE: We can use the resultant to find that the **true** answers
# are roots of the following polynomials.
# 64 s^3 + 101 s^2 + 34 s - 5 = 0
# 128 t^3 - 192 t^2 + 91 t - 8 = 0
# Using extended precision, we can find these values to more
# digits than what is supported by IEEE-754.
expected_s = 0.109190958136897160638
self.assertAlmostEqual(s, expected_s, delta=6 * SPACING(expected_s))
expected_t = 0.11269475204698919699
self.assertAlmostEqual(t, expected_t, delta=SPACING(expected_t))
def test_no_match(self):
nodes = UNIT_TRIANGLE.copy(order="F")
degree = 1
x_val = -0.125
y_val = 0.25
self.assertIsNone(
self._call_function_under_test(nodes, degree, x_val, y_val)
)
class Test_same_intersection(unittest.TestCase):
@staticmethod
def _call_function_under_test(intersection1, intersection2, **kwargs):
from bezier.hazmat import triangle_intersection
return triangle_intersection.same_intersection(
intersection1, intersection2, **kwargs
)
def test_same(self):
intersection = make_intersect(10, 0.5, 99, 0.75)
result = self._call_function_under_test(intersection, intersection)
self.assertTrue(result)
def test_almost_same(self):
intersection1 = make_intersect(10, 0.5, 99, 0.75)
intersection2 = make_intersect(10, 0.5, 99, 0.875)
result = self._call_function_under_test(intersection1, intersection2)
self.assertFalse(result)
result = self._call_function_under_test(
intersection1, intersection2, wiggle=0.5
)
self.assertTrue(result)
def test_different_edge(self):
intersection1 = make_intersect(10, 0.5, 99, 0.5)
intersection2 = make_intersect(10, 0.5, 98, 0.5)
intersection3 = make_intersect(11, 0.5, 99, 0.5)
self.assertFalse(
self._call_function_under_test(intersection1, intersection2)
)
self.assertFalse(
self._call_function_under_test(intersection1, intersection3)
)
def test_different_param(self):
intersection1 = make_intersect(1, 0.5, 9, 0.5)
intersection2 = make_intersect(1, 0.75, 9, 0.5)
intersection3 = make_intersect(1, 0.5, 9, 0.75)
self.assertFalse(
self._call_function_under_test(intersection1, intersection2)
)
self.assertFalse(
self._call_function_under_test(intersection1, intersection3)
)
class Test_verify_duplicates(unittest.TestCase):
@staticmethod
def _call_function_under_test(duplicates, uniques):
from bezier.hazmat import triangle_intersection
return triangle_intersection.verify_duplicates(duplicates, uniques)
def test_empty(self):
self.assertIsNone(self._call_function_under_test([], []))
def test_success(self):
uniq = make_intersect(1, 0.0, 2, 0.25)
self.assertIsNone(self._call_function_under_test([uniq], [uniq]))
def test_success_triple(self):
uniq = make_intersect(1, 0.0, 2, 0.0)
self.assertIsNone(
self._call_function_under_test([uniq, uniq, uniq], [uniq])
)
def test_failed_uniqueness(self):
uniq = make_intersect(1, 0.375, 2, 0.75)
with self.assertRaises(ValueError):
self._call_function_under_test([], [uniq, uniq])
def test_bad_duplicate(self):
dupe = make_intersect(1, 0.75, 2, 0.25)
uniq = make_intersect(1, 0.25, 2, 0.75)
with self.assertRaises(ValueError):
self._call_function_under_test([dupe], [uniq])
def test_bad_single_corner(self):
uniq = make_intersect(1, 0.125, 2, 0.125)
with self.assertRaises(ValueError):
self._call_function_under_test([uniq], [uniq])
def test_bad_double_corner(self):
uniq = make_intersect(1, 0.0, 2, 1.0)
with self.assertRaises(ValueError):
self._call_function_under_test([uniq, uniq, uniq], [uniq])
def test_bad_count(self):
uniq = make_intersect(1, 0.375, 2, 0.75)
with self.assertRaises(ValueError):
self._call_function_under_test([uniq, uniq], [uniq])
class Test_verify_edge_segments(unittest.TestCase):
@staticmethod
def _call_function_under_test(edge_infos):
from bezier.hazmat import triangle_intersection
return triangle_intersection.verify_edge_segments(edge_infos)
def test_none(self):
return_value = self._call_function_under_test(None)
self.assertIsNone(return_value)
def test_valid(self):
edge_infos = [((0, 0.0, 0.5), (4, 0.25, 1.0), (5, 0.0, 0.75))]
return_value = self._call_function_under_test(edge_infos)
self.assertIsNone(return_value)
def test_bad_params(self):
from bezier.hazmat import triangle_intersection
edge_infos = [((0, 0.0, 1.5), (4, 0.25, 1.0), (5, 0.0, 0.75))]
with self.assertRaises(ValueError) as exc_info:
self._call_function_under_test(edge_infos)
exc_args = exc_info.exception.args
expected = (
triangle_intersection.BAD_SEGMENT_PARAMS,
(0, 0.0, 1.5),
)
self.assertEqual(exc_args, expected)
def test_consecutive_segments(self):
from bezier.hazmat import triangle_intersection
edge_infos = [
((0, 0.25, 0.5), (4, 0.25, 1.0), (5, 0.0, 0.75), (0, 0.0, 0.25))
]
with self.assertRaises(ValueError) as exc_info:
self._call_function_under_test(edge_infos)
exc_args = exc_info.exception.args
expected = (
triangle_intersection.SEGMENTS_SAME_EDGE,
(0, 0.0, 0.25),
(0, 0.25, 0.5),
)
self.assertEqual(exc_args, expected)
class Test_add_edge_end_unused(unittest.TestCase):
@staticmethod
def _call_function_under_test(intersection, duplicates, intersections):
from bezier.hazmat import triangle_intersection
return triangle_intersection.add_edge_end_unused(
intersection, duplicates, intersections
)
def test_match_s(self):
intersection1 = make_intersect(
1, 0.0, 2, 0.5, interior_curve=get_enum("COINCIDENT_UNUSED")
)
intersection2 = make_intersect(
1, 0.0, 2, 0.5, interior_curve=get_enum("FIRST")
)
duplicates = []
intersections = [intersection2]
return_value = self._call_function_under_test(
intersection1, duplicates, intersections
)
self.assertIsNone(return_value)
self.assertEqual(duplicates, [intersection2])
self.assertEqual(intersections, [intersection1])
def test_match_t(self):
intersection1 = make_intersect(
0, 0.5, 0, 0.0, interior_curve=get_enum("COINCIDENT_UNUSED")
)
intersection2 = make_intersect(
0, 0.5, 0, 0.0, interior_curve=get_enum("TANGENT_FIRST")
)
duplicates = []
intersections = [intersection2]
return_value = self._call_function_under_test(
intersection1, duplicates, intersections
)
self.assertIsNone(return_value)
self.assertEqual(duplicates, [intersection2])
self.assertEqual(intersections, [intersection1])
def test_no_match(self):
intersection1 = make_intersect(
1, 0.5, 0, 0.0, interior_curve=get_enum("COINCIDENT_UNUSED")
)
intersection2 = make_intersect(
2, 0.0, 2, 0.5, interior_curve=get_enum("SECOND")
)
intersection3 = make_intersect(
1, 0.5, 0, 0.75, interior_curve=get_enum("FIRST")
)
intersections = [intersection2, intersection3]
return_value = self._call_function_under_test(
intersection1, None, intersections
)
self.assertIsNone(return_value)
self.assertEqual(
intersections, [intersection2, intersection3, intersection1]
)
class Test_check_unused(unittest.TestCase):
@staticmethod
def _call_function_under_test(intersection, duplicates, unused):
from bezier.hazmat import triangle_intersection
return triangle_intersection.check_unused(
intersection, duplicates, unused
)
def test_match_s(self):
intersection1 = make_intersect(1, 0.0, 2, 0.5)
intersection2 = make_intersect(
1, 0.0, 2, 0.5, interior_curve=get_enum("COINCIDENT_UNUSED")
)
duplicates = []
unused = [intersection2]
is_duplicate = self._call_function_under_test(
intersection1, duplicates, unused
)
self.assertTrue(is_duplicate)
self.assertEqual(duplicates, [intersection1])
def test_match_t(self):
intersection1 = make_intersect(1, 0.5, 2, 0.0)
intersection2 = make_intersect(
1, 0.5, 2, 0.0, interior_curve=get_enum("COINCIDENT_UNUSED")
)
duplicates = []
unused = [intersection2]
is_duplicate = self._call_function_under_test(
intersection1, duplicates, unused
)
self.assertTrue(is_duplicate)
self.assertEqual(duplicates, [intersection1])
def test_no_match(self):
intersection1 = make_intersect(1, 0.5, 0, 0.0)
intersection2 = make_intersect(
2, 0.0, 2, 0.5, interior_curve=get_enum("COINCIDENT_UNUSED")
)
intersection3 = make_intersect(
1, 0.5, 0, 0.75, interior_curve=get_enum("COINCIDENT_UNUSED")
| |
frequency agility requested channel change",
0x1F, "Zigbee no joinable beacons; execute ATFR",
0x20, "Zigbee token space recovered",
0x21, "Zigbee token space unrecoverable",
0x22, "Zigbee token space corrupt",
0x23, "Zigbee dual-mode metaframe error",
0x24, "BLE connect",
0x25, "BLE disconnect",
0x34, "Bandmask configuration failed",
0x80, "Stack reset",
0x81, "FIB bootloader reset",
0x82, "Send or join command issued with connect from AP",
0x83, "AP not found",
0x84, "PSK not configured",
0x87, "SSID not found",
0x88, "Failed to join with security enabled",
0x89, "Core lockup or crystal reset failure",
0x8A, "Invalid channel",
0x8B, "Low VCC reset",
0x8E, "Failed to join AP")
if code in opts:
print(pad_text("Modem status") + opts[opts.index(code) + 1])
return
if code >= 0x80:
print(pad_text("Modem status") + "Stack Error")
return
print("[Error] Unknown modem status code " + get_hex(code))
def get_packet_status(code):
"""
Decode the packet's status byte and print the relevant status message.
Args:
code (int): The status code included in the packet.
"""
text = ""
if code == 0x00: text = "packet not acknowledged, "
if code & 0x01: text += "packet acknowledged, "
if code & 0x02: text += "broadcast packet, "
if code & 0x20: text += "APS-encrypted packet, "
if code & 0x40: text += "End-Device sent packet, "
text = text[0:1].upper() + text[1:-2]
print(pad_text("Status") + text)
def get_delivery_status(code):
"""
Decode the packet's address delivery status byte and print the relevant status message.
Args:
code (int): The status code included in the packet.
"""
opts = (0x00, "Success",
0x01, "MAC ACK failure",
0x02, "CCA failure",
0x03, "Packet not transmitted and purged",
0x04, "Physical error on the interface",
0x15, "Invalid destination endpoint",
0x18, "No buffers available",
0x21, "Network ACK failure",
0x22, "Not joined to network",
0x23, "Self-addressed",
0x24, "Address not found",
0x25, "Route not found",
0x26, "Broadcast relay not heard",
0x2B, "Invalid binding table index",
0x2C, "Invalid endpoint",
0x2D, "Attempted broadcast with APS transmission",
0x2E, "Attempted unicast with APS transmission, but EE=0",
0x31, "Software error occurred",
0x32, "Resource error: lack of free buffers, timers etc.",
0x74, "Data payload too large",
0x75, "Indirect message unrequested",
0x76, "Client socket creation attempt failed",
0xBB, "Key not authorized")
if code in opts:
print(pad_text("Delivery status") + opts[opts.index(code) + 1])
return
print("[ERROR] Unknown Delivery status code " + get_hex(code))
def get_discovery_status(code):
"""
Decode the packet's address discovery status byte and print the relevant status message.
Args:
code (int): The status code included in the packet.
"""
opts = ("No Discovery Overhead", "Address Discovery", "Route Discovery", "Address and Route", "Extended timeout discovery")
if -1 < code < 4:
print(pad_text("Discovery status") + opts[code])
elif code == 0x40:
print(pad_text("Discovery status") + opts[4])
else:
print("[ERROR] Unknown Discovery status code " + get_hex(code))
def get_zcl_attribute_status(code):
"""
Decode the status of a ZCL attribute access operation.
Args:
code (int): The status code included in the packet.
Returns:
str: The ZCL attribute status.
"""
opts = (0x00, "Success",
0x01, "Failure",
0x7e, "Not authorized",
0x7f, "Reserved field not zero",
0x80, "Malformed command",
0x81, "Unsupported cluster command",
0x82, "Unsupported general command",
0x83, "Unsupported manufacturer's cluster command",
0x84, "Unsupported manufacturer's general command",
0x85, "Invalid field",
0x86, "Unsupported attribute",
0x87, "Invalid value",
0x88, "Read only",
0x89, "Insufficient space",
0x8a, "Duplicate exists",
0x8b, "Not found",
0x8c, "Unreportable attribute",
0x8d, "Invalid data type",
0x8e, "Invalid selector",
0x8f, "Write only",
0x90, "Inconsistent startup state",
0x91, "Defined out of band",
0x92, "Inconsistent",
0x93, "Action denied",
0x94, "Timeout",
0x95, "Abort",
0x96, "Invalid image",
0x97, "Wait for data",
0x98, "No image available",
0x99, "Require more image",
0x9a, "Notification pending",
0xc0, "Hardware failure",
0xc1, "Software failure",
0xc2, "Calibration error",
0xc3, "Unsupported cluster")
if code in opts: return opts[opts.index(code) + 1]
return "Unknown"
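# Usage sketch (comment only): get_zcl_attribute_status(0x86) returns
# "Unsupported attribute", while any code missing from the table above falls
# through to "Unknown".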
def get_zcl_attribute_type(code):
"""
Determine a ZCL attribute's data type from its type code.
Args:
code(int): The ZCL data type code included in the packet.
Returns:
str: The ZCL attribute type.
"""
opts = (0x00, "NULL",
0x08, "DATA8",
0x09, "DATA16",
0x0a, "DATA24",
0x0b, "DATA32",
0x0c, "DATA40",
0x0d, "DATA48",
0x0e, "DATA56",
0x0f, "DATA64",
0x10, "BOOL",
0x18, "MAP8",
0x19, "MAP16",
0x1a, "MAP24",
0x1b, "MAP32",
0x1c, "MAP40",
0x1d, "MAP48",
0x1e, "MAP56",
0x1f, "MAP64",
0x20, "UINT8",
0x21, "UINT16",
0x22, "UINT24",
0x23, "UINT32",
0x24, "UINT40",
0x25, "UINT48",
0x26, "UINT56",
0x27, "UINT64",
0x28, "INT8",
0x29, "INT16",
0x2a, "INT24",
0x2b, "INT32",
0x2c, "INT40",
0x2d, "INT48",
0x2e, "INT56",
0x2f, "INT64",
0x30, "ENUM8",
0x31, "ENUM16",
0x38, "SEMI",
0x39, "SINGLE",
0x3a, "DOUBLE",
0x41, "OCTSTR",
0x42, "STRING",
0x43, "OCTSTR16",
0x44, "STRING16",
0x48, "ARRAY",
0x4c, "STRUCT",
0x50, "SET",
0x51, "BAG",
0xe0, "ToD",
0xe1, "DATE",
0xe2, "UTC",
0xe8, "CLUSTERID",
0xe9, "ATTRID",
0xea, "BACOID",
0xf0, "EUI64",
0xf1, "KEY128",
0xff, "UNK")
if code in opts: return opts[opts.index(code) + 1]
return "OPAQUE"
def get_zcl_attribute_size(code):
"""
Determine the number of bytes a given ZCL attribute takes up.
Args:
code (int): The attribute size code included in the packet.
Returns:
int: size of the attribute data in bytes, or -1 for error/no size.
"""
opts = (0x00, 0,
0x08, 1,
0x09, 2,
0x0a, 3,
0x0b, 4,
0x0c, 5,
0x0d, 6,
0x0e, 7,
0x0f, 8,
0x10, 1,
0x18, 1,
0x19, 2,
0x1a, 3,
0x1b, 4,
0x1c, 5,
0x1d, 6,
0x1e, 7,
0x1f, 8,
0x20, 1,
0x21, 2,
0x22, 3,
0x23, 4,
0x24, 5,
0x25, 6,
0x26, 7,
0x27, 8,
0x28, 1,
0x29, 2,
0x2a, 3,
0x2b, 4,
0x2c, 5,
0x2d, 6,
0x2e, 7,
0x2f, 8,
0x30, 1,
0x31, 2,
0x38, 2,
0x39, 4,
0x3a, 8,
0x41, -1,
0x42, -1,
0x43, -1,
0x44, -1,
0x48, -1,
0x4c, -1,
0x50, -1,
0x51, -1,
0xe0, 4,
0xe1, 4,
0xe2, 4,
0xe8, 2,
0xe9, 2,
0xea, 4,
0xf0, 8,
0xf1, 16,
0xff, 0)
for i in range(0, len(opts), 2):
if code == opts[i]: return opts[i + 1]
return -1
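# Usage sketch (comment only): get_zcl_attribute_size(0x21) returns 2 (UINT16),
# get_zcl_attribute_size(0x42) returns -1 because STRING has no fixed size, and
# any code missing from the table above also returns -1.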
def get_zdo_command(code):
"""
Display a ZDO request or response command name.
Args:
code (int): The ZDO command code included in the packet.
Returns:
str: The command name.
"""
opts = ("16-bit Address", 0x0000,
"64-bit Address", 0x0001,
"Node Descriptor", 0x0002,
"Simple Descriptor", 0x0004,
"Active Endpoints", 0x0005,
"Match Descriptor", 0x0006,
"Complex Descriptor", 0x0010,
"User Descriptor", 0x0011,
"Device Announce", 0x0013,
"User Descriptor Set", 0x0014,
"Management Network Discovery", 0x0030,
"Management LQI (Neighbor Table)", 0x0031,
"Management RTG (Routing Table)", 0x0032,
"Management Leave", 0x0034,
"Management Permit Join", 0x0036,
"Management Network Update", 0x0038)
for i in range(0, len(opts), 2):
# Look at the lower bits for comparison as bit 15 is what
# distinguishes responses (set) from requests (unset)
if code & 0x7FFF == opts[i + 1]:
# Append the appropriate message type
return opts[i] + (" Response" if code > 0x7FFF else " Request")
return "[ERROR] Unknown ZDO command " + get_hex(code, 4)
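# Worked example (comment only): for code 0x8031, code & 0x7FFF == 0x0031
# ("Management LQI (Neighbor Table)") and code > 0x7FFF, so the function
# returns "Management LQI (Neighbor Table) Response"; code 0x0031 itself would
# yield "Management LQI (Neighbor Table) Request".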
def get_zdo_type(code):
"""
Display the ZDO request type as embedded in the request.
Args:
code (int): The request type code included in the packet.
"""
if code == 0x00:
print(pad_text(" Request type") + "Single device response")
elif code == 0x01:
print(pad_text(" Request type") + "Extended response")
else:
print("[ERROR] Unknown ZDO request type " + get_hex(code))
def get_zdo_status(code):
"""
Display the ZDO status code embedded in the response.
Args:
code (int): The status code included in the packet.
"""
print(pad_text(" Response status") + get_zcl_attribute_status(code))
def get_node_desc(frame_data, start):
"""
Display the ZDO Node Descriptor response.
Args:
frame_data (list): The packet data.
start (int): The index of the start of the descriptor.
"""
# Node Descriptor Byte 1
get_device_type((frame_data[start] & 0xE0) >> 5)
if frame_data[start] & 0x10 == 0x10:
print(pad_text(" Complex descriptor available") + "Yes")
else:
print(pad_text(" Complex descriptor available") + "No")
if frame_data[start] & 0x08 == 0x08:
print(pad_text(" User descriptor available") + "Yes")
else:
print(pad_text(" User descriptor available") + "No")
# Byte 2
flags = get_binary(frame_data[start + 1])
print(pad_text(" APS flags") + "b" + flags[0:3])
opts = ("868MHz", "R", "900MHz", "2.4GHz", "R")
text = ""
for i in range(3, 8):
if flags[i] == "1": text = opts[i - 3]
if text == "R": text = "[ERROR] Reserved band indicated"
print(pad_text(" Frequency band") + text)
# Byte 3
text = get_device_capability(frame_data[start + 2])
print(pad_text(" MAC capabilities") + text)
# Bytes 4 and 5
print(pad_text(" | |
'''
Process data and prepare inputs for Neural Event Model.
'''
import bz2
import gzip
import json
import logging
import numpy as np
import sys
from gensim import models
from scipy.sparse import csr_matrix
from six import iteritems
from sklearn.preprocessing import normalize, LabelEncoder
from typing import List
from builtins import isinstance
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout,
format='[%(asctime)s]%(levelname)s(%(name)s): %(message)s')
LOGGER = logging.getLogger(__name__)
class DataProcessor:
'''
Read in data in json format, index and vectorize words, preparing data for train or test.
'''
def __init__(self):
# All types of arguments seen by the processor. A0, A1, etc.
self.arg_types = []
self.max_sentence_length = None
self.max_arg_length = None
self.word_index = {"NONE": 0, "UNK": 1} # NONE is padding, UNK is OOV.
self.label_encoder = None
self.set_labels = set()
def index_data(self, filename, tokenize=None, add_new_words=True, pad_info=None, include_sentences_in_events=False, \
use_event_structure=True, min_event_structure=1, max_event_structure=1, min_args_event=1, return_data=False):
'''
Read data from file, and return indexed inputs. If this is for test, do not add new words to the
vocabulary (treat them as unk). pad_info is applicable when we want to pad data to a pre-specified
length (for example when testing, we want to make the sequences the same length as those from train).
'''
rows_buffer = []
indexed_data = []
open_file = gzip.open if filename.endswith('.gz') else (bz2.open if filename.endswith('.bz2') else open)
count_rows = 0
for row in open_file(filename, mode='rt', encoding='utf-8', errors='replace'):
rows_buffer.append(row)
count_rows += 1
if (len(rows_buffer) >= 1000):
indexed_data.extend(self._index_data_batch(rows_buffer, tokenize, add_new_words, include_sentences_in_events, \
min_event_structure=min_event_structure, max_event_structure=max_event_structure, \
min_args_event=min_args_event, return_data=return_data))
rows_buffer.clear()
indexed_data.extend(self._index_data_batch(rows_buffer, tokenize, add_new_words, include_sentences_in_events, \
min_event_structure=min_event_structure, max_event_structure=max_event_structure, \
min_args_event=min_args_event, return_data=return_data))
LOGGER.info(f"INDEXED DATA/ROWS: {len(indexed_data)}/{count_rows} (with min of {min_args_event} args)")
inputs, labels, datasrc = self.pad_data(indexed_data, pad_info, use_event_structure, return_data=return_data)
return (inputs, self._make_one_hot(labels), datasrc) if return_data else (inputs, self._make_one_hot(labels))
def _index_data_batch(self, rows_batch, tokenize=None, add_new_words=True, include_sentences_in_events=False, \
min_event_structure=1, max_event_structure=1, min_args_event=1, return_data=False):
indexed_data = []
for row in rows_batch:
row = row.strip()
row = row if row.startswith(('{')) else '{' + '{'.join(row.split('{')[1:])
row = row if row.endswith(('}')) else '}'.join(row.split('}')[:-1]) + '}'
datum = None
try:
datum = json.loads(row)
except json.decoder.JSONDecodeError:
if (len(row.strip()) > 0):
warn_msg = f"ERROR ON INDEX_DATA: The row isn't in json format: '{row}'"
LOGGER.warn(warn_msg)
continue
indexed_sentence = self._index_string(datum["sentence"], tokenize=tokenize, add_new_words=add_new_words)
datum_event_structure = datum["event_structure"]
list_datum_event_structure = []
if isinstance(datum_event_structure, list):
len_event_structure = len(datum_event_structure)
if (len_event_structure > 0) \
and ((min_event_structure is None) or (len_event_structure >= max(min_event_structure, 0))) \
and ((max_event_structure is None) or (len_event_structure <= max(max_event_structure, 1))):
list_datum_event_structure = datum_event_structure #= datum_event_structure[0]
else:
# discard sentences without an event, or without the expected number of event levels, and continue reading
continue
else:
list_datum_event_structure = [datum_event_structure]
for datum_event_structure in list_datum_event_structure:
if (min_args_event is not None) and (len(datum_event_structure.keys()) < max(min_args_event, 1)):
# discard sentences whose event has an insufficient number of arguments
continue
indexed_event_args = {key: self._index_string(datum_event_structure[key], tokenize=tokenize, add_new_words=add_new_words) \
for key in datum_event_structure.keys()}
# After index with stemming some args could be empty, so filter again
indexed_event_args = {key: value for key, value in indexed_event_args.items() if len(value) > 0}
if (min_args_event is not None) and (len(indexed_event_args.keys()) < max(min_args_event, 1)):
# discard sentences whose event still has an insufficient number of arguments after filtering
continue
if include_sentences_in_events:
indexed_event_args["sentence"] = indexed_sentence
indexed_row = [indexed_sentence, indexed_event_args]
try:
label = datum["meta_info"][0]
indexed_row.append(label)
except:
try:
label = datum["label"]
indexed_row.append(label)
except:
pass
if return_data:
indexed_row.append(datum)
indexed_data.append(tuple(indexed_row))
return indexed_data
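# Illustrative input row (an assumption inferred from the keys read above, not
# taken from any real dataset): one JSON object per line, e.g.
# {"sentence": "The board approved the merger.",
#  "event_structure": {"V": "approved", "A0": "the board", "A1": "the merger"},
#  "label": "positive"}
# Alternatively, "meta_info" (a list whose first element is the label) may be
# present instead of "label".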
def _index_string(self, string: str, tokenize=None, add_new_words=True):
tokens = self.apply_tokenize_func(string, tokenize).lower().split()
for token in tokens:
if token not in self.word_index and add_new_words:
self.word_index[token] = len(self.word_index)
token_indices = [self.word_index[token] if token in self.word_index else self.word_index["UNK"] \
for token in tokens]
return token_indices
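# Behaviour sketch (comment only): with add_new_words=True, unseen tokens are
# appended to word_index and receive fresh integer ids; with
# add_new_words=False (e.g. at test time) they map to word_index["UNK"], which
# is 1, while padding later uses word_index["NONE"], which is 0.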
def apply_tokenize_func(self, string: str, tokenize=None):
tokenize = [] if (tokenize is None) else (list(tokenize) if isinstance(tokenize, (list, tuple)) else [tokenize])
for tokenizer in tokenize:
tokens = tokenizer(string)
string = " ".join(tokens)
return string
def _make_one_hot(self, labels, label_encoder=None):
'''
Takes labels and converts them into one hot representations.
'''
try:
_ = self.label_encoder
except AttributeError:
self.label_encoder = None
try:
_ = self.set_labels
except AttributeError:
self.set_labels = set()
if labels is None:
return None
if (label_encoder is not None):
self.label_encoder = label_encoder
else:
if (self.label_encoder is None):
self.label_encoder = LabelEncoder()
try:
self.label_encoder.fit(self.set_labels)
except ValueError:
pass
self.label_encoder.fit(labels)
self.set_labels.update(self.label_encoder.classes_)
return self.label_encoder.transform(labels)
def pad_data(self, indexed_data, pad_info, use_event_structure=True, return_data=False):
'''
Takes a list of tuples containing indexed sentences, indexed event structures and labels, and returns numpy
arrays.
'''
sentence_inputs = []
# Setting max sentence length
if not pad_info:
pad_info = {}
labels = None
datasrc = None
len_indexed_data = len(indexed_data[0])
zip_indexed_data = zip(*indexed_data)
if len_indexed_data > 3:
indexed_sentences, indexed_event_structures, labels, datasrc = zip_indexed_data
labels = np.asarray(labels)
datasrc = np.asarray(datasrc)
elif len_indexed_data == 3:
if return_data:
indexed_sentences, indexed_event_structures, datasrc = zip_indexed_data
datasrc = np.asarray(datasrc)
else:
indexed_sentences, indexed_event_structures, labels = zip_indexed_data
labels = np.asarray(labels)
else:
indexed_sentences, indexed_event_structures = zip_indexed_data
event_structures_have_sentences = False
if "sentence" in indexed_event_structures[0]:
# This means index_data included sentences in event structures. We need to pad accordingly.
event_structures_have_sentences = True
if "max_sentence_length" in pad_info:
self.max_sentence_length = pad_info["max_sentence_length"]
else:
self.max_sentence_length = max([len(indexed_sentence) for indexed_sentence in indexed_sentences])
# Padding and/or truncating sentences
for indexed_sentence in indexed_sentences:
sentence_inputs.append(csr_matrix(self._pad_indexed_string(indexed_sentence, self.max_sentence_length)))
# Removing unnecessary arguments.
if "wanted_args" in pad_info:
self.arg_types = list(pad_info["wanted_args"])
if "V" not in self.arg_types:
self.arg_types = ["V"] + self.arg_types
if "sentence" not in self.arg_types and event_structures_have_sentences:
self.arg_types += ["sentence"]
else:
arg_types = []
for event_structure in indexed_event_structures:
arg_types += event_structure.keys()
self.arg_types = list(set(arg_types))
# Making ordered event argument indices, converting argument dicts into lists with a canonical order.
ordered_event_structures = []
for event_structure in indexed_event_structures:
ordered_event_structure = [event_structure[arg_type] if arg_type in event_structure else \
[self.word_index["NONE"]] for arg_type in self.arg_types]
ordered_event_structures.append(ordered_event_structure)
if "max_arg_length" in pad_info:
self.max_arg_length = pad_info["max_arg_length"]
else:
self.max_arg_length = max([max([len(arg) for arg in structure]) \
for structure in ordered_event_structures])
event_inputs = []
for event_structure in ordered_event_structures:
event_inputs.append(csr_matrix([self._pad_indexed_string(indexed_arg, self.max_arg_length) \
for indexed_arg in event_structure]))
indexed_sentences = None
indexed_event_structures = None
ordered_event_structures = None
if use_event_structure:
sentence_inputs = None
inputs = np.asarray(event_inputs)
else:
event_inputs = None
inputs = np.asarray(sentence_inputs)
return inputs, labels, datasrc
def _pad_indexed_string(self, indexed_string: List[int], max_string_length: int):
'''
Pad and/or truncate an indexed string to the max length. Both padding and truncation happen from the left.
'''
string_length = len(indexed_string)
# Padding on or truncating from the left
padded_string = ([self.word_index["NONE"]] * (max_string_length - string_length) \
+ indexed_string)[-max_string_length:]
return padded_string
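# Worked example (comment only, given word_index["NONE"] == 0 from __init__):
# _pad_indexed_string([5, 6, 7], 5)          -> [0, 0, 5, 6, 7]   (left padding)
# _pad_indexed_string([1, 2, 3, 4, 5, 6], 5) -> [2, 3, 4, 5, 6]   (left truncation)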
def get_pad_info(self):
'''
Returns the information required to pad or truncate new datasets to make new inputs look like those
processed so far. This is useful to make test data the same size as train data.
'''
pad_info = {}
if self.arg_types is not None:
pad_info["wanted_args"] = self.arg_types
if self.max_arg_length is not None:
pad_info["max_arg_length"] = self.max_arg_length
if self.max_sentence_length is not None:
pad_info["max_sentence_length"] = self.max_sentence_length
return pad_info
def get_embedding(self, embedding_file, add_extra_words=False):
'''
Reads in a pretrained embedding file, and returns a numpy array with vectors for words in word index.
'''
LOGGER.info("Begin of reading pretrained word embeddings ...")
if ('.txt' in embedding_file):
(pretrained_embedding, embedding_size) = self._get_embedding_from_txt(embedding_file)
else:
(pretrained_embedding, embedding_size) = self._get_embedding_from_bin(embedding_file)
if add_extra_words:
# add pretrained words that are not yet in word_index
tokens = list(pretrained_embedding.keys() - self.word_index.keys())
for token in tokens:
self.word_index[token] = len(self.word_index)
len_word_index = len(self.word_index)
shape_embedding = (len_word_index, embedding_size)
#embedding = np.array(list(pretrained_embedding.values()))
# eps = np.finfo(embedding.dtype).eps
# low_embedding = embedding.min(axis=0)
# high_embedding = embedding.max(axis=0) + eps
# LOGGER.info(f"EMBEDDING LOW: {low_embedding.min()}\tEMBEDDING HIGH: {high_embedding.min()}\tEMBEDDING MIN-ABS: {np.amin(np.absolute(embedding))}")
embedding = np.zeros(shape_embedding) # np.random.uniform(low_embedding, high_embedding, shape_embedding)
count_words_pretrained_embedding = 0
for word in self.word_index:
if word in pretrained_embedding:
embedding[self.word_index[word]] = pretrained_embedding[word]
count_words_pretrained_embedding += 1
low_embedding = embedding.min(axis=0)
high_embedding = embedding.max(axis=0)
LOGGER.info(f"EMBEDDING LOW: {low_embedding.min()}\tEMBEDDING HIGH: {high_embedding.min()}")
# Each term without word-embedding receives a representation very close to the origin of the vector space, but not zero.
embedding[self.word_index["UNK"]] += np.finfo(embedding.dtype).eps
# normalize embeddings with l2-norm
# axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature
embedding = normalize(embedding, axis=1)
# embedding[self.word_index["NONE"]] = np.zeros(embedding_size)
low_embedding = embedding.min(axis=0)
high_embedding = embedding.max(axis=0)
LOGGER.info(f"NORMALIZED EMBEDDING LOW: {low_embedding.min()}\tNORMALIZED EMBEDDING HIGH: {high_embedding.min()}")
LOGGER.info("End of reading pretrained word embeddings.")
proportion = (count_words_pretrained_embedding * 100.0) / len_word_index
string_proportion = f"Proportion of pre-embedding words: {proportion:.2f}% ({count_words_pretrained_embedding} / {len_word_index})."
| |
dest, src
srcRegsRelativeLats = [0, 0, 0, 1, 1, 0]
class HPI_CLZ_T1(MinorFUTiming):
description = 'HPI_CLZ_T1'
mask, match = t32_opcode('1111_1010_1011_xxxx__1111_xxxx_1000_xxxx')
srcRegsRelativeLats = [3, 3, 2, 2, 2, 1, 0]
class HPI_CLZ_A1(MinorFUTiming):
description = 'HPI_CLZ_A1'
mask, match = a32_opcode('xxxx_0001_0110_xxxx__xxxx_xxxx_0001_xxxx')
srcRegsRelativeLats = [3, 3, 2, 2, 2, 1, 0]
class HPI_CMN_immediate_A1(MinorFUTiming):
description = 'HPI_CMN_immediate_A1'
mask, match = a32_opcode('xxxx_0011_0111_xxxx__xxxx_xxxx_xxxx_xxxx')
srcRegsRelativeLats = [3, 3, 3, 2, 2, 3, 3, 3, 0]
class HPI_CMN_register_A1(MinorFUTiming):
description = 'HPI_CMN_register_A1'
mask, match = a32_opcode('xxxx_0001_0111_xxxx__xxxx_xxxx_xxx0_xxxx')
srcRegsRelativeLats = [3, 3, 3, 2, 2, 3, 3, 3, 0]
class HPI_CMP_immediate_A1(MinorFUTiming):
description = 'HPI_CMP_immediate_A1'
mask, match = a32_opcode('xxxx_0011_0101_xxxx__xxxx_xxxx_xxxx_xxxx')
srcRegsRelativeLats = [3, 3, 3, 2, 2, 3, 3, 3, 0]
class HPI_CMP_register_A1(MinorFUTiming):
description = 'HPI_CMP_register_A1'
mask, match = a32_opcode('xxxx_0001_0101_xxxx__xxxx_xxxx_xxx0_xxxx')
srcRegsRelativeLats = [3, 3, 3, 2, 2, 3, 3, 3, 0]
class HPI_MLA_T1(MinorFUTiming):
description = 'HPI_MLA_T1'
mask, match = t32_opcode('1111_1011_0000_xxxx__xxxx_xxxx_0000_xxxx')
# z, z, z, a, l?, r?
srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0]
class HPI_MLA_A1(MinorFUTiming):
description = 'HPI_MLA_A1'
mask, match = a32_opcode('xxxx_0000_001x_xxxx__xxxx_xxxx_1001_xxxx')
# z, z, z, a, l?, r?
srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0]
class HPI_MADD_A64(MinorFUTiming):
description = 'HPI_MADD_A64'
mask, match = a64_opcode('x001_1011_000x_xxxx__0xxx_xxxx_xxxx_xxxx')
# a, l?, r?
srcRegsRelativeLats = [1, 1, 1, 0]
extraCommitLat = 1
class HPI_MLS_T1(MinorFUTiming):
description = 'HPI_MLS_T1'
mask, match = t32_opcode('1111_1011_0000_xxxx__xxxx_xxxx_0001_xxxx')
# z, z, z, l?, a, r?
srcRegsRelativeLats = [0, 0, 0, 2, 0, 0, 0]
class HPI_MLS_A1(MinorFUTiming):
description = 'HPI_MLS_A1'
mask, match = a32_opcode('xxxx_0000_0110_xxxx__xxxx_xxxx_1001_xxxx')
# z, z, z, l?, a, r?
srcRegsRelativeLats = [0, 0, 0, 2, 0, 0, 0]
class HPI_MOVT_A1(MinorFUTiming):
description = 'HPI_MOVT_A1'
mask, match = t32_opcode('xxxx_0010_0100_xxxx__xxxx_xxxx_xxxx_xxxx')
class HPI_MUL_T1(MinorFUTiming):
description = 'HPI_MUL_T1'
mask, match = t16_opcode('0100_0011_01xx_xxxx')
class HPI_MUL_T2(MinorFUTiming):
description = 'HPI_MUL_T2'
mask, match = t32_opcode('1111_1011_0000_xxxx_1111_xxxx_0000_xxxx')
class HPI_PKH_T1(MinorFUTiming):
description = 'HPI_PKH_T1'
mask, match = t32_opcode('1110_1010_110x_xxxx__xxxx_xxxx_xxxx_xxxx')
srcRegsRelativeLats = [0, 0, 0, 2, 1, 0]
class HPI_PKH_A1(MinorFUTiming):
description = 'HPI_PKH_A1'
mask, match = a32_opcode('xxxx_0110_1000_xxxx__xxxx_xxxx_xx01_xxxx')
srcRegsRelativeLats = [0, 0, 0, 2, 1, 0]
class HPI_QADD_QSUB_T1(MinorFUTiming):
description = 'HPI_QADD_QSUB_T1'
mask, match = t32_opcode('1111_1010_1000_xxxx__1111_xxxx_10x0_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 1, 0]
class HPI_QADD_QSUB_A1(MinorFUTiming):
description = 'HPI_QADD_QSUB_A1'
mask, match = a32_opcode('xxxx_0001_00x0_xxxx__xxxx_xxxx_0101_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 1, 0]
# T1 QADD16 QADD8 QSUB16 QSUB8 UQADD16 UQADD8 UQSUB16 UQSUB8
class HPI_QADD_ETC_T1(MinorFUTiming):
description = 'HPI_QADD_ETC_T1'
mask, match = t32_opcode('1111_1010_1x0x_xxxx__1111_xxxx_0x01_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 1, 0]
# A1 QADD16 QADD8 QSAX QSUB16 QSUB8 UQADD16 UQADD8 UQASX UQSAX UQSUB16 UQSUB8
class HPI_QADD_ETC_A1(MinorFUTiming):
description = 'HPI_QADD_ETC_A1'
mask, match = a32_opcode('xxxx_0110_0x10_xxxx__xxxx_xxxx_xxx1_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 1, 0]
class HPI_QASX_QSAX_UQASX_UQSAX_T1(MinorFUTiming):
description = 'HPI_QASX_QSAX_UQASX_UQSAX_T1'
mask, match = t32_opcode('1111_1010_1x10_xxxx__1111_xxxx_0x01_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 1, 0]
class HPI_QDADD_QDSUB_T1(MinorFUTiming):
description = 'HPI_QDADD_QDSUB_T1'
mask, match = t32_opcode('1111_1010_1000_xxxx__1111_xxxx_10x1_xxxx')
srcRegsRelativeLats = [0, 0, 0, 0, 1, 0]
class HPI_QDADD_QDSUB_A1(MinorFUTiming):
description = 'HPI_QDADD_QDSUB_A1'
mask, match = a32_opcode('xxxx_0001_01x0_xxxx__xxxx_xxxx_0101_xxxx')
srcRegsRelativeLats = [0, 0, 0, 0, 1, 0]
class HPI_RBIT_A1(MinorFUTiming):
description = 'HPI_RBIT_A1'
mask, match = a32_opcode('xxxx_0110_1111_xxxx__xxxx_xxxx_0011_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 0]
class HPI_REV_REV16_A1(MinorFUTiming):
description = 'HPI_REV_REV16_A1'
mask, match = a32_opcode('xxxx_0110_1011_xxxx__xxxx_xxxx_x011_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 0]
class HPI_REVSH_A1(MinorFUTiming):
description = 'HPI_REVSH_A1'
mask, match = a32_opcode('xxxx_0110_1111_xxxx__xxxx_xxxx_1011_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 0]
class HPI_ADD_ETC_A1(MinorFUTiming):
description = 'HPI_ADD_ETC_A1'
mask, match = a32_opcode('xxxx_0110_0xx1_xxxx__xxxx_xxxx_x001_xxxx')
srcRegsRelativeLats = [0, 0, 0, 2, 2, 0]
class HPI_ADD_ETC_T1(MinorFUTiming):
description = 'HPI_ADD_ETC_T1'
mask, match = t32_opcode('1111_1010_100x_xxxx__1111_xxxx_0xx0_xxxx')
srcRegsRelativeLats = [0, 0, 0, 2, 2, 0]
class HPI_SASX_SHASX_UASX_UHASX_A1(MinorFUTiming):
description = 'HPI_SASX_SHASX_UASX_UHASX_A1'
mask, match = a32_opcode('xxxx_0110_0xx1_xxxx__xxxx_xxxx_0011_xxxx')
srcRegsRelativeLats = [3, 3, 2, 2, 2, 1, 0]
class HPI_SBFX_UBFX_A1(MinorFUTiming):
description = 'HPI_SBFX_UBFX_A1'
mask, match = a32_opcode('xxxx_0111_1x1x_xxxx__xxxx_xxxx_x101_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 0]
### SDIV
sdiv_lat_expr = expr_top(let([
('left', un('SignExtend32To64', int_reg(src(4)))),
('right', un('SignExtend32To64', int_reg(src(3)))),
('either_signed', bin('Or',
bin('SLessThan', ref('left'), literal(0)),
bin('SLessThan', ref('right'), literal(0)))),
('left_size', un('SizeInBits', un('Abs', ref('left')))),
('signed_adjust', if_expr(ref('either_signed'), literal(1), literal(0))),
('right_size', un('SizeInBits',
bin('UDiv', un('Abs', ref('right')),
if_expr(ref('either_signed'), literal(4), literal(2))))),
('left_minus_right', if_expr(
bin('SLessThan', ref('left_size'), ref('right_size')),
literal(0),
bin('Sub', ref('left_size'), ref('right_size'))))
],
bin('Add',
ref('signed_adjust'),
if_expr(bin('Equal', ref('right'), literal(0)),
literal(0),
bin('UDiv', ref('left_minus_right'), literal(4))))
))
sdiv_lat_expr64 = expr_top(let([
('left', un('SignExtend32To64', int_reg(src(0)))),
('right', un('SignExtend32To64', int_reg(src(1)))),
('either_signed', bin('Or',
bin('SLessThan', ref('left'), literal(0)),
bin('SLessThan', ref('right'), literal(0)))),
('left_size', un('SizeInBits', un('Abs', ref('left')))),
('signed_adjust', if_expr(ref('either_signed'), literal(1), literal(0))),
('right_size', un('SizeInBits',
bin('UDiv', un('Abs', ref('right')),
if_expr(ref('either_signed'), literal(4), literal(2))))),
('left_minus_right', if_expr(
bin('SLessThan', ref('left_size'), ref('right_size')),
literal(0),
bin('Sub', ref('left_size'), ref('right_size'))))
],
bin('Add',
ref('signed_adjust'),
if_expr(bin('Equal', ref('right'), literal(0)),
literal(0),
bin('UDiv', ref('left_minus_right'), literal(4))))
))
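# Illustrative sketch only: the two expressions above approximate SDIV latency
# as signed_adjust + (left_size - right_size) // 4, clamped at zero and forced
# to zero when the divisor is zero. The plain-Python mirror below is an
# assumption added for readability; it is not used by the timing model itself,
# and SizeInBits is rendered here roughly as int.bit_length().
def _approx_sdiv_extra_lat(left, right):
    """Rough Python rendering of sdiv_lat_expr, for reference only."""
    either_signed = left < 0 or right < 0
    signed_adjust = 1 if either_signed else 0
    left_size = abs(left).bit_length()
    right_size = (abs(right) // (4 if either_signed else 2)).bit_length()
    left_minus_right = max(left_size - right_size, 0)
    return signed_adjust + (0 if right == 0 else left_minus_right // 4)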
class HPI_SDIV_A1(MinorFUTiming):
description = 'HPI_SDIV_A1'
mask, match = a32_opcode('xxxx_0111_0001_xxxx__xxxx_xxxx_0001_xxxx')
extraCommitLat = 0
srcRegsRelativeLats = []
extraCommitLatExpr = sdiv_lat_expr
class HPI_SDIV_A64(MinorFUTiming):
description = 'HPI_SDIV_A64'
mask, match = a64_opcode('x001_1010_110x_xxxx__0000_11xx_xxxx_xxxx')
extraCommitLat = 0
srcRegsRelativeLats = []
extraCommitLatExpr = sdiv_lat_expr64
### SEL
class HPI_SEL_A1(MinorFUTiming):
description = 'HPI_SEL_A1'
mask, match = a32_opcode('xxxx_0110_1000_xxxx__xxxx_xxxx_1011_xxxx')
srcRegsRelativeLats = [0, 0, 0, 0, 2, 2, 0]
class HPI_SEL_A1_Suppress(MinorFUTiming):
description = 'HPI_SEL_A1_Suppress'
mask, match = a32_opcode('xxxx_0110_1000_xxxx__xxxx_xxxx_1011_xxxx')
srcRegsRelativeLats = []
suppress = True
class HPI_SHSAX_SSAX_UHSAX_USAX_A1(MinorFUTiming):
description = 'HPI_SHSAX_SSAX_UHSAX_USAX_A1'
mask, match = a32_opcode('xxxx_0110_0xx1_xxxx__xxxx_xxxx_0101_xxxx')
# As Default
srcRegsRelativeLats = [3, 3, 2, 2, 2, 1, 0]
class HPI_USUB_ETC_A1(MinorFUTiming):
description = 'HPI_USUB_ETC_A1'
mask, match = a32_opcode('xxxx_0110_0xx1_xxxx__xxxx_xxxx_x111_xxxx')
srcRegsRelativeLats = [0, 0, 0, 2, 2, 0]
class HPI_SMLABB_T1(MinorFUTiming):
description = 'HPI_SMLABB_T1'
mask, match = t32_opcode('1111_1011_0001_xxxx__xxxx_xxxx_00xx_xxxx')
srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0]
class HPI_SMLABB_A1(MinorFUTiming):
description = 'HPI_SMLABB_A1'
mask, match = a32_opcode('xxxx_0001_0000_xxxx__xxxx_xxxx_1xx0_xxxx')
srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0]
class HPI_SMLAD_T1(MinorFUTiming):
description = 'HPI_SMLAD_T1'
mask, match = t32_opcode('1111_1011_0010_xxxx__xxxx_xxxx_000x_xxxx')
srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0]
class HPI_SMLAD_A1(MinorFUTiming):
description = 'HPI_SMLAD_A1'
mask, match = a32_opcode('xxxx_0111_0000_xxxx__xxxx_xxxx_00x1_xxxx')
# z, z, z, l, r, a
srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0]
class HPI_SMLAL_T1(MinorFUTiming):
description = 'HPI_SMLAL_T1'
mask, match = t32_opcode('1111_1011_1100_xxxx__xxxx_xxxx_0000_xxxx')
class HPI_SMLAL_A1(MinorFUTiming):
description = 'HPI_SMLAL_A1'
mask, match = a32_opcode('xxxx_0000_111x_xxxx__xxxx_xxxx_1001_xxxx')
class HPI_SMLALBB_T1(MinorFUTiming):
description = 'HPI_SMLALBB_T1'
mask, match = t32_opcode('1111_1011_1100_xxxx__xxxx_xxxx_10xx_xxxx')
class HPI_SMLALBB_A1(MinorFUTiming):
description = 'HPI_SMLALBB_A1'
mask, match = a32_opcode('xxxx_0001_0100_xxxx__xxxx_xxxx_1xx0_xxxx')
class HPI_SMLALD_T1(MinorFUTiming):
description = 'HPI_SMLALD_T1'
mask, match = t32_opcode('1111_1011_1100_xxxx__xxxx_xxxx_110x_xxxx')
class HPI_SMLALD_A1(MinorFUTiming):
description = 'HPI_SMLALD_A1'
mask, match = a32_opcode('xxxx_0111_0100_xxxx__xxxx_xxxx_00x1_xxxx')
class HPI_SMLAWB_T1(MinorFUTiming):
description = 'HPI_SMLAWB_T1'
mask, match = t32_opcode('1111_1011_0011_xxxx__xxxx_xxxx_000x_xxxx')
srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0]
class HPI_SMLAWB_A1(MinorFUTiming):
description = 'HPI_SMLAWB_A1'
mask, match = a32_opcode('xxxx_0001_0010_xxxx__xxxx_xxxx_1x00_xxxx')
srcRegsRelativeLats = [0, 0, 0, 0, 0, 2, 0]
class HPI_SMLSD_A1(MinorFUTiming):
description = 'HPI_SMLSD_A1'
mask, match = a32_opcode('xxxx_0111_0000_xxxx__xxxx_xxxx_01x1_xxxx')
class HPI_SMLSLD_T1(MinorFUTiming):
description = 'HPI_SMLSLD_T1'
mask, match = t32_opcode('1111_1011_1101_xxxx__xxxx_xxxx_110x_xxxx')
class HPI_SMLSLD_A1(MinorFUTiming):
description = 'HPI_SMLSLD_A1'
mask, match = a32_opcode('xxxx_0111_0100_xxxx__xxxx_xxxx_01x1_xxxx')
class HPI_SMMLA_T1(MinorFUTiming):
description = 'HPI_SMMLA_T1'
mask, match = t32_opcode('1111_1011_0101_xxxx__xxxx_xxxx_000x_xxxx')
# ^^^^ != 1111
srcRegsRelativeLats = [0, 0, 0, 2, 0, 0, 0]
class HPI_SMMLA_A1(MinorFUTiming):
description = 'HPI_SMMLA_A1'
# Note that this must be after the encoding for SMMUL
mask, match = a32_opcode('xxxx_0111_0101_xxxx__xxxx_xxxx_00x1_xxxx')
# ^^^^ != 1111
srcRegsRelativeLats = [0, 0, 0, 2, 0, 0, 0]
class HPI_SMMLS_T1(MinorFUTiming):
description = 'HPI_SMMLS_T1'
mask, match = t32_opcode('1111_1011_0110_xxxx__xxxx_xxxx_000x_xxxx')
srcRegsRelativeLats = [0, 0, 0, 2, 0, 0, 0]
class HPI_SMMLS_A1(MinorFUTiming):
description = 'HPI_SMMLS_A1'
mask, match = a32_opcode('xxxx_0111_0101_xxxx__xxxx_xxxx_11x1_xxxx')
srcRegsRelativeLats = [0, 0, 0, 2, 0, 0, 0]
class HPI_SMMUL_T1(MinorFUTiming):
description = 'HPI_SMMUL_T1'
mask, match = t32_opcode('1111_1011_0101_xxxx__1111_xxxx_000x_xxxx')
srcRegsRelativeLats = [0, 0, 0, 0, 0, 0]
class HPI_SMMUL_A1(MinorFUTiming):
description = 'HPI_SMMUL_A1'
mask, match = a32_opcode('xxxx_0111_0101_xxxx__1111_xxxx_00x1_xxxx')
srcRegsRelativeLats = [0, 0, 0, 0, 0, 0]
class HPI_SMUAD_T1(MinorFUTiming):
description = 'HPI_SMUAD_T1'
mask, match = t32_opcode('1111_1011_0010_xxxx__1111_xxxx_000x_xxxx')
class HPI_SMUAD_A1(MinorFUTiming):
description = 'HPI_SMUAD_A1'
mask, match = a32_opcode('xxxx_0111_0000_xxxx__1111_xxxx_00x1_xxxx')
class HPI_SMULBB_T1(MinorFUTiming):
description = 'HPI_SMULBB_T1'
mask, match = t32_opcode('1111_1011_0001_xxxx__1111_xxxx_00xx_xxxx')
class HPI_SMULBB_A1(MinorFUTiming):
description = 'HPI_SMULBB_A1'
mask, match = a32_opcode('xxxx_0001_0110_xxxx__xxxx_xxxx_1xx0_xxxx')
class HPI_SMULL_T1(MinorFUTiming):
description = 'HPI_SMULL_T1'
mask, match = t32_opcode('1111_1011_1000_xxxx__xxxx_xxxx_0000_xxxx')
class HPI_SMULL_A1(MinorFUTiming):
description = 'HPI_SMULL_A1'
mask, match = a32_opcode('xxxx_0000_110x_xxxx__xxxx_xxxx_1001_xxxx')
class HPI_SMULWB_T1(MinorFUTiming):
description = 'HPI_SMULWB_T1'
mask, match = t32_opcode('1111_1011_0011_xxxx__1111_xxxx_000x_xxxx')
class HPI_SMULWB_A1(MinorFUTiming):
description = 'HPI_SMULWB_A1'
mask, match = a32_opcode('xxxx_0001_0010_xxxx__xxxx_xxxx_1x10_xxxx')
class HPI_SMUSD_T1(MinorFUTiming):
description = 'HPI_SMUSD_T1'
mask, match = t32_opcode('1111_1011_0100_xxxx__1111_xxxx_000x_xxxx')
class HPI_SMUSD_A1(MinorFUTiming):
description = 'HPI_SMUSD_A1'
mask, match = a32_opcode('xxxx_0111_0000_xxxx__1111_xxxx_01x1_xxxx')
class HPI_SSAT_USAT_no_shift_A1(MinorFUTiming):
description = 'HPI_SSAT_USAT_no_shift_A1'
# Order *before* shift
mask, match = a32_opcode('xxxx_0110_1x1x_xxxx__xxxx_0000_0001_xxxx')
srcRegsRelativeLats = [0, 0, 0, 2, 0]
class HPI_SSAT_USAT_shift_A1(MinorFUTiming):
description = 'HPI_SSAT_USAT_shift_A1'
# Order after shift
mask, match = a32_opcode('xxxx_0110_1x1x_xxxx__xxxx_xxxx_xx01_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 0]
class HPI_SSAT16_USAT16_A1(MinorFUTiming):
description = 'HPI_SSAT16_USAT16_A1'
mask, match = a32_opcode('xxxx_0110_1x10_xxxx__xxxx_xxxx_0011_xxxx')
srcRegsRelativeLats = [0, 0, 0, 2, 0]
class HPI_SXTAB_T1(MinorFUTiming):
description = 'HPI_SXTAB_T1'
mask, match = t32_opcode('1111_1010_0100_xxxx__1111_xxxx_1xxx_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 2, 0]
class HPI_SXTAB_SXTAB16_SXTAH_UXTAB_UXTAB16_UXTAH_A1(MinorFUTiming):
description = 'HPI_SXTAB_SXTAB16_SXTAH_UXTAB_UXTAB16_UXTAH_A1'
# Place AFTER HPI_SXTB_SXTB16_SXTH_UXTB_UXTB16_UXTH_A1
# e6[9d][^f]0070 are undefined
mask, match = a32_opcode('xxxx_0110_1xxx_xxxx__xxxx_xxxx_0111_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 2, 0]
class HPI_SXTAB16_T1(MinorFUTiming):
description = 'HPI_SXTAB16_T1'
mask, match = t32_opcode('1111_1010_0010_xxxx__1111_xxxx_1xxx_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 2, 0]
class HPI_SXTAH_T1(MinorFUTiming):
description = 'HPI_SXTAH_T1'
mask, match = t32_opcode('1111_1010_0000_xxxx__1111_xxxx_1xxx_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 2, 0]
class HPI_SXTB_T1(MinorFUTiming):
description = 'HPI_SXTB_T1'
mask, match = t16_opcode('1011_0010_01xx_xxxx')
class HPI_SXTB_T2(MinorFUTiming):
description = 'HPI_SXTB_T2'
mask, match = t32_opcode('1111_1010_0100_1111__1111_xxxx_1xxx_xxxx')
srcRegsRelativeLats = [0, 0, 0, 1, 2, 0]
class HPI_SXTB_SXTB16_SXTH_UXTB_UXTB16_UXTH_A1(MinorFUTiming):
description = 'HPI_SXTB_SXTB16_SXTH_UXTB_UXTB16_UXTH_A1'
# e6[9d]f0070 are undefined
mask, | |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Object responsible for a measurement execution.
Please note that the 'real' work of performing the tasks is handled by the
engine. This object handles all the other aspects (running of the hooks,
handling of the monitors, ...)
"""
import os
import logging
from time import sleep
from threading import Thread, RLock
import enaml
from atom.api import Atom, Typed, ForwardTyped, Value, Bool
from enaml.widgets.api import Window
from enaml.layout.api import InsertTab, FloatItem
from enaml.application import deferred_call, schedule
from .engines.api import BaseEngine, ExecutionInfos
from .measurement import Measurement
from ..utils.flags import BitFlag
from ..utils.traceback import format_exc
logger = logging.getLogger(__name__)
def plugin():
"""Delayed import to avoid circular references.
"""
from .plugin import MeasurementPlugin
return MeasurementPlugin
def schedule_and_block(func, args=(), kwargs={}, priority=100):
"""Schedule a function call on the main thread and wait for it to complete.
"""
scheduled = schedule(func, args, kwargs, priority=priority)
while scheduled.pending():
sleep(0.05)
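# Usage sketch (an assumption, shown only as a comment): the helper above lets
# a worker thread mutate UI-facing state safely, e.g.
# schedule_and_block(setattr, (processor, 'active', True)), blocking until the
# enaml main loop has actually executed the call.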
class MeasurementProcessor(Atom):
"""Object responsible for a measurement execution.
"""
#: Boolean indicating whether or not the processor is working.
active = Bool()
#: Reference to the measurement plugin.
plugin = ForwardTyped(plugin)
#: Currently run measurement or last measurement run.
running_measurement = Typed(Measurement)
#: Instance of the currently used engine.
engine = Typed(BaseEngine)
#: Boolean indicating whether or not process all enqueued measurements.
continuous_processing = Bool(True)
#: Monitors window
monitors_window = Typed(Window)
def start_measurement(self, measurement):
"""Start a new measurement.
"""
if self._thread and self._thread.is_alive():
self._state.set('stop_processing')
self._thread.join(5)
if self._thread.is_alive():
core = self.plugin.workbench.get_plugin('enaml.workbench.core')
cmd = 'exopy.app.errors.signal'
msg = ("Can't stop the running execution thread. Please "
"restart the application and consider reporting this "
"as a bug.")
core.invoke_command(cmd, dict(kind='error', message=msg))
return
if self.continuous_processing:
self._state.set('continuous_processing')
else:
self._state.clear('continuous_processing')
deferred_call(setattr, self, 'active', True)
self._thread = Thread(target=self._run_measurements,
args=(measurement,))
self._thread.daemon = True
self._thread.start()
def pause_measurement(self):
"""Pause the currently active measurement.
"""
logger.info('Pausing measurement %s.', self.running_measurement.name)
self.running_measurement.status = 'PAUSING'
self._state.set('pause_attempt')
if self._state.test('running_main'):
self.engine.pause()
self.engine.observe('status', self._watch_engine_state)
else:
if self._active_hook:
self._active_hook.pause()
self._active_hook.observe('paused', self._watch_hook_state)
def resume_measurement(self):
"""Resume the currently paused measurement.
"""
logger.info('Resuming measurement %s.', self.running_measurement.name)
self.running_measurement.status = 'RESUMING'
self._state.clear('paused')
self._state.set('resuming')
if self._state.test('running_main'):
self.engine.resume()
self.engine.observe('status', self._watch_engine_state)
else:
if self._active_hook:
self._active_hook.resume()
self._active_hook.observe('resumed',
self._watch_hook_state)
def stop_measurement(self, no_post_exec=False, force=False):
"""Stop the currently active measurement.
"""
if no_post_exec or force:
self._state.set('no_post_exec')
self._state.set('stop_attempt')
if self.running_measurement:
logger.info('Stopping measurement %s.',
self.running_measurement.name)
self.running_measurement.status = 'STOPPING'
if self._state.test('running_main'):
self.engine.stop(force)
else:
if self._active_hook:
self._active_hook.stop(force)
def stop_processing(self, no_post_exec=False, force=False):
"""Stop processing the enqueued measurements.
"""
if self.running_measurement:
logger.info('Stopping measurement %s.',
self.running_measurement.name)
if no_post_exec or force:
self._state.set('no_post_exec')
self._state.set('stop_attempt', 'stop_processing')
self._state.clear('processing')
if self._state.test('running_main'):
self.engine.stop(force)
else:
if self._active_hook:
self._active_hook.stop(force)
# =========================================================================
# --- Private API ---------------------------------------------------------
# =========================================================================
#: Background thread handling the measurement execution
_thread = Value()
#: Internal flags used to keep track of the execution state.
_state = Typed(BitFlag,
(('processing', 'running_pre_hooks', 'running_main',
'running_post_hooks', 'pause_attempt', 'paused',
'resuming', 'stop_attempt', 'stop_processing',
'no_post_exec', 'continuous_processing'),)
)
#: Hook currently executed. The value is meaningful only when
#: 'running_pre_hooks' or 'running_post_hooks' is set.
_active_hook = Value()
#: Lock to avoid race condition when pausing.
_lock = Value(factory=RLock)
def _run_measurements(self, measurement):
"""Run measurements (either all enqueued or only one)
This code is executed by a thread (stored in _thread)
Parameters
----------
measurement : Measurement
First measurement to run. Other measurements will be run in their
order of appearance in the queue if the user enable continuous
processing.
"""
# If the engine does not exist, create one.
plugin = self.plugin
if not self.engine:
engine = plugin.create('engine', plugin.selected_engine)
schedule_and_block(setattr, (self, 'engine', engine))
# Mark that we started processing measurements.
self._state.set('processing')
# Process enqueued measurement as long as we are supposed to.
while not self._state.test('stop_processing'):
# Clear the internal state to start fresh.
self._clear_state()
# If we were provided with a measurement use it, otherwise find the
# next one.
if measurement:
meas = measurement
measurement = None
else:
meas = self.plugin.find_next_measurement()
# If there is a measurement register it as the running one, update
# its status and log its execution.
if meas is not None:
meas_id = meas.name + '_' + meas.id
self._set_measurement_state('RUNNING',
'The measurement is being run.',
meas)
msg = 'Starting execution of measurement %s'
logger.info(msg % meas.name + meas.id)
status, infos = self._run_measurement(meas)
# Release runtime dependencies.
meas.dependencies.release_runtimes()
# If no measurement remains stop.
else:
break
# Log the result.
mess = 'Measurement %s processed, status : %s' % (meas_id, status)
if infos:
mess += '\n' + infos
logger.info(mess)
# Update the status and infos.
self._set_measurement_state(status, infos, clear=True)
# If we are supposed to stop, stop.
if (not self._state.test('continuous_processing') or
self._state.test('stop_processing')):
break
if self.engine and self.plugin.engine_policy == 'stop':
self._stop_engine()
self._state.clear('processing')
deferred_call(setattr, self, 'active', False)
def _run_measurement(self, measurement):
"""Run a single measurement.
"""
# Switch to running state.
measurement.enter_running_state()
meas_id = measurement.name + '_' + measurement.id
# Collect runtime dependencies
res, msg, errors = measurement.dependencies.collect_runtimes()
if not res:
status = 'SKIPPED' if 'unavailable' in msg else 'FAILED'
return status, msg + '\n' + errors_to_msg(errors)
# Records that we got access to all the runtimes.
mess = ('The use of all runtime resources have been granted to the '
'measurement %s' % meas_id)
logger.info(mess.replace('\n', ' '))
# Run checks now that we have all the runtimes.
if not measurement.forced_enqueued:
res, errors = measurement.run_checks()
if not res:
msg = 'Measurement %s failed to pass the checks :\n' % meas_id
return 'FAILED', msg + errors_to_msg(errors)
# Now that we know the measurement is going to run save it.
default_filename = meas_id + '.meas.ini'
path = os.path.join(measurement.root_task.default_path,
default_filename)
measurement.save(path)
logger.info('Starting measurement {}.'.format(meas_id))
# Execute all pre-execution hooks.
result, errors = self._run_pre_execution(measurement)
if not result:
msg = ('Measurement %s failed to run pre-execution hooks :\n' %
meas_id)
return 'FAILED', msg + errors_to_msg(errors)
result = True
errors = {}
if self._check_for_pause_or_stop():
# Connect new monitors, and start them.
logger.debug('Connecting monitors for measurement %s',
meas_id)
self._start_monitors(measurement)
# Assemble the task infos for the engine to run the main task.
deps = measurement.dependencies
infos = ExecutionInfos(
id=meas_id+'-main',
task=measurement.root_task,
build_deps=deps.get_build_dependencies().dependencies,
runtime_deps=deps.get_runtime_dependencies('main'),
observed_entries=measurement.collect_monitored_entries(),
checks=not measurement.forced_enqueued,
)
# Ask the engine to perform the main task.
logger.debug('Passing measurement %s to the engine.',
meas_id)
self._state.set('running_main')
execution_result = self.engine.perform(infos)
self._state.clear('running_main')
# Record the result and store engine return value in the
# measurement for the post execution hooks.
result &= execution_result.success
errors.update(execution_result.errors)
measurement.task_execution_result = execution_result
# Disconnect monitors.
logger.debug('Disconnecting monitors for measurement %s',
meas_id)
self._stop_monitors(measurement)
# Save the stop_attempt state to allow to run post execution if we
# are supposed to do so.
state = self._state.test('stop_attempt')
self._state.clear('stop_attempt')
# Execute all post-execution hooks if pertinent.
if not self._state.test('no_post_exec'):
res, errors = self._run_post_execution(measurement)
result &= res
if state:
self._state.set('stop_attempt')
if self._state.test('stop_attempt'):
return ('INTERRUPTED',
'The measurement has been interrupted by the user.')
if not result:
if not execution_result.success:
msg = 'Execution of the main task failed :\n'
else:
msg = 'Some post-execution hook failed to run :\n'
return 'FAILED', msg + errors_to_msg(errors)
return 'COMPLETED', 'The measurement successfully completed.'
def _run_pre_execution(self, measurement):
"""Run pre measurement execution operations.
Returns
-------
result : bool
Boolean indicating whether or not the operations succeeded.
report : dict
Dict storing the errors (as dict) by id of the operation in which
they occured.
"""
result = True
full_report = {}
self._state.set('running_pre_hooks')
meas_id = measurement.name + '_' + measurement.id
for id, hook in measurement.pre_hooks.items():
if not self._check_for_pause_or_stop():
break
logger.debug('Calling pre-measurement hook %s for measurement %s',
id, meas_id)
with self._lock:
self._active_hook = hook
try:
hook.run(self.plugin.workbench, self.engine)
except Exception:
result = False
full_report[id] = format_exc()
# Prevent issues with pausing/resuming
with self._lock:
self._active_hook.unobserve('paused', self._watch_hook_state)
self._active_hook = None
self._state.clear('running_pre_hooks')
return result, full_report
def _run_post_execution(self, measurement):
"""Run post measurement operations.
Parameters
----------
measurement : Measurement
Returns
-------
result : bool
Boolean indicating whether or not the operations succeeded.
report : dict
Dict storing the errors (as dict) by id of the operation in which
they occured.
"""
result = True
full_report = {}
self._state.set('running_post_hooks')
meas_id = measurement.name + '_' + measurement.id
for id, hook in measurement.post_hooks.items():
if not self._check_for_pause_or_stop():
break
logger.debug('Calling | |
#!/usr/bin/env python
version=1.8
nimda = """
{}
mm m mmmmm m m mmmm mm
#"m # # ## ## # "m ##
# #m # # # ## # # # # #
# # # # # "" # # # #mm#
# ## mm#mm # # #mmm" # #.py
{} v {} {}
"""
try:
import argparse
import operator
import requests
import datetime
import time
import sys
import os
except ImportError:
raise ImportError('<Errors occurred :(. Some importing problem detected>')
initTime = datetime.datetime.now().time()
def checkForUpdates():
try:
req = requests.get("https://raw.githubusercontent.com/bichiko/nimda.py/master/nimda.py")
lines = req.text.split('\n')
for line in lines:
if "version" in line:
servVersion = float(line.split('=')[1])
if servVersion > version:
usrans = raw_input("New version (%s) is avalialbe. Do you want to update it now? (Y/n) " % (servVersion))
if usrans.lower() == 'y':
f = open(__file__,"w+")
f.write(req.text)
f.close()
print "Nimda.py has been updated. v %s"%servVersion
print "Please run it again"
sys.exit(0)
break
except Exception:
print "Error: In update checking"
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class CliPrint:
def __init__(self):
self.currTime = datetime.datetime.now().time()
def printLogo(self):
print nimda.format(bcolors.WARNING,bcolors.FAIL, version, bcolors.ENDC)
def headerText(self, this):
print "[{}] Trying combination of username(s) {} with provided passwords from {} file".format(initTime,this.usernames, this.passwordsTxt)
print "[%s] Brute-forcing %s" % (initTime,this.url)
print "[%s] Delay is %s milliseconds" % (initTime,this.delaySec)
print "[%s] Request method : %s" % (initTime,this.method.upper())
def errorText(self, text, ext = False):
print '['+str(self.currTime)+'] '+bcolors.FAIL+str(text)+bcolors.ENDC
sys.exit(0) if ext else None
def infoText(self, text, ext = False):
print '['+str(self.currTime)+'] '+bcolors.OKBLUE+str(text)+bcolors.ENDC+'\n'
sys.exit(0) if ext else None
def warnText(self, text, ext = False):
print '['+str(self.currTime)+'] '+bcolors.WARNING+str(text)+bcolors.ENDC+'\n'
sys.exit(0) if ext else None
def purpleText(self, text, ext = False):
print '['+str(self.currTime)+'] '+bcolors.HEADER+str(text)+bcolors.ENDC
sys.exit(0) if ext else None
def getTime(self):
return str(self.currTime)
try:
from pyquery import PyQuery
except Exception:
CliPrint().errorText('Error: You don\'t have library pyquery')
CliPrint().infoText('Please run command: sudo pip install pyquery', True)
try:
from time import sleep
except Exception:
CliPrint().errorText('Error: Probably you don\'t have library time or sleep')
CliPrint().infoText('Please run command: sudo pip install time/sleep', True)
class Brute:
"""Main class for BruteForce."""
def __init__(self):
self.breakFirstMatch = False
self.responseHeader = False
self.responseHtml = False
self.csrfEnabled = False
self.progresBar = False
self.debugging = False
self.verbose = False
self.delaySec = 0
self.statusCode = 0
self.requestsCounter = 0
self.correctCredentials = []
self.startTime = time.time()
self.csrfSelector = ''
self.contentText = ''
self.notContentText = ''
self.contentHeader = ''
self.progressDots = ''
self.notContentHeader = ''
self.setCookie = ''
self.usernames = None
self.url = None
self.passwordsTxt = None
self.postJson = dict()
self.formName = dict()
self.ses = requests.session()
self.os = 'win' if os.name == 'nt' else 'lin'
self.cookie = None
self.useragent = None
self.sslVerify = False
self.redirectCheck = True
self.method = 'POST'
self.tm_now = time.time()
self.tm_prev = 0.0
self.ss = 0
self.mm = 0
self.hh = 0
self.dd = 0
def getCookie(self):
cookieDict = dict()
if self.cookie is None:
return ''
cookieJar = [x.split('=') for x in self.cookie.split(";")]
[cookieDict.update({key[0].strip():key[1].strip()}) for key in cookieJar]
return cookieDict
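# Worked example (comment only, with hypothetical cookie values): setting
# self.cookie to "PHPSESSID=abc123; security=low" makes getCookie() return
# {'PHPSESSID': 'abc123', 'security': 'low'}; with no cookie it returns ''.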
#Method URL setter
def setUrl(self, url):
self.url = url
return self
#CSRF setter method
def setCsrf(self, csrf):
self.formName.update({'csrf':csrf})
self.csrfEnabled = True
return self
#usernames setter
def setUsernames(self, usernames):
try:
self.formName.update({'username':usernames.split('=')[0]})
self.usernames = usernames.split('=')[1].split(',')
except Exception:
CliPrint().errorText('Error: username isn\'t specified correctly')
CliPrint().infoText('syntax: username=\'user=admin,root\'', True)
#passwords setter method
def setPasswords(self, passwdTxt):
try:
self.formName.update({'password':passwdTxt.split('=')[0]})
self.passwordsTxt = passwdTxt.split('=')[1]
except Exception:
CliPrint().errorText('Error: password isn\'t specified correctly')
CliPrint().infoText('syntax: password=\'<PASSWORD>=<PASSWORD>.<PASSWORD>\'', True)
#post data setter
def setData(self, pData):
""" ppdata is without usernames and passwords """
try:
pdt = pData.split('&')
for x in range(0, len(pdt)):
currel = pdt[x].split('=')
self.postJson.update({currel[0]:currel[1]})
except Exception:
CliPrint().errorText('Error: Can\'t parse data')
CliPrint().infoText('syntax: data=\'param1=val1¶m2=val2&signin=Sign In\'', True)
# send an empty request to initialize parameters
def sendEmptyPostRequest(self):
tmpJson = self.postJson
tmpJson.update({self.formName['username']:'00000000'})
tmpJson.update({self.formName['password']:'<PASSWORD>'})
if self.csrfEnabled == True:
self.postJson.update({self.formName['csrf']:'00000000'})
try:
if self.method.lower() == 'post':
firstReq = self.ses.post(self.url, data = tmpJson, verify = self.sslVerify, cookies=self.getCookie(), headers={'user-agent':self.useragent})
else:
firstReq = self.ses.get(self.url, data = tmpJson, verify = self.sslVerify, cookies=self.getCookie(), headers={'user-agent':self.useragent})
except Exception:
CliPrint().errorText('Error: Can\'t send 1st request', True)
return firstReq
#find CSRF token in response HTML get element and return it
def getCsrfToken(self, response, selector):
try:
pq = PyQuery(response.text)
tag = pq(selector)
except Exception:
CliPrint().errorText('Error: Can\'t parse response HTML document', True)
return tag
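# Usage sketch (hypothetical selector, shown only as a comment): with
# csrfSelector set to something like 'input[name="csrf_token"]',
# getCsrfToken(response, self.csrfSelector).val() returns the token value from
# the login form, or None if the selector matches nothing.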
def correctValOutput(self,PV,text,redir = False, corct = True):
# reset session
correctValue = None
self.progressDots += bcolors.OKGREEN +'*'+bcolors.ENDC if len(self.progressDots) < 10000 else ''
stat_code = PV[8] if redir else PV[3]
correct = PV[0] if corct else PV[7]
if self.verbose:
correctValue = "{}{} : {}, data: {}{}".format(correct,text,stat_code,PV[4],PV[5])
else:
correctValue = "{}{}:{}{}".format(correct,PV[1],PV[2],PV[5])
#print correct value in specified mode
print '['+CliPrint().getTime()+'] '+correctValue
#save credentials in the array
self.correctCredentials.append(correctValue)
self.ses = requests.session()
def startProccessing(self):
# Print header/welcome text
CliPrint().headerText(self)
#grab CSRF token value from previous request
csrf_token = self.getCsrfToken(self.sendEmptyPostRequest(), self.csrfSelector).val()
#get a size of the dictionary
sizeOfDict = sum(1 for line in open(self.passwordsTxt))
#loop usernames
for usrnms in self.usernames:
#open passwords dictionary as _dict variable
with open(self.passwordsTxt) as _dict:
#loop _dict array and read value line by line
for passwd in _dict:
#Just count my requests
self.requestsCounter+=1
#sleep in milliseconds if value is defined by user
# otherwise it is 0 by default.
#speed of requests depends on network condition
#every new request waits for the response to parse important data like the csrf token and then tries to proceed
sleep(float(self.delaySec) / 1000) #milliseconds
# remove previous csrf value if csrf mode is enabled
if self.csrfEnabled == True:
del self.postJson[self.formName['csrf']]
#delete previous values from formdata list
del self.postJson[self.formName['username']]
del self.postJson[self.formName['password']]
# If csrf mode is enabled then add new key:value in formdata
if self.csrfEnabled == True:
self.postJson.update({self.formName['csrf'] : csrf_token})
#update formdata with new value of username
self.postJson.update({self.formName['username'] : usrnms})
# remove \n endlines from txt file
# and update password value
self.postJson.update({self.formName['password'] : passwd.rstrip()})
# debugging mode is on then print Post data
if self.debugging == True:
print self.postJson
# try to send request with current session
# ignore ssl check
try:
if self.method.lower() == 'post':
req = self.ses.post(self.url, data = self.postJson, verify = self.sslVerify, cookies=self.getCookie(), headers={'user-agent':self.useragent})
else:
req = self.ses.get(self.url, data = self.postJson, verify = self.sslVerify, cookies=self.getCookie(), headers={'user-agent':self.useragent})
except requests.exceptions.HTTPError as errh:
CliPrint().errorText("Http Error :"+errh, True)
except requests.exceptions.ConnectionError as errc:
CliPrint().errorText("Error Connecting :"+errc, True)
except requests.exceptions.Timeout as errt:
CliPrint().errorText("Timeout Error :"+errt, True)
except requests.exceptions.RequestException as err:
CliPrint().errorText("Error: Something happened "+err, True)
#spinner. Custom loading gif
if self.verbose != True:
os.system('cls') if self.os == 'win' else os.system('clear')
mySpinner = '\ '
if self.requestsCounter % 4 == 0:
mySpinner = '\ '
elif self.requestsCounter % 4 == 1:
mySpinner = '| '
elif self.requestsCounter % 4 == 2:
mySpinner = '/ '
else:
mySpinner = '- '
# if not verbose mode the output just correct credentials
if self.verbose != True:
CliPrint().headerText(self)
for cr in self.correctCredentials:
print ' - '+ cr
CliPrint().purpleText("{} : {}".format(usrnms, passwd.rstrip()))
CliPrint().purpleText("{} out of {}".format(self.requestsCounter, sizeOfDict*len(self.usernames)))
print "{}".format(self.progressDots) if self.progresBar == True else None
CliPrint().purpleText("{} {} seconds elapsed".format(mySpinner, time.time() - self.startTime))
PV = [bcolors.OKGREEN, usrnms, passwd.rstrip(),req.status_code, self.postJson, bcolors.ENDC, bcolors.FAIL,bcolors.HEADER,req.history]
if (int(self.statusCode) == int(req.status_code)) or ((self.contentText != '' and self.contentText in req.text) or (self.notContentText != '' and self.notContentText not in req.text)) or ((self.contentHeader != '' and self.contentHeader in req.text) or (self.notContentHeader != '' and self.notContentHeader not in req.text)):
self.correctValOutput(PV,'Correct! status-code');break
elif self.redirectCheck == True and len(req.history)>0:
self.correctValOutput(PV,'Correct! redirect-code',True);break
elif self.csrfEnabled and csrf_token == None:
self.correctValOutput(PV,'Possible combination! can\'t find csrf_token',False, False);break
else:
self.progressDots += bcolors.FAIL +'.'+bcolors.ENDC if len(self.progressDots) < 10000 else ''
CliPrint().errorText("{}WRONG! {}:{}, data: {}{}".format(PV[6],PV[1],PV[2],PV[4],PV[5])) if self.verbose == True else None
CliPrint().warnText("response-HTML: {}".format(req.text.encode('utf-8'))) if self.responseHtml == True else None
CliPrint().warnText("response-header: {}".format(req.headers)) if self.responseHeader == True else None
if self.csrfEnabled == True:
csrf_token = self.getCsrfToken(req, self.csrfSelector).val()
#save current time value
self.tm_prev=time.time()
#print logo in the end
CliPrint().printLogo() if self.verbose else None
print "Done in {} seconds".format(time.time() - self.startTime)
for cr in self.correctCredentials:
print cr
if len(self.correctCredentials) == 0:
CliPrint().errorText('%sSorry we couldn\'t find any matched credentials%s' % (bcolors.FAIL, bcolors.ENDC))
if | |
the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
:param Mapping[str, 'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesRequestsArgs'] requests: Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
if limits is not None:
pulumi.set(__self__, "limits", limits)
if requests is not None:
pulumi.set(__self__, "requests", requests)
@property
@pulumi.getter
def limits(self) -> Optional[Mapping[str, 'outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesLimits']]:
"""
Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "limits")
@property
@pulumi.getter
def requests(self) -> Optional[Mapping[str, 'outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesRequests']]:
"""
Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
"""
return pulumi.get(self, "requests")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesLimits(dict):
def __init__(__self__):
pass
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersResourcesRequests(dict):
def __init__(__self__):
pass
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContext(dict):
"""
SecurityContext is not allowed for ephemeral containers.
"""
def __init__(__self__, *,
allow_privilege_escalation: Optional[bool] = None,
capabilities: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextCapabilities'] = None,
privileged: Optional[bool] = None,
proc_mount: Optional[str] = None,
read_only_root_filesystem: Optional[bool] = None,
run_as_group: Optional[int] = None,
run_as_non_root: Optional[bool] = None,
run_as_user: Optional[int] = None,
se_linux_options: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextSeLinuxOptions'] = None,
windows_options: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextWindowsOptions'] = None):
"""
SecurityContext is not allowed for ephemeral containers.
:param bool allow_privilege_escalation: AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN
:param 'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextCapabilitiesArgs' capabilities: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param bool privileged: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
:param str proc_mount: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
:param bool read_only_root_filesystem: Whether this container has a read-only root filesystem. Default is false.
:param int run_as_group: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param bool run_as_non_root: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param int run_as_user: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param 'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextSeLinuxOptionsArgs' se_linux_options: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param 'SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextWindowsOptionsArgs' windows_options: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
if allow_privilege_escalation is not None:
pulumi.set(__self__, "allow_privilege_escalation", allow_privilege_escalation)
if capabilities is not None:
pulumi.set(__self__, "capabilities", capabilities)
if privileged is not None:
pulumi.set(__self__, "privileged", privileged)
if proc_mount is not None:
pulumi.set(__self__, "proc_mount", proc_mount)
if read_only_root_filesystem is not None:
pulumi.set(__self__, "read_only_root_filesystem", read_only_root_filesystem)
if run_as_group is not None:
pulumi.set(__self__, "run_as_group", run_as_group)
if run_as_non_root is not None:
pulumi.set(__self__, "run_as_non_root", run_as_non_root)
if run_as_user is not None:
pulumi.set(__self__, "run_as_user", run_as_user)
if se_linux_options is not None:
pulumi.set(__self__, "se_linux_options", se_linux_options)
if windows_options is not None:
pulumi.set(__self__, "windows_options", windows_options)
@property
@pulumi.getter(name="allowPrivilegeEscalation")
def allow_privilege_escalation(self) -> Optional[bool]:
"""
AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN
"""
return pulumi.get(self, "allow_privilege_escalation")
@property
@pulumi.getter
def capabilities(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextCapabilities']:
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
"""
return pulumi.get(self, "capabilities")
@property
@pulumi.getter
def privileged(self) -> Optional[bool]:
"""
Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.
"""
return pulumi.get(self, "privileged")
@property
@pulumi.getter(name="procMount")
def proc_mount(self) -> Optional[str]:
"""
procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.
"""
return pulumi.get(self, "proc_mount")
@property
@pulumi.getter(name="readOnlyRootFilesystem")
def read_only_root_filesystem(self) -> Optional[bool]:
"""
Whether this container has a read-only root filesystem. Default is false.
"""
return pulumi.get(self, "read_only_root_filesystem")
@property
@pulumi.getter(name="runAsGroup")
def run_as_group(self) -> Optional[int]:
"""
The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_group")
@property
@pulumi.getter(name="runAsNonRoot")
def run_as_non_root(self) -> Optional[bool]:
"""
Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_non_root")
@property
@pulumi.getter(name="runAsUser")
def run_as_user(self) -> Optional[int]:
"""
The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "run_as_user")
@property
@pulumi.getter(name="seLinuxOptions")
def se_linux_options(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextSeLinuxOptions']:
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "se_linux_options")
@property
@pulumi.getter(name="windowsOptions")
def windows_options(self) -> Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextWindowsOptions']:
"""
The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
"""
return pulumi.get(self, "windows_options")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextCapabilities(dict):
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
"""
def __init__(__self__, *,
add: Optional[Sequence[str]] = None,
drop: Optional[Sequence[str]] = None):
"""
The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.
:param Sequence[str] add: Added capabilities
:param Sequence[str] drop: Removed capabilities
"""
if add is not None:
pulumi.set(__self__, "add", add)
if drop is not None:
pulumi.set(__self__, "drop", drop)
@property
@pulumi.getter
def add(self) -> Optional[Sequence[str]]:
"""
Added capabilities
"""
return pulumi.get(self, "add")
@property
@pulumi.getter
def drop(self) -> Optional[Sequence[str]]:
"""
Removed capabilities
"""
return pulumi.get(self, "drop")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecEphemeralContainersSecurityContextSeLinuxOptions(dict):
"""
The SELinux context to be applied to the container. If unspecified, the container runtime will allocate | |
import itertools
import networkx as nx
def get_animate_words():
animate_words = set(line.strip() for line in open('../animacy/animate.unigrams.txt', 'r', encoding='utf8'))
animate_words.update({"i", "me", "myself", "mine", "my", "we", "us", "ourself", "ourselves", "ours", "our",
"you", "yourself", "yours", "your", "yourselves", "he", "him", "himself", "his", "she",
"her", "herself", "hers", "her", "one", "oneself", "one's", "they", "them", "themself",
"themselves", "theirs", "their", "they", "them", "'em", "themselves", "who", "whom",
"whose"})
return animate_words
def get_inanimate_words():
inanimate_words = set(line.strip() for line in open('../animacy/inanimate.unigrams.txt', 'r', encoding='utf8'))
inanimate_words.update({"it", "itself", "its", "where", "when"})
return inanimate_words
ANIMATE = get_animate_words()
INANIMATE = get_inanimate_words()
class SubClauseFinder:
def __init__(self):
# target relations of dependency parsing
self.TARGET_RELATIONS = {'relcl', 'advcl', 'ccomp', 'csubj', 'csubjpass', 'xcomp'}
def get_dependency_tree(self, root):
# SpaCy's dependency parse doesn't expose the tree directly, so start from the root token
# and walk down level by level via .children
dependency_tree = [root]
while sum([len(list(tok.children)) for tok in dependency_tree[-1]]) > 0:
dependency_tree.append(list(itertools.chain.from_iterable(
[list(tok.children) for tok in dependency_tree[-1]])))
dependency_tree = list(itertools.chain.from_iterable(dependency_tree))
return dependency_tree
def get_subclauses(self, annotated_sent):
root = [token for token in annotated_sent if token.dep_ == 'ROOT']
dependency_tree = self.get_dependency_tree(root)
# iterate the edges to find dependent clauses relations
subordinate_edges = []
for clause_root in dependency_tree:
if clause_root.dep_ in self.TARGET_RELATIONS:
subordinate_edges.append(clause_root)
subclauses = []
for clause_root in subordinate_edges:
clause_type = self.identify_clause_type(clause_root.dep_)
# extract information of specific clause type
if clause_type == 'RELATIVE':
clause = RelativeClause(annotated_sent, clause_root)
elif clause_type == 'ADJUNCT':
clause = AdjunctClause(annotated_sent, clause_root)
elif clause_type == 'COMPLEMENT':
clause = ComplementClause(annotated_sent, clause_root)
else:
raise ValueError
subclauses.append(clause)
return subclauses
def identify_clause_type(self, clause_root_dep):
if clause_root_dep == 'relcl':
return 'RELATIVE'
elif clause_root_dep == 'advcl':
return 'ADJUNCT'
elif clause_root_dep in {'ccomp', 'csubj', 'csubjpass', 'xcomp'}:
return 'COMPLEMENT'
else:
raise ValueError
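# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how SubClauseFinder might be driven with spaCy. The model name
# 'en_core_web_sm' and the sample sentence are assumptions for illustration only; the
# function is never called at import time.
def _demo_subclause_finder():
    import spacy  # assumes an English spaCy model is installed
    nlp = spacy.load('en_core_web_sm')
    doc = nlp("The book that she wrote became famous because it was honest.")
    finder = SubClauseFinder()
    for sent in doc.sents:
        for clause in finder.get_subclauses(sent):
            # each clause is a RelativeClause / AdjunctClause / ComplementClause instance
            print(clause.get_clause_type(), clause.get_clause_span().text)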
class SubordinateClause:
# Abstract class for storing subordinate clause information.
# Actual subordinate clauses extend this class.
def __init__(self, annotated_sent, clause_root):
self.annotated_sent = annotated_sent
self.clause_root = clause_root
self.clause_root_dep = clause_root.dep_
# identify clause finiteness
self.is_finite = None
# type of subordinate clause
self.clause_type = None
self.clause_span = None
# the subordinator is 'mark' in adverbial and complement clauses but 'ref' in relative clauses
self.subordinator = None
# level of embeddedness, main clause at level 0
self.embeddedness = None
def get_is_finite(self):
if self.is_finite is None:
self.identify_finiteness()
return self.is_finite
def get_clause_type(self):
if self.clause_type is None:
self.identify_clause_type()
return self.clause_type
def get_clause_span(self):
if self.clause_span is None:
self.set_clause_span()
return self.clause_span
def get_subordinator(self):
if self.subordinator is None:
self.identify_subordinator()
return self.subordinator
def get_embeddedness(self):
if self.embeddedness is None:
self.count_embeddedness()
return self.embeddedness
def referent_dependency(self, outedge):
"""
https://www.mathcs.emory.edu/~choi/doc/cu-2012-choi.pdf
A referent is the relation between a wh-complementizer in a relative clause and its referential head. In
Referent relations are represented as secondary dependencies because integrating them with
other dependencies breaks the single-head tree property (e.g., which would have multiple heads in Figure 28).
"""
# TODO: Not Implemented
return False
def identify_subordinator(self):
# for relative clauses, find the "referent"
if self.get_clause_type() == 'RELATIVE':
head_noun = self.clause_root.head
for child in head_noun.children:
if self.referent_dependency(child):
self.subordinator = child
else:
# for adverbial and complement clauses, find the "mark"
for child in self.clause_root.children:
child_dep = child.dep_
if child_dep == 'mark': # MARKER
self.subordinator = child
def set_clause_span(self):
self.clause_span = self.make_span(self.clause_root)
def count_embeddedness(self):
sent_root = [token for token in self.annotated_sent if token.dep_ == 'ROOT'][0]
if sent_root.head.i == sent_root.i or sent_root.i == self.clause_root.i:
self.embeddedness = 1
return
# find number of edges to go from clause root to sent root
# Load spaCy's dependency tree into a networkx graph
edges = []
for token in self.annotated_sent:
for child in token.children:
edges.append(('{0}'.format(token.i),
'{0}'.format(child.i)))
graph = nx.Graph(edges)
# Get the length and path
levels = nx.shortest_path_length(graph, source=str(self.clause_root.i), target=str(sent_root.i))
# store the result so get_embeddedness() can return it
self.embeddedness = levels
def identify_clause_type(self):
if self.clause_root_dep == 'relcl':
self.clause_type = 'RELATIVE'
elif self.clause_root_dep == 'advcl':
self.clause_type = 'ADJUNCT'
elif self.clause_root_dep in {'ccomp', 'csubj', 'csubjpass', 'xcomp'}:
self.clause_type = 'COMPLEMENT'
def identify_finiteness(self):
"""
check if the sub clause finite
Finite clauses are clauses that contain verbs which show tense. Otherwise they are nonfinite.
some examples:
I had something to eat [before leaving].
[After having spent six hours at the hospital], they eventually came home.
[Helped by local volunteers], staff at the museum have spent many years cataloguing photographs.
He left the party and went home, [not having anyone to talk to].
The person to ask [about going to New Zealand] is Beck.
You have to look at the picture really carefully [in order to see all the detail].
"""
# xcomp is nonfinite by definition
if self.clause_root_dep == 'xcomp':
self.is_finite = False
return
# the verb is the root of the clause
idx_word_before_verb = self.clause_root.i - 1
verb_pos = self.clause_root.tag_  # fine-grained Penn tag (e.g. VBG, VBN), not the coarse .pos_
if idx_word_before_verb < self.annotated_sent.start:
if verb_pos in {"VBG" "VBN"}:
self.is_finite = False
return
else:
# not VBG or VBN, then finite
self.is_finite = True
return
wordBeforeVerb = self.annotated_sent[idx_word_before_verb - self.annotated_sent.start]
# if the verb follows TO or a preposition, it is nonfinite
posWordBeforeVerb = wordBeforeVerb.tag_
if posWordBeforeVerb in {"IN", "TO"}:
self.is_finite = False
return
# if verb is gerund (VBG), it must have an aux, otherwise nonfinite
if verb_pos == "VBG":
hasAux = False
# check if there is aux
for child in self.clause_root.children:  # iterate the clause root's direct dependents
rel = child.dep_
if rel == "aux":
hasAux = True
if not hasAux:
self.is_finite = False
return
# if verb is past participle (VBN), it must have aux/auxpass which is not VBGs, otherwise non-finite
if verb_pos == "VBN":
vbg_aux = False
# check if there is aux that is not in gerund form
for child in self.clause_root.children:  # iterate the clause root's direct dependents
if child.dep_ in {"aux" "auxpass"}:
# get the fine-grained tag of the aux
aux = child
auxPOS = aux.tag_
if auxPOS == "VBG":
vbg_aux = True
if vbg_aux:
self.is_finite = False
return
self.is_finite = True
def make_span(self, word):
i = word.i - self.annotated_sent.start
span = self.annotated_sent[
self.annotated_sent[i].left_edge.i - self.annotated_sent.start:
self.annotated_sent[i].right_edge.i + 1 - self.annotated_sent.start]
return span
class RelativeClause(SubordinateClause):
def __init__(self, annotated_sent, clause_root):
super().__init__(annotated_sent, clause_root)
self.clause_type = "RELATIVE"
# further information related to relative clause
self.is_restrictive = None
# head noun
# TODO: Decide if noun chunking should be used
self.head_noun = self.clause_root.head
self.is_head_noun_animate = None
# head noun role in main clause
self.head_noun_role_in_main_clause = None
self.head_noun_role_in_sub_clause = None
# relative clauses's embeddedness is different from the other two types of clause
self.embeddedness = max(self.get_embeddedness() - 1, 1)
def get_head_noun(self):
return self.head_noun
def get_head_noun_animacy(self):
if self.get_head_noun() is not None:
if self.is_head_noun_animate is None:
self.set_head_noun_animacy()
return self.is_head_noun_animate
def get_head_noun_role_in_main_clause(self):
if self.get_head_noun() is not None:
if self.head_noun_role_in_main_clause is None:
self.set_head_noun_roles()
return self.head_noun_role_in_main_clause
def get_head_noun_role_in_sub_clause(self):
if self.get_head_noun() is not None:
if self.head_noun_role_in_sub_clause is None:
self.set_head_noun_roles()
return self.head_noun_role_in_sub_clause
def get_is_restrictive(self):
if self.is_restrictive is None:
self.set_restrictiveness()
return self.is_restrictive
def set_head_noun_animacy(self):
# TODO: use alternate method to detect animacy (Language Models)
if self.get_head_noun().text.lower() in ANIMATE:
self.is_head_noun_animate = True
else:
self.is_head_noun_animate = False
def set_head_noun_roles(self):
# TODO: Check function
# https://www.brighthubeducation.com/english-homework-help/32754-the-functions-of-nouns-and-noun-phrases/
is_from_inside_rc = False
edge = self.get_head_noun()
relation = edge.dep_
head_idx = edge.head.i
# see if it is from inside or outside of the RC
span = self.get_clause_span()
if span.start <= head_idx <= span.end-1:
is_from_inside_rc = True
if relation in {'nsubj', 'nsubjpass'}:
self.set_role('SUBJECT', is_from_inside_rc)
elif relation == 'dobj':
self.set_role('DIRECT_OBJECT', is_from_inside_rc)
elif relation == 'pobj': # 'iobj'
self.set_role('INDIRECT_OBJECT', is_from_inside_rc)
elif relation == 'nmod':
self.set_role('PREPOSITION_COMPLEMENT', is_from_inside_rc)
elif relation == 'appos':
self.set_role('APPOSITIVE', is_from_inside_rc)
def set_role(self, role, is_from_inside_rc):
if is_from_inside_rc:
self.head_noun_role_in_sub_clause = role
else:
self.head_noun_role_in_main_clause = role
def set_restrictiveness(self):
# if zero relativizer or "that", restrictive
subordinator = self.get_subordinator()
if subordinator is None or subordinator.text.lower() == "that":
self.is_restrictive = True
return
head_noun = self.get_head_noun()
if head_noun is not None:
# if the head noun is personal pronoun or proper noun(s), the clause is nonrestrictive
head_noun_pos = head_noun.tag_  # Penn tags (NNP, NNPS, PRP), not the coarse .pos_
if head_noun_pos in {"NNP", "NNPS", "PRP"}:
self.is_restrictive = False
return
# if the head noun is modified by an indefinite determiner like 'a', 'some', or 'any', restrictive
for child in head_noun.children:
relation = child.dep_
if relation == 'det': # DETERMINER
determiner = child.text.lower()
if determiner in {"a", "an", "some", "any"}:
self.is_restrictive = True
return
self.is_restrictive = True
class AdjunctClause(SubordinateClause):
# function of clause, e.g. temporal, modal, instrumental...
def __init__(self, annotated_sent, clause_root):
super().__init__(annotated_sent, clause_root)
self.clause_type | |
import dataclasses
import datetime
from pyactus.domain import enums
from pyactus.domain import primitives
@dataclasses.dataclass
class Stock():
"""STK :: Stock.
Any instrument which is bought at a certain amount (market price normally) and then follows an index.
"""
# Calendar :: Calendar defines the non-working days which affect the dates of contract events (CDE's) in combination with EOMC and BDC. Custom calendars can be added as additional enum options.
calendar : enums.Calendar
# Business Day Convention :: BDC's are linked to a calendar. Calendars have working and non-working days. A BDC value other than N means that cash flows cannot fall on non-working days; they must be shifted to the next business day (following) or the previous one (preceding). These two simple rules get refined twofold: - Following modified (preceding): Same as following (preceding), however if a cash flow gets shifted into a new month, then it is shifted to the preceding (following) business day. - Shift/calculate (SC) and calculate/shift (CS). Accrual, principal, and possibly other calculations are affected by this choice. In the case of SC first the dates are shifted and after the shift cash flows are calculated. In the case of CS it is the other way round. Attention: Does not affect non-cyclical dates such as PRD, MD, TD, IPCED since they can be set to the correct date directly.
business_day_convention : enums.BusinessDayConvention
# End Of Month Convention :: When computing schedules a special problem arises if an anchor date is at the end of a month and a cycle of monthly or quarterly is applied (yearly in the case of leap years only). How do we have to interpret an anchor date April 30 plus 1M cycles? In the case where EOM is selected, it will jump to the 31st of May, then June 30, July 31 and so on. If SM is selected, it will always jump to the 30th, with of course an exception in February. This logic applies for all months having 30 or fewer days and an anchor date at the last day. Months with 31 days will at any rate jump to the last of the month if the anchor date is on the last day.
end_of_month_convention : enums.EndOfMonthConvention
# Contract Type :: The ContractType is the most important information. It defines the cash flow generating pattern of a contract. The ContractType information in combination with a given state of the risk factors will produce a deterministic sequence of cash flows which are the basis of any financial analysis.
contract_type : enums.ContractType
# Status Date :: SD holds the date per which all attributes of the record were updated. This is especially important for the highly dynamic attributes like Accruals, Notional, interest rates in variable instruments etc.
status_date : datetime.datetime
# Contract Role :: CNTRL defines which position the CRID ( the creator of the contract record ) takes in a contract. For example, whether the contract is an asset or liability, a long or short position for the CRID. Most contracts are simple on or off balance sheet positions which are assets, liabilities. Such contracts can also play a secondary role as a collateral. The attribute is highly significant since it determines the direction of all cash flows. The exact meaning is given with each CT in the ACTUS High Level Specification document.
contract_role : enums.ContractRole
# Creator Identifier :: This identifies the legal entity creating the contract record. The counterparty of the contract is tracked in CPID. CRID is ideally the official LEI which can be a firm, a government body, even a single person etc. However, this can also refer to an anonymous group in which case this information is not to be disclosed. CRID may also refer to a group taking a joint risk.
creator_id : str
# Contract Identifier :: Unique identifier of a contract. If the system is used on a single firm level, an internal unique ID can be generated. If used on a national or globally level, a globally unique ID is required.
contract_id : str
# Market Object Code :: Is pointing to the market value at SD (MarketObject).Unique codes for market objects must be used.
market_object_code : str
# Counterparty Identifier :: CPID identifies the counterparty to the CRID in this contract. CPID is ideally the official LEI which can be a firm, a government body, even a single person etc. However, this can also refer to an anonymous group in which case this information is not to be disclosed. CPID may also refer to a group taking a joint risk or, more generally, CPID is the main counterparty against which the contract has been settled.
counterparty_id : str
# Contract Performance :: Indicates the current contract performance status. Different states of the contract range from performing to default.
contract_performance : enums.ContractPerformance
# Seniority :: Refers to the order of repayment in the event of a sale or default of the issuer.
seniority : enums.Seniority
# Non Performing Date :: The date of the (uncovered) payment event responsible for the current value of the Contract Performance attribute.
non_performing_date : datetime.datetime
# Cycle Anchor Date Of Dividend :: Date from which the dividend payment date schedule is calculated according to the cycle length. The first dividend payment event takes place on this anchor.
cycle_anchor_date_of_dividend : datetime.datetime
# Cycle Of Dividend :: Defines in combination with DVANX the payment points of dividends. The dividend payment schedule will start at DVANX and end at MaximumProjectionPeriod (cf. sheet Modeling Parameters).
cycle_of_dividend : primitives.Cycle
# Next Dividend Payment Amount :: Defines the next dividend payment (amount) whereas the date of dividend payment is defined through the DVANX/DVCL pair. If DVCL is defined, then this amount will be used as dividend payment for each future dividend payment date.
next_dividend_payment_amount : float
# Ex Dividend Date :: In case contract is traded between DVEX and next DV payment date (i.e. PRD>DVEX & PRD<next DV payment date), then the old holder of the contract (previous to the trade) receives the next DV payment. In other words, the next DV payment is cancelled for the new (after the trade) holder of the contract.
ex_dividend_date : datetime.datetime
# Currency :: The currency of the cash flows.
currency : str
# Contract Deal Date :: This date signifies the origination of the contract where an agreement between the customer and the bank has been settled. From this date on, the institution will have a (market) risk position for financial contracts. This is even the case when IED is in future.
contract_deal_date : datetime.datetime
# Notional Principal :: Current nominal value of the contract. For debt instrument this is the current remaining notional outstanding. NT is generally the basis on which interest payments are calculated. If IPCBS is set, IPCBS may introduce a different basis for interest payment calculation.
notional_principal : float
# Quantity :: This attribute relates either to physical contracts (COM) or underlyings of traded contracts. In case of physical contracts it holds the number of underlying units of the specific good (e.g. number of barrels of oil). In case of well defined traded contracts it holds the number of defined underlying instruments. Example: QT of STK CTs underlying a FUTUR indicates the number of those specific STK CTs which underlie the FUTUR.
quantity : float
# Purchase Date :: If a contract is bought after initiation (for example a bond on the secondary market) this date has to be set. It refers to the date at which the payment (of PPRD) and transfer of the security happens. In other words, PRD - if set - takes the role otherwise IED has from a cash | |
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('flush_args')
if self.filedesc is not None:
oprot.writeFieldBegin('filedesc', TType.STRING, 1)
oprot.writeString(self.filedesc.encode('utf-8') if sys.version_info[0] == 2 else self.filedesc)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(flush_args)
flush_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'filedesc', 'UTF8', None, ), # 1
)
class flush_result(object):
"""
Attributes:
- e
"""
def __init__(self, e=None,):
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.e = FileSystemException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('flush_result')
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(flush_result)
flush_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', [FileSystemException, None], None, ), # 1
)
class read_args(object):
"""
Attributes:
- filedesc
- size
"""
def __init__(self, filedesc=None, size=None,):
self.filedesc = filedesc
self.size = size
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.filedesc = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.size = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('read_args')
if self.filedesc is not None:
oprot.writeFieldBegin('filedesc', TType.STRING, 1)
oprot.writeString(self.filedesc.encode('utf-8') if sys.version_info[0] == 2 else self.filedesc)
oprot.writeFieldEnd()
if self.size is not None:
oprot.writeFieldBegin('size', TType.I32, 2)
oprot.writeI32(self.size)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(read_args)
read_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'filedesc', 'UTF8', None, ), # 1
(2, TType.I32, 'size', None, None, ), # 2
)
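# --- Illustrative round-trip sketch (not part of the generated service code) ---
# Shows how one of the generated argument structs can be serialized and parsed back with a
# binary protocol over an in-memory transport. The field values below are arbitrary examples.
def _example_roundtrip_read_args():
    from thrift.transport import TTransport
    from thrift.protocol import TBinaryProtocol

    # serialize
    out_buf = TTransport.TMemoryBuffer()
    out_prot = TBinaryProtocol.TBinaryProtocol(out_buf)
    read_args(filedesc='fd-1', size=4096).write(out_prot)
    payload = out_buf.getvalue()

    # deserialize
    in_buf = TTransport.TMemoryBuffer(payload)
    in_prot = TBinaryProtocol.TBinaryProtocol(in_buf)
    parsed = read_args()
    parsed.read(in_prot)
    return parsed  # parsed.filedesc == 'fd-1', parsed.size == 4096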
class read_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = FileSystemException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('read_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeBinary(self.success)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(read_result)
read_result.thrift_spec = (
(0, TType.STRING, 'success', 'BINARY', None, ), # 0
(1, TType.STRUCT, 'e', [FileSystemException, None], None, ), # 1
)
class readline_args(object):
"""
Attributes:
- filedesc
"""
def __init__(self, filedesc=None,):
self.filedesc = filedesc
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.filedesc = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('readline_args')
if self.filedesc is not None:
oprot.writeFieldBegin('filedesc', TType.STRING, 1)
oprot.writeString(self.filedesc.encode('utf-8') if sys.version_info[0] == 2 else self.filedesc)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(readline_args)
readline_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'filedesc', 'UTF8', None, ), # 1
)
class readline_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = FileSystemException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('readline_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeBinary(self.success)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(readline_result)
readline_result.thrift_spec = (
(0, TType.STRING, 'success', 'BINARY', None, ), # 0
(1, TType.STRUCT, 'e', [FileSystemException, None], None, ), # 1
)
class readlines_args(object):
"""
Attributes:
- filedesc
- sizehint
"""
def __init__(self, filedesc=None, sizehint=None,):
self.filedesc = filedesc
self.sizehint = sizehint
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.filedesc = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.sizehint = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('readlines_args')
if self.filedesc is not None:
oprot.writeFieldBegin('filedesc', TType.STRING, 1)
oprot.writeString(self.filedesc.encode('utf-8') if sys.version_info[0] == 2 else self.filedesc)
oprot.writeFieldEnd()
if self.sizehint is not None:
oprot.writeFieldBegin('sizehint', TType.I32, 2)
oprot.writeI32(self.sizehint)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(readlines_args)
readlines_args.thrift_spec = (
None, # 0
(1, TType.STRING, 'filedesc', 'UTF8', None, ), # 1
(2, TType.I32, 'sizehint', None, None, ), # 2
)
class readlines_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None,):
self.success = success
self.e = e
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype10, _size7) = iprot.readListBegin()
for _i11 in range(_size7):
_elem12 = iprot.readBinary()
self.success.append(_elem12)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = FileSystemException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('readlines_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
for iter13 in self.success:
| |
#!/usr/local/uvcdat/1.3.1/bin/python
# TODO List
# 1) Fix multiple plots->single png (set 3, set 6 primarily, set 1/2 level vars). Need to investigate template stuff
# 2) Fix obs vs model variable name issues (requires lots of framework changes, probably need Jeff to poke at it) DONE for hardcoded variables
# 3) Merge set3(b) and 6(b) code since it is very similar. Readd set3b/6b and make them work in case EA needs them? Does EA need them?
# 4) Further code clean up IN PROGRESS
# 5) Work on set 5 DONE
# 6) Work on set 9 IN PROGRESS
# 7) Work on splitting up opts and add >2 filetable support
# 8) Clean up computation/reductions.py redundant/duplicated functions
# 9) Fix labels on set 3 (numbers->JAN FEB ... DEC) (Email sent to Jim/Jeff)
from metrics.packages.diagnostic_groups import *
#from metrics.packages.common.diagnostic_groups import *
from metrics.computation.reductions import *
from metrics.frontend.uvcdat import *
from metrics.computation.plotspec import *
import metrics.frontend.defines as defines
from metrics.packages.lmwg.defines import *
### Derived unreduced variables (DUV) definitions
### These are variables that are nonlinear or require 2 passes to get a final
### result.
# Probably could pass the reduction_function for all of these instead of a flag, but this puts
# all of the reduction functions in the same place in case they need to change or something.
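# Illustrative sketch of the DUV pattern described above (not part of the original diagnostics):
# wrap the nonlinear combination in a derived_var, then hand it to a reduced_variable via the
# ``duvs`` argument so the derivation happens before the reduction. ``my_filetable`` is a
# placeholder name, not something defined in this module.
#
#   duv = derived_var('RNET_A', inputs=['FSA', 'FIRA'], func=aminusb)
#   rv = reduced_variable(
#       variableid='RNET_A', filetable=my_filetable,
#       reduction_function=(lambda x, vid=None: reduceAnnSingle(x, vid=vid)),
#       duvs={'RNET_A': duv})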
class evapfrac_redvar ( reduced_variable ):
def __init__(self, filetable, fn, season=None, region=None, flag=None):
duv = derived_var('EVAPFRAC_A', inputs=['FCTR', 'FCEV', 'FGEV', 'FSH'], func=evapfrac_special)
if fn == 'SEASONAL':
reduced_variable.__init__(
self, variableid='EVAPFRAC_A',
filetable=filetable,
reduction_function=(lambda x, vid=None: reduce2latlon_seasonal(x, season, vid=vid)),
duvs={'EVAPFRAC_A':duv})
if fn == 'TREND':
if flag == 'MONTHLY':
reduced_variable.__init__(
self, variableid='EVAPFRAC_A',
filetable=filetable,
reduction_function=(lambda x, vid=None: reduceMonthlyTrendRegion(x, region, vid=vid)),
duvs={'EVAPFRAC_A':duv})
else:
reduced_variable.__init__(
self, variableid='EVAPFRAC_A',
filetable=filetable,
reduction_function=(lambda x, vid=None: reduceAnnTrendRegion(x, region, vid=vid)),
duvs={'EVAPFRAC_A':duv})
class rnet_redvar( reduced_variable ):
def __init__(self, filetable, fn, season=None, region=None, flag=None):
duv = derived_var('RNET_A', inputs=['FSA', 'FIRA'], func=aminusb)
if fn == 'SEASONAL':
reduced_variable.__init__(
self, variableid='RNET_A',
filetable=filetable,
reduction_function=(lambda x, vid=None: reduce2latlon_seasonal(x, season, vid=vid)),
duvs={'RNET_A': duv})
if fn == 'TREND':
if flag == 'MONTHLY':
reduced_variable.__init__(
self, variableid='RNET_A',
filetable=filetable,
reduction_function=(lambda x, vid=None: reduceMonthlyTrendRegion(x, region, vid=vid)),
duvs={'RNET_A':duv})
else:
reduced_variable.__init__(
self, variableid='RNET_A',
filetable=filetable,
reduction_function=(lambda x, vid=None: reduceAnnTrendRegion(x, region, vid=vid)),
duvs={'RNET_A':duv})
if fn == 'SINGLE':
reduced_variable.__init__(
self, variableid='RNET_A',
filetable=filetable,
reduction_function=(lambda x, vid=None: reduceAnnTrendRegion(x, region, single=True, vid=vid)),
duvs={'RNET_A':duv})
class albedos_redvar( reduced_variable ):
def __init__(self, filetable, fn, varlist, season=None, region=None, flag=None):
vname = varlist[0]+'_'+varlist[1]
duv = derived_var(vname, inputs=varlist, func=ab_ratio)
if fn == 'SEASONAL':
reduced_variable.__init__(
self, variableid=vname,
filetable=filetable,
reduction_function=(lambda x, vid=None: reduce2latlon_seasonal(x, season, vid=vid)),
duvs={vname: duv})
if fn == 'TREND':
if flag == 'MONTHLY':
reduced_variable.__init__(
self, variableid=vname,
filetable=filetable,
reduction_function=(lambda x, vid=None: reduceMonthlyTrendRegion(x, region, vid=vid)),
duvs={vname: duv})
else:
reduced_variable.__init__(
self, variableid=vname,
filetable=filetable,
reduction_function=(lambda x, vid=None: reduceAnnTrendRegion(x, region, vid=vid)),
duvs={vname: duv})
if fn == 'SINGLE':
reduced_variable.__init__(
self, variableid=vname,
filetable=filetable,
reduction_function=(lambda x, vid: reduceAnnTrendRegion(x, region, single=True, vid=vid)),
duvs={vname:duv})
# A couple only used for one set, so don't need more generalized.
class pminuse_seasonal( reduced_variable ):
def __init__(self, filetable, season):
duv = derived_var('P-E_A', inputs=['RAIN', 'SNOW', 'QSOIL', 'QVEGE', 'QVEGT'], func=pminuse)
reduced_variable.__init__(
self, variableid='P-E_A',
filetable=filetable,
reduction_function=(lambda x, vid=None: reduce2latlon_seasonal(x, season, vid=vid)),
duvs={'P-E_A':duv})
class canopyevapTrend( reduced_variable ):
# Canopy evap = qvege/(rain+snow)
def __init__(self, filetable):
duv = derived_var('CE_A', inputs=['QVEGE', 'RAIN','SNOW'], func=canopy_special)
print 'in canopyevap.'
reduced_variable.__init__(
self, variableid='CE_A',
filetable=filetable,
reduction_function=(lambda x, vid=None: reduceAnnSingle(x, vid=vid)),
duvs={'CE_A':duv})
class prereduce ( reduced_variable ):
def __init__(self, filetable, var, region):
duv = derived_var(var+'_'+region, inputs=[var], func=reduceAnnSingle)
reduced_variable.__init__(
self, variableid=var+'_'+region, filetable=filetable,
reduction_function=(lambda x, vid=None: reduceRegion(x, defines.all_regions[region]['coords'], vid=vid)),
duvs={var+'_'+region:duv})
class co2ppmvTrendRegionSingle( reduced_variable ):
def __init__(self, filetable, region):
duv = derived_var('CO2_PPMV_A', inputs=['PCO2', 'PBOT'], func=adivb)
reduced_variable.__init__(
self, variableid='CO2_PPMV_A',
filetable=filetable,
reduction_function=(lambda x, vid: reduceAnnTrendRegion(x, region, single=True, vid=vid)),
duvs={'CO2_PPMV_A':duv})
class LMWG(BasicDiagnosticGroup):
#This class defines features unique to the LMWG Diagnostics.
#This is basically a copy and stripping of amwg.py since I can't
#figure out the code any other way. I am hoping to simplify this
#at some point. I would very much like to drop the "sets" baggage
#from NCAR and define properties of diags. Then, "sets" could be
#very simple descriptions involving highly reusable components.
def __init__(self):
pass
def list_variables( self, filetable1, filetable2=None, diagnostic_set_name="" ):
if diagnostic_set_name!="":
dset = self.list_diagnostic_sets().get( str(diagnostic_set_name), None )
if dset is None:
return self._list_variables( filetable1, filetable2 )
else: # Note that dset is a class not an object.
return dset._list_variables( filetable1, filetable2 )
else:
return self._list_variables( filetable1, filetable2 )
@staticmethod
def _list_variables( filetable1, filetable2=None, diagnostic_set_name="" ):
return BasicDiagnosticGroup._list_variables( filetable1, filetable2, diagnostic_set_name )
@staticmethod
def _all_variables( filetable1, filetable2, diagnostic_set_name ):
return BasicDiagnosticGroup._all_variables( filetable1, filetable2, diagnostic_set_name )
def list_diagnostic_sets( self ):
psets = lmwg_plot_spec.__subclasses__()
plot_sets = psets
for cl in psets:
plot_sets = plot_sets + cl.__subclasses__()
return { aps.name:aps for aps in plot_sets if
hasattr(aps,'name') and aps.name.find('dummy')<0 }
class lmwg_plot_spec(plot_spec):
package = LMWG # Note that this is a class not an object..
albedos = {'VBSA':['FSRVDLN', 'FSDSVDLN'], 'NBSA':['FSRNDLN', 'FSDSNDLN'], 'VWSA':['FSRVI', 'FSDSVI'], 'NWSA':['FSRNI', 'FSDSNI'], 'ASA':['FSR', 'FSDS']}
@staticmethod
def _list_variables( filetable1, filetable2=None ):
return lmwg_plot_spec.package._list_variables( filetable1, filetable2, "lmwg_plot_spec" )
@staticmethod
def _all_variables( filetable1, filetable2=None ):
return lmwg_plot_spec.package._all_variables( filetable1, filetable2, "lmwg_plot_spec" )
###############################################################################
###############################################################################
### Set 1 - Line plots of annual trends in energy balance, soil water/ice ###
### and temperature, runoff, snow water/ice, photosynthesis ###
### ###
### Set 1 supports model vs model comparisons, but does not require model ###
### vs obs comparisons. so ft2 is always a model and can be treated the ###
### same as ft1 and will need no variable name translations. This assumes ###
### both models have the same variables as well. ###
###############################################################################
###############################################################################
### TODO: Fix up plots when 2 model runs are available. Should show ft1 and ft2
### on a single plot, then show difference of ft1 and ft2 below OR as a separate
### option. Today it is a separate option, but that might complicate things too
### much.
### However, the level_vars should *probably* have a separate option for
### difference plots because there would be 20 plots otherwise.
### Perhaps this needs to be a command line option or GUI check box?
class lmwg_plot_set1(lmwg_plot_spec):
varlist = []
name = '1 - Line plots of annual trends in energy balance, soil water/ice and temperature, runoff, snow water/ice, photosynthesis '
number = '1'
_derived_varnames = ['PREC', 'TOTRUNOFF', 'TOTSOILICE', 'TOTSOILLIQ']
### These are special cased since they have 10 levels plotted. However, they are not "derived" per se.
_level_vars = ['SOILLIQ', 'SOILICE', 'SOILPSI', 'TSOI']
def __init__(self, filetable1, filetable2, varid, seasonid=None, region=None, aux=None):
plot_spec.__init__(self,seasonid)
self.plottype = 'Yxvsx'
self._var_baseid = '_'.join([varid, 'set1'])
ft1id,ft2id = filetable_ids(filetable1,filetable2)
self.plot1_id = ft1id+'_'+varid
if filetable2 is not None:
self.plot2_id = ft1id+' - '+ft2id+'_'+varid
self.plotall_id = ft1id+'_'+ft2id+'_'+varid
else:
self.plotall_id = filetable1._strid+'__'+varid # must differ from plot1_id
self.seasons = ['ANN']
if not self.computation_planned:
self.plan_computation(filetable1, filetable2, varid, seasonid, region, aux)
@staticmethod
def _list_variables(filetable1, filetable2=None):
filevars = lmwg_plot_set1._all_variables(filetable1, filetable2)
allvars = filevars
listvars = allvars.keys()
listvars.sort()
return listvars
@staticmethod
def _all_variables(filetable1, filetable2=None):
allvars = lmwg_plot_spec.package._all_variables(filetable1, filetable2, "lmwg_plot_spec")
for dv in lmwg_plot_set1._derived_varnames:
allvars[dv] = basic_plot_variable
if filetable2 != None:
if dv not in filetable2.list_variables():
del allvars[dv]
return allvars
def plan_computation(self, filetable1, filetable2, varid, seasonid, region=None, aux=None):
self.reduced_variables = {}
self.derived_variables = {}
# No need for a separate function just use global.
region = defines.all_regions['Global']['coords']
# Take care of the oddballs first.
if varid in lmwg_plot_set1._level_vars:
# TODO: These should be combined plots for _1 and _2, and split off _3 into a separate thing somehow
vbase=varid
self.composite_plotspecs[self.plotall_id] = []
for i in range(0,10):
vn = vbase+str(i+1)+'_1'
ln = 'Layer '+str(i+1)
self.reduced_variables[vn] = reduced_variable(
variableid = vbase, filetable=filetable1, reduced_var_id=vn,
reduction_function=(lambda x, vid, i=i: reduceAnnTrendRegionLevel(x, region, i, vid)))
self.single_plotspecs[vn] = plotspec(vid=vn,
zvars = [vn], zfunc=(lambda z:z),
# z2, # z3,
plottype = self.plottype, title=ln)
self.composite_plotspecs[self.plotall_id].append(vn)
if filetable2 != None:
for i in range(0,10):
vn = vbase+str(i+1)
ln = 'Layer '+str(i+1)
self.reduced_variables[vn+'_2'] = reduced_variable(
variableid = vbase, filetable=filetable2, reduced_var_id=vn+'_2',
reduction_function=(lambda x, vid, i=i: reduceAnnTrendRegionLevel(x, region, i, vid)))
self.single_plotspecs[vn+'_2'] = plotspec(vid=vn+'_2',
zvars = [vn+'_2'], zfunc=(lambda z:z),
plottype = self.plottype, title=ln)
self.single_plotspecs[vn+'_3'] = plotspec(
vid=vn+'_3', zvars = [vn+'_1', vn+'_2'],
zfunc=aminusb,
plottype = self.plottype, title=ln)
self.composite_plotspecs[self.plotall_id].append(vn+'_2')
self.composite_plotspecs[self.plotall_id].append(vn+'_3')
else: # Now everything else.
# Get the easy ones first
if varid not in lmwg_plot_set1._derived_varnames and varid not in lmwg_plot_set1._level_vars:
self.reduced_variables[varid+'_1'] = reduced_variable(variableid = varid,
filetable=filetable1, reduced_var_id = | |
is None
def chickering_distance(self, other) -> int:
"""
Return the total number of edge reversals plus twice the number of edge additions/deletions required
to turn this DAG into the DAG ``other``.
Parameters
----------
other:
the DAG against which to compare the Chickering distance.
Returns
-------
int
The Chickering distance between this DAG and the DAG ``other``.
Examples
--------
>>> from graphical_models import DAG
>>> d1 = DAG(arcs={(0, 1), (1, 2)})
>>> d2 = DAG(arcs={(0, 1), (2, 1), (3, 1)})
>>> d1.chickering_distance(d2)
3
"""
reversals = self._arcs & {tuple(reversed(arc)) for arc in other._arcs}
return len(reversals) + 2 * self.shd_skeleton(other)
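# A minimal, hedged sketch (illustration only): the value returned above is just
# |reversed arcs| + 2 * skeleton-SHD, which can be checked by hand for the
# doctest pair, assuming graphical_models is importable:
#
#     from graphical_models import DAG
#     d1 = DAG(arcs={(0, 1), (1, 2)})
#     d2 = DAG(arcs={(0, 1), (2, 1), (3, 1)})
#     reversals = d1.arcs & {(j, i) for i, j in d2.arcs}    # {(1, 2)}
#     assert d1.chickering_distance(d2) == len(reversals) + 2 * d1.shd_skeleton(d2)   # 1 + 2*1 == 3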
def confusion_matrix(self, other, rates_only=False):
"""
Return the "confusion matrix" associated with estimating the CPDAG of ``other`` instead of the CPDAG of this DAG.
Parameters
----------
other:
The DAG against which to compare.
rates_only:
if True, the dictionary of results only contains the false positive rate, true positive rate, and precision.
Returns
-------
dict
Dictionary of results
* false_positive_arcs:
the arcs in the CPDAG of ``other`` which are not arcs or edges in the CPDAG of this DAG.
* false_positive_edges:
the edges in the CPDAG of ``other`` which are not arcs or edges in the CPDAG of this DAG.
* false_negative_arcs:
the arcs in the CPDAG of this graph which are not arcs or edges in the CPDAG of ``other``.
* true_positive_arcs:
the arcs in the CPDAG of ``other`` which are arcs in the CPDAG of this DAG.
* reversed_arcs:
the arcs in the CPDAG of ``other`` whose reversals are arcs in the CPDAG of this DAG.
* mistaken_arcs_for_edges:
the arcs in the CPDAG of this DAG which are edges in the CPDAG of ``other``.
* false_negative_edges:
the edges in the CPDAG of this DAG which are not arcs or edges in the CPDAG of ``other``.
* true_positive_edges:
the edges in the CPDAG of ``other`` which are edges in the CPDAG of this DAG.
* mistaken_edges_for_arcs:
the edges in the CPDAG of this DAG which are arcs in the CPDAG of ``other``.
* num_false_positives:
the total number of: false_positive_arcs, false_positive_edges
* num_false_negatives:
the total number of: false_negative_arcs, false_negative_edges, mistaken_arcs_for_edges, and reversed_arcs
* num_true_positives:
the total number of: true_positive_arcs, true_positive_edges, and mistaken_edges_for_arcs
* num_true_negatives:
the total number of missing arcs/edges in ``other`` which are actually missing in this DAG.
* fpr:
the false positive rate, i.e., num_false_positives/(num_false_positives+num_true_negatives). If this DAG
is fully connected, defaults to 0.
* tpr:
the true positive rate, i.e., num_true_positives/(num_true_positives+num_false_negatives). If this DAG
is empty, defaults to 1.
* precision:
the precision, i.e., num_true_positives/(num_true_positives+num_false_positives). If ``other`` is
empty, defaults to 1.
Examples
--------
>>> from graphical_models import DAG
>>> d1 = DAG(arcs={(0, 1), (1, 2)})
>>> d2 = DAG(arcs={(0, 1), (2, 1)})
>>> cm = d1.confusion_matrix(d2)
>>> cm["mistaken_edges_for_arcs"]
{frozenset({0, 1}), frozenset({1, 2})}
>>> cm = d2.confusion_matrix(d1)
>>> cm["mistaken_arcs_for_edges"]
{(0, 1), (2, 1)}
"""
self_cpdag = self.cpdag()
from graphical_models.classes.dags.pdag import PDAG
if isinstance(other, PDAG):
other_cpdag = other
else:
other_cpdag = other.cpdag()
# HELPER SETS SELF
self_arcs_as_edges = {frozenset(arc) for arc in self_cpdag._arcs}
self_edges_as_arcs1 = {(i, j) for i, j in self_cpdag._edges}
self_edges_as_arcs2 = {(j, i) for i, j in self_edges_as_arcs1}
# HELPER SETS OTHER
other_arcs_reversed = {(j, i) for i, j in other_cpdag._arcs}
other_arcs_as_edges = {frozenset(arc) for arc in other_cpdag._arcs}
other_edges_as_arcs1 = {(i, j) for i, j in other_cpdag._edges}
other_edges_as_arcs2 = {(j, i) for i, j in other_edges_as_arcs1}
# MISSING IN TRUE GRAPH
false_positive_arcs = other_cpdag._arcs - self_cpdag._arcs - self_edges_as_arcs1 - self_edges_as_arcs2
false_positive_edges = other_cpdag._edges - self_cpdag._edges - self_arcs_as_edges
# ARC IN TRUE GRAPH
false_negative_arcs = self_cpdag._arcs - other_cpdag._arcs - other_edges_as_arcs1 - other_edges_as_arcs2
true_positive_arcs = self_cpdag._arcs & other_cpdag._arcs
reversed_arcs = self_cpdag._arcs & other_arcs_reversed
mistaken_arcs_for_edges = self_cpdag._arcs & (other_edges_as_arcs1 | other_edges_as_arcs2)
# EDGE IN TRUE GRAPH
false_negative_edges = self_cpdag._edges - other_cpdag._edges - other_arcs_as_edges
true_positive_edges = self_cpdag._edges & other_cpdag._edges
mistaken_edges_for_arcs = self_cpdag._edges & other_arcs_as_edges
# COMBINED_RESULTS
num_false_positives = len(false_positive_arcs) + len(false_positive_edges)
num_false_negatives = len(false_negative_arcs) + len(false_negative_edges) + len(mistaken_arcs_for_edges) + len(
reversed_arcs)
num_true_positives = len(true_positive_edges) + len(true_positive_arcs) + len(mistaken_edges_for_arcs)
num_true_negatives = comb(self.nnodes, 2) - num_false_positives - num_false_negatives - num_true_positives
# RATES
num_negatives = comb(self.nnodes, 2) - self.num_arcs
num_positives = self.num_arcs
num_returned_positives = (num_true_positives + num_false_positives)
fpr = num_false_positives / num_negatives if num_negatives != 0 else 0
tpr = num_true_positives / num_positives if num_positives != 0 else 1
precision = num_true_positives / num_returned_positives if num_returned_positives != 0 else 1
if rates_only:
return dict(
fpr=fpr,
tpr=tpr,
precision=precision
)
res = dict(
false_positive_arcs=false_positive_arcs,
false_positive_edges=false_positive_edges,
false_negative_arcs=false_negative_arcs,
true_positive_arcs=true_positive_arcs,
reversed_arcs=reversed_arcs,
mistaken_arcs_for_edges=mistaken_arcs_for_edges,
false_negative_edges=false_negative_edges,
true_positive_edges=true_positive_edges,
mistaken_edges_for_arcs=mistaken_edges_for_arcs,
num_false_positives=num_false_positives,
num_false_negatives=num_false_negatives,
num_true_positives=num_true_positives,
num_true_negatives=num_true_negatives,
fpr=fpr,
tpr=tpr,
precision=precision
)
return res
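# A minimal, hedged usage sketch (illustration only): scoring an estimated DAG
# against this DAG when only the summary rates are needed, assuming
# graphical_models is importable:
#
#     from graphical_models import DAG
#     truth = DAG(arcs={(0, 1), (1, 2)})        # chain: CPDAG has edges 0-1, 1-2
#     estimate = DAG(arcs={(0, 1), (2, 1)})     # collider: CPDAG has arcs 0->1, 2->1
#     rates = truth.confusion_matrix(estimate, rates_only=True)
#     # every adjacency is recovered (orientation differences count as true positives),
#     # so rates["tpr"] == 1.0, rates["fpr"] == 0.0 and rates["precision"] == 1.0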
def confusion_matrix_skeleton(self, other):
"""
Return the "confusion matrix" associated with estimating the skeleton of ``other`` instead of the skeleton of
this DAG.
Parameters
----------
other:
The DAG against which to compare.
Returns
-------
dict
Dictionary of results
* false_positives:
the edges in the skeleton of ``other`` which are not in the skeleton of this DAG.
* false_negatives:
the edges in the skeleton of this graph which are not in the skeleton of ``other``.
* true_positives:
the edges in the skeleton of ``other`` which are actually in the skeleton of this DAG.
* num_false_positives:
the total number of false_positives
* num_false_negatives:
the total number of false_negatives
* num_true_positives:
the total number of true_positives
* num_true_negatives:
the total number of missing edges in the skeleton of ``other`` which are actually missing in this DAG.
* fpr:
the false positive rate, i.e., num_false_positives/(num_false_positives+num_true_negatives). If this DAG
is fully connected, defaults to 0.
* tpr:
the true positive rate, i.e., num_true_positives/(num_true_positives+num_false_negatives). If this DAG
is empty, defaults to 1.
* precision:
the precision, i.e., num_true_positives/(num_true_positives+num_false_positives). If ``other`` is
empty, defaults to 1.
Examples
--------
>>> from graphical_models import DAG
>>> d1 = DAG(arcs={(0, 1), (1, 2)})
>>> d2 = DAG(arcs={(0, 1), (2, 1)})
>>> cm = d1.confusion_matrix_skeleton(d2)
>>> cm["tpr"]
1.0
>>> d3 = DAG(arcs={(0, 1), (0, 2)})
>>> cm = d2.confusion_matrix_skeleton(d3)
>>> cm["true_positives"]
{frozenset({0, 1})}
>>> cm["false_positives"]
{frozenset({0, 2})}
>>> cm["false_negatives"]
{frozenset({1, 2})}
"""
self_skeleton = self.skeleton
other_skeleton = other.skeleton
true_positives = self_skeleton & other_skeleton
false_positives = other_skeleton - self_skeleton
false_negatives = self_skeleton - other_skeleton
num_true_positives = len(true_positives)
num_false_positives = len(false_positives)
num_false_negatives = len(false_negatives)
num_true_negatives = comb(self.nnodes, 2) - num_true_positives - num_false_positives - num_false_negatives
num_positives = len(self_skeleton)
num_negatives = comb(self.nnodes, 2) - num_positives
tpr = num_true_positives / num_positives if num_positives != 0 else 1
fpr = num_false_positives / num_negatives if num_negatives != 0 else 0
res = dict(
true_positives=true_positives,
false_positives=false_positives,
false_negatives=false_negatives,
num_true_positives=num_true_positives,
num_false_positives=num_false_positives,
num_true_negatives=num_true_negatives,
num_false_negatives=num_false_negatives,
tpr=tpr,
fpr=fpr
)
return res
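# A minimal, hedged usage sketch (illustration only): skeleton-level scoring
# ignores orientations entirely; continuing the doctest example above,
#
#     from graphical_models import DAG
#     d2 = DAG(arcs={(0, 1), (2, 1)})
#     d3 = DAG(arcs={(0, 1), (0, 2)})
#     cm = d2.confusion_matrix_skeleton(d3)
#     # one shared edge, one spurious edge, one missed edge:
#     assert (cm["num_true_positives"], cm["num_false_positives"], cm["num_false_negatives"]) == (1, 1, 1)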
# === WRITING TO FILES
@classmethod
def from_gml(cls, filename):
raise NotImplementedError
@classmethod
def from_csv(cls, filename):
raise NotImplementedError
def save_gml(self, filename):
"""
TODO
"""
raise NotImplementedError
warn_untested() # TODO: ADD TEST
tab = ' '
indent = 0
newline = lambda indent: '\n' + (tab * indent)
with open(filename, 'w') as f:
f.write('graph [')
indent += 1
f.write(newline(indent))
f.write('directed 1')
f.write(newline(indent))
node2ix = core_utils.ix_map_from_list(self._nodes)
for node, ix in node2ix.items():
f.write('node [')
indent += 1
f.write(newline(indent))
f.write('id %s' % ix)
f.write(newline(indent))
f.write('label "%s"' % node)
indent -= 1
f.write(newline(indent))
f.write(']')
f.write(newline(indent))
for source, target in self._arcs:
f.write('edge [')
indent += 1
f.write(newline(indent))
f.write('source %s' % source)
f.write(newline(indent))
f.write('target %s' % target)
indent -= 1
f.write(newline(indent))
f.write(']')
f.write(newline(indent))
f.write(']')
def to_csv(self, filename):
"""
TODO
"""
raise NotImplementedError
warn_untested() # TODO: ADD TEST
with open(filename, 'w', newline='\n') as file:
writer = csv.writer(file)
for source, target in self._arcs:
writer.writerow([source, target])
# === NUMPY CONVERSION
@classmethod
def from_amat(cls, amat: np.ndarray):
"""
Return a DAG with arcs given by ``amat``, i.e. i->j if ``amat[i,j] != 0``.
Parameters
----------
amat:
Numpy matrix representing arcs in the DAG.
Examples
--------
>>> from graphical_models import DAG
>>> import numpy as np
>>> amat = np.array([[0, 0, 1], [0, 0, 1], [0, 0, 0]])
>>> d = DAG.from_amat(amat)
>>> d.arcs
{(0, 2), (1, 2)}
"""
nodes = set(range(amat.shape[0]))
arcs = {(i, j) for i, j in itr.permutations(nodes, 2) if amat[i, | |
<reponame>rgschmitz1/BioDepot-workflow-builder
"""
Orange Canvas Tool Dock widget
"""
import sys
from AnyQt.QtWidgets import (
QWidget,
QSplitter,
QVBoxLayout,
QTextEdit,
QAction,
QSizePolicy,
QApplication,
)
from AnyQt.QtGui import QPalette, QDrag
from AnyQt.QtCore import (
Qt,
QSize,
QObject,
QPropertyAnimation,
QEvent,
QRect,
QPoint,
QModelIndex,
QPersistentModelIndex,
QEventLoop,
QMimeData,
)
from AnyQt.QtCore import pyqtProperty as Property, pyqtSignal as Signal
from ..gui.toolgrid import ToolGrid
from ..gui.toolbar import DynamicResizeToolBar
from ..gui.quickhelp import QuickHelp
from ..gui.framelesswindow import FramelessWindow
from ..document.quickmenu import MenuPage
from ..document.quickmenu import create_css_gradient
from .widgettoolbox import WidgetToolBox, iter_item
from ..registry.qt import QtWidgetRegistry
class SplitterResizer(QObject):
"""
An object able to control the size of a widget in a QSplitter instance.
"""
def __init__(self, parent=None):
QObject.__init__(self, parent)
self.__splitter = None
self.__widget = None
self.__updateOnShow = True # Need __update on next show event
self.__animationEnabled = True
self.__size = -1
self.__expanded = False
self.__animation = QPropertyAnimation(self, b"size_", self, duration=200)
self.__action = QAction("toogle-expanded", self, checkable=True)
self.__action.triggered[bool].connect(self.setExpanded)
def setSize(self, size):
"""Set the size of the controlled widget (either width or height
depending on the orientation).
.. note::
The controlled widget's size is only updated when it is shown.
"""
if self.__size != size:
self.__size = size
self.__update()
def size(self):
"""Return the size of the widget in the splitter (either height or
width) depending on the splitter orientation.
"""
if self.__splitter and self.__widget:
index = self.__splitter.indexOf(self.__widget)
sizes = self.__splitter.sizes()
return sizes[index]
else:
return -1
size_ = Property(int, fget=size, fset=setSize)
def setAnimationEnabled(self, enable):
"""Enable/disable animation.
"""
self.__animation.setDuration(200 if enable else 0)
def animationEnabled(self):
return self.__animation.duration() != 0
def setSplitterAndWidget(self, splitter, widget):
"""Set the QSplitter and QWidget instance the resizer should control.
.. note:: the widget must be in the splitter.
"""
if splitter and widget and not splitter.indexOf(widget) > 0:
raise ValueError("Widget must be in a splitter.")
if self.__widget is not None:
self.__widget.removeEventFilter(self)
if self.__splitter is not None:
self.__splitter.removeEventFilter(self)
self.__splitter = splitter
self.__widget = widget
if widget is not None:
widget.installEventFilter(self)
if splitter is not None:
splitter.installEventFilter(self)
self.__update()
size = self.size()
if self.__expanded and size == 0:
self.open()
elif not self.__expanded and size > 0:
self.close()
def toogleExpandedAction(self):
"""Return a QAction that can be used to toggle expanded state.
"""
return self.__action
def open(self):
"""Open the controlled widget (expand it to sizeHint).
"""
self.__expanded = True
self.__action.setChecked(True)
if self.__splitter is None or self.__widget is None:
return
hint = self.__widget.sizeHint()
if self.__splitter.orientation() == Qt.Vertical:
end = hint.height()
else:
end = hint.width()
self.__animation.setStartValue(0)
self.__animation.setEndValue(end)
self.__animation.start()
def close(self):
"""Close the controlled widget (shrink to size 0).
"""
self.__expanded = False
self.__action.setChecked(False)
if self.__splitter is None or self.__widget is None:
return
self.__animation.setStartValue(self.size())
self.__animation.setEndValue(0)
self.__animation.start()
def setExpanded(self, expanded):
"""Set the expanded state.
"""
if self.__expanded != expanded:
if expanded:
self.open()
else:
self.close()
def expanded(self):
"""Return the expanded state.
"""
return self.__expanded
def __update(self):
"""Update the splitter sizes.
"""
if self.__splitter and self.__widget:
if sum(self.__splitter.sizes()) == 0:
# schedule update on next show event
self.__updateOnShow = True
return
splitter = self.__splitter
index = splitter.indexOf(self.__widget)
sizes = splitter.sizes()
current = sizes[index]
diff = current - self.__size
sizes[index] = self.__size
sizes[index - 1] = sizes[index - 1] + diff
self.__splitter.setSizes(sizes)
def eventFilter(self, obj, event):
if (
obj is self.__widget
and event.type() == QEvent.Resize
and self.__animation.state() == QPropertyAnimation.Stopped
):
# Update the expanded state when the user opens/closes the widget
# by dragging the splitter handle.
if self.__splitter.orientation() == Qt.Vertical:
size = event.size().height()
else:
size = event.size().width()
if self.__expanded and size == 0:
self.__action.setChecked(False)
self.__expanded = False
elif not self.__expanded and size > 0:
self.__action.setChecked(True)
self.__expanded = True
if (
obj is self.__splitter
and event.type() == QEvent.Show
and self.__updateOnShow
):
# Update the splitter state after receiving valid geometry
self.__updateOnShow = False
self.__update()
return QObject.eventFilter(self, obj, event)
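# A minimal, hedged usage sketch of SplitterResizer on its own (the widget names
# below are made up for illustration); guarded so it never runs on import.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    splitter = QSplitter(Qt.Vertical)
    top, bottom = QTextEdit("top pane"), QTextEdit("bottom pane")
    splitter.addWidget(top)
    splitter.addWidget(bottom)   # must not be the first widget in the splitter
    resizer = SplitterResizer(splitter)
    resizer.setSplitterAndWidget(splitter, bottom)
    resizer.close()              # start with the bottom pane collapsed
    splitter.show()
    resizer.open()               # animate it back to its size hint
    sys.exit(app.exec_())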
class QuickHelpWidget(QuickHelp):
def minimumSizeHint(self):
"""Reimplemented to allow the Splitter to resize the widget
with a continuous animation.
"""
hint = QTextEdit.minimumSizeHint(self)
return QSize(hint.width(), 0)
class CanvasToolDock(QWidget):
"""Canvas dock widget with widget toolbox, quick help and
canvas actions.
"""
def __init__(self, parent=None, **kwargs):
QWidget.__init__(self, parent, **kwargs)
self.__setupUi()
def __setupUi(self):
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
self.toolbox = WidgetToolBox()
self.help = QuickHelpWidget(objectName="quick-help")
self.__splitter = QSplitter()
self.__splitter.setOrientation(Qt.Vertical)
self.__splitter.addWidget(self.toolbox)
self.__splitter.addWidget(self.help)
self.toolbar = DynamicResizeToolBar()
self.toolbar.setMovable(False)
self.toolbar.setFloatable(False)
self.toolbar.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Preferred)
layout.addWidget(self.__splitter, 10)
layout.addWidget(self.toolbar)
self.setLayout(layout)
self.__splitterResizer = SplitterResizer(self)
self.__splitterResizer.setSplitterAndWidget(self.__splitter, self.help)
def setQuickHelpVisible(self, state):
"""Set the quick help box visibility status.
"""
self.__splitterResizer.setExpanded(state)
def quickHelpVisible(self):
return self.__splitterResizer.expanded()
def setQuickHelpAnimationEnabled(self, enabled):
"""Enable/disable the quick help animation.
"""
self.__splitterResizer.setAnimationEnabled(enabled)
def toogleQuickHelpAction(self):
"""Return a checkable QAction for help show/hide.
"""
return self.__splitterResizer.toogleExpandedAction()
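# A minimal, hedged usage sketch of CanvasToolDock on its own; the widget
# registry/model wiring that the application normally performs is omitted, so
# the toolbox is simply empty.  Guarded so it never runs on import.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    dock = CanvasToolDock()
    dock.setQuickHelpVisible(True)
    dock.resize(300, 600)
    dock.show()
    sys.exit(app.exec_())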
class QuickCategoryToolbar(ToolGrid):
"""A toolbar with category buttons.
"""
def __init__(self, parent=None, buttonSize=None, iconSize=None):
ToolGrid.__init__(self, parent, 1, buttonSize, iconSize, Qt.ToolButtonIconOnly)
self.__model = None
def setColumnCount(self, count):
raise Exception("Cannot set the column count on a Toolbar")
def setModel(self, model):
"""Set the registry model.
"""
if self.__model is not None:
self.__model.itemChanged.disconnect(self.__on_itemChanged)
self.__model.rowsInserted.disconnect(self.__on_rowsInserted)
self.__model.rowsRemoved.disconnect(self.__on_rowsRemoved)
self.clear()
self.__model = model
if self.__model is not None:
self.__model.itemChanged.connect(self.__on_itemChanged)
self.__model.rowsInserted.connect(self.__on_rowsInserted)
self.__model.rowsRemoved.connect(self.__on_rowsRemoved)
self.__initFromModel(model)
def __initFromModel(self, model):
"""Initialize the toolbar from the model.
"""
root = model.invisibleRootItem()
for item in iter_item(root):
action = self.createActionForItem(item)
self.addAction(action)
def createActionForItem(self, item):
"""Create the QAction instance for item.
"""
action = QAction(item.icon(), item.text(), self, toolTip=item.toolTip())
action.setData(item)
return action
def createButtonForAction(self, action):
"""Create a button for the action.
"""
button = ToolGrid.createButtonForAction(self, action)
item = action.data()
if item.data(Qt.BackgroundRole) is not None:
brush = item.background()
elif item.data(QtWidgetRegistry.BACKGROUND_ROLE) is not None:
brush = item.data(QtWidgetRegistry.BACKGROUND_ROLE)
else:
brush = self.palette().brush(QPalette.Button)
palette = button.palette()
palette.setColor(QPalette.Button, brush.color())
palette.setColor(QPalette.Window, brush.color())
button.setPalette(palette)
button.setProperty("quick-category-toolbutton", True)
style_sheet = (
"QToolButton {\n"
" background: %s;\n"
" border: none;\n"
" border-bottom: 1px solid palette(mid);\n"
"}"
)
button.setStyleSheet(style_sheet % create_css_gradient(brush.color()))
return button
def __on_itemChanged(self, item):
root = self.__model.invisibleRootItem()
if item.parentItem() == root:
row = item.row()
action = self._gridSlots[row].action
action.setText(item.text())
action.setIcon(item.icon())
action.setToolTip(item.toolTip())
def __on_rowsInserted(self, parent, start, end):
root = self.__model.invisibleRootItem()
if root == parent:
for index in range(start, end + 1):
item = parent.child(index)
self.addAction(self.createActionForItem(item))
def __on_rowsRemoved(self, parent, start, end):
root = self.__model.invisibleRootItem()
if root == parent:
for index in range(end, start - 1, -1):
action = self._gridSlots[index].action
self.removeAction(action)
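# A minimal, hedged usage sketch of QuickCategoryToolbar fed from a plain
# QStandardItemModel (the category names are made up; in the application the
# model normally comes from QtWidgetRegistry).  Guarded so it never runs on import.
if __name__ == "__main__":
    from AnyQt.QtGui import QStandardItem, QStandardItemModel
    app = QApplication(sys.argv)
    model = QStandardItemModel()
    for name in ("Data", "Visualize", "Model"):
        model.appendRow(QStandardItem(name))
    toolbar = QuickCategoryToolbar()
    toolbar.setModel(model)
    toolbar.show()
    sys.exit(app.exec_())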
class CategoryPopupMenu(FramelessWindow):
triggered = Signal(QAction)
hovered = Signal(QAction)
def __init__(self, parent=None, **kwargs):
FramelessWindow.__init__(self, parent, **kwargs)
self.setWindowFlags(self.windowFlags() | Qt.Popup)
layout = QVBoxLayout()
layout.setContentsMargins(6, 6, 6, 6)
self.__menu = MenuPage()
self.__menu.setActionRole(QtWidgetRegistry.WIDGET_ACTION_ROLE)
if sys.platform == "darwin":
self.__menu.view().setAttribute(Qt.WA_MacShowFocusRect, False)
self.__menu.triggered.connect(self.__onTriggered)
self.__menu.hovered.connect(self.hovered)
self.__dragListener = ItemViewDragStartEventListener(self)
self.__dragListener.dragStarted.connect(self.__onDragStarted)
self.__menu.view().viewport().installEventFilter(self.__dragListener)
layout.addWidget(self.__menu)
self.setLayout(layout)
self.__action = None
self.__loop = None
self.__item = None
def setCategoryItem(self, item):
"""
Set the category root item (:class:`QStandardItem`).
"""
self.__item = item
model = item.model()
self.__menu.setModel(model)
self.__menu.setRootIndex(item.index())
def popup(self, pos=None):
if pos is None:
pos = self.pos()
self.adjustSize()
geom = widget_popup_geometry(pos, self)
self.setGeometry(geom)
self.show()
def exec_(self, pos=None):
self.popup(pos)
self.__loop = QEventLoop()
self.__action = None
self.__loop.exec_()
self.__loop = None
if self.__action is not None:
action = self.__action
else:
action = None
return action
def hideEvent(self, event):
if self.__loop is not None:
self.__loop.exit(0)
return FramelessWindow.hideEvent(self, event)
def __onTriggered(self, action):
self.__action = action
self.triggered.emit(action)
self.hide()
if self.__loop:
self.__loop.exit(0)
def __onDragStarted(self, index):
desc = index.data(QtWidgetRegistry.WIDGET_DESC_ROLE)
icon = index.data(Qt.DecorationRole)
drag_data = QMimeData()
drag_data.setData(
"application/vnv.orange-canvas.registry.qualified-name",
desc.qualified_name.encode("utf-8"),
)
drag = QDrag(self)
drag.setPixmap(icon.pixmap(38))
drag.setMimeData(drag_data)
# TODO: Should animate (accept) hide.
self.hide()
# When a drag is started and the menu hidden the item's tool tip
# can still show for a short time UNDER the cursor preventing a
# drop.
viewport = self.__menu.view().viewport()
filter = ToolTipEventFilter()
viewport.installEventFilter(filter)
drag.exec_(Qt.CopyAction)
viewport.removeEventFilter(filter)
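# A minimal, hedged usage sketch of CategoryPopupMenu (illustration only; the
# item names are made up and a running QApplication is assumed):
#
#     from AnyQt.QtGui import QCursor, QStandardItem, QStandardItemModel
#     model = QStandardItemModel()
#     category = QStandardItem("Data")
#     category.appendRow(QStandardItem("File"))
#     model.appendRow(category)
#     menu = CategoryPopupMenu()
#     menu.setCategoryItem(category)
#     action = menu.exec_(QCursor.pos())   # returns the triggered QAction, or None if dismissed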
class ItemViewDragStartEventListener(QObject):
dragStarted = Signal(QModelIndex)
def __init__(self, parent=None):
QObject.__init__(self, parent)
self._pos = None
self._index = None
def eventFilter(self, viewport, event):
view = viewport.parent()
if event.type() == QEvent.MouseButtonPress and event.button() == Qt.LeftButton:
index = view.indexAt(event.pos())
if index is not None:
self._pos = event.pos()
self._index = QPersistentModelIndex(index)
elif (
event.type() == QEvent.MouseMove
and self._pos is not None
and (
(self._pos - event.pos()).manhattanLength()
>= QApplication.startDragDistance()
)
):
if self._index.isValid():
# Map to a QModelIndex in the model.
index = self._index
index = index.model().index(index.row(), index.column(), index.parent())
self._pos = None
self._index = None
self.dragStarted.emit(index)
return QObject.eventFilter(self, view, event)
class ToolTipEventFilter(QObject):
def eventFilter(self, receiver, event):
if event.type() == QEvent.ToolTip:
return True
return QObject.eventFilter(self, receiver, event)
def widget_popup_geometry(pos, widget):
widget.ensurePolished()
if widget.testAttribute(Qt.WA_Resized):
size = widget.size()
else:
size = widget.sizeHint()
desktop = QApplication.desktop()
screen_geom = desktop.availableGeometry(pos)
# Adjust the size to fit inside the screen.
if size.height() > screen_geom.height():
size.setHeight(screen_geom.height())
if size.width() | |
<filename>test/python/WMCore_t/WMSpec_t/Steps_t/Executors_t/LogArch_t.py
"""
Created on Jun 18, 2009
@author: meloam
"""
from __future__ import print_function
try:
# https://pylint.pycqa.org/en/latest/technical_reference/features.html
# W1626: the `reload` built-in function is missing in python3
# we can use imp.reload (deprecated) or importlib.reload
from importlib import reload
except:
pass
import copy
import logging
import os
import os.path
import shutil
import sys
import threading
import time
import unittest
from nose.plugins.attrib import attr
import WMCore.Storage.StageOutError as StageOutError
import WMCore.WMSpec.Steps.Builders.CMSSW as CMSSWBuilder
import WMCore.WMSpec.Steps.Builders.LogArchive as LogArchiveBuilder
import WMCore.WMSpec.Steps.Executors.LogArchive as LogArchiveExecutor
import WMCore.WMSpec.Steps.StepFactory as StepFactory
import WMCore.WMSpec.Steps.Templates.LogArchive as LogArchiveTemplate
import WMCore_t.WMSpec_t.samples.BasicProductionWorkload as testWorkloads
from WMCore.DataStructs.Job import Job
from WMCore.FwkJobReport.Report import Report
from WMCore.WMSpec.Makers.TaskMaker import TaskMaker
from WMCore.WMSpec.WMWorkload import newWorkload
from WMQuality.TestInit import TestInit
class LogArchiveTest(unittest.TestCase):
def setUp(self):
self.testInit = TestInit(__file__)
self.testDir = self.testInit.generateWorkDir()
# shut up SiteLocalConfig
os.environ['CMS_PATH'] = os.getcwd()
workload = copy.deepcopy(testWorkloads.workload)
task = workload.getTask("Production")
step = task.getStep("stageOut1")
# want to get the cmsstep so I can make the Report
cmsstep = task.getStep('cmsRun1')
self.cmsstepdir = os.path.join(self.testDir, 'cmsRun1')
os.mkdir(self.cmsstepdir)
open(os.path.join(self.cmsstepdir, '__init__.py'), 'w').close()
open(os.path.join(self.cmsstepdir, 'Report.pkl'), 'w').close()
cmsbuilder = CMSSWBuilder.CMSSW()
cmsbuilder(cmsstep.data, 'Production', self.cmsstepdir)
realstep = LogArchiveTemplate.LogArchiveStepHelper(step.data)
realstep.disableRetries()
self.realstep = realstep
self.stepDir = os.path.join(self.testDir, 'stepdir')
os.mkdir(self.stepDir)
builder = LogArchiveBuilder.LogArchive()
builder(step.data, 'Production', self.stepDir)
# stolen from CMSSWExecutor_t. thanks, dave
# first, delete all the sandboxen and taskspaces
# because of caching, this leaks from other tests in other files
# this sucks because the other tests are using sandboxen that
# are deleted after the test is over, which causes these tests
# to break
modsToDelete = []
# not sure what happens if you delete from
# an array you're iterating over. doing it in
# two steps
for modname in sys.modules:
# need to blow away things in sys.modules, otherwise
# they are cached and we look at old taskspaces
if modname.startswith('WMTaskSpace'):
modsToDelete.append(modname)
if modname.startswith('WMSandbox'):
modsToDelete.append(modname)
for modname in modsToDelete:
try:
reload(sys.modules[modname])
except Exception:
pass
del sys.modules[modname]
self.oldpath = sys.path[:]
self.testInit = TestInit(__file__)
self.testDir = self.testInit.generateWorkDir()
self.job = Job(name="/UnitTests/DeleterTask/DeleteTest-test-job")
shutil.copyfile('/etc/hosts', os.path.join(self.testDir, 'testfile'))
self.workload = newWorkload("UnitTests")
self.task = self.workload.newTask("DeleterTask")
cmsswHelper = self.task.makeStep("cmsRun1")
cmsswHelper.setStepType('CMSSW')
stepHelper = cmsswHelper.addStep("DeleteTest")
stepHelper.setStepType('LogArchive')
self.cmsswstep = cmsswHelper.data
self.cmsswHelper = cmsswHelper
self.stepdata = stepHelper.data
self.stephelp = LogArchiveTemplate.LogArchiveStepHelper(stepHelper.data)
self.task.applyTemplates()
self.executor = StepFactory.getStepExecutor(self.stephelp.stepType())
taskMaker = TaskMaker(self.workload, os.path.join(self.testDir))
taskMaker.skipSubscription = True
taskMaker.processWorkload()
self.task.build(os.path.join(self.testDir, 'UnitTests'))
sys.path.insert(0, self.testDir)
sys.path.insert(0, os.path.join(self.testDir, 'UnitTests'))
# binDir = inspect.getsourcefile(ModuleLocator)
# binDir = binDir.replace("__init__.py", "bin")
#
# if not binDir in os.environ['PATH']:
# os.environ['PATH'] = "%s:%s" % (os.environ['PATH'], binDir)
open(os.path.join(self.testDir, 'UnitTests', '__init__.py'), 'w').close()
shutil.copyfile(os.path.join(os.path.dirname(__file__), 'MergeSuccess.pkl'),
os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
def tearDown(self):
sys.path = self.oldpath[:]
self.testInit.delWorkDir()
# making double sure WMTaskSpace and WMSandbox are gone
modsToDelete = []
# not sure what happens if you delete from
# an array you're iterating over. doing it in
# two steps
for modname in sys.modules:
# need to blow away things in sys.modules, otherwise
# they are cached and we look at old taskspaces
if modname.startswith('WMTaskSpace'):
modsToDelete.append(modname)
if modname.startswith('WMSandbox'):
modsToDelete.append(modname)
for modname in modsToDelete:
try:
reload(sys.modules[modname])
except Exception:
pass
del sys.modules[modname]
myThread = threading.currentThread()
if hasattr(myThread, "factory"):
myThread.factory = {}
def makeReport(self, fileName):
myReport = Report('oneitem')
myReport.addStep('stageOut1')
myReport.addOutputModule('module1')
myReport.addOutputModule('module2')
myReport.addOutputFile('module1', {'lfn': 'FILE1', 'size': 1, 'events': 1})
myReport.addOutputFile('module2', {'lfn': 'FILE2', 'size': 1, 'events': 1})
myReport.addOutputFile('module2', {'lfn': 'FILE3', 'size': 1, 'events': 1})
myReport.persist(fileName)
def testExecutorDoesntDetonate(self):
myReport = Report()
myReport.unpersist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
myReport.data.cmsRun1.status = 1
myReport.persist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
executor = LogArchiveExecutor.LogArchive()
executor.initialise(self.stepdata, self.job)
self.setLocalOverride(self.stepdata)
executor.step = self.stepdata
executor.execute()
self.assertFalse(os.path.exists(os.path.join(self.testDir, 'hosts')))
self.assertFalse(os.path.exists(os.path.join(self.testDir, 'test1', 'hosts')))
return
def testUnitTestBackend(self):
myReport = Report()
myReport.unpersist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
myReport.data.cmsRun1.status = 1
myReport.persist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
executor = LogArchiveExecutor.LogArchive()
helper = LogArchiveTemplate.LogArchiveStepHelper(self.stepdata)
helper.addOverride(override='command', overrideValue='test-win')
helper.addOverride(override='option', overrideValue='')
helper.addOverride(override='phedex-node', overrideValue='charlie.sheen.biz')
helper.addOverride(override='lfn-prefix', overrideValue='test-win')
executor.initialise(self.stepdata, self.job)
self.setLocalOverride(self.stepdata)
executor.step = self.stepdata
executor.execute()
self.assertFalse(os.path.exists(os.path.join(self.testDir, 'hosts')))
self.assertFalse(os.path.exists(os.path.join(self.testDir, 'test1', 'hosts')))
def testUnitTestBackendNew(self):
myReport = Report()
myReport.unpersist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
myReport.data.cmsRun1.status = 1
myReport.persist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
executor = LogArchiveExecutor.LogArchive()
helper = LogArchiveTemplate.LogArchiveStepHelper(self.stepdata)
helper.addOverride(override='command', overrideValue='test-win')
helper.addOverride(override='option', overrideValue='')
helper.addOverride(override='phedex-node', overrideValue='charlie.sheen.biz')
helper.addOverride(override='lfn-prefix', overrideValue='test-win')
helper.setNewStageoutOverride(True)
executor.initialise(self.stepdata, self.job)
self.setLocalOverride(self.stepdata)
executor.step = self.stepdata
executor.execute()
self.assertFalse(os.path.exists(os.path.join(self.testDir, 'hosts')))
self.assertFalse(os.path.exists(os.path.join(self.testDir, 'test1', 'hosts')))
def setLocalOverride(self, step):
step.section_('override')
step.override.command = 'cp'
step.override.option = ''
step.override.__setattr__('lfn-prefix', self.testDir + "/")
step.override.__setattr__('phedex-node', 'DUMMYPNN')
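# A minimal, hedged sketch of running just the test case above with plain
# unittest; the tests still assume a working WMCore test environment (TestInit
# work directories, the MergeSuccess.pkl fixture next to this file, etc.).
if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(LogArchiveTest)
    unittest.TextTestRunner(verbosity=2).run(suite)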
class otherLogArchiveTexst(unittest.TestCase):
def setUp(self):
# stolen from CMSSWExecutor_t. thanks, dave
# first, delete all the sandboxen and taskspaces
# because of caching, this leaks from other tests in other files
# this sucks because the other tests are using sandboxen that
# are deleted after the test is over, which causes these tests
# to break
modsToDelete = []
# not sure what happens if you delete from
# an array you're iterating over. doing it in
# two steps
for modname in sys.modules:
# need to blow away things in sys.modules, otherwise
# they are cached and we look at old taskspaces
if modname.startswith('WMTaskSpace'):
modsToDelete.append(modname)
if modname.startswith('WMSandbox'):
modsToDelete.append(modname)
for modname in modsToDelete:
try:
reload(sys.modules[modname])
except Exception:
pass
del sys.modules[modname]
self.oldpath = sys.path[:]
self.testInit = TestInit(__file__)
self.testDir = self.testInit.generateWorkDir()
self.job = Job(name="/UnitTests/DeleterTask/DeleteTest-test-job")
shutil.copyfile('/etc/hosts', os.path.join(self.testDir, 'testfile'))
self.workload = newWorkload("UnitTests")
self.task = self.workload.newTask("DeleterTask")
cmsswHelper = self.task.makeStep("cmsRun1")
cmsswHelper.setStepType('CMSSW')
stepHelper = cmsswHelper.addStep("DeleteTest")
stepHelper.setStepType('LogArchive')
self.cmsswstep = cmsswHelper.data
self.cmsswHelper = cmsswHelper
self.stepdata = stepHelper.data
self.stephelp = LogArchiveTemplate.LogArchiveStepHelper(stepHelper.data)
self.task.applyTemplates()
self.executor = StepFactory.getStepExecutor(self.stephelp.stepType())
taskMaker = TaskMaker(self.workload, os.path.join(self.testDir))
taskMaker.skipSubscription = True
taskMaker.processWorkload()
self.task.build(os.path.join(self.testDir, 'UnitTests'))
sys.path.insert(0, self.testDir)
sys.path.insert(0, os.path.join(self.testDir, 'UnitTests'))
# binDir = inspect.getsourcefile(ModuleLocator)
# binDir = binDir.replace("__init__.py", "bin")
#
# if not binDir in os.environ['PATH']:
# os.environ['PATH'] = "%s:%s" % (os.environ['PATH'], binDir)
open(os.path.join(self.testDir, 'UnitTests', '__init__.py'), 'w').close()
shutil.copyfile(os.path.join(os.path.dirname(__file__), 'MergeSuccess.pkl'),
os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
def tearDown(self):
sys.path = self.oldpath[:]
self.testInit.delWorkDir()
# making double sure WMTaskSpace and WMSandbox are gone
modsToDelete = []
# not sure what happens if you delete from
# an array you're iterating over. doing it in
# two steps
for modname in sys.modules:
# need to blow away things in sys.modules, otherwise
# they are cached and we look at old taskspaces
if modname.startswith('WMTaskSpace'):
modsToDelete.append(modname)
if modname.startswith('WMSandbox'):
modsToDelete.append(modname)
for modname in modsToDelete:
try:
reload(sys.modules[modname])
except Exception:
pass
del sys.modules[modname]
myThread = threading.currentThread()
if hasattr(myThread, "factory"):
myThread.factory = {}
@attr('integration')
def testCPBackendLogArchiveAgainstReportNew(self):
myReport = Report()
myReport.unpersist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
myReport.data.cmsRun1.status = 0
myReport.persist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
executor = LogArchiveExecutor.LogArchive()
executor.initialise(self.stepdata, self.job)
self.setLocalOverride(self.stepdata)
self.stepdata.override.newLogArchive = True
executor.step = self.stepdata
executor.execute()
self.assertTrue(os.path.exists(os.path.join(self.testDir, 'hosts')))
self.assertTrue(os.path.exists(os.path.join(self.testDir, 'test1', 'hosts')))
@attr('integration')
def testCPBackendLogArchiveAgainstReportFailedStepNew(self):
myReport = Report()
myReport.unpersist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
myReport.data.cmsRun1.status = 1
myReport.persist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
executor = LogArchiveExecutor.LogArchive()
executor.initialise(self.stepdata, self.job)
self.setLocalOverride(self.stepdata)
self.stepdata.override.newLogArchive = True
executor.step = self.stepdata
executor.execute()
self.assertFalse(os.path.exists(os.path.join(self.testDir, 'hosts')))
self.assertFalse(os.path.exists(os.path.join(self.testDir, 'test1', 'hosts')))
return
@attr('integration')
def testCPBackendLogArchiveAgainstReportOld(self):
myReport = Report()
myReport.unpersist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
myReport.data.cmsRun1.status = 0
myReport.persist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
executor = LogArchiveExecutor.LogArchive()
executor.initialise(self.stepdata, self.job)
self.setLocalOverride(self.stepdata)
executor.step = self.stepdata
executor.execute()
self.assertTrue(os.path.exists(os.path.join(self.testDir, 'hosts')))
self.assertTrue(os.path.exists(os.path.join(self.testDir, 'test1', 'hosts')))
return
@attr('integration')
def testCPBackendLogArchiveAgainstReportFailedStepOld(self):
myReport = Report()
myReport.unpersist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
myReport.data.cmsRun1.status = 1
myReport.persist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
executor = LogArchiveExecutor.LogArchive()
executor.initialise(self.stepdata, self.job)
self.setLocalOverride(self.stepdata)
executor.step = self.stepdata
executor.execute()
self.assertFalse(os.path.exists(os.path.join(self.testDir, 'hosts')))
self.assertFalse(os.path.exists(os.path.join(self.testDir, 'test1', 'hosts')))
return
@attr('workerNodeTest')
def testOnWorkerNodes(self):
raise RuntimeError
# Stage a file out, stage it back in, check it, delete it
myReport = Report()
myReport.unpersist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
myReport.data.cmsRun1.status = 1
del myReport.data.cmsRun1.output
myReport.data.cmsRun1.section_('output')
myReport.data.cmsRun1.output.section_('stagingTestOutput')
myReport.data.cmsRun1.output.stagingTestOutput.section_('files')
myReport.data.cmsRun1.output.stagingTestOutput.fileCount = 0
targetFiles = ['/store/temp/WMAgent/storetest-%s' % time.time(),
'/store/unmerged/WMAgent/storetest-%s' % time.time()]
for file in targetFiles:
print("Adding file for LogArchive %s" % file)
self.addLogArchiveFile(myReport, file)
myReport.persist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
executor = LogArchiveExecutor.LogArchive()
executor.initialise(self.stepdata, self.job)
executor.step = self.stepdata
print("beginning stageout")
executor.execute()
print("stageout done")
# pull in the report with the stage out info
myReport = Report()
myReport.unpersist(os.path.join(self.testDir, 'UnitTests', 'WMTaskSpace', 'cmsRun1', 'Report.pkl'))
print("Got the stage out data back")
print(myReport.data)
# now, transfer them back
# TODO make a stagein step in the task - Melo
import WMCore.Storage.FileManager as FileManagerModule
fileManager = FileManagerModule.FileManager(numberOfRetries=10, retryPauseTime=1)
for file in targetFiles:
print("Staging in %s" % file)
fileManager.stageOut(fileToStage={'LFN': file,
'PFN': '%s/%s' % (self.testDir, file)})
self.assertTrue(os.path.exists('%s/%s' % (self.testDir, file)))
# self.assertEqual(os.path.getsize('/etc/hosts', '%s/%s' % (self.testDir, file)))
# now, should delete the files we made
for file in targetFiles:
print("deleting %s" % file)
fileManager.deleteLFN(file)
# try staging in again to make sure the files are gone
for file in targetFiles:
print("Staging in (should fail) %s" % file)
self.assertRaises(StageOutError,
FileManagerModule.FileManager.stageOut,
fileManager, fileToStage={'LFN': file,
'PFN': '%s/%s' % (self.testDir, file)})
# need to make sure files didn't show | |
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dns_mode": {
"type": "string",
"options": [
{
"value": "manual",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "auto",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"forticlient_enforcement": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"idle_timeoutinterval": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"domain": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ipv6_split_include": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ipv6_dns_server1": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ipv6_dns_server3": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ipv4_split_include": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"group_authentication_secret": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dhcp6_ra_linkaddr": {
"type": "string",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": False,
"v6.2.5": True,
"v6.2.7": True
}
},
"eap_exclude_peergrp": {
"type": "string",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"mesh_selector_type": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "subnet",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "host",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"auto_negotiate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"split_include_service": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"esn": {
"type": "string",
"options": [
{
"value": "require",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "allow",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": False,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"keepalive": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"localid_type": {
"type": "string",
"options": [
{
"value": "auto",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "fqdn",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "user-fqdn",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "keyid",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "address",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "asn1dn",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"cert_id_validation": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"authmethod_remote": {
"type": "string",
"options": [
{
"value": "psk",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "signature",
"revisions": {
"v6.0.0": True,
"v7.0.0": | |
<filename>cqlengine/tests/columns/test_container_columns.py
from datetime import datetime, timedelta
import json
from uuid import uuid4
from cqlengine import Model, ValidationError
from cqlengine import columns
from cqlengine.management import create_table, delete_table
from cqlengine.tests.base import BaseCassEngTestCase
class TestSetModel(Model):
partition = columns.UUID(primary_key=True, default=uuid4)
int_set = columns.Set(columns.Integer, required=False)
text_set = columns.Set(columns.Text, required=False)
class JsonTestColumn(columns.Column):
db_type = 'text'
def to_python(self, value):
if value is None: return
if isinstance(value, basestring):
return json.loads(value)
else:
return value
def to_database(self, value):
if value is None: return
return json.dumps(value)
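# A minimal, hedged sketch (illustration only): JsonTestColumn simply round-trips
# values through json.dumps/json.loads, which is what the to_python tests below
# rely on.  Guarded so it never runs on import.
if __name__ == "__main__":
    _col = JsonTestColumn()
    assert _col.to_database([1, 2, 3]) == json.dumps([1, 2, 3])
    assert _col.to_python(_col.to_database([1, 2, 3])) == [1, 2, 3]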
class TestSetColumn(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestSetColumn, cls).setUpClass()
delete_table(TestSetModel)
create_table(TestSetModel)
@classmethod
def tearDownClass(cls):
super(TestSetColumn, cls).tearDownClass()
delete_table(TestSetModel)
def test_empty_set_initial(self):
"""
tests that sets are set() by default, should never be none
:return:
"""
m = TestSetModel.create()
m.int_set.add(5)
m.save()
def test_deleting_last_item_should_succeed(self):
m = TestSetModel.create()
m.int_set.add(5)
m.save()
m.int_set.remove(5)
m.save()
m = TestSetModel.get(partition=m.partition)
self.assertNotIn(5, m.int_set)
def test_empty_set_retrieval(self):
m = TestSetModel.create()
m2 = TestSetModel.get(partition=m.partition)
m2.int_set.add(3)
def test_io_success(self):
""" Tests that a basic usage works as expected """
m1 = TestSetModel.create(int_set={1, 2}, text_set={'kai', 'andreas'})
m2 = TestSetModel.get(partition=m1.partition)
assert isinstance(m2.int_set, set)
assert isinstance(m2.text_set, set)
assert 1 in m2.int_set
assert 2 in m2.int_set
assert 'kai' in m2.text_set
assert 'andreas' in m2.text_set
def test_type_validation(self):
"""
Tests that attempting to use the wrong types will raise an exception
"""
with self.assertRaises(ValidationError):
TestSetModel.create(int_set={'string', True}, text_set={1, 3.0})
def test_partial_updates(self):
""" Tests that partial updates work as expected """
m1 = TestSetModel.create(int_set={1, 2, 3, 4})
m1.int_set.add(5)
m1.int_set.remove(1)
assert m1.int_set == {2, 3, 4, 5}
m1.save()
m2 = TestSetModel.get(partition=m1.partition)
assert m2.int_set == {2, 3, 4, 5}
def test_partial_update_creation(self):
"""
Tests that proper update statements are created for a partial set update
:return:
"""
ctx = {}
col = columns.Set(columns.Integer, db_field="TEST")
statements = col.get_update_statement({1, 2, 3, 4}, {2, 3, 4, 5}, ctx)
assert len([v for v in ctx.values() if {1} == v.value]) == 1
assert len([v for v in ctx.values() if {5} == v.value]) == 1
assert len([s for s in statements if '"TEST" = "TEST" -' in s]) == 1
assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1
def test_update_from_none(self):
""" Tests that updating a 'None' list creates a straight insert statement """
ctx = {}
col = columns.Set(columns.Integer, db_field="TEST")
statements = col.get_update_statement({1, 2, 3, 4}, None, ctx)
#only one variable /statement should be generated
assert len(ctx) == 1
assert len(statements) == 1
assert ctx.values()[0].value == {1, 2, 3, 4}
assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0])
def test_update_from_empty(self):
""" Tests that updating an empty list creates a straight insert statement """
ctx = {}
col = columns.Set(columns.Integer, db_field="TEST")
statements = col.get_update_statement({1, 2, 3, 4}, set(), ctx)
#only one variable /statement should be generated
assert len(ctx) == 1
assert len(statements) == 1
assert ctx.values()[0].value == {1, 2, 3, 4}
assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0])
def test_instantiation_with_column_class(self):
"""
Tests that columns instantiated with a column class work properly
and that the class is instantiated in the constructor
"""
column = columns.Set(columns.Text)
assert isinstance(column.value_col, columns.Text)
def test_instantiation_with_column_instance(self):
"""
Tests that columns instantiated with a column instance work properly
"""
column = columns.Set(columns.Text(min_length=100))
assert isinstance(column.value_col, columns.Text)
def test_to_python(self):
""" Tests that to_python of value column is called """
column = columns.Set(JsonTestColumn)
val = {1, 2, 3}
db_val = column.to_database(val)
assert db_val.value == {json.dumps(v) for v in val}
py_val = column.to_python(db_val.value)
assert py_val == val
def test_default_empty_container_saving(self):
""" tests that the default empty container is not saved if it hasn't been updated """
pkey = uuid4()
# create a row with set data
TestSetModel.create(partition=pkey, int_set={3, 4})
# create another with no set data
TestSetModel.create(partition=pkey)
m = TestSetModel.get(partition=pkey)
self.assertEqual(m.int_set, {3, 4})
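# A minimal, hedged sketch of the Set-column workflow exercised above; it assumes
# a reachable Cassandra cluster with the cqlengine connection, keyspace and the
# TestSetModel table already set up, so it is guarded behind __main__.
if __name__ == "__main__":
    row = TestSetModel.create(int_set={1, 2})
    row.int_set.add(3)     # only the {3} delta is added on save()
    row.int_set.remove(1)  # and the {1} delta is removed
    row.save()
    assert TestSetModel.get(partition=row.partition).int_set == {2, 3}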
class TestListModel(Model):
partition = columns.UUID(primary_key=True, default=uuid4)
int_list = columns.List(columns.Integer, required=False)
text_list = columns.List(columns.Text, required=False)
class TestListColumn(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestListColumn, cls).setUpClass()
delete_table(TestListModel)
create_table(TestListModel)
@classmethod
def tearDownClass(cls):
super(TestListColumn, cls).tearDownClass()
delete_table(TestListModel)
def test_initial(self):
tmp = TestListModel.create()
tmp.int_list.append(1)
def test_initial_retrieve(self):
tmp = TestListModel.create()
tmp2 = TestListModel.get(partition=tmp.partition)
tmp2.int_list.append(1)
def test_io_success(self):
""" Tests that a basic usage works as expected """
m1 = TestListModel.create(int_list=[1, 2], text_list=['kai', 'andreas'])
m2 = TestListModel.get(partition=m1.partition)
assert isinstance(m2.int_list, list)
assert isinstance(m2.text_list, list)
assert len(m2.int_list) == 2
assert len(m2.text_list) == 2
assert m2.int_list[0] == 1
assert m2.int_list[1] == 2
assert m2.text_list[0] == 'kai'
assert m2.text_list[1] == 'andreas'
def test_type_validation(self):
"""
Tests that attempting to use the wrong types will raise an exception
"""
with self.assertRaises(ValidationError):
TestListModel.create(int_list=['string', True], text_list=[1, 3.0])
def test_partial_updates(self):
""" Tests that partial updates work as expected """
final = range(10)
initial = final[3:7]
m1 = TestListModel.create(int_list=initial)
m1.int_list = final
m1.save()
m2 = TestListModel.get(partition=m1.partition)
assert list(m2.int_list) == final
def test_partial_update_creation(self):
""" Tests that proper update statements are created for a partial list update """
final = range(10)
initial = final[3:7]
ctx = {}
col = columns.List(columns.Integer, db_field="TEST")
statements = col.get_update_statement(final, initial, ctx)
assert len([v for v in ctx.values() if [2, 1, 0] == v.value]) == 1
assert len([v for v in ctx.values() if [7, 8, 9] == v.value]) == 1
assert len([s for s in statements if '"TEST" = "TEST" +' in s]) == 1
assert len([s for s in statements if '+ "TEST"' in s]) == 1
def test_update_from_none(self):
""" Tests that updating a 'None' list creates a straight insert statement """
ctx = {}
col = columns.List(columns.Integer, db_field="TEST")
statements = col.get_update_statement([1, 2, 3], None, ctx)
#only one variable /statement should be generated
assert len(ctx) == 1
assert len(statements) == 1
assert ctx.values()[0].value == [1, 2, 3]
assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0])
def test_update_from_empty(self):
""" Tests that updating an empty list creates a straight insert statement """
ctx = {}
col = columns.List(columns.Integer, db_field="TEST")
statements = col.get_update_statement([1, 2, 3], [], ctx)
#only one variable /statement should be generated
assert len(ctx) == 1
assert len(statements) == 1
assert ctx.values()[0].value == [1, 2, 3]
assert statements[0] == '"TEST" = :{}'.format(ctx.keys()[0])
def test_instantiation_with_column_class(self):
"""
Tests that columns instantiated with a column class work properly
and that the class is instantiated in the constructor
"""
column = columns.List(columns.Text)
assert isinstance(column.value_col, columns.Text)
def test_instantiation_with_column_instance(self):
"""
Tests that columns instantiated with a column instance work properly
"""
column = columns.List(columns.Text(min_length=100))
assert isinstance(column.value_col, columns.Text)
def test_to_python(self):
""" Tests that to_python of value column is called """
column = columns.List(JsonTestColumn)
val = [1, 2, 3]
db_val = column.to_database(val)
assert db_val.value == [json.dumps(v) for v in val]
py_val = column.to_python(db_val.value)
assert py_val == val
def test_default_empty_container_saving(self):
""" tests that the default empty container is not saved if it hasn't been updated """
pkey = uuid4()
# create a row with list data
TestListModel.create(partition=pkey, int_list=[1,2,3,4])
# create another with no list data
TestListModel.create(partition=pkey)
m = TestListModel.get(partition=pkey)
self.assertEqual(m.int_list, [1,2,3,4])
class TestMapModel(Model):
partition = columns.UUID(primary_key=True, default=uuid4)
int_map = columns.Map(columns.Integer, columns.UUID, required=False)
text_map = columns.Map(columns.Text, columns.DateTime, required=False)
class TestMapColumn(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestMapColumn, cls).setUpClass()
delete_table(TestMapModel)
create_table(TestMapModel)
@classmethod
def tearDownClass(cls):
super(TestMapColumn, cls).tearDownClass()
delete_table(TestMapModel)
def test_empty_default(self):
tmp = TestMapModel.create()
tmp.int_map['blah'] = 1
def test_empty_retrieve(self):
tmp = TestMapModel.create()
tmp2 = TestMapModel.get(partition=tmp.partition)
tmp2.int_map['blah'] = 1
def test_remove_last_entry_works(self):
tmp = TestMapModel.create()
tmp.text_map["blah"] = datetime.now()
tmp.save()
del tmp.text_map["blah"]
tmp.save()
tmp = TestMapModel.get(partition=tmp.partition)
self.assertNotIn("blah", tmp.text_map)
def test_io_success(self):
""" Tests that a basic usage works as expected """
k1 = uuid4()
k2 = uuid4()
now = datetime.now()
then = now + timedelta(days=1)
m1 = TestMapModel.create(int_map={1: k1, 2: k2}, text_map={'now': now, 'then': then})
m2 = TestMapModel.get(partition=m1.partition)
assert isinstance(m2.int_map, dict)
assert isinstance(m2.text_map, dict)
assert 1 in m2.int_map
assert 2 in m2.int_map
assert m2.int_map[1] == k1
assert m2.int_map[2] == k2
assert 'now' in m2.text_map
assert 'then' in m2.text_map
assert (now - m2.text_map['now']).total_seconds() < 0.001
assert (then - m2.text_map['then']).total_seconds() < 0.001
def test_type_validation(self):
"""
Tests that attempting to use the wrong types will raise an exception
"""
with self.assertRaises(ValidationError):
TestMapModel.create(int_map={'key': 2, uuid4(): 'val'}, text_map={2: 5})
def test_partial_updates(self):
""" Tests that partial udpates work as expected """
now = datetime.now()
# drop sub-second precision so the saved and reloaded values compare equal
now = datetime(*now.timetuple()[:-3])
early = now - timedelta(minutes=30)
earlier = early - timedelta(minutes=30)
later = now + timedelta(minutes=30)
initial = {'now': now, 'early': earlier}
final = {'later': later, 'early': early}
m1 = TestMapModel.create(text_map=initial)
m1.text_map = final
m1.save()
m2 = TestMapModel.get(partition=m1.partition)
assert m2.text_map == final
def test_updates_from_none(self):
""" Tests that updates from | |
# Repository: wasimaftab/kindred
import sys
import itertools
import kindred
import pickle
import argparse
import codecs
import time
import re
import string
from collections import defaultdict,Counter
import json
import six
import os
def acronymMatch(words,pos,currentAcronym,atStart,subpos=None):
if len(currentAcronym) == 0:
if not (subpos is None): # Can't finish acronym mid-word
return []
else:
return [pos+1]
curWord = words[pos].lower()
wordSplit = curWord.split('-')
curLetter = currentAcronym[-1]
moves = []
if subpos is None:
if atStart and curLetter == 's' and curWord[-1] == 's':
# Possible plural
moves.append( (words,pos,currentAcronym[:-1],False) )
if curLetter == curWord[0]:
moves.append( (words,pos-1,currentAcronym[:-1],False) )
if curWord == '-':
moves.append( (words,pos-1,currentAcronym,False) )
if len(wordSplit) > 1:
if subpos is None:
subpos = len(wordSplit)-1
if len(wordSplit[subpos]) > 0 and curLetter == wordSplit[subpos][0]:
if subpos == 0:
moves.append( (words,pos-1,currentAcronym[:-1],False) )
else:
moves.append( (words,pos,currentAcronym[:-1],False,subpos-1) )
possibleStarts = []
for move in moves:
possibleStarts += acronymMatch(*move)
return possibleStarts
def acronymDetection(words):
LRBs = [i for i, x in enumerate(words) if x == u'(']
RRBs = [i for i, x in enumerate(words) if x == u')']
acronyms = []
for i,j in itertools.product(LRBs,RRBs):
if j-i == 2:
acronymLoc = i+1
possibleAcronym = words[acronymLoc]
possibleStarts = acronymMatch(words,i-1,possibleAcronym.lower(),True)
if len(possibleStarts) > 0:
start = min(possibleStarts)
end = i
acronyms.append((start,end,acronymLoc))
return acronyms
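# Illustrative sketch, not part of the original kindred module: a hypothetical helper
# showing how acronymDetection pairs a bracketed acronym with its long form. The token
# list below is an assumed example input.
def _example_acronym_detection():
    words = ['epidermal', 'growth', 'factor', 'receptor', '(', 'EGFR', ')']
    # Expected result: [(0, 4, 5)] -> the long form spans tokens 0..3 (the end index 4 is
    # the position of '(') and the acronym itself sits at token index 5.
    return acronymDetection(words)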
def mergeWordsForFusionDetection(words):
prevWord = ""
mergedWords = []
start = 0
mergeChars = ['-','/',':']
for i,w in enumerate(words):
if w in mergeChars:
prevWord += w
elif len(prevWord) > 0 and prevWord[-1] in mergeChars:
prevWord += w
else:
if prevWord:
mergedWords.append((start,i-1,prevWord))
prevWord = w
start = i
if prevWord:
mergedWords.append((start,len(words)-1,prevWord))
return mergedWords
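# Illustrative sketch, not part of the original module: tokens joined by '-', '/' or ':'
# are merged back into single candidate strings together with their token span.
def _example_merge_words_for_fusion():
    words = ['the', 'bcr', '-', 'abl1', 'fusion']
    # Expected result: [(0, 0, 'the'), (1, 3, 'bcr-abl1'), (4, 4, 'fusion')]
    return mergeWordsForFusionDetection(words)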
def fusionGeneDetection(words, lookupDict):
termtypesAndids,terms,locs = [],[],[]
origWords = list(words)
words = [ w.lower() for w in words ]
mergedWords = mergeWordsForFusionDetection(words)
for start,end,word in mergedWords:
split = re.split("[-/:]",word)
fusionCount = len(split)
if fusionCount == 1:
continue
allGenes = True
geneIDs = []
lookupIDCounter = Counter()
for s in split:
key = (s,)
if key in lookupDict:
isGene = False
for entityType,entityID in lookupDict[key]:
if entityType == 'gene':
for tmpID in entityID.split(';'):
lookupIDCounter[tmpID] += 1
geneIDs.append(entityID)
isGene = True
break
if not isGene:
allGenes = False
break
else:
allGenes = False
break
# We're going to check if there are any lookup IDs shared among all the "fusion" terms
# Hence this may not actually be a fusion, but just using multiple names of a gene
# e.g. HER2/neu
completeLookupIDs = [ id for id,count in lookupIDCounter.items() if count == fusionCount ]
if len(completeLookupIDs) > 0:
termtypesAndids.append([('gene',';'.join(completeLookupIDs))])
terms.append(tuple(origWords[start:end+1]))
locs.append((start,end+1))
elif allGenes: # All the terms look like genes (and different genes), so we're going to mark this as a fusion (or combo)
#geneTxt = ",".join(map(str,geneIDs))
geneIDs = [ geneID.replace(';','&') for geneID in geneIDs ]
termtypesAndids.append([('gene','combo|' + '|'.join(geneIDs))])
terms.append(tuple(origWords[start:end+1]))
locs.append((start,end+1))
return locs,terms,termtypesAndids
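# Illustrative sketch, not part of the original module: a hypothetical lookup with two
# gene entries. The IDs '613' and '25' are assumed placeholders, not guaranteed values.
def _example_fusion_gene_detection():
    lookup = {('bcr',): {('gene', '613')}, ('abl1',): {('gene', '25')}}
    words = ['BCR', '-', 'ABL1']
    locs, terms, termtypesAndids = fusionGeneDetection(words, lookup)
    # Expected: locs == [(0, 3)], terms == [('BCR', '-', 'ABL1')] and the single
    # termtypesAndids entry flags a combination: [('gene', 'combo|613|25')].
    return locs, terms, termtypesAndids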
def getTermIDsAndLocations(np, lookupDict):
termtypesAndids,terms,locs = [],[],[]
# Lowercase all the tokens
#np = [ unicodeLower(w) for w in np ]
orignp = np
np = [ w.lower() for w in np ]
# The length of each search string will decrease from the full length
# of the text down to 1
for l in reversed(range(1, len(np)+1)):
# We move the search window through the text
for i in range(len(np)-l+1):
# Extract that window of text
s = tuple(np[i:i+l])
# Search for it in the dictionary
if s in lookupDict:
# If found, save the ID(s) in the dictionary
termtypesAndids.append(lookupDict[s])
terms.append(tuple(orignp[i:i+l]))
locs.append((i,i+l))
# And blank it out
np[i:i+l] = [ "" for _ in range(l) ]
# Then return the found term IDs
return locs,terms,termtypesAndids
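# Illustrative sketch, not part of the original module: longest-match dictionary lookup
# over a sliding token window. The lookup content below is an assumed example.
def _example_get_term_ids_and_locations():
    lookup = {('egfr',): {('gene', '1956')}}
    locs, terms, termtypesAndids = getTermIDsAndLocations(['EGFR', 'pathway'], lookup)
    # Expected: locs == [(0, 1)], terms == [('EGFR',)] and the matched entity
    # type/ID pairs come straight from the lookup values.
    return locs, terms, termtypesAndids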
def startsWithButNotAll(s,search):
return s.startswith(search) and len(s) > len(search)
def cleanupVariant(variant):
variant = variant.upper().replace('P.','')
aminoAcidInfo = [('ALA','A'),('ARG','R'),('ASN','N'),('ASP','D'),('CYS','C'),('GLU','E'),('GLN','Q'),('GLY','G'),('HIS','H'),('ILE','I'),('LEU','L'),('LYS','K'),('MET','M'),('PHE','F'),('PRO','P'),('SER','S'),('THR','T'),('TRP','W'),('TYR','Y'),('VAL','V')]
for longA,shortA in aminoAcidInfo:
variant = variant.replace(longA,shortA)
return variant
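# Illustrative sketch, not part of the original module: three-letter amino-acid codes and
# an optional 'p.' prefix are normalised to the one-letter substitution form.
def _example_cleanup_variant():
    # Expected: 'p.Val600Glu' -> 'V600E', and 'V600E' is left unchanged.
    return cleanupVariant('p.Val600Glu'), cleanupVariant('V600E')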
class EntityRecognizer:
"""
Annotates entities in a Corpus using an exact-dictionary matching scheme with additional heuristics. These heuristics include detecting fusion gene mentions and microRNA, identifying acronyms to reduce ambiguity, identifying variants, and more. All the options are parameters for the constructor of this class.
:ivar lookup: Used for the dictionary matching. A dictionary of terms (tuple of parsed words) to a list of (entityType,externalID).
:ivar detectFusionGenes: Whether it will try to identify fusion gene terms (e.g. BCR-ABL1). Lookup must contain terms of type 'gene'
:ivar detectMicroRNA: Whether it will identify microRNA terms (added as 'gene' entities)
:ivar acronymDetectionForAmbiguity: Whether it will try to identify acronyms and use this to deal with ambiguity (by removing incorrect matches to acronyms or the longer terms)
:ivar mergeTerms: Whether it will merge neighbouring terms that refer to the same external entity (e.g. HER2/neu as one term instead of two)
:ivar detectVariants: Whether it will identify a variant (e.g. V600E) and create an entity of type 'variant'
:ivar variantStopwords: Variant terms to be ignored (e.g. S100P) if detectVariants is used
:ivar detectPolymorphisms: Whether it will identify a SNP (using a dbSNP ID) and create an entity of type 'variant'
:ivar removePathways: Whether it will remove genes that are actually naming a signalling pathway (e.g. MTOR pathway)
"""
def __init__(self,lookup,detectFusionGenes=False,detectMicroRNA=False,acronymDetectionForAmbiguity=False,mergeTerms=False,detectVariants=False,variantStopwords=[],detectPolymorphisms=False,removePathways=False):
"""
Create an EntityRecognizer and provide the lookup table for terms and additional flags for what to identify in text
:param lookup: A dictionary of terms (tuple of parsed words) to a list of (entityType,externalID).
:param detectFusionGenes: Whether to try to identify fusion gene terms (e.g. BCR-ABL1). Lookup must contain terms of type 'gene'
:param detectMicroRNA: Whether to identify microRNA terms (added as 'gene' entities)
:param acronymDetectionForAmbiguity: Whether to try to identify acronyms and use this to deal with ambiguity (by removing incorrect matches to acronyms or the longer terms)
:param mergeTerms: Whether to merge neighbouring terms that refer to the same external entity (e.g. HER2/neu as one term instead of two)
:param detectVariants: Whether to identify a variant (e.g. V600E) and create an entity of type 'variant'
:param variantStopwords: Variant terms to be ignored (e.g. S100P) if detectVariants is used
:param detectPolymorphisms: Whether to identify a SNP (using a dbSNP ID) and create an entity of type 'variant'
:param removePathways: Remove genes that are actually naming a signalling pathway (e.g. MTOR pathway)
:type lookup: dict
:type detectFusionGenes: bool
:type detectMicroRNA: bool
:type acronymDetectionForAmbiguity: bool
:type mergeTerms: bool
:type detectVariants: bool
:type variantStopwords: list
:type detectPolymorphisms: bool
:type removePathways: bool
"""
assert isinstance(lookup,dict)
for termsmatch,typeAndIDs in lookup.items():
assert isinstance(termsmatch,tuple), "Lookup key must be a tuple of strings"
assert isinstance(typeAndIDs,set), "Lookup value must be a set of (entityType,externalID) tuples"
assert len(typeAndIDs)>0, "Lookup value must be a non-empty set of (entityType,externalID) tuples"
for typeAndID in typeAndIDs:
assert isinstance(typeAndID,tuple),"Each lookup value element must be an (entityType,externalID) tuple"
assert len(typeAndID)==2, "Each lookup value element must be an (entityType,externalID) tuple"
assert isinstance(detectFusionGenes,bool)
assert isinstance(detectMicroRNA,bool)
assert isinstance(acronymDetectionForAmbiguity,bool)
assert isinstance(mergeTerms,bool)
assert isinstance(detectVariants,bool)
assert isinstance(detectPolymorphisms,bool)
assert isinstance(variantStopwords,list)
for variantStopword in variantStopwords:
assert isinstance(variantStopword,six.string_types), "variantStopwords should be a list of strings"
self.lookup = lookup
self.detectFusionGenes = detectFusionGenes
self.detectMicroRNA = detectMicroRNA
self.acronymDetectionForAmbiguity = acronymDetectionForAmbiguity
self.mergeTerms = mergeTerms
self.detectVariants = detectVariants
self.variantStopwords = set(variantStopwords)
self.detectPolymorphisms = detectPolymorphisms
self.removePathways = removePathways
def _processWords(self, words):
locs,terms,termtypesAndids = getTermIDsAndLocations(words,self.lookup)
if self.detectVariants:
#snvRegex = r'^[A-Z][0-9]+[A-Z]$'
variantRegex1 = r'^[ACDEFGHIKLMNPQRSTVWY][1-9][0-9]*[ACDEFGHIKLMNPQRSTVWY]$'
variantRegex2 = r'^(p\.)?((Ala)|(Arg)|(Asn)|(Asp)|(Cys)|(Glu)|(Gln)|(Gly)|(His)|(Ile)|(Leu)|(Lys)|(Met)|(Phe)|(Pro)|(Ser)|(Thr)|(Trp)|(Tyr)|(Val))[1-9][0-9]*((Ala)|(Arg)|(Asn)|(Asp)|(Cys)|(Glu)|(Gln)|(Gly)|(His)|(Ile)|(Leu)|(Lys)|(Met)|(Phe)|(Pro)|(Ser)|(Thr)|(Trp)|(Tyr)|(Val))$'
filteredWords = [ w for w in words if not w in self.variantStopwords ]
snvMatches1 = [ not (re.match(variantRegex1,w) is None) for w in filteredWords ]
snvMatches2 = [ not (re.match(variantRegex2,w,re.IGNORECASE) is None) for w in filteredWords ]
snvMatches = [ (match1 or match2) for match1,match2 in zip(snvMatches1,snvMatches2) ]
for i,(w,snvMatch) in enumerate(zip(words,snvMatches)):
if snvMatch:
cleaned = cleanupVariant(w)
potentialLocs = (i,i+1)
if not potentialLocs in locs:
termtypesAndids.append([('variant',"substitution|%s"%cleaned)])
terms.append((w,))
locs.append(potentialLocs)
if self.detectPolymorphisms:
polymorphismRegex1 = r'^rs[1-9][0-9]*$'
polyMatches = [ not (re.match(polymorphismRegex1,w) is None) for w in words ]
for i,(w,polyMatch) in enumerate(zip(words,polyMatches)):
if polyMatch:
potentialLocs = (i,i+1)
if not potentialLocs in locs:
termtypesAndids.append([('variant','dbsnp|%s'%w)])
terms.append((w,))
locs.append(potentialLocs)
if self.detectMicroRNA:
for i,w in enumerate(words):
lw = w.lower()
if startsWithButNotAll(lw,"mir-") or startsWithButNotAll(lw,"hsa-mir-") or startsWithButNotAll(lw,"microrna-") or (startsWithButNotAll(lw,"mir") and lw[3] in string.digits):
potentialLocs = (i,i+1)
if not potentialLocs in locs:
termtypesAndids.append([('gene','mirna|'+w)])
terms.append((w,))
locs.append((i,i+1))
toRemove = []
if self.detectFusionGenes:
fusionLocs,fusionTerms,fusionTermtypesAndids = fusionGeneDetection(words,self.lookup)
for floc,fterm,ftermtypesAndid in zip(fusionLocs,fusionTerms,fusionTermtypesAndids):
if not floc in locs:
# Check for which entities to remove that are inside this fusion term
fstart,fend = floc
for tstart,tend in locs:
if fstart <= tstart and tend <= fend:
toRemove.append((tstart,tend))
locs.append(floc)
terms.append(fterm)
termtypesAndids.append(ftermtypesAndid)
filtered = zip(locs,terms,termtypesAndids)
filtered = [ (l,t,ti) for l,t,ti in filtered if not l in toRemove ]
filtered = sorted(filtered)
if self.mergeTerms:
# We'll attempt to merge terms (i.e. if a gene is referred to using two acronyms together)
# Example: Hepatocellular carcinoma (HCC) or HER2/ Neu or INK4B P15
locsToRemove = set()
for i in range(len(filtered)-1):
(startA,endA),termsA,termTypesAndIDsA = filtered[i]
(startB,endB),termsB,termTypesAndIDsB = filtered[i+1]
# Check that the terms are beside each other or separated by a /,- or (
if startB == endA or (startB == (endA+1) and words[endA] in ['/','-','(',')']):
idsA,idsB = set(),set()
for termType, termIDs in termTypesAndIDsA:
for termID in termIDs.split(';'):
idsA.add((termType,termID))
for termType, termIDs in termTypesAndIDsB:
for termID in termIDs.split(';'):
idsB.add((termType,termID))
idsIntersection = idsA.intersection(idsB)
# Detect if the either term is in brackets e.g. HER2 (ERBB2)
firstTermInBrackets,secondTermInBrackets = False,False
if startB == (endA+1) and endB < len(words) and words[endA] == '(' and words[endB] == ')':
secondTermInBrackets = True
if startB == (endA+1) and startA > 0 and words[startA-1] == '(' and words[endA] == ')':
firstTermInBrackets = True
# The two terms share IDs so we're going to merge them
idsShared = (len(idsIntersection) > 0)
if idsShared:
groupedByType = defaultdict(list)
for termType,termID in idsIntersection:
groupedByType[termType].append(termID)
locsToRemove.add((startA,endA))
locsToRemove.add((startB,endB))
if secondTermInBrackets:
thisLocs = (startA,endB+1)
thisTerms = tuple(words[startA:endB+1])
elif firstTermInBrackets:
thisLocs = (startA-1,endB)
thisTerms = tuple(words[startA-1:endB])
else:
thisLocs = (startA,endB)
thisTerms = tuple(words[startA:endB])
thisTermTypesAndIDs = [ (termType,";".join(sorted(termIDs))) for termType,termIDs in groupedByType.items() ]
filtered.append((thisLocs,thisTerms,thisTermTypesAndIDs))
# Now we have to remove the terms marked for deletion in the previous section
filtered = [ (locs,terms,termtypesAndids) for locs,terms,termtypesAndids in filtered if not locs in locsToRemove]
filtered = sorted(filtered)
if self.acronymDetectionForAmbiguity:
# And we'll check to see if there are any | |
"""*****************************************************************************************
MIT License
Copyright (c) 2022 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*****************************************************************************************"""
import Utils
from helper_functions import Fast_Caratheodory
import numpy as np
from scipy.optimize import linprog
from numpy import linalg as la
from scipy.linalg import null_space
from numpy.linalg import matrix_rank
from sklearn.decomposition import TruncatedSVD
import time
######################################################## Caratheodory ##################################################
def computeInitialWeightVector(P, p):
"""
This function given a point, solves the linear program dot(self.P.P^T, x) = p where x \in [0, \infty)^n,
and n denotes the number of rows of self.P.P.
:param p: A numpy array representing a point.
:return: A numpy array of n non-negative weights with respect to each row of self.P.P
"""
N = P.shape[0] # number of rows of P
# # Solve the linear program using scipy
# ts = time.time()
Q = P.T
Q = np.vstack((Q, np.ones((1, N))))
b = np.hstack((p, 1))
res = linprog(np.ones((N,)), A_eq=Q, b_eq=b, options={'maxiter': int(1e7), 'tol': 1e-10})
w = res.x
assert np.linalg.norm(np.dot(P.T, w) - p) <= 1e-9, np.linalg.norm(np.dot(P.T, w) - p)
return w
def attainCaratheodorySet(P, p):
"""
The function at hand returns a set of at most d+1 indices of rows of P where d denotes the dimension of
rows of P. It calls the algorithms implemented by <NAME>, <NAME> and <NAME> at
"Fast and Accurate Least-Mean-Squares Solvers".
:param P: A numpy array whose rows are the data points.
:param p: A numpy array denoting a point.
:return: The indices of points from P which p is a convex combination of.
"""
d = P.shape[1]
u = computeInitialWeightVector(P, p) # compute initial weight vector
# print('Sum of weights {}'.format(np.sum(u)))
if np.count_nonzero(u) > (d + 1): # if the number of positive weights exceeds d+1
u = Fast_Caratheodory(P, u.flatten(), False)
assert np.linalg.norm(p - np.dot(P.T, u)) <= 1e-9, np.linalg.norm(p - np.dot(P.T, u))
return np.where(u != 0)[0]
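# Illustrative usage sketch, not part of the original module: p is built as an explicit
# convex combination of the rows of P, so a Caratheodory subset of at most d+1 rows must
# exist. The exact indices returned depend on the LP solution.
def _example_caratheodory_set():
    rng = np.random.RandomState(0)
    P = rng.rand(20, 3)                 # 20 points in R^3
    w = np.ones(20) / 20.0              # uniform convex weights
    p = np.dot(P.T, w)                  # a point inside the convex hull
    return attainCaratheodorySet(P, p)  # indices of at most d+1 = 4 rows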
############################################################ AMVEE #####################################################
def isPD(B):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = la.cholesky(B)
return True
except la.LinAlgError:
return False
def nearestPD(A):
"""Find the nearest positive-definite matrix to input
A Python/Numpy port of <NAME>'s `nearestSPD` MATLAB code [1], which
credits [2].
[1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
[2] <NAME>, "Computing a nearest symmetric positive semidefinite
matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
B = (A + A.T) / 2
_, s, V = la.svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if isPD(A3):
return A3
spacing = np.spacing(la.norm(A))
# The above is different from [1]. It appears that MATLAB's `chol` Cholesky
# decomposition will accept matrixes with exactly 0-eigenvalue, whereas
# Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
# for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
# will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
# the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
# `spacing` will, for Gaussian random matrixes of small dimension, be on
# othe order of 1e-16. In practice, both ways converge, as the unit test
# below suggests.
I = np.eye(A.shape[0])
k = 1
while not isPD(A3):
mineig = np.min(np.real(la.eigvals(A3)))
A3 += I * (-mineig * k ** 2 + spacing)
k += 1
return A3
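# Illustrative usage sketch, not part of the original module: an indefinite symmetric
# matrix is nudged to a nearby positive-definite one that passes the Cholesky test.
def _example_nearest_pd():
    A = np.array([[1.0, 2.0], [2.0, 1.0]])  # eigenvalues 3 and -1, so not PD
    B = nearestPD(A)
    return isPD(B)                           # expected to be True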
def computeAxesPoints(E, C):
"""
This function finds the vertices of the ellipsoid E (the MVEE of P or the inscribed version of it).
:param E: A numpy p.s.d. matrix representing the ellipsoid.
:param C: A numpy array representing the center of the ellipsoid.
:return: A numpy matrix containing the vertices of the ellipsoid.
"""
if not isPD(E):
E = nearestPD(E)
# L = np.linalg.cholesky(self.E) # compute the cholesky decomposition of self.E
# U, D, V = np.linalg.svd(L, full_matrices=True) # attain the length of each axis of the ellipsoid and the
# # rotation of the ellipsoid
_, D, V = np.linalg.svd(E, full_matrices=True)
ellips_points = np.multiply(1.0 / np.sqrt(D[:, np.newaxis]), V.T) # attain the vertices of the ellipsoid assuming it was
# centered at the origin
return np.vstack((ellips_points + C.flatten(), - ellips_points + C.flatten()))
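# Illustrative usage sketch, not part of the original module: for the unit ball
# (E = identity) centred at the origin the axis endpoints are the +/- unit vectors.
def _example_axes_points():
    E = np.eye(2)
    C = np.zeros(2)
    # Expected rows (up to ordering): (1, 0), (0, 1), (-1, 0), (0, -1).
    return computeAxesPoints(E, C)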
def volumeApproximation(P):
"""
This is our implementation of Algorithm 4.1 in the paper "On Khachiyan’s Algorithm for the Computation of Minimum
Volume Enclosing Ellipsoids" by <NAME> and <NAME>. It serves to compute a set of at most
2*d points which will be used for computing an initial ellipsoid.
:param P: A numpy array whose rows are the data points.
:return: A numpy array of at most 2*d indices of points from P
"""
basis = None
basis_points = []
n, d = P.shape
if n <= 2 * d:
# if number of points is less than 2*self.P.d, then return their indices in self.P.P
return [i for i in range(n)]
v = np.random.randn(d) # start with a random vector
while basis is None or np.linalg.matrix_rank(basis) < d: # while rank of basis is less than d
if basis is not None: # if we already have computed basis points
if basis.shape[1] == d:
# if this line is reached then it means that there is numerical instability
print('Numerical Issues!')
_, _, V = np.linalg.svd(basis[:, :-1], full_matrices=True)
return list(range(n))
orth_basis = null_space(basis.T) # get the orthant of basis
v = orth_basis[:, 0] if orth_basis.ndim > 1 else orth_basis # set v to be the first column of basis
Q = np.dot(P, v.T) # get the dot product of each row of self.P.P and v
if len(basis_points) > 0: # if there are already chosen points, mask out their dot products
Q[basis_points] = np.nan
p_alpha = np.nanargmax(Q) # get the index of row with largest non-nan dot product value
p_beta = np.nanargmin(Q) # get the index of row with smallest non-nan dot product value
v = np.expand_dims(P[p_beta, :] - P[p_alpha, :], 1) # let v be the substraction between the
# row of the largest dot product and the
# point with the smallest dot product
if basis is None: # if no basis was computed
basis = v / np.linalg.norm(v)
else: # add v to the basis
basis = np.hstack((basis, v / np.linalg.norm(v, 2)))
basis_points.append(p_alpha) # add the index of the point with largest dot product
basis_points.append(p_beta) # add the index of the point with smallest dot product
return basis_points
def computemahalanobisDistance(Q, ellip):
"""
This function is used for computing the distance between the rows of Q and ellip using the Mahalanobis
loss function.
:param Q: A numpy array whose rows are the points.
:param ellip: A numpy array representing a p.s.d matrix (an ellipsoid)
:return: The Mahalanobis distance between each row of Q and ellip.
"""
s = np.einsum("ij,ij->i", np.dot(Q, ellip), Q) # compute the distance efficiently
return s
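# Illustrative sketch, not part of the original module: the einsum above is a fast way of
# taking the diagonal of Q @ ellip @ Q.T, i.e. q_i^T ellip q_i for every row q_i.
def _example_mahalanobis_equivalence():
    rng = np.random.RandomState(1)
    Q = rng.rand(5, 3)
    ellip = np.eye(3)
    fast = computemahalanobisDistance(Q, ellip)
    slow = np.diag(np.dot(np.dot(Q, ellip), Q.T))
    return np.allclose(fast, slow)  # expected to be True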
def computeEllipsoid(P, weights):
"""
This function computes the ellipsoid which is the MVEE of self.P.
:param weights: a numpy of array of weights with respest to the rows of self.P.P.
:return:
- The MVEE of self.P.P in a p.s.d. matrix form.
- The center of the MVEE of self.P.P.
"""
if weights.ndim == 1: # make sure that the weights are not flattened
weights = np.expand_dims(weights, 1)
c = np.dot(P.T, weights) # attain the center of the MVEE
d = P.shape[1]
Q = P[np.where(weights.flatten() > 0.0)[0], :] # get all the points with positive weights
weights2 = weights[np.where(weights.flatten() > 0.0)[0], :] # get all the positive weights
# compute a p.s.d matrix which will represent the ellipsoid
ellipsoid = 1.0 / d * np.linalg.inv(np.dot(np.multiply(Q, weights2).T, Q)
- np.multiply.outer(c.T.ravel(), | |
import re
from .AssertionException import AssertionException
class _Assert(object):
def __init__(self, log):
self.__log = log
#
def isIn(self, value, valueList, message = None):
Assert.l_isIn(self.__log, value, valueList, message)
#
def isNotIn(self, value, valueList, message = None):
Assert.l_isNotIn(self.__log, value, valueList, message)
#
def raisesException(self, function, arguments, message = None):
Assert.l_raisesException(self.__log, function, arguments, message)
#
def isCallable(self, value, message = None):
Assert.l_isCallable(self.__log, value, message)
#
def isInstance(self, value, typeOrTypes, message = None):
Assert.l_isInstance(self.__log, value, typeOrTypes, message)
#
def isRegExMatch(self, value, regexPattern, message = None):
Assert.l_isRegExMatch(self.__log, value, regexPattern, message)
#
def isEqual(self, value, otherValue, message = None):
Assert.l_isEqual(self.__log, value, otherValue, message)
#
def isGreater(self, value, otherValue, message = None):
Assert.l_isGreater(self.__log, value, otherValue, message)
#
def isGreaterOrEqual(self, value, otherValue, message = None):
Assert.l_isGreaterOrEqual(self.__log, value, otherValue, message)
#
def isSmaller(self, value, otherValue, message = None):
Assert.l_isSmaller(self.__log, value, otherValue, message)
#
def isSmallerOrEqual(self, value, otherValue, message = None):
Assert.l_isSmallerOrEqual(self.__log, value, otherValue, message)
#
def isNotEqual(self, value, otherValue, message = None):
Assert.l_isNotEqual(self.__log, value, otherValue, message)
#
def isNone(self, value, message = None):
Assert.l_isNone(self.__log, value, message)
#
def isNotNone(self, value, message = None):
Assert.l_isNotNone(self.__log, value, message)
#
def isNotNoneOrEmpty(self, value, message = None):
Assert.l_isNotNoneOrEmpty(self.__log, value, message)
#
def isTrue(self, value, message = None):
Assert.l_isTrue(self.__log, value, message)
#
def isFalse(self, value, message = None):
Assert.l_isFalse(self.__log, value, message)
#
#
class Assert(object):
@staticmethod
def createCustomAssert(log):
return _Assert(log)
#
"""
@staticmethod
def getAllBaseClasses(cls):
# TODO: convert this to an iteration
c = list(cls.__bases__)
for base in c:
c.extend(getAllBaseClasses(base))
return c
#
"""
@staticmethod
def isIn(value, valueList, message = None, log = None, identifier:str = None):
bSuccess = value in valueList
if not bSuccess:
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "Value is " + repr(value) + " so value is not an element of list " + repr(valueList) + "!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isIn(log, value, valueList, message = None, identifier:str = None):
bSuccess = value in valueList
if not bSuccess:
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "Value is " + repr(value) + " so value is not an element of list " + repr(valueList) + "!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isNotIn(value, valueList, message = None, log = None, identifier:str = None):
bSuccess = value not in valueList
if not bSuccess:
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "Value is " + repr(value) + " so value is not an element of list " + repr(valueList) + "!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isNotIn(log, value, valueList, message = None, identifier:str = None):
bSuccess = value not in valueList
if not bSuccess:
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "Value is " + repr(value) + " so value is not an element of list " + repr(valueList) + "!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def raisesException(function, arguments, message = None, log = None, identifier:str = None):
bSuccess = True
try:
function(*arguments)
bSuccess = False
except Exception as ee:
pass
if not bSuccess:
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "No exception was raised!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_raisesException(log, function, arguments, message = None, identifier:str = None):
bSuccess = True
try:
function(*arguments)
bSuccess = False
except Exception as ee:
pass
if not bSuccess:
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "No exception was raised!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isCallable(value, message = None, log = None, identifier:str = None):
if callable(value):
return
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "Value is not a callable but of type " + str(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isCallable(log, value, message = None, identifier:str = None):
if callable(value):
return
if message is None:
message = ""
else:
message += " :: "
message = "ASSERTION ERROR :: " + message + "Value is not a callable but of type " + str(value)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isInstance(value, typeOrTypes, message = None, log = None, identifier:str = None):
if isinstance(value, typeOrTypes):
return
if issubclass(type(value), typeOrTypes):
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is of type " + str(type(value)) + " and not of type " + str(typeOrTypes)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isInstance(log, value, typeOrTypes, message = None, identifier:str = None):
if isinstance(value, typeOrTypes):
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is of type " + str(type(value)) + " and not of type " + str(typeOrTypes)
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isEqual(value, otherValue, message = None, log = None, identifier:str = None):
if value == otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " and not " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isEqual(log, value, otherValue, message = None, identifier:str = None):
if value == otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " and not " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isRegExMatch(value, regexPattern, message = None, log = None, identifier:str = None):
m = re.match(regexPattern, value)
if m:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which does not match " + repr(regexPattern) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isRegExMatch(log, value, regexPattern, message = None, identifier:str = None):
m = re.match(regexPattern, value)
if m:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which does not match " + repr(regexPattern) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isNotEqual(value, otherValue, message = None, log = None, identifier:str = None):
if value != otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which is not expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isNotEqual(log, value, otherValue, message = None, identifier:str = None):
if value != otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which is not expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def isGreater(value, otherValue, message = None, log = None, identifier:str = None):
if value > otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which not greater than " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def l_isGreater(log, value, otherValue, message = None, identifier:str = None):
if value > otherValue:
return
if message is None:
message = "ASSERTION ERROR"
else:
message += " ::"
message = "ASSERTION ERROR :: " + message + " Value is " + repr(value) + " which not greater than " + repr(otherValue) + " as expected!"
if identifier:
message = "<" + identifier + "> " + message
if log != None:
if callable(log):
log(message)
else:
log.error(message)
else:
print(message)
raise AssertionException(message)
#
@staticmethod
def | |
# File: inpystem/signals.py
# -*- coding: utf-8 -*-
"""This package defines all sort of classes to handle data for inpystem.
"""
import abc
import pathlib
import configparser
import logging
import copy
import numpy as np
import matplotlib.pyplot as plt
import hyperspy.api as hs
from . import restore
from .tools import misc
_logger = logging.getLogger(__name__)
def search_nearest(pix, mask):
"""Searches the non-zeros pixels of mask which are near to the pix
pixel.
The algorithm begins by searching for non-zero elements around the
pixel whose index is pix. If there are non-zero elements, their
positions are returned. Otherwise, the searching horizon rises by
one. Then, the algorithm goes on.
The algorithm stops automatically at an horizon of 100 pixels. In
such case, an error is sent to the logger and None is returned.
Arguments
---------
pix: int
The position index of the pixel whose neighbors should be found.
mask: (m, n) numpy array
The sampling mask (1 for sampled pixel and 0 otherwise).
Returns
-------
None, 2-tuple
None is returned in case no neighbor was found. Otherwise, a
tuple (y, x) is returned where x (resp. y) are the columns
(resp. rows) indices of the pixel neighbors.
"""
m, n = mask.shape
y, x = np.unravel_index(pix, (m, n))
mask[y, x] = 0
Lmax = 100
flag = True
L = 1
pmask = np.pad(mask, pad_width=Lmax, mode='constant')
py, px = y + Lmax, x + Lmax
while flag:
submask = pmask[py-L:py+L+1, px-L:px+L+1]
if submask.sum() > 0:
flag = False
else:
if L < Lmax:
L += 1
else:
return None
nnz = submask.nonzero()
return (list(nnz[0] + (y-L)), list(nnz[1] + (x-L)))
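# Illustrative usage sketch, not part of the original module: the only sampled neighbour
# of the centre pixel of a 3x3 mask is found at row 1, column 2. Note that search_nearest
# zeroes mask[pix] in place.
def _example_search_nearest():
    mask = np.array([[0, 0, 0],
                     [0, 0, 1],
                     [0, 0, 0]])
    # pix = 4 is the centre pixel in row-major order; expected result: ([1], [2]).
    return search_nearest(4, mask)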
class Scan:
"""Scan pattern class.
This class stores the data spatial shape and two copies of the
scan pattern. One of these copies is the initial scan pattern
which is given to the class. At the same time, a :code:`ratio`
argument can be given to keep only a portion of the available
samples. See Notes for more details.
Attributes
----------
shape : 2-length tuple
The spatial shape (m, n) where m is the number of rows and n is the
number of columns.
path : numpy array
The sampling path to be used in the study.
path_0 : numpy array
The initial sampling path to be kept in case the ratio is changed.
ratio: float
The current :code:`ratio` value such that :code:`path`has size
:code:`ratio*m*n`. Changing this attribute automaticaly updates
:code:`path`.
Note
-----
Suppose only :code:`r*m*n` pixels have been sampled; then the
:code:`path_0` attribute has shape (r*m*n, ) and its elements
lay between 0 and m*n-1.
Meanwhile, if the user wants to consider only :code:`ratio` percent of
the samples, the :code:`ratio` argument should be given. The
:code:`path` attribute would then have shape (ratio*m*n, ). In such
case, :code:`path_0[:ratio*m*n]` will be equal to
:code:`path`. Be aware that :code:`ratio` should be lower than
:code:`r`.
Each element of these arrays is the pixel index in row major order.
To recover the row and column index arrays, type the following commands::
i = path // n
j = path % n
"""
def __init__(self, shape, path, ratio=None):
"""Scan pattern constructor.
Arguments
---------
shape: (m, n) tuple
The spatial shape where m is the number of rows and n is
the number of columns.
path : tuple, numpy array
The sampling path. See class Notes for more detail.
ratio: optional, float
The ratio of sampled pixels. This should lay between 0 (excl.)
and 1. Default is None for full sampling.
"""
if len(tuple(shape)) != 2:
raise ValueError('Invalid shape parameter length.')
path = np.asarray(path)
if path.ndim != 1:
raise ValueError('Input path array should have 1 dimension.')
if path.size == 0 or path.size > shape[0] * shape[1]:
raise ValueError('Input path array has invalid size.')
path = path.astype(int)
# Let us check that no value is given twice
path_copy = path.copy()
path_copy.sort()
if np.any(path_copy[1:] - path_copy[:-1] == 0):
raise ValueError(
'Some elements of input path appear at least '
'twice.')
self.shape = shape
self.path = path
self.path_0 = path
self._ratio = ratio
# The following line initializes self.path
self.ratio = ratio
@property
def ratio(self):
""" Ratio getter.
Returns
-------
float
The ratio property value.
"""
return self._ratio
@ratio.setter
def ratio(self, value):
""" Ratio setter.
It checks that the value is correct and updates the scan
object attributes.
Arguments
---------
value: float
The new ratio value.
"""
if value is not None and (value > 1 or value <= 0):
raise ValueError('The ratio should be in the ]0,1] segment.')
# get sample size
m, n = self.shape
if value is not None:
N = int(m * n * value) # Number of required samples.
else:
N = self.path_0.size
self._ratio = N / (m*n)
if N > self.path_0.size:
N = self.path_0.size
_logger.warning(
'Input ratio is higher than the maximal ratio'
' ({:.3f}). Ratio is set to the maximal value.'.format(
self.path_0.size/(m*n)))
# change scan pattern length
self.path = self.path_0[:N]
# Set new value
self._ratio = N / (m*n)
@classmethod
def from_file(cls, data_file, ratio=None):
""" Creates a scan pattern object from a data file
(such as .dm3, .dm4 or npz).
In the case of a .npz file, this one should contain the :code:`m`,
:code:`n` and :code:`path` variables which are resp. the number of
rows and columns and the path array.
Concerning the .dm3/.dm4 files, the data storage is specific to
the LPS Lab (Orsay, France) implementation.
An additional argument :code:`ratio` allows you to select only a
given ratio of the sampled pixels. This should lay between 0 (excl.)
and 1.
Arguments
---------
data_file: str
The data file path.
ratio: optional, float
The ratio of sampled pixels. This should lay between 0 (excl.)
and 1. Default is None for full sampling.
Returns
-------
Scan object
The scan pattern.
"""
_logger.info('Loading Scan file.')
# Get file extension
p = pathlib.Path(data_file)
file_ext = p.suffix
# Digital Micrograph file
if (file_ext == '.dm3' or file_ext == '.dm4'):
_logger.info('Scan file type is {}.'.format(file_ext))
data = hs.load(str(p)).data
if data.ndim == 3:
_logger.info('{} scan file type is A.'.format(file_ext))
_, m, n = data.shape
x = data[0, :, :].flatten()
y = data[1, :, :].flatten()
elif data.ndim == 2:
_logger.info('{} scan file type is B.'.format(file_ext))
m, n = data.shape
x = data.flatten() % n
y = data.flatten() // n
else:
raise ValueError(
'Scan data has {} dimensions. Expected dimension'
' is 2 or 3.'.format(data.ndim)
)
path = y * n + x
# Numpy file
elif (file_ext == '.npz'):
_logger.info('Scan file type is .npz.')
data = np.load(str(data_file))
m, n = int(data['m']), int(data['n'])
path = data['path']
return cls((m, n), path, ratio=ratio)
@classmethod
def random(cls, shape, ratio=None, seed=None):
""" Creates a random scan pattern object.
Arguments
---------
shape: (m, n) tuple
The data spatial shape.
ratio: optional, float
The ratio of sampled pixels. It should lay between 0 (excluded)
and 1. Default is None for full sampling.
seed: optional, int
Seed for random sampling.
Default is None for random seed.
Returns
-------
Scan object
The scan pattern.
"""
_logger.info('Random scan generated.')
if seed is not None:
np.random.seed(seed)
# The following code should do the job.
#
# P = shape[0]*shape[1] # Number of pixels.
# perm = np.random.permutation(P)
#
# However, to match previous version output for seed 0, the
# following non-optimal code is chosen.
pix_ratio = 1 if ratio is None else ratio
mask = np.random.rand(*shape) < pix_ratio
perm = np.flatnonzero(mask)
np.random.shuffle(perm)
return cls(shape, perm)
def get_mask(self):
"""Returns the sampling mask.
The sampling mask is boolean and True is for sampled pixels.
Returns
-------
mask : (m, n) numpy array
The sampling mask.
"""
mask = np.zeros(self.shape, dtype=bool)
sampled_pos = np.unravel_index(self.path, self.shape)
mask[sampled_pos] = True
return mask
def plot(self):
""" Plots the sampling mask.
White (resp. black) pixels are sampled (resp. non-sampled).
"""
# Show data
fig, ax = plt.subplots()
ax.matshow(self.get_mask())
# set title
ax.set_title('Sampling mask')
# layout
ax.axis('image')
ax.axis('off')
def __repr__(self):
return "<Scan, shape: {}, ratio: {:.3f}>".format(
self.shape, self.ratio)
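# Illustrative usage sketch, not part of the original module: build a random scan covering
# roughly half of a 4x5 grid, inspect its boolean mask, then keep a smaller ratio.
def _example_scan_usage():
    scan = Scan.random((4, 5), ratio=0.5, seed=0)
    mask = scan.get_mask()   # boolean (4, 5) array, True for sampled pixels
    scan.ratio = 0.25        # updates scan.path to roughly the first quarter of the pixels
    return mask, scan.path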
class AbstractStem(abc.ABC):
"""Abstract STEM | |
# File: comp_imprinting.py
#!/usr/bin/env python
'''
-------------------------
Usage: compare_imprinting.py status1.txt status2.txt homologs.txt outprefix [options]
This is a short script for comparing imprinting between two different species (species 1 and species 2).
Uses output from call_imprinting.sh ("locus_name", "status" and "favored_parent" columns of *_filtered_all.txt file).
v.1.0 11/29/2018
by <NAME>
Version history:
v.1.0 - initial build 11/29/2018
-------------------------
'''
import sys, os, re, argparse
plots = True
try:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
print "Warning: could not load module matplotlib. Plots will not be generated. To install matplotlib, run:"
print "pip install matplotlib"
plots = False
if len(sys.argv) == 1:
print "-------------------------"
print "compare_imprinting v1.0 by <NAME>, 11/29/2018"
print "-------------------------"
print """
This is a short script for comparing imprinting between two different species (species 1 and species 2).
Uses output from call_imprinting.sh ("locus_name", "status" and "favored_parent" columns of *_filtered_all.txt file).
All five arguments are required (and all files should NOT have headers)
status1.txt - list of imprinting "statuses" in species 1 (3 columns: gene ID, direction of parental bias, status; no header)
status2.txt - list of imprinting "statuses" in species 2 (3 columns: gene ID, direction of parental bias, status; no header)
homolog.txt - list of homologs (2 columns: species 1 gene ID, species 2 gene ID; no header)
outprefix - name for output file, without extension
type - type of imprinting being compared, "MEG" or "PEG"
Example inputs:
(1) status1.txt (per-gene imprinting status in species 1)
AT1G01030 none low_counts
AT1G01040 father fail_pval_cutoff
AT1G01050 none low_counts
(2) status2.txt (per-gene imprinting status in species 2)
AC148167.6_FG001 father PEG
AC149475.2_FG002 father fail_pval_cutoff
AC149475.2_FG003 father fail_pval_cutoff
(3) homolog.txt (column order doesn't matter)
AT5G23110 GRMZM2G084819
AT4G35800 GRMZM2G044306
AT5G65930 GRMZM2G070273
Outputs, for each gene in status1.txt, the "most imprinted" homolog in species 2 (if tied, chosen at random),
and the imprinting status of that homolog. Also outputs a summary to stdout.
*********************
NOTE: to also perform an analysis over known interactors, pathways, or complexes, provide
optional file pathway_info.txt (5th input file):
Example pathway_info.txt for also doing a pathway/interaction/complex analysis:
AT3G43920 RdDM
AT2G27040 RdDM
AT2G33830 RdDM
AT3G20740 PRC2
AT2G35670 PRC2
AT1G02580 PRC2
GRMZM2G157820 PRC2
GRMZM5G875502 PRC2
GRMZM2G043484 PRC2
etc.
Gene ID from either species 1 or species 2 in first column, pathway ID in second column. All
genes with same pathway ID (here "RdDM" or "PRC2") will be considered together. Note that
either species 1 or 2 ID can be provided, and scripts assumes that any homologs of that gene
in the other species -also- belong to the same pathway.
** Note: providing --pathway does not override the homologs-level analysis; both will be performed **
"""
print "Usage: compare_imprinting.py status1.txt status2.txt homologs.txt outprefix [options]"
print "-------------------------"
print " --pathway : file containing groups of genes (e.g. belonging in same pathway) - 2 columns: gene ID, gene group"
print " --species1 : name of species from first input file (default \"species1\""
print " --species2 : name of species from second input file (default \"species2\""
sys.exit(1)
# read in arguments
parser = argparse.ArgumentParser()
parser.add_argument('status1', help = 'list of imprinting "statuses" in species 1 (3 columns: gene ID, favored_parent, status)')
parser.add_argument('status2', help = 'list of imprinting "statuses" in species 2 (3 columns: gene ID, favored_parent, status)')
parser.add_argument('homolog', help = 'list of homologs (2 columns: species 1 gene ID, species 2 gene ID)')
parser.add_argument('outprefix', help = 'Prefix for output files {outprefix}.txt, {outprefix}_pie.png and {outprefix}_pie_all.png')
parser.add_argument('--pathway', default = "", help = 'Information about known pathways/complexes/etc. for some genes (2 columns: gene ID, pathway ID). See help for example.')
parser.add_argument('--species1', default = "species1", help = 'Name of species from first input file')
parser.add_argument('--species2', default = "species2", help = 'Name of species from second input file')
args = parser.parse_args()
status1 = args.status1
status2 = args.status2
homolog = args.homolog
outprefix = args.outprefix
if args.pathway != "":
pathway = args.pathway
use_pathway = True
else:
use_pathway = False
species1 = args.species1
species2 = args.species2
#-------------------------------------------------------------
values = {"MEG":5, "PEG":5, "fail_pmat_cutoff":4, "fail_CEF_cutoff":3, "fail_IF_cutoff":2, "fail_pval_cutoff":1, "low_counts":0}
values_rev_MEG = {5:"MEG",4:"matbias_fail_pmat_cutoff", 3:"matbias_fail_CEF_cutoff", 2:"matbias_fail_IF_cutoff", 1:"fail_pval_cutoff", 0:"low_counts",-2:"patbias_fail_IF_cutoff",-3:"patbias_fail_CEF_cutoff",-4:"patbias_fail_pmat_cutoff",-5:"PEG"}
values_rev_PEG = {-5:"MEG",-4:"matbias_fail_pmat_cutoff", -3:"matbias_fail_CEF_cutoff", -2:"matbias_fail_IF_cutoff", 1:"fail_pval_cutoff", 0:"low_counts",2:"patbias_fail_IF_cutoff",3:"patbias_fail_CEF_cutoff",4:"patbias_fail_pmat_cutoff",5:"PEG"}
adj_MEG = {"mother":1, "father":-1, "none":1}
adj_PEG = {"mother":-1, "father":1, "none":1}
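# Worked example of the ranking scheme above (illustrative comment, not part of the
# original pipeline): a gene reported as ("father", "PEG") scores
# values["PEG"] * adj_MEG["father"] = 5 * -1 = -5 on the MEG scale and
# values["PEG"] * adj_PEG["father"] = 5 * 1 = +5 on the PEG scale, i.e. as far from MEG
# and as close to PEG as possible. A ("father", "fail_pval_cutoff") gene gives
# 1 * -1 = -1, which the loops below map back to 1 (no usable parental-bias signal).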
# check all inputs can be opened
try:
st1 = open(status1, 'r')
except IOError, e:
print e
print 'Could not open',species1,'status file',status1
sys.exit(2)
try:
st2 = open(status2, 'r')
except IOError, e:
print e
print 'Could not open',species2,'status file',status2
sys.exit(2)
try:
hom = open(homolog, 'r')
except IOError, e:
print e
print 'Could not open homolog list',homolog
sys.exit(2)
try:
out = open(outprefix+'.txt', 'w')
except IOError, e:
print e
print 'Could not create output file ',outprefix+'.txt'
sys.exit(2)
if use_pathway == True:
try:
pwy = open(pathway, 'r')
except IOError, e:
print e
print 'Could not open pathway file ',pathway
sys.exit(2)
try:
out2 = open(outprefix+'_pathways.txt', 'w')
except IOError, e:
print e
print 'Could not create output file ',outprefix+'_pathways.txt'
sys.exit(2)
# read in list of genes assayed in species 1 and their statuses
s1_status_MEG = {} # store ranking relative to MEG (higher value is closer to being MEG)
s1_status_PEG = {} # store ranking relative to PEG (higher value is closer to being PEG)
# values 1->5 correspond to fail_pval -> imprinted, -2->-5 = fail_IF -> imprinted in other direction
# -6 == censored
line = st1.readline()
while line:
ll = line.strip().split('\t')
if len(ll) != 3:
print "Error: three columns expected in",species1,"status file,",len(ll),"detected"
sys.exit(1)
if ll[0] in s1_status_MEG:
print "Error: gene",ll[0],"appears more than once in",species1,"status file"
sys.exit(1)
if ll[0] in s1_status_PEG:
print "Error: gene",ll[0],"appears more than once in",species1,"status file"
sys.exit(1)
else:
# convert each gene's status to numeric ranking
if ll[2] == "censored":
s1_status_MEG[ll[0]] = -6
s1_status_PEG[ll[0]] = -6
else:
# ranking vs. MEGs
adj = adj_MEG[ll[1]]
val = values[ll[2]] * adj
if val == -1:
val = 1
s1_status_MEG[ll[0]] = val
# ranking vs. PEGs
adj = adj_PEG[ll[1]]
val = values[ll[2]] * adj
if val == -1:
val = 1
s1_status_PEG[ll[0]] = val
line = st1.readline()
st1.close()
genelist_s1 = set(s1_status_MEG.keys()+s1_status_PEG.keys())
# repeat in species 2
s2_status_MEG = {} # store ranking relative to MEG (higher value is closer to being MEG)
s2_status_PEG = {} # store ranking relative to PEG (higher value is closer to being PEG)
line = st2.readline()
while line:
ll = line.strip().split('\t')
if len(ll) != 3:
print "Error: three columns expected in",species2,"status file,",len(ll),"detected"
sys.exit(1)
if ll[0] in s2_status_MEG:
print "Error: gene",ll[0],"appears more than once in",species2,"status file"
sys.exit(1)
if ll[0] in s2_status_PEG:
print "Error: gene",ll[0],"appears more than once in",species2,"status file"
sys.exit(1)
else:
# convert each gene's status to numeric ranking
if ll[2] == "censored":
s2_status_MEG[ll[0]] = -6
s2_status_PEG[ll[0]] = -6
else:
# ranking vs. MEGs
adj = adj_MEG[ll[1]]
val = values[ll[2]] * adj
if val == -1:
val = 1
s2_status_MEG[ll[0]] = val
# ranking vs. PEGs
adj = adj_PEG[ll[1]]
val = values[ll[2]] * adj
if val == -1:
val = 1
s2_status_PEG[ll[0]] = val
line = st2.readline()
st2.close()
# read in list of species 1 -> 2 homologs; detect pathway data if included in col 3
line = hom.readline()
s1_to_s2 = {}
s2_to_s1 = {}
while line:
ll = line.strip().split('\t')
if len(ll) != 2:
print "Error: two columns expected in list of homologs,",len(ll),"detected"
print line
sys.exit(1)
# read in so that order of genelist file (species 1>2 or 2>1) doesn't matter
if ll[0] in genelist_s1:
if ll[0] not in s1_to_s2:
s1_to_s2[ll[0]] = [ll[1]]
else:
s1_to_s2[ll[0]].append(ll[1])
if ll[1] not in s2_to_s1:
s2_to_s1[ll[1]] = [ll[0]]
else:
s2_to_s1[ll[1]].append(ll[0])
elif ll[1] in genelist_s1:
if ll[1] not in s1_to_s2:
s1_to_s2[ll[1]] = [ll[0]]
else:
s1_to_s2[ll[1]].append(ll[0])
if ll[0] not in s2_to_s1:
s2_to_s1[ll[0]] = [ll[1]]
else:
s2_to_s1[ll[0]].append(ll[1])
line = hom.readline()
hom.close()
# if pathway data provided, read that in
if use_pathway == True:
line = pwy.readline()
pathways = {}
dropped = 0
while line:
ll = line.strip().split('\t')
if len(ll) != 2:
print "Error: two columns expected in pathway data,",len(ll),"detected"
print line
sys.exit(1)
if ll[1] not in pathways:
pathways[ll[1]] = {"species1":[],"species2":[]}
if ll[0] in s1_status_MEG:
if ll[0] not in pathways[ll[1]]["species1"]:
pathways[ll[1]]["species1"].append(ll[0])
if ll[0] in s1_to_s2:
for s2_gene in s1_to_s2[ll[0]]:
if s2_gene not in pathways[ll[1]]["species2"]:
pathways[ll[1]]["species2"].append(s2_gene)
elif ll[0] in s2_status_MEG:
if ll[0] not in pathways[ll[1]]["species2"]:
pathways[ll[1]]["species2"].append(ll[0])
if ll[0] in s2_to_s1:
for s1_gene in s2_to_s1[ll[0]]:
if s1_gene not in pathways[ll[1]]["species1"]:
pathways[ll[1]]["species1"].append(s1_gene)
else:
dropped += 1
line = pwy.readline()
if dropped != 0:
print "Pathway information was provided for",dropped,"genes that didn't have imprinting data; these were censored"
pwy.close()
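# Sketch of the structure built above, assuming a pathway line
# "geneA<TAB>pathway1" where geneA is a species-1 gene with imprinting data
# (identifiers are hypothetical):
#
#   pathways == {"pathway1": {"species1": ["geneA"],
#                             "species2": ["geneB"]}}  # geneB added via s1_to_s2["geneA"]
#
# Genes with pathway annotations but no imprinting status only increment the
# `dropped` counter and are not added to any pathway entry.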
# put it all together; first on the homolog level
print "Analysing conservation of imprinted expression between",species1,"and",species2,"homologs"
out.write('geneID_species1\tstatus_species1\tbest_homolog_species2\tbest_status_species2\tdiff\n')
# save summaries of statuses of the s2 homologs of s1 MEGs and PEGs for summary
counts_s1_MEG = {"no_homolog":0, "censored":0, "no_data":0, "MEG":0,"matbias_fail_pmat_cutoff":0, "matbias_fail_CEF_cutoff":0, "matbias_fail_IF_cutoff":0, "fail_pval_cutoff":0, "low_counts":0,"patbias_fail_IF_cutoff":0,"patbias_fail_CEF_cutoff":0,"patbias_fail_pmat_cutoff":0,"PEG":0}
counts_s1_PEG = {"no_homolog":0, "censored":0, "no_data":0, "MEG":0,"matbias_fail_pmat_cutoff":0, "matbias_fail_CEF_cutoff":0, "matbias_fail_IF_cutoff":0, "fail_pval_cutoff":0, "low_counts":0,"patbias_fail_IF_cutoff":0,"patbias_fail_CEF_cutoff":0,"patbias_fail_pmat_cutoff":0,"PEG":0}
values_rev_MEG = {-7:"no_data", -6:"censored", 5:"MEG",4:"matbias_fail_pmat_cutoff", 3:"matbias_fail_CEF_cutoff", 2:"matbias_fail_IF_cutoff", 1:"fail_pval_cutoff", 0:"low_counts",-2:"patbias_fail_IF_cutoff",-3:"patbias_fail_CEF_cutoff",-4:"patbias_fail_pmat_cutoff",-5:"PEG"}
values_rev_PEG = {-7:"no_data", -6:"censored", -5:"MEG",-4:"matbias_fail_pmat_cutoff", -3:"matbias_fail_CEF_cutoff", -2:"matbias_fail_IF_cutoff", 1:"fail_pval_cutoff", 0:"low_counts",2:"patbias_fail_IF_cutoff",3:"patbias_fail_CEF_cutoff",4:"patbias_fail_pmat_cutoff",5:"PEG"}
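# The reverse maps above define a symmetric ranking scale from -5 to 5 (plus
# -6 for censored and -7 for missing data): 5 is a full MEG call on the MEG
# scale (or a full PEG call on the PEG scale) and each step toward -5 is one
# further failed cutoff in the opposite direction. A worked example of the
# `diff` column computed below: a species-1 MEG (rank 5) whose best species-2
# homolog only reaches matbias_fail_IF_cutoff (rank 2) gets diff = 5 - 2 = 3,
# while a gene that is a MEG in both species gets diff = 5 - 5 = 0.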
x = 0
for s1 in genelist_s1:
if s1 not in s1_to_s2:
best_str = "no_homolog"
best_homolog = "none"
diff = ""
if s1_status_MEG[s1] == 5:
counts_s1_MEG[best_str]+=1
if s1_status_PEG[s1] == 5:
counts_s1_PEG[best_str]+=1
else:
best = -7 # default, "no data"
best_homolog = "none"
diff = ""
# gene is maternally biased in species 1, find most maternally-biased homolog
if s1_status_MEG[s1] >= 2:
for s2 in s1_to_s2[s1]:
if s2 in s2_status_MEG and s2_status_MEG[s2] > best:
best = s2_status_MEG[s2]
best_homolog = s2
best_str = values_rev_MEG[best]
if best_homolog != "none" and abs(s2_status_MEG[best_homolog]) <= 5 and s2_status_MEG[best_homolog] != 0:
diff = s1_status_MEG[s1] - s2_status_MEG[best_homolog]
if s1_status_MEG[s1] == 5:
counts_s1_MEG[best_str]+=1
# gene is paternally biased in species 1, find most paternally-biased homolog
elif s1_status_MEG[s1] <= -2 and s1_status_MEG[s1] >= -5:
for s2 in s1_to_s2[s1]:
if s2 in s2_status_PEG and s2_status_PEG[s2] > best:
best = s2_status_PEG[s2]
best_homolog = s2
best_str = values_rev_PEG[best]
if best_homolog != "none" and abs(s2_status_PEG[best_homolog]) <= 5 and s2_status_PEG[best_homolog] != 0:
diff = s1_status_PEG[s1] - s2_status_PEG[best_homolog]
if s1_status_PEG[s1] == 5:
counts_s1_PEG[best_str]+=1
# gene is not parentally biased in species 1; find most parentally-biased homolog
elif s1_status_MEG[s1] >= -5:
bestpar = "mat"
for s2 in s1_to_s2[s1]:
if s2 in s2_status_MEG and s2_status_MEG[s2] > best:
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/buildTypes/{btLocator}/parameters/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ModelProperty', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __set_parameter_1_with_http_info(self, bt_locator, **kwargs): # noqa: E501
"""set_parameter_1 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__set_parameter_1_with_http_info(bt_locator, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str bt_locator: (required)
:param ModelProperty body:
:param str fields:
:return: ModelProperty
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['bt_locator', 'body', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_parameter_1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'bt_locator' is set
if ('bt_locator' not in params or
params['bt_locator'] is None):
raise ValueError("Missing the required parameter `bt_locator` when calling `set_parameter_1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'bt_locator' in params:
if isinstance(params['bt_locator'], TeamCityObject):
path_params['btLocator'] = params['bt_locator'].locator_id
else:
path_params['btLocator'] = params['bt_locator'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/buildTypes/{btLocator}/settings', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ModelProperty', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
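    # The private __*_with_http_info helpers in this class all follow the same
    # swagger-codegen pattern: validate kwargs, build path/query/header/body
    # params, then delegate to api_client.call_api. A hedged usage sketch,
    # assuming the corresponding public wrapper set_parameter_1 exists on this
    # API class (it is referenced by the error messages but not shown in this
    # excerpt); the locator string and property values are illustrative only:
    #
    #   prop = ModelProperty(name="env.DEPLOY_TARGET", value="staging")
    #   result = api.set_parameter_1("id:MyBuildConfig", body=prop)
    #   # asynchronous variant:
    #   thread = api.set_parameter_1("id:MyBuildConfig", body=prop, async_req=True)
    #   result = thread.get()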
def __set_parameter_2_with_http_info(self, name, bt_locator, **kwargs): # noqa: E501
"""set_parameter_2 # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__set_parameter_2_with_http_info(name, bt_locator, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: (required)
:param str bt_locator: (required)
:param ModelProperty body:
:param str fields:
:return: ModelProperty
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'bt_locator', 'body', 'fields'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_parameter_2" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `set_parameter_2`") # noqa: E501
# verify the required parameter 'bt_locator' is set
if ('bt_locator' not in params or
params['bt_locator'] is None):
raise ValueError("Missing the required parameter `bt_locator` when calling `set_parameter_2`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
if isinstance(params['name'], TeamCityObject):
path_params['name'] = params['name'].locator_id
else:
path_params['name'] = params['name'] # noqa: E501
if 'bt_locator' in params:
if isinstance(params['bt_locator'], TeamCityObject):
path_params['btLocator'] = params['bt_locator'].locator_id
else:
path_params['btLocator'] = params['bt_locator'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/buildTypes/{btLocator}/settings/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ModelProperty', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __set_parameter_type_with_http_info(self, name, bt_locator, **kwargs): # noqa: E501
"""set_parameter_type # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__set_parameter_type_with_http_info(name, bt_locator, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: (required)
:param str bt_locator: (required)
:param Type body:
:return: Type
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'bt_locator', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_parameter_type" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `set_parameter_type`") # noqa: E501
# verify the required parameter 'bt_locator' is set
if ('bt_locator' not in params or
params['bt_locator'] is None):
raise ValueError("Missing the required parameter `bt_locator` when calling `set_parameter_type`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
if isinstance(params['name'], TeamCityObject):
path_params['name'] = params['name'].locator_id
else:
path_params['name'] = params['name'] # noqa: E501
if 'bt_locator' in params:
if isinstance(params['bt_locator'], TeamCityObject):
path_params['btLocator'] = params['bt_locator'].locator_id
else:
path_params['btLocator'] = params['bt_locator'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/buildTypes/{btLocator}/parameters/{name}/type', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Type', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __set_parameter_type_raw_value_with_http_info(self, name, bt_locator, **kwargs): # noqa: E501
"""set_parameter_type_raw_value # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__set_parameter_type_raw_value_with_http_info(name, bt_locator, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: (required)
:param str bt_locator: (required)
:param str body:
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'bt_locator', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_parameter_type_raw_value" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `set_parameter_type_raw_value`") # noqa: E501
# verify the required parameter 'bt_locator' is set
if ('bt_locator' not in params or
params['bt_locator'] is None):
raise ValueError("Missing the required parameter `bt_locator` when calling `set_parameter_type_raw_value`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
if isinstance(params['name'], TeamCityObject):
path_params['name'] = params['name'].locator_id
else:
path_params['name'] = params['name'] # noqa: E501
if 'bt_locator' in params:
if isinstance(params['bt_locator'], TeamCityObject):
path_params['btLocator'] = params['bt_locator'].locator_id
else:
path_params['btLocator'] = params['bt_locator'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/app/rest/buildTypes/{btLocator}/parameters/{name}/type/rawValue', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def __set_parameter_value_long_with_http_info(self, name, bt_locator, **kwargs): # noqa: E501
"""set_parameter_value_long # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.__set_parameter_value_long_with_http_info(name, bt_locator, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: (required)
:param str bt_locator: (required)
:param str body:
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'bt_locator', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_parameter_value_long" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `set_parameter_value_long`") # noqa: E501
# verify the required parameter 'bt_locator' is set
if ('bt_locator' not in params or
params['bt_locator'] is None):
raise ValueError("Missing the required parameter `bt_locator` when calling `set_parameter_value_long`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in params:
if isinstance(params['name'], TeamCityObject):
path_params['name'] = params['name'].locator_id
else:
path_params['name'] = params['name'] # noqa: E501
if 'bt_locator' in params:
if isinstance(params['bt_locator'], TeamCityObject):
path_params['btLocator'] = params['bt_locator'].locator_id
else:
path_params['btLocator'] = params['bt_locator'] # noqa: E501
query_params = []
# Repository: DONG-Jason/chia-blockchain
import asyncio
import dataclasses
import logging
import time
import traceback
from pathlib import Path
from typing import AsyncGenerator, Optional, Dict, Callable, List, Tuple, Any, Union, Set
import aiosqlite
from blspy import AugSchemeMPL
import src.server.ws_connection as ws # lgtm [py/import-and-import-from]
from src.consensus.block_creation import unfinished_block_to_full_block
from src.consensus.blockchain import Blockchain, ReceiveBlockResult
from src.consensus.constants import ConsensusConstants
from src.consensus.difficulty_adjustment import (
get_sub_slot_iters_and_difficulty,
can_finish_sub_and_full_epoch,
)
from src.consensus.make_sub_epoch_summary import next_sub_epoch_summary
from src.consensus.pot_iterations import is_overflow_sub_block, calculate_sp_iters
from src.consensus.sub_block_record import SubBlockRecord
from src.full_node.block_cache import BlockCache
from src.full_node.block_store import BlockStore
from src.full_node.coin_store import CoinStore
from src.full_node.full_node_store import FullNodeStore
from src.full_node.mempool_manager import MempoolManager
from src.full_node.signage_point import SignagePoint
from src.full_node.sync_store import SyncStore
from src.full_node.weight_proof import WeightProofHandler
from src.protocols import (
full_node_protocol,
timelord_protocol,
wallet_protocol,
farmer_protocol,
)
from src.protocols.full_node_protocol import RequestSubBlocks, RejectSubBlocks, RespondSubBlocks, RespondSubBlock
from src.server.node_discovery import FullNodePeers
from src.server.outbound_message import Message, NodeType, OutboundMessage
from src.server.server import ChiaServer
from src.types.full_block import FullBlock
from src.types.pool_target import PoolTarget
from src.types.sized_bytes import bytes32
from src.types.sub_epoch_summary import SubEpochSummary
from src.types.unfinished_block import UnfinishedBlock
from src.util.errors import ConsensusError
from src.util.ints import uint32, uint128, uint8
from src.util.path import mkdir, path_from_root
OutboundMessageGenerator = AsyncGenerator[OutboundMessage, None]
class FullNode:
block_store: BlockStore
full_node_store: FullNodeStore
# full_node_peers: FullNodePeers
sync_store: SyncStore
coin_store: CoinStore
mempool_manager: MempoolManager
connection: aiosqlite.Connection
_sync_task: Optional[asyncio.Task]
blockchain: Blockchain
config: Dict
server: Any
log: logging.Logger
constants: ConsensusConstants
_shut_down: bool
root_path: Path
state_changed_callback: Optional[Callable]
timelord_lock: asyncio.Lock
def __init__(
self,
config: Dict,
root_path: Path,
consensus_constants: ConsensusConstants,
name: str = None,
):
self.root_path = root_path
self.config = config
self.server = None
self._shut_down = False # Set to true to close all infinite loops
self.constants = consensus_constants
self.pow_pending: Set[bytes32] = set()
self.pow_creation: Dict[uint32, asyncio.Event] = {}
self.state_changed_callback: Optional[Callable] = None
if name:
self.log = logging.getLogger(name)
else:
self.log = logging.getLogger(__name__)
self.db_path = path_from_root(root_path, config["database_path"])
mkdir(self.db_path.parent)
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
async def _start(self):
# create the store (db) and full node instance
self.connection = await aiosqlite.connect(self.db_path)
self.block_store = await BlockStore.create(self.connection)
self.full_node_store = await FullNodeStore.create(self.constants)
self.sync_store = await SyncStore.create()
self.coin_store = await CoinStore.create(self.connection)
self.timelord_lock = asyncio.Lock()
self.log.info("Initializing blockchain from disk")
start_time = time.time()
self.blockchain = await Blockchain.create(self.coin_store, self.block_store, self.constants)
self.mempool_manager = MempoolManager(self.coin_store, self.constants)
self.weight_proof_handler = WeightProofHandler(
self.constants,
BlockCache(
self.blockchain.sub_blocks,
self.blockchain.sub_height_to_hash,
{},
self.blockchain.sub_epoch_summaries,
self.block_store,
),
)
self._sync_task = None
time_taken = time.time() - start_time
if self.blockchain.get_peak() is None:
self.log.info(f"Initialized with empty blockchain time taken: {int(time_taken)}s")
else:
self.log.info(
f"Blockchain initialized to peak {self.blockchain.get_peak().header_hash} height"
f" {self.blockchain.get_peak().sub_block_height}, "
f"time taken: {int(time_taken)}s"
)
await self.mempool_manager.new_peak(self.blockchain.get_peak())
self.state_changed_callback = None
peak: Optional[SubBlockRecord] = self.blockchain.get_peak()
if peak is not None:
sp_sub_slot, ip_sub_slot = await self.blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
self.full_node_store.new_peak(
peak,
sp_sub_slot,
ip_sub_slot,
False,
self.blockchain.sub_blocks,
)
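    # A minimal startup sketch based only on the methods defined in this class;
    # the config, root path, and constants are whatever the caller supplies, and
    # the real entry points wire this up through the chia server machinery:
    #
    #   node = FullNode(config, root_path, consensus_constants)
    #   await node._start()           # opens the DB and loads the blockchain
    #   node.set_server(chia_server)  # also kicks off FullNodePeers discovery
    #   ...
    #   node._close()
    #   await node._await_closed()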
def set_server(self, server: ChiaServer):
self.server = server
try:
self.full_node_peers = FullNodePeers(
self.server,
self.root_path,
self.config["target_peer_count"] - self.config["target_outbound_peer_count"],
self.config["target_outbound_peer_count"],
self.config["peer_db_path"],
self.config["introducer_peer"],
self.config["peer_connect_interval"],
self.log,
)
asyncio.create_task(self.full_node_peers.start())
except Exception as e:
error_stack = traceback.format_exc()
self.log.error(f"Exception: {e}")
self.log.error(f"Exception in peer discovery: {e}")
self.log.error(f"Exception Stack: {error_stack}")
def _state_changed(self, change: str):
if self.state_changed_callback is not None:
self.state_changed_callback(change)
async def request_and_add_sub_block(self, peer: ws.WSChiaConnection, sub_height):
peer_peak = await peer.request_sub_block(full_node_protocol.RequestSubBlock(uint32(sub_height), True))
if peer_peak is None:
self.log.warning(f"Failed to fetch sub block {sub_height} from {peer.get_peer_info()}")
return
if isinstance(peer_peak, full_node_protocol.RespondSubBlock):
sub_block = peer_peak.sub_block
self.sync_store.add_potential_peak(sub_block.header_hash, sub_block.sub_block_height, sub_block.weight)
await self.respond_sub_block(peer_peak, peer)
else:
self.log.warning(f"Failed to fetch sub block {sub_height} from {peer.get_peer_info()}")
async def new_peak(self, request, peer: ws.WSChiaConnection):
# Check if we have this block in the blockchain
if peer is not None and peer.peer_node_id is not None:
self.sync_store.add_peak_peer(request.header_hash, peer.peer_node_id, request.sub_block_height)
if self.blockchain.contains_sub_block(request.header_hash):
return None
# Not interested in less heavy peaks
peak: Optional[SubBlockRecord] = self.blockchain.get_peak()
if peak is not None and peak.weight > request.weight:
return None
if self.sync_store.get_sync_mode():
# If peer connect while we are syncing, check if they have the block we are syncing towards
peak_sync_hash = self.sync_store.get_sync_target_hash()
peak_sync_height = self.sync_store.get_sync_target_height()
if peak_sync_hash is not None and request.header_hash != peak_sync_hash and peak_sync_height is not None:
peak_peers = self.sync_store.get_peak_peers(peak_sync_hash)
# Don't ask if we already know this peer has the peak
if peer.peer_node_id not in peak_peers:
target_peak_response: Optional[RespondSubBlock] = await peer.request_sub_block(
full_node_protocol.RequestSubBlock(uint32(peak_sync_height), True)
)
if target_peak_response is not None and isinstance(target_peak_response, RespondSubBlock):
self.sync_store.add_peak_peer(peak_sync_hash, peer.peer_node_id, peak_sync_height)
elif request.sub_block_height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
self.log.info("not enough blocks for weight proof,request peak sub block")
await self.request_and_add_sub_block(peer, request.sub_block_height)
elif (
peak is not None
and peak.sub_block_height > request.sub_block_height - self.constants.WEIGHT_PROOF_RECENT_BLOCKS
):
await self.request_and_add_sub_block(peer, request.sub_block_height)
else:
await self.request_proof_of_weight(peer, request.sub_block_height, request.header_hash)
async def request_proof_of_weight(self, peer, height, header_hash):
if peer.peer_node_id in self.pow_pending:
self.log.info(f"Already have pending proof-of-weight request for peer: {peer.get_peer_info()}")
return
self.pow_pending.add(peer.peer_node_id)
request = full_node_protocol.RequestProofOfWeight(height, header_hash)
response = await peer.request_proof_of_weight(request)
self.pow_pending.remove(peer.peer_node_id)
if response is not None and isinstance(response, full_node_protocol.RespondProofOfWeight):
validated, fork_point = self.weight_proof_handler.validate_weight_proof(response.wp)
if validated is True:
# get tip params
tip_weight = response.wp.recent_chain_data[-1].reward_chain_sub_block.weight
tip_height = response.wp.recent_chain_data[-1].reward_chain_sub_block.sub_block_height
self.sync_store.add_potential_peak(response.tip, tip_height, tip_weight)
self.sync_store.add_potential_fork_point(response.tip, fork_point)
msg = Message(
"request_sub_block",
full_node_protocol.RequestSubBlock(uint32(tip_height), True),
)
await peer.send_message(msg)
return None
async def send_peak_to_timelords(self):
"""
Sends current peak to timelords
"""
peak_block = await self.blockchain.get_full_peak()
if peak_block is not None:
peak = self.blockchain.sub_blocks[peak_block.header_hash]
difficulty = self.blockchain.get_next_difficulty(peak.header_hash, False)
ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
self.constants,
self.blockchain.sub_blocks,
self.blockchain.sub_height_to_hash,
peak.required_iters,
peak_block,
True,
)
recent_rc = self.blockchain.get_recent_reward_challenges()
curr = peak
while not curr.is_challenge_sub_block(self.constants) and not curr.first_in_sub_slot:
curr = self.blockchain.sub_blocks[curr.prev_hash]
if curr.is_challenge_sub_block(self.constants):
last_csb_or_eos = curr.total_iters
else:
last_csb_or_eos = curr.ip_sub_slot_total_iters(self.constants)
timelord_new_peak: timelord_protocol.NewPeak = timelord_protocol.NewPeak(
peak_block.reward_chain_sub_block,
difficulty,
peak.deficit,
peak.sub_slot_iters,
ses,
recent_rc,
last_csb_or_eos,
)
msg = Message("new_peak", timelord_new_peak)
await self.server.send_to_all([msg], NodeType.TIMELORD)
async def on_connect(self, connection: ws.WSChiaConnection):
"""
Whenever we connect to another node / wallet, send them our current heads. Also send heads to farmers
and challenges to timelords.
"""
self._state_changed("add_connection")
if self.full_node_peers is not None:
asyncio.create_task(self.full_node_peers.on_connect(connection))
if connection.connection_type is NodeType.FULL_NODE:
# Send filter to node and request mempool items that are not in it
my_filter = self.mempool_manager.get_filter()
mempool_request = full_node_protocol.RequestMempoolTransactions(my_filter)
msg = Message("request_mempool_transactions", mempool_request)
await connection.send_message(msg)
peak_full: Optional[FullBlock] = await self.blockchain.get_full_peak()
if peak_full is not None:
peak: SubBlockRecord = self.blockchain.sub_blocks[peak_full.header_hash]
if connection.connection_type is NodeType.FULL_NODE:
request_node = full_node_protocol.NewPeak(
peak.header_hash,
peak.sub_block_height,
peak.weight,
peak.sub_block_height,
peak_full.reward_chain_sub_block.get_unfinished().get_hash(),
)
await connection.send_message(Message("new_peak", request_node))
elif connection.connection_type is NodeType.WALLET:
# If connected to a wallet, send the Peak
request_wallet = wallet_protocol.NewPeak(
peak.header_hash,
peak.sub_block_height,
peak.weight,
peak.sub_block_height,
)
await connection.send_message(Message("new_peak", request_wallet))
elif connection.connection_type is NodeType.TIMELORD:
await self.send_peak_to_timelords()
def on_disconnect(self, connection: ws.WSChiaConnection):
self.log.info(f"peer disconnected {connection.get_peer_info()}")
self._state_changed("close_connection")
def _num_needed_peers(self) -> int:
assert self.server is not None
assert self.server.all_connections is not None
diff = self.config["target_peer_count"] - len(self.server.all_connections)
return diff if diff >= 0 else 0
def _close(self):
self._shut_down = True
self.blockchain.shut_down()
if self.full_node_peers is not None:
asyncio.create_task(self.full_node_peers.close())
async def _await_closed(self):
try:
if self._sync_task is not None:
self._sync_task.cancel()
except asyncio.TimeoutError:
pass
await self.connection.close()
async def _sync(self):
"""
Performs a full sync of the blockchain.
- Check which are the heaviest peaks
- Request headers for the heaviest
- Find the fork point to see where to start downloading headers
- Verify the weight of the tip, using the headers
- Download all blocks
- Disconnect peers that provide invalid blocks or don't have the blocks
"""
try:
self.log.info("Starting to perform sync with peers.")
self.log.info("Waiting to receive peaks from peers.")
self.sync_peers_handler = None
self.sync_store.waiting_for_peaks = True
await asyncio.sleep(2)
# Based on responses from peers about the current heads, see which head is the heaviest
# (similar to longest chain rule).
self.sync_store.waiting_for_peaks = False
potential_peaks: List[Tuple[bytes32, Tuple[uint32, uint128]]] = self.sync_store.get_potential_peaks_tuples()
self.log.info(f"Have collected {len(potential_peaks)} potential peaks")
if self._shut_down:
return
heaviest_peak_hash: Optional[bytes32] = None
heaviest_peak_weight: Optional[uint128] = None
heaviest_peak_height: Optional[uint32] = None
for header_hash, height_weight_tuple in potential_peaks:
height = height_weight_tuple[0]
weight = height_weight_tuple[1]
if heaviest_peak_hash is None or weight > heaviest_peak_weight:
heaviest_peak_hash = header_hash
heaviest_peak_weight = weight
heaviest_peak_height = height
if heaviest_peak_hash is None:
self.log.info("Not performing sync, no peaks collected")
return
if self.blockchain.get_peak() is not None and heaviest_peak_weight <= self.blockchain.get_peak().weight:
self.log.info("Not performing sync, already caught up.")
return
fork_point_height = self.sync_store.get_potential_fork_point(heaviest_peak_hash)
await self.sync_from_fork_point(fork_point_height, heaviest_peak_height, heaviest_peak_hash)
except asyncio.CancelledError:
self.log.warning("Syncing failed, CancelledError")
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Error with syncing: {type(e)}{tb}")
finally:
if self._shut_down:
return
await self._finish_sync()
def get_peers_with_peak(self, peak_hash) -> List[ws.WSChiaConnection]:
filtered_peers: List[ws.WSChiaConnection] = []
peers_with_peak = self.sync_store.get_peak_peers(peak_hash)
for peer_hash in peers_with_peak:
if peer_hash in self.server.all_connections:
peer = self.server.all_connections[peer_hash]
filtered_peers.append(peer)
return filtered_peers
async def sync_from_fork_point(self, fork_point_height: int, target_peak_sb_height: uint32, peak_hash):
self.log.info(f"start syncing from fork point at {fork_point_height}")
self.sync_store.set_peak_target(peak_hash, target_peak_sb_height)
peers_with_peak = self.get_peers_with_peak(peak_hash)
if len(peers_with_peak) == 0:
self.log.warning(f"Not syncing, no peers with header_hash {peak_hash} ")
return
batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
for i in range(fork_point_height, target_peak_sb_height, batch_size):
start_height = i
end_height = min(target_peak_sb_height, start_height + batch_size)
request = RequestSubBlocks(uint32(start_height), uint32(end_height), True)
self.log.info(f"Requesting sub blocks: {start_height} to {end_height}")
peers_to_remove = []
| |
def getConsolePort(self):
"""For use only by image.py and XendCheckpoint.py"""
return self.console_port
def getFeatures(self):
"""For use only by image.py."""
return self.info['features']
def getVCpuCount(self):
return self.info['VCPUs_max']
def getVCpuAvail(self):
return self.info['vcpu_avail']
def setVCpuCount(self, vcpus):
def vcpus_valid(n):
if vcpus <= 0:
raise XendError('Zero or less VCPUs is invalid')
if self.domid >= 0 and vcpus > self.info['VCPUs_max']:
raise XendError('Cannot set vcpus greater than max vcpus on running domain')
vcpus_valid(vcpus)
self.info['vcpu_avail'] = (1 << vcpus) - 1
if self.domid >= 0:
self.storeVm('vcpu_avail', self.info['vcpu_avail'])
self._writeDom(self._vcpuDomDetails())
self.info['VCPUs_live'] = vcpus
else:
if self.info['VCPUs_max'] > vcpus:
# decreasing
del self.info['cpus'][vcpus:]
elif self.info['VCPUs_max'] < vcpus:
# increasing
for c in range(self.info['VCPUs_max'], vcpus):
self.info['cpus'].append(list())
self.info['VCPUs_max'] = vcpus
xen.xend.XendDomain.instance().managed_config_save(self)
log.info("Set VCPU count on domain %s to %d", self.info['name_label'],
vcpus)
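    # Note on the vcpu_avail bitmask set above: (1 << vcpus) - 1 yields a mask
    # with the lowest `vcpus` bits set, marking those VCPUs as available.
    # For example, vcpus = 4 gives (1 << 4) - 1 = 15 = 0b1111.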
def getMemoryTarget(self):
"""Get this domain's target memory size, in KB."""
return self.info['memory_dynamic_max'] / 1024
def getMemoryMaximum(self):
"""Get this domain's maximum memory size, in KB."""
# remember, info now stores memory in bytes
return self.info['memory_static_max'] / 1024
def getResume(self):
return str(self._resume)
def setResume(self, isresume):
self._resume = isresume
def getCpus(self):
return self.info['cpus']
def setCpus(self, cpumap):
self.info['cpus'] = cpumap
def getCap(self):
return self.info['vcpus_params']['cap']
def setCap(self, cpu_cap):
self.info['vcpus_params']['cap'] = cpu_cap
def getWeight(self):
return self.info['vcpus_params']['weight']
def setWeight(self, cpu_weight):
self.info['vcpus_params']['weight'] = cpu_weight
def getRestartCount(self):
return self._readVm('xend/restart_count')
def refreshShutdown(self, xeninfo = None):
""" Checks the domain for whether a shutdown is required.
Called from XendDomainInfo and also image.py for HVM images.
"""
# If set at the end of this method, a restart is required, with the
# given reason. This restart has to be done out of the scope of
# refresh_shutdown_lock.
restart_reason = None
self.refresh_shutdown_lock.acquire()
try:
if xeninfo is None:
xeninfo = dom_get(self.domid)
if xeninfo is None:
# The domain no longer exists. This will occur if we have
# scheduled a timer to check for shutdown timeouts and the
# shutdown succeeded. It will also occur if someone
# destroys a domain beneath us. We clean up the domain,
# just in case, but we can't clean up the VM, because that
# VM may have migrated to a different domain on this
# machine.
self.cleanupDomain()
self._stateSet(DOM_STATE_HALTED)
return
if xeninfo['dying']:
# Dying means that a domain has been destroyed, but has not
# yet been cleaned up by Xen. This state could persist
# indefinitely if, for example, another domain has some of its
# pages mapped. We might like to diagnose this problem in the
# future, but for now all we do is make sure that it's not us
# holding the pages, by calling cleanupDomain. We can't
# clean up the VM, as above.
self.cleanupDomain()
self._stateSet(DOM_STATE_SHUTDOWN)
return
elif xeninfo['crashed']:
if self.readDom('xend/shutdown_completed'):
# We've seen this shutdown already, but we are preserving
# the domain for debugging. Leave it alone.
return
log.warn('Domain has crashed: name=%s id=%d.',
self.info['name_label'], self.domid)
self._writeVm(LAST_SHUTDOWN_REASON, 'crash')
restart_reason = 'crash'
self._stateSet(DOM_STATE_HALTED)
elif xeninfo['shutdown']:
self._stateSet(DOM_STATE_SHUTDOWN)
if self.readDom('xend/shutdown_completed'):
# We've seen this shutdown already, but we are preserving
# the domain for debugging. Leave it alone.
return
else:
reason = shutdown_reason(xeninfo['shutdown_reason'])
log.info('Domain has shutdown: name=%s id=%d reason=%s.',
self.info['name_label'], self.domid, reason)
self._writeVm(LAST_SHUTDOWN_REASON, reason)
self._clearRestart()
if reason == 'suspend':
self._stateSet(DOM_STATE_SUSPENDED)
# Don't destroy the domain. XendCheckpoint will do
# this once it has finished. However, stop watching
# the VM path now, otherwise we will end up with one
# watch for the old domain, and one for the new.
self._unwatchVm()
elif reason in ('poweroff', 'reboot'):
restart_reason = reason
else:
self.destroy()
elif self.dompath is None:
# We have yet to manage to call introduceDomain on this
# domain. This can happen if a restore is in progress, or has
# failed. Ignore this domain.
pass
else:
# Domain is alive. If we are shutting it down, log a message
# if it seems unresponsive.
if xeninfo['paused']:
self._stateSet(DOM_STATE_PAUSED)
else:
self._stateSet(DOM_STATE_RUNNING)
if self.shutdownStartTime:
timeout = (SHUTDOWN_TIMEOUT - time.time() +
self.shutdownStartTime)
if (timeout < 0 and not self.readDom('xend/unresponsive')):
log.info(
"Domain shutdown timeout expired: name=%s id=%s",
self.info['name_label'], self.domid)
self.storeDom('xend/unresponsive', 'True')
finally:
self.refresh_shutdown_lock.release()
if restart_reason and not self.restart_in_progress:
self.restart_in_progress = True
threading.Thread(target = self._maybeRestart,
args = (restart_reason,)).start()
#
# Restart functions - handling whether we come back up on shutdown.
#
def _clearRestart(self):
self._removeDom("xend/shutdown_start_time")
def _maybeDumpCore(self, reason):
if reason == 'crash':
if xoptions.get_enable_dump() or self.get_on_crash() \
in ['coredump_and_destroy', 'coredump_and_restart']:
try:
self.dumpCore()
except XendError:
# This error has been logged -- there's nothing more
# we can do in this context.
pass
def _maybeRestart(self, reason):
# Before taking configured action, dump core if configured to do so.
#
self._maybeDumpCore(reason)
# Dispatch to the correct method based upon the configured on_{reason}
# behaviour.
actions = {"destroy" : self.destroy,
"restart" : self._restart,
"preserve" : self._preserve,
"rename-restart" : self._renameRestart,
"coredump-destroy" : self.destroy,
"coredump-restart" : self._restart}
action_conf = {
'poweroff': 'actions_after_shutdown',
'reboot': 'actions_after_reboot',
'crash': 'actions_after_crash',
}
action_target = self.info.get(action_conf.get(reason))
func = actions.get(action_target, None)
if func and callable(func):
func()
else:
self.destroy() # default to destroy
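    # Worked example of the dispatch above: a guest reboot arrives with
    # reason == 'reboot', which maps to the 'actions_after_reboot' config key;
    # if that key is set to 'restart' the handler calls self._restart(), while
    # an unrecognised or missing value falls through to self.destroy().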
def _renameRestart(self):
self._restart(True)
def _restart(self, rename = False):
"""Restart the domain after it has exited.
@param rename True if the old domain is to be renamed and preserved,
False if it is to be destroyed.
"""
from xen.xend import XendDomain
if self._readVm(RESTART_IN_PROGRESS):
log.error('Xend failed during restart of domain %s. '
'Refusing to restart to avoid loops.',
str(self.domid))
self.destroy()
return
old_domid = self.domid
self._writeVm(RESTART_IN_PROGRESS, 'True')
elapse = time.time() - self.info['start_time']
if elapse < MINIMUM_RESTART_TIME:
log.error('VM %s restarting too fast (Elapsed time: %f seconds). '
'Refusing to restart to avoid loops.',
self.info['name_label'], elapse)
self.destroy()
return
prev_vm_xend = self._listRecursiveVm('xend')
new_dom_info = self.info
try:
if rename:
new_dom_info = self._preserveForRestart()
else:
self._unwatchVm()
self.destroy()
# new_dom's VM will be the same as this domain's VM, except where
# the rename flag has instructed us to call preserveForRestart.
# In that case, it is important that we remove the
# RESTART_IN_PROGRESS node from the new domain, not the old one,
# once the new one is available.
new_dom = None
try:
new_dom = XendDomain.instance().domain_create_from_dict(
new_dom_info)
for x in prev_vm_xend[0][1]:
new_dom._writeVm('xend/%s' % x[0], x[1])
new_dom.waitForDevices()
new_dom.unpause()
rst_cnt = new_dom._readVm('xend/restart_count')
rst_cnt = int(rst_cnt) + 1
new_dom._writeVm('xend/restart_count', str(rst_cnt))
new_dom._removeVm(RESTART_IN_PROGRESS)
except:
if new_dom:
new_dom._removeVm(RESTART_IN_PROGRESS)
new_dom.destroy()
else:
self._removeVm(RESTART_IN_PROGRESS)
raise
except:
log.exception('Failed to restart domain %s.', str(old_domid))
def _preserveForRestart(self):
"""Preserve a domain that has been shut down, by giving it a new UUID,
cloning the VM details, and giving it a new name. This allows us to
keep this domain for debugging, but restart a new one in its place
preserving the restart semantics (name and UUID preserved).
"""
new_uuid = uuid.createString()
new_name = 'Domain-%s' % new_uuid
log.info("Renaming dead domain %s (%d, %s) to %s (%s).",
self.info['name_label'], self.domid, self.info['uuid'],
new_name, new_uuid)
self._unwatchVm()
self._releaseDevices()
# Remove existing vm node in xenstore
self._removeVm()
new_dom_info = self.info.copy()
new_dom_info['name_label'] = self.info['name_label']
new_dom_info['uuid'] = self.info['uuid']
self.info['name_label'] = new_name
self.info['uuid'] = new_uuid
self.vmpath = XS_VMROOT + new_uuid
# Write out new vm node to xenstore
self._storeVmDetails()
self._preserve()
return new_dom_info
def _preserve(self):
log.info("Preserving dead domain %s (%d).", self.info['name_label'],
self.domid)
self._unwatchVm()
self.storeDom('xend/shutdown_completed', 'True')
self._stateSet(DOM_STATE_HALTED)
#
# Debugging ..
#
def dumpCore(self, corefile = None):
"""Create a core dump for this domain.
@raise: XendError if core dumping failed.
"""
if not corefile:
# To prohibit directory traversal
based_name = os.path.basename(self.info['name_label'])
coredir = "/var/lib/xen/dump/%s" % (based_name)
if not os.path.exists(coredir):
try:
mkdir.parents(coredir, stat.S_IRWXU)
except Exception, ex:
log.error("Cannot create directory: %s" % str(ex))
if not os.path.isdir(coredir):
# Use former directory to dump core
coredir = '/var/lib/xen/dump'
this_time = time.strftime("%Y-%m%d-%H%M.%S", time.localtime())
corefile = "%s/%s-%s.%s.core" % (coredir, this_time,
self.info['name_label'], self.domid)
if os.path.isdir(corefile):
raise XendError("Cannot dump core in a directory: %s" %
corefile)
try:
try:
self._writeVm(DUMPCORE_IN_PROGRESS, 'True')
xc.domain_dumpcore(self.domid, corefile)
except RuntimeError, ex:
corefile_incomp = corefile+'-incomplete'
try:
os.rename(corefile, corefile_incomp)
except:
pass
log.error("core dump failed: id = %s name = %s: %s",
self.domid, self.info['name_label'], str(ex))
raise XendError("Failed to dump core: %s" % str(ex))
finally:
self._removeVm(DUMPCORE_IN_PROGRESS)
#
# Device creation/deletion functions
#
def _createDevice(self, deviceClass, devConfig):
return self.getDeviceController(deviceClass).createDevice(devConfig)
def _waitForDevice(self, deviceClass, devid):
return self.getDeviceController(deviceClass).waitForDevice(devid)
def _waitForDeviceUUID(self, dev_uuid):
| |
# File: src/sasctl/tasks.py
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Commonly used tasks in the analytics life cycle."""
import json
import logging
import math
import pickle
import os
import re
import sys
import warnings
from six.moves.urllib.error import HTTPError
from . import utils
from .core import RestObj, current_session, get, get_link, request_link
from .exceptions import AuthorizationError
from .services import model_management as mm
from .services import model_publish as mp
from .services import model_repository as mr
from .utils.pymas import PyMAS, from_pickle
from .utils.misc import installed_packages
logger = logging.getLogger(__name__)
# As of Viya 3.4 model registration fails if character fields are longer
# than 1024 characters
_DESC_MAXLEN = 1024
# As of Viya 3.4 model registration fails if user-defined properties are
# longer than 512 characters.
_PROP_MAXLEN = 512
def _sklearn_to_dict(model):
# Convert Scikit-learn values to built-in Model Manager values
mappings = {'LogisticRegression': 'Logistic regression',
'LinearRegression': 'Linear regression',
'SVC': 'Support vector machine',
'GradientBoostingClassifier': 'Gradient boosting',
'XGBClassifier': 'Gradient boosting',
'XGBRegressor': 'Gradient boosting',
'RandomForestClassifier': 'Forest',
'DecisionTreeClassifier': 'Decision tree',
'DecisionTreeRegressor': 'Decision tree',
'classifier': 'classification',
'regressor': 'prediction'}
if hasattr(model, '_final_estimator'):
estimator = type(model._final_estimator)
else:
estimator = type(model)
# Can tell if multi-class .multi_class
result = dict(
description=str(model)[:_DESC_MAXLEN],
algorithm=mappings.get(estimator.__name__, estimator.__name__),
scoreCodeType='ds2MultiType',
trainCodeType='Python',
function=mappings.get(model._estimator_type, model._estimator_type),
tool='Python %s.%s'
% (sys.version_info.major, sys.version_info.minor),
properties=[{'name': str(k)[:_PROP_MAXLEN],
'value': str(v)[:_PROP_MAXLEN]}
for k, v in model.get_params().items()]
)
return result
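# Hedged example of the metadata produced above for a scikit-learn estimator;
# exact values depend on the model's get_params() output and the running
# interpreter:
#
#   from sklearn.linear_model import LogisticRegression
#   info = _sklearn_to_dict(LogisticRegression())
#   # info['algorithm'] == 'Logistic regression'
#   # info['function']  == 'classification'
#   # info['tool']      == 'Python 3.8'  (major.minor of the current interpreter)
#   # info['properties'] holds one {'name': ..., 'value': ...} entry per parameter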
def register_model(model, name, project, repository=None, input=None,
version=None, files=None, force=False):
"""Register a model in the model repository.
Parameters
----------
model : swat.CASTable or sklearn.BaseEstimator
The model to register. If an instance of ``swat.CASTable`` the table
is assumed to hold an ASTORE, which will be downloaded and used to
construct the model to register. If a scikit-learn estimator, the
model will be pickled and uploaded to the registry and score code will
be generated for publishing the model to MAS.
name : str
Designated name for the model in the repository.
project : str or dict
The name or id of the project, or a dictionary representation of
the project.
repository : str or dict, optional
The name or id of the repository, or a dictionary representation of
the repository. If omitted, the default repository will be used.
input : DataFrame, type, list of type, or dict of str: type, optional
The expected type for each input value of the target function.
Can be omitted if target function includes type hints. If a DataFrame
is provided, the columns will be inspected to determine type information.
If a single type is provided, all columns will be assumed to be that type,
otherwise a list of column types or a dictionary of column_name: type
may be provided.
version : {'new', 'latest', int}, optional
Version number of the project in which the model should be created.
Defaults to 'new'.
files : list
A list of dictionaries of the form {'name': filename, 'file': filecontent}.
An optional 'role' key is supported for designating a file as score code,
astore, etc.
force : bool, optional
Create dependencies such as projects and repositories if they do not
already exist.
Returns
-------
model : RestObj
The newly registered model as an instance of ``RestObj``
Notes
-----
If the specified model is a CAS table the model data and metadata will be
written to a temporary zip file and then imported using
model_repository.import_model_from_zip.
If the specified model is from the Scikit-Learn package, the model will be
created using model_repository.create_model and any additional files will
be uploaded as content.
.. versionchanged:: v1.3
Create requirements.txt with installed packages.
"""
# TODO: Create new version if model already exists
# If version not specified, default to creating a new version
version = version or 'new'
files = files or []
# Find the project if it already exists
p = mr.get_project(project) if project is not None else None
# Do we need to create the project first?
create_project = True if p is None and force else False
if p is None and not create_project:
raise ValueError("Project '{}' not found".format(project))
# Use default repository if not specified
try:
if repository is None:
repo_obj = mr.default_repository()
else:
repo_obj = mr.get_repository(repository)
except HTTPError as e:
if e.code == 403:
raise AuthorizationError('Unable to register model. User account '
'does not have read permissions for the '
'/modelRepository/repositories/ URL. '
'Please contact your SAS Viya '
'administrator.')
else:
raise e
# Unable to find or create the repo.
if repo_obj is None and repository is None:
raise ValueError("Unable to find a default repository")
elif repo_obj is None:
raise ValueError("Unable to find repository '{}'".format(repository))
# If model is a CASTable then assume it holds an ASTORE model.
# Import these via a ZIP file.
if 'swat.cas.table.CASTable' in str(type(model)):
zipfile = utils.create_package(model, input=input)
if create_project:
outvar=[]
invar=[]
import zipfile as zp
import copy
zipfilecopy = copy.deepcopy(zipfile)
tmpzip=zp.ZipFile(zipfilecopy)
if "outputVar.json" in tmpzip.namelist():
outvar=json.loads(tmpzip.read("outputVar.json").decode('utf-8')) #added decode for 3.5 and older
for tmp in outvar:
tmp.update({'role':'output'})
if "inputVar.json" in tmpzip.namelist():
invar=json.loads(tmpzip.read("inputVar.json").decode('utf-8')) #added decode for 3.5 and older
for tmp in invar:
if tmp['role'] != 'input':
tmp['role']='input'
vars=invar + outvar
project = mr.create_project(project, repo_obj, variables=vars)
model = mr.import_model_from_zip(name, project, zipfile,
version=version)
return model
# If the model is an scikit-learn model, generate the model dictionary
# from it and pickle the model for storage
elif all(hasattr(model, attr) for attr
in ['_estimator_type', 'get_params']):
# Pickle the model so we can store it
model_pkl = pickle.dumps(model)
files.append({'name': 'model.pkl',
'file': model_pkl,
'role': 'Python Pickle'})
# Extract model properties
model = _sklearn_to_dict(model)
model['name'] = name
# Get package versions in environment
packages = installed_packages()
if packages is not None:
model.setdefault('properties', [])
# Define a custom property to capture each package version
# NOTE: some packages may not conform to the 'name==version' format
# expected here (e.g those installed with pip install -e). Such
# packages also generally contain characters that are not allowed
# in custom properties, so they are excluded here.
for p in packages:
if '==' in p:
n, v = p.split('==')
model['properties'].append({
'name': 'env_%s' % n,
'value': v
})
# Generate and upload a requirements.txt file
files.append({'name': 'requirements.txt',
'file': '\n'.join(packages)})
# Generate PyMAS wrapper
try:
mas_module = from_pickle(model_pkl, 'predict',
input_types=input, array_input=True)
assert isinstance(mas_module, PyMAS)
# Include score code files from ESP and MAS
files.append({'name': 'dmcas_packagescorecode.sas',
'file': mas_module.score_code(),
'role': 'Score Code'})
files.append({'name': 'dmcas_epscorecode.sas',
'file': mas_module.score_code(dest='CAS'),
'role': 'score'})
files.append({'name': 'python_wrapper.py',
'file': mas_module.score_code(dest='Python')})
model['inputVariables'] = [var.as_model_metadata()
for var in mas_module.variables
if not var.out]
model['outputVariables'] = \
[var.as_model_metadata() for var in mas_module.variables
if var.out and var.name not in ('rc', 'msg')]
except ValueError:
# PyMAS creation failed, most likely because input data wasn't
# provided
logger.exception('Unable to inspect model %s', model)
warnings.warn('Unable to determine input/output variables. '
' Model variables will not be specified and some '
'model functionality may not be available.')
else:
# Otherwise, the model better be a dictionary of metadata
assert isinstance(model, dict), "Expected an instance of %r. " \
" Received %r instead." % (dict(), model)
if create_project:
vars = model.get('inputVariables', [])[:]
vars += model.get('outputVariables', [])
function = model.get('function', '').lower()
algorithm = model.get('algorithm', '').lower()
if function == 'classification' and 'logistic' in algorithm:
target_level = 'Binary'
elif function == 'prediction' and 'regression' in algorithm:
target_level = 'Interval'
else:
target_level = None
if len(model.get('outputVariables', [])) == 1:
var = model['outputVariables'][0]
prediction_variable = var['name']
else:
prediction_variable = None
# As of Viya 3.4 the 'predictionVariable' parameter is not set during
# project creation. Update the project if necessary.
if function == 'prediction': # Predictions require predictionVariable
project = mr.create_project(project, repo_obj,
variables=vars,
function=model.get('function'),
targetLevel=target_level,
predictionVariable=prediction_variable)
if project.get('predictionVariable') != prediction_variable:
project['predictionVariable'] = prediction_variable
mr.update_project(project)
else: #Classifications require eventProbabilityVariable
project = mr.create_project(project, repo_obj,
variables=vars,
function=model.get('function'),
targetLevel=target_level,
eventProbabilityVariable=prediction_variable)
if project.get('eventProbabilityVariable') != prediction_variable:
project['eventProbabilityVariable'] = prediction_variable
mr.update_project(project)
# If replacing an existing version, make sure the model version exists
if str(version).lower() != 'new':
#Update an existing model with new files
model_obj = mr.get_model(name)
if model_obj is None:
raise ValueError("Unable to update version '%s' of model '%s%. "
"Model not found." % (version, name))
model = mr.create_model_version(name)
mr.delete_model_contents(model)
else:
# Assume new model